1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88 static opaque_t ql_get_device(opaque_t, fc_portid_t);
89 
90 /*
91  * FCA Driver Support Function Prototypes.
92  */
93 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95     ql_srb_t *);
96 static void ql_task_daemon(void *);
97 static void ql_task_thread(ql_adapter_state_t *);
98 static void ql_unsol_callback(ql_srb_t *);
99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100     fc_unsol_buf_t *);
101 static void ql_timer(void *);
102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104     uint32_t *, uint32_t *);
105 static void ql_halt(ql_adapter_state_t *, int);
106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_login_port(ql_adapter_state_t *, port_id_t);
122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130     ql_srb_t *);
131 static int ql_kstat_update(kstat_t *, int);
132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135 static void ql_rst_aen(ql_adapter_state_t *);
136 static void ql_restart_queues(ql_adapter_state_t *);
137 static void ql_abort_queues(ql_adapter_state_t *);
138 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
139 static void ql_idle_check(ql_adapter_state_t *);
140 static int ql_loop_resync(ql_adapter_state_t *);
141 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143 static int ql_save_config_regs(dev_info_t *);
144 static int ql_restore_config_regs(dev_info_t *);
145 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
146 static int ql_handle_rscn_update(ql_adapter_state_t *);
147 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
148 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
149 static int ql_dump_firmware(ql_adapter_state_t *);
150 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
154 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
155 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
156 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
157     void *);
158 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
159     uint8_t);
160 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
161 static int ql_suspend_adapter(ql_adapter_state_t *);
162 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
163 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
164 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
165 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
166 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
167 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
168 static int ql_setup_interrupts(ql_adapter_state_t *);
169 static int ql_setup_msi(ql_adapter_state_t *);
170 static int ql_setup_msix(ql_adapter_state_t *);
171 static int ql_setup_fixed(ql_adapter_state_t *);
172 static void ql_release_intr(ql_adapter_state_t *);
173 static void ql_disable_intr(ql_adapter_state_t *);
174 static int ql_legacy_intr(ql_adapter_state_t *);
175 static int ql_init_mutex(ql_adapter_state_t *);
176 static void ql_destroy_mutex(ql_adapter_state_t *);
177 static void ql_iidma(ql_adapter_state_t *);
178 
179 static int ql_n_port_plogi(ql_adapter_state_t *);
180 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
181     els_descriptor_t *);
182 static void ql_isp_els_request_ctor(els_descriptor_t *,
183     els_passthru_entry_t *);
184 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
185 static int ql_wait_for_td_stop(ql_adapter_state_t *ha);
186 
187 /*
188  * Global data
189  */
190 static uint8_t	ql_enable_pm = 1;
191 static int	ql_flash_sbus_fpga = 0;
192 uint32_t	ql_os_release_level;
193 uint32_t	ql_disable_aif = 0;
194 uint32_t	ql_disable_msi = 0;
195 uint32_t	ql_disable_msix = 0;
196 
197 /* Timer routine variables. */
198 static timeout_id_t	ql_timer_timeout_id = NULL;
199 static clock_t		ql_timer_ticks;
200 
201 /* Soft state head pointer. */
202 void *ql_state = NULL;
203 
204 /* Head adapter link. */
205 ql_head_t ql_hba = {
206 	NULL,
207 	NULL
208 };
209 
210 /* Global hba index */
211 uint32_t ql_gfru_hba_index = 1;
212 
213 /*
214  * Some IP defines and globals
215  */
216 uint32_t	ql_ip_buffer_count = 128;
217 uint32_t	ql_ip_low_water = 10;
218 uint8_t		ql_ip_fast_post_count = 5;
219 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
220 
/*
 * Device AL_PA to Device Head Queue index array.
 * 256 entries, indexed directly by an 8-bit AL_PA value.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
250 
/*
 * Device loop_id to ALPA array.
 * 126 entries: inverse of ql_alpa_to_index for the valid loop IDs.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
267 
/*
 * 2200 register offsets.
 * Field order is defined by reg_off_t (ql_api.h); the unlabeled leading
 * row presumably covers flash/ctrl-status/interrupt registers -- see the
 * labeled reg_off_2400_2500 initializer below for the field sequence.
 * 0xff marks a register the chip does not implement.
 */
static reg_off_t reg_off_2200 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
	0x00, 0x00, /* intr info lo, hi */
	24, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x00,
	/* 2200 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
287 
/*
 * 2300 register offsets.
 * Same reg_off_t layout as reg_off_2200 above; 32 mailboxes and a real
 * host-to-host semaphore register (0x1c). 0xff marks unimplemented regs.
 */
static reg_off_t reg_off_2300 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
	0x18, 0x1A, /* intr info lo, hi */
	32, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x1c,
	/* 2300 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
306 
/*
 * 2400/2500 register offsets (non-static: referenced outside this file).
 * 0xff marks a register the chip does not implement.
 */
reg_off_t reg_off_2400_2500 = {
	0x00, 0x04,		/* flash_address, flash_data */
	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
	/* 2400 does not have semaphore, nvram */
	0x14, 0x18,
	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
	0x44, 0x46,		/* intr info lo, hi */
	32,			/* Number of mailboxes */
	/* Mailbox register offsets */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
	0xff, 0xff, 0xff, 0xff,
	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
	0xff,			/* host to host sema */
	0x2c, 0x30,		/* pri_req_in, pri_req_out */
	0x3c, 0x40,		/* atio_req_in, atio_req_out */
	0x54			/* io_base_addr */
};
329 
/*
 * Mutexes protecting variables shared by all instances of the driver.
 * Initialized in _init(), destroyed in _fini() (and on _init() failure).
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;	/* "el" presumably extended logging -- verify */

/* DMA access attribute structure: little-endian, strict ordering. */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
341 
/*
 * I/O DMA attributes structures.
 * Templates copied per I/O class in _init(); _init() also caps
 * dma_attr_count_max to 24 bits when ql_os_release_level == 6.
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/* Identical to the 64-bit template except for the high address bound. */
static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
372 
/*
 * Per-I/O-class DMA attributes. Populated in _init() by copying the
 * 32/64-bit templates above, then overriding dma_attr_sgllen per class.
 */
static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
388 
/*
 * Static declarations of cb_ops entry point functions...
 * Only open/close/ioctl are implemented; all other entry points are
 * nodev/nochpoll stubs.
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};
410 
/*
 * Static declarations of dev_ops entry point functions...
 * Note: _init() passes this to fc_fca_init() before mod_install() so
 * the FC transport can adjust it.
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};
426 
/* ELS command code to text converter */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string (exported; value comes from QL_VERSION). */
char qlc_driver_version[] = QL_VERSION;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL					/* NULL-terminated list */
};
449 
450 /* ************************************************************************ */
451 /*				Loadable Module Routines.		    */
452 /* ************************************************************************ */
453 
454 /*
455  * _init
456  *	Initializes a loadable module. It is called before any other
457  *	routine in a loadable module.
458  *
459  * Returns:
460  *	0 = success
461  *
462  * Context:
463  *	Kernel context.
464  */
465 int
466 _init(void)
467 {
468 	uint16_t	w16;
469 	int		rval = 0;
470 
471 	/* Get OS major release level. */
472 	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
473 		if (utsname.release[w16] == '.') {
474 			w16++;
475 			break;
476 		}
477 	}
478 	if (w16 < sizeof (utsname.release)) {
479 		(void) ql_bstr_to_dec(&utsname.release[w16],
480 		    &ql_os_release_level, 0);
481 	} else {
482 		ql_os_release_level = 0;
483 	}
484 	if (ql_os_release_level < 6) {
485 		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
486 		    QL_NAME, ql_os_release_level);
487 		rval = EINVAL;
488 	}
489 	if (ql_os_release_level == 6) {
490 		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
491 		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
492 	}
493 
494 	if (rval == 0) {
495 		rval = ddi_soft_state_init(&ql_state,
496 		    sizeof (ql_adapter_state_t), 0);
497 	}
498 	if (rval == 0) {
499 		/* allow the FC Transport to tweak the dev_ops */
500 		fc_fca_init(&ql_devops);
501 
502 		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
503 		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
504 		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
505 		rval = mod_install(&modlinkage);
506 		if (rval != 0) {
507 			mutex_destroy(&ql_global_hw_mutex);
508 			mutex_destroy(&ql_global_mutex);
509 			mutex_destroy(&ql_global_el_mutex);
510 			ddi_soft_state_fini(&ql_state);
511 		} else {
512 			/*EMPTY*/
513 			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
514 			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
515 			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
516 			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
517 			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
518 			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
519 			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
520 			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
521 			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
522 			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
523 			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
524 			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
525 			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
526 			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
527 			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
528 			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
529 			    QL_FCSM_CMD_SGLLEN;
530 			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
531 			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
532 			    QL_FCSM_RSP_SGLLEN;
533 			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
534 			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
535 			    QL_FCIP_CMD_SGLLEN;
536 			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
537 			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
538 			    QL_FCIP_RSP_SGLLEN;
539 			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
540 			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
541 			    QL_FCP_CMD_SGLLEN;
542 			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
543 			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
544 			    QL_FCP_RSP_SGLLEN;
545 		}
546 	}
547 
548 	if (rval != 0) {
549 		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
550 		    QL_NAME);
551 	}
552 
553 	return (rval);
554 }
555 
556 /*
557  * _fini
558  *	Prepares a module for unloading. It is called when the system
559  *	wants to unload a module. If the module determines that it can
560  *	be unloaded, then _fini() returns the value returned by
561  *	mod_remove(). Upon successful return from _fini() no other
562  *	routine in the module will be called before _init() is called.
563  *
564  * Returns:
565  *	0 = success
566  *
567  * Context:
568  *	Kernel context.
569  */
570 int
571 _fini(void)
572 {
573 	int	rval;
574 
575 	rval = mod_remove(&modlinkage);
576 	if (rval == 0) {
577 		mutex_destroy(&ql_global_hw_mutex);
578 		mutex_destroy(&ql_global_mutex);
579 		mutex_destroy(&ql_global_el_mutex);
580 		ddi_soft_state_fini(&ql_state);
581 	}
582 
583 	return (rval);
584 }
585 
586 /*
587  * _info
588  *	Returns information about loadable module.
589  *
590  * Input:
591  *	modinfo = pointer to module information structure.
592  *
593  * Returns:
594  *	Value returned by mod_info().
595  *
596  * Context:
597  *	Kernel context.
598  */
599 int
600 _info(struct modinfo *modinfop)
601 {
602 	return (mod_info(&modlinkage, modinfop));
603 }
604 
605 /* ************************************************************************ */
606 /*			dev_ops functions				    */
607 /* ************************************************************************ */
608 
609 /*
610  * ql_getinfo
611  *	Returns the pointer associated with arg when cmd is
612  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
613  *	instance number associated with arg when cmd is set
614  *	to DDI_INFO_DEV2INSTANCE.
615  *
616  * Input:
617  *	dip = Do not use.
618  *	cmd = command argument.
619  *	arg = command specific argument.
620  *	resultp = pointer to where request information is stored.
621  *
622  * Returns:
623  *	DDI_SUCCESS or DDI_FAILURE.
624  *
625  * Context:
626  *	Kernel context.
627  */
628 /* ARGSUSED */
629 static int
630 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
631 {
632 	ql_adapter_state_t	*ha;
633 	int			minor;
634 	int			rval = DDI_FAILURE;
635 
636 	minor = (int)(getminor((dev_t)arg));
637 	ha = ddi_get_soft_state(ql_state, minor);
638 	if (ha == NULL) {
639 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
640 		    getminor((dev_t)arg));
641 		*resultp = NULL;
642 		return (rval);
643 	}
644 
645 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
646 
647 	switch (cmd) {
648 	case DDI_INFO_DEVT2DEVINFO:
649 		*resultp = ha->dip;
650 		rval = DDI_SUCCESS;
651 		break;
652 	case DDI_INFO_DEVT2INSTANCE:
653 		*resultp = (void *)(uintptr_t)(ha->instance);
654 		rval = DDI_SUCCESS;
655 		break;
656 	default:
657 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
658 		rval = DDI_FAILURE;
659 		break;
660 	}
661 
662 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
663 
664 	return (rval);
665 }
666 
667 /*
668  * ql_attach
669  *	Configure and attach an instance of the driver
670  *	for a port.
671  *
672  * Input:
673  *	dip = pointer to device information structure.
674  *	cmd = attach type.
675  *
676  * Returns:
677  *	DDI_SUCCESS or DDI_FAILURE.
678  *
679  * Context:
680  *	Kernel context.
681  */
682 static int
683 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
684 {
685 	uint32_t		size;
686 	int			rval;
687 	int			instance;
688 	uint_t			progress = 0;
689 	char			*buf;
690 	ushort_t		caps_ptr, cap;
691 	fc_fca_tran_t		*tran;
692 	ql_adapter_state_t	*ha = NULL;
693 
694 	static char *pmcomps[] = {
695 		NULL,
696 		PM_LEVEL_D3_STR,		/* Device OFF */
697 		PM_LEVEL_D0_STR,		/* Device ON */
698 	};
699 
700 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
701 	    ddi_get_instance(dip), cmd);
702 
703 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
704 
705 	switch (cmd) {
706 	case DDI_ATTACH:
707 		/* first get the instance */
708 		instance = ddi_get_instance(dip);
709 
710 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
711 		    QL_NAME, instance, QL_VERSION);
712 
713 		/* Correct OS version? */
714 		if (ql_os_release_level != 11) {
715 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
716 			    "11", QL_NAME, instance);
717 			goto attach_failed;
718 		}
719 
720 		/* Hardware is installed in a DMA-capable slot? */
721 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
722 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
723 			    instance);
724 			goto attach_failed;
725 		}
726 
727 		/* No support for high-level interrupts */
728 		if (ddi_intr_hilevel(dip, 0) != 0) {
729 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
730 			    " not supported", QL_NAME, instance);
731 			goto attach_failed;
732 		}
733 
734 		/* Allocate our per-device-instance structure */
735 		if (ddi_soft_state_zalloc(ql_state,
736 		    instance) != DDI_SUCCESS) {
737 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
738 			    QL_NAME, instance);
739 			goto attach_failed;
740 		}
741 		progress |= QL_SOFT_STATE_ALLOCED;
742 
743 		ha = ddi_get_soft_state(ql_state, instance);
744 		if (ha == NULL) {
745 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
746 			    QL_NAME, instance);
747 			goto attach_failed;
748 		}
749 		ha->dip = dip;
750 		ha->instance = instance;
751 		ha->hba.base_address = ha;
752 		ha->pha = ha;
753 
754 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
755 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
756 			    QL_NAME, instance);
757 			goto attach_failed;
758 		}
759 
760 		/* Get extended logging and dump flags. */
761 		ql_common_properties(ha);
762 
763 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
764 		    "sbus") == 0) {
765 			EL(ha, "%s SBUS card detected", QL_NAME);
766 			ha->cfg_flags |= CFG_SBUS_CARD;
767 		}
768 
769 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
770 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
771 
772 		ha->outstanding_cmds = kmem_zalloc(
773 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
774 		    KM_SLEEP);
775 
776 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
777 		    QL_UB_LIMIT, KM_SLEEP);
778 
779 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
780 		    KM_SLEEP);
781 
782 		(void) ddi_pathname(dip, buf);
783 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
784 		if (ha->devpath == NULL) {
785 			EL(ha, "devpath mem alloc failed\n");
786 		} else {
787 			(void) strcpy(ha->devpath, buf);
788 			EL(ha, "devpath is: %s\n", ha->devpath);
789 		}
790 
791 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
792 			/*
793 			 * For cards where PCI is mapped to sbus e.g. Ivory.
794 			 *
795 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
796 			 *	: 0x100 - 0x3FF PCI IO space for 2200
797 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
798 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
799 			 */
800 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
801 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
802 			    != DDI_SUCCESS) {
803 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
804 				    " registers", QL_NAME, instance);
805 				goto attach_failed;
806 			}
807 			if (ddi_regs_map_setup(dip, 1,
808 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
809 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
810 			    != DDI_SUCCESS) {
811 				/* We should not fail attach here */
812 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
813 				    QL_NAME, instance);
814 				ha->sbus_fpga_iobase = NULL;
815 			}
816 			progress |= QL_REGS_MAPPED;
817 		} else {
818 			/*
819 			 * Setup the ISP2200 registers address mapping to be
820 			 * accessed by this particular driver.
821 			 * 0x0   Configuration Space
822 			 * 0x1   I/O Space
823 			 * 0x2   32-bit Memory Space address
824 			 * 0x3   64-bit Memory Space address
825 			 */
826 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
827 			    0, 0x100, &ql_dev_acc_attr,
828 			    &ha->dev_handle) != DDI_SUCCESS) {
829 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
830 				    "failed", QL_NAME, instance);
831 				goto attach_failed;
832 			}
833 			progress |= QL_REGS_MAPPED;
834 
835 			/*
836 			 * We need I/O space mappings for 23xx HBAs for
837 			 * loading flash (FCode). The chip has a bug due to
838 			 * which loading flash fails through mem space
839 			 * mappings in PCI-X mode.
840 			 */
841 			if (ddi_regs_map_setup(dip, 1,
842 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
843 			    &ql_dev_acc_attr,
844 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
845 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
846 				    " failed", QL_NAME, instance);
847 				goto attach_failed;
848 			}
849 			progress |= QL_IOMAP_IOBASE_MAPPED;
850 		}
851 
852 		/*
853 		 * We should map config space before adding interrupt
854 		 * So that the chip type (2200 or 2300) can be determined
855 		 * before the interrupt routine gets a chance to execute.
856 		 */
857 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
858 			if (ddi_regs_map_setup(dip, 0,
859 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
860 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
861 			    DDI_SUCCESS) {
862 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
863 				    "config registers", QL_NAME, instance);
864 				goto attach_failed;
865 			}
866 		} else {
867 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
868 			    DDI_SUCCESS) {
869 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
870 				    "config space", QL_NAME, instance);
871 				goto attach_failed;
872 			}
873 		}
874 		progress |= QL_CONFIG_SPACE_SETUP;
875 
876 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
877 		    PCI_CONF_SUBSYSID);
878 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
879 		    PCI_CONF_SUBVENID);
880 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
881 		    PCI_CONF_VENID);
882 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
883 		    PCI_CONF_DEVID);
884 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
885 		    PCI_CONF_REVID);
886 
887 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
888 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
889 		    ha->subven_id, ha->subsys_id);
890 
891 		switch (ha->device_id) {
892 		case 0x2300:
893 		case 0x2312:
894 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
895 		/*
896 		 * per marketing, fibre-lite HBA's are not supported
897 		 * on sparc platforms
898 		 */
899 		case 0x6312:
900 		case 0x6322:
901 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
902 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
903 				ha->flags |= FUNCTION_1;
904 			}
905 			if (ha->device_id == 0x6322) {
906 				ha->cfg_flags |= CFG_CTRL_6322;
907 				ha->fw_class = 0x6322;
908 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
909 			} else {
910 				ha->cfg_flags |= CFG_CTRL_2300;
911 				ha->fw_class = 0x2300;
912 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
913 			}
914 			ha->reg_off = &reg_off_2300;
915 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
916 				goto attach_failed;
917 			}
918 			ha->fcp_cmd = ql_command_iocb;
919 			ha->ip_cmd = ql_ip_iocb;
920 			ha->ms_cmd = ql_ms_iocb;
921 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
922 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
923 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
924 			} else {
925 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
926 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
927 			}
928 			break;
929 
930 		case 0x2200:
931 			ha->cfg_flags |= CFG_CTRL_2200;
932 			ha->reg_off = &reg_off_2200;
933 			ha->fw_class = 0x2200;
934 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
935 				goto attach_failed;
936 			}
937 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
938 			ha->fcp_cmd = ql_command_iocb;
939 			ha->ip_cmd = ql_ip_iocb;
940 			ha->ms_cmd = ql_ms_iocb;
941 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
942 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
943 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
944 			} else {
945 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
946 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
947 			}
948 			break;
949 
950 		case 0x2422:
951 		case 0x2432:
952 		case 0x5422:
953 		case 0x5432:
954 		case 0x8432:
955 #ifdef __sparc
956 			/*
957 			 * Per marketing, the QLA/QLE-2440's (which
958 			 * also use the 2422 & 2432) are only for the
959 			 * x86 platform (SMB market).
960 			 */
961 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
962 			    ha->subsys_id == 0x13e) {
963 				cmn_err(CE_WARN,
964 				    "%s(%d): Unsupported HBA ssid: %x",
965 				    QL_NAME, instance, ha->subsys_id);
966 				goto attach_failed;
967 			}
968 #endif	/* __sparc */
969 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
970 				ha->flags |= FUNCTION_1;
971 			}
972 			ha->cfg_flags |= CFG_CTRL_2422;
973 			if (ha->device_id == 0x8432) {
974 				ha->cfg_flags |= CFG_CTRL_MENLO;
975 			} else {
976 				ha->flags |= VP_ENABLED;
977 			}
978 
979 			ha->reg_off = &reg_off_2400_2500;
980 			ha->fw_class = 0x2400;
981 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
982 				goto attach_failed;
983 			}
984 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
985 			ha->fcp_cmd = ql_command_24xx_iocb;
986 			ha->ip_cmd = ql_ip_24xx_iocb;
987 			ha->ms_cmd = ql_ms_24xx_iocb;
988 			ha->els_cmd = ql_els_24xx_iocb;
989 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
990 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
991 			break;
992 
993 		case 0x2522:
994 		case 0x2532:
995 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
996 				ha->flags |= FUNCTION_1;
997 			}
998 			ha->cfg_flags |= CFG_CTRL_25XX;
999 			ha->flags |= VP_ENABLED;
1000 			ha->fw_class = 0x2500;
1001 			ha->reg_off = &reg_off_2400_2500;
1002 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1003 				goto attach_failed;
1004 			}
1005 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1006 			ha->fcp_cmd = ql_command_24xx_iocb;
1007 			ha->ip_cmd = ql_ip_24xx_iocb;
1008 			ha->ms_cmd = ql_ms_24xx_iocb;
1009 			ha->els_cmd = ql_els_24xx_iocb;
1010 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1011 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1012 			break;
1013 
1014 		case 0x8001:
1015 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1016 				ha->flags |= FUNCTION_1;
1017 			}
1018 			ha->cfg_flags |= CFG_CTRL_81XX;
1019 			ha->flags |= VP_ENABLED;
1020 			ha->fw_class = 0x8100;
1021 			ha->reg_off = &reg_off_2400_2500;
1022 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1023 				goto attach_failed;
1024 			}
1025 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1026 			ha->fcp_cmd = ql_command_24xx_iocb;
1027 			ha->ip_cmd = ql_ip_24xx_iocb;
1028 			ha->ms_cmd = ql_ms_24xx_iocb;
1029 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1030 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1031 			break;
1032 
1033 		default:
1034 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1035 			    QL_NAME, instance, ha->device_id);
1036 			goto attach_failed;
1037 		}
1038 
1039 		/* Setup hba buffer. */
1040 
1041 		size = CFG_IST(ha, CFG_CTRL_242581) ?
1042 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1043 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1044 		    RCVBUF_QUEUE_SIZE);
1045 
1046 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1047 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1048 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1049 			    "alloc failed", QL_NAME, instance);
1050 			goto attach_failed;
1051 		}
1052 		progress |= QL_HBA_BUFFER_SETUP;
1053 
1054 		/* Setup buffer pointers. */
1055 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1056 		    REQUEST_Q_BUFFER_OFFSET;
1057 		ha->request_ring_bp = (struct cmd_entry *)
1058 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1059 
1060 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1061 		    RESPONSE_Q_BUFFER_OFFSET;
1062 		ha->response_ring_bp = (struct sts_entry *)
1063 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1064 
1065 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1066 		    RCVBUF_Q_BUFFER_OFFSET;
1067 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1068 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1069 
1070 		/* Allocate resource for QLogic IOCTL */
1071 		(void) ql_alloc_xioctl_resource(ha);
1072 
1073 		/* Setup interrupts */
1074 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1075 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1076 			    "rval=%xh", QL_NAME, instance, rval);
1077 			goto attach_failed;
1078 		}
1079 
1080 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1081 
1082 		/*
1083 		 * Allocate an N Port information structure
1084 		 * for use when in P2P topology.
1085 		 */
1086 		ha->n_port = (ql_n_port_info_t *)
1087 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1088 		if (ha->n_port == NULL) {
1089 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1090 			    QL_NAME, instance);
1091 			goto attach_failed;
1092 		}
1093 
1094 		progress |= QL_N_PORT_INFO_CREATED;
1095 
1096 		/*
1097 		 * Determine support for Power Management
1098 		 */
1099 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1100 
1101 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1102 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1103 			if (cap == PCI_CAP_ID_PM) {
1104 				ha->pm_capable = 1;
1105 				break;
1106 			}
1107 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1108 			    PCI_CAP_NEXT_PTR);
1109 		}
1110 
1111 		if (ha->pm_capable) {
1112 			/*
1113 			 * Enable PM for 2200 based HBAs only.
1114 			 */
1115 			if (ha->device_id != 0x2200) {
1116 				ha->pm_capable = 0;
1117 			}
1118 		}
1119 
1120 		if (ha->pm_capable) {
1121 			ha->pm_capable = ql_enable_pm;
1122 		}
1123 
1124 		if (ha->pm_capable) {
1125 			/*
1126 			 * Initialize power management bookkeeping;
1127 			 * components are created idle.
1128 			 */
1129 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1130 			pmcomps[0] = buf;
1131 
1132 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1133 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1134 			    dip, "pm-components", pmcomps,
1135 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1136 			    DDI_PROP_SUCCESS) {
1137 				cmn_err(CE_WARN, "%s(%d): failed to create"
1138 				    " pm-components property", QL_NAME,
1139 				    instance);
1140 
1141 				/* Initialize adapter. */
1142 				ha->power_level = PM_LEVEL_D0;
1143 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1144 					cmn_err(CE_WARN, "%s(%d): failed to"
1145 					    " initialize adapter", QL_NAME,
1146 					    instance);
1147 					goto attach_failed;
1148 				}
1149 			} else {
1150 				ha->power_level = PM_LEVEL_D3;
1151 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1152 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1153 					cmn_err(CE_WARN, "%s(%d): failed to"
1154 					    " raise power or initialize"
1155 					    " adapter", QL_NAME, instance);
1156 				}
1157 			}
1158 		} else {
1159 			/* Initialize adapter. */
1160 			ha->power_level = PM_LEVEL_D0;
1161 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1162 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1163 				    " adapter", QL_NAME, instance);
1164 			}
1165 		}
1166 
1167 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1168 		    ha->fw_subminor_version == 0) {
1169 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1170 			    QL_NAME, ha->instance);
1171 		} else {
1172 			int	rval;
1173 			char	ver_fmt[256];
1174 
1175 			rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1176 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1177 			    ha->fw_minor_version, ha->fw_subminor_version);
1178 
1179 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1180 				rval = (int)snprintf(ver_fmt + rval,
1181 				    (size_t)sizeof (ver_fmt),
1182 				    ", MPI fw version %d.%d.%d",
1183 				    ha->mpi_fw_major_version,
1184 				    ha->mpi_fw_minor_version,
1185 				    ha->mpi_fw_subminor_version);
1186 
1187 				if (ha->subsys_id == 0x17B ||
1188 				    ha->subsys_id == 0x17D) {
1189 					(void) snprintf(ver_fmt + rval,
1190 					    (size_t)sizeof (ver_fmt),
1191 					    ", PHY fw version %d.%d.%d",
1192 					    ha->phy_fw_major_version,
1193 					    ha->phy_fw_minor_version,
1194 					    ha->phy_fw_subminor_version);
1195 				}
1196 			}
1197 			cmn_err(CE_NOTE, "!%s(%d): %s",
1198 			    QL_NAME, ha->instance, ver_fmt);
1199 		}
1200 
1201 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1202 		    "controller", KSTAT_TYPE_RAW,
1203 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1204 		if (ha->k_stats == NULL) {
1205 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1206 			    QL_NAME, instance);
1207 			goto attach_failed;
1208 		}
1209 		progress |= QL_KSTAT_CREATED;
1210 
1211 		ha->adapter_stats->version = 1;
1212 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1213 		ha->k_stats->ks_private = ha;
1214 		ha->k_stats->ks_update = ql_kstat_update;
1215 		ha->k_stats->ks_ndata = 1;
1216 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1217 		kstat_install(ha->k_stats);
1218 
1219 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1220 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1221 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1222 			    QL_NAME, instance);
1223 			goto attach_failed;
1224 		}
1225 		progress |= QL_MINOR_NODE_CREATED;
1226 
1227 		/* Allocate a transport structure for this instance */
1228 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1229 		if (tran == NULL) {
1230 			cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1231 			    QL_NAME, instance);
1232 			goto attach_failed;
1233 		}
1234 
1235 		progress |= QL_FCA_TRAN_ALLOCED;
1236 
1237 		/* fill in the structure */
1238 		tran->fca_numports = 1;
1239 		tran->fca_version = FCTL_FCA_MODREV_5;
1240 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1241 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1242 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1243 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1244 		}
1245 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1246 		    tran->fca_perm_pwwn.raw_wwn, 8);
1247 
1248 		EL(ha, "FCA version %d\n", tran->fca_version);
1249 
1250 		/* Specify the amount of space needed in each packet */
1251 		tran->fca_pkt_size = sizeof (ql_srb_t);
1252 
1253 		/* command limits are usually dictated by hardware */
1254 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1255 
1256 		/* dmaattr are static, set elsewhere. */
1257 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1258 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1259 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1260 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1261 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1262 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1263 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1264 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1265 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1266 		} else {
1267 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1268 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1269 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1270 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1271 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1272 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1273 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1274 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1275 		}
1276 
1277 		tran->fca_acc_attr = &ql_dev_acc_attr;
1278 		tran->fca_iblock = &(ha->iblock_cookie);
1279 
1280 		/* the remaining values are simply function vectors */
1281 		tran->fca_bind_port = ql_bind_port;
1282 		tran->fca_unbind_port = ql_unbind_port;
1283 		tran->fca_init_pkt = ql_init_pkt;
1284 		tran->fca_un_init_pkt = ql_un_init_pkt;
1285 		tran->fca_els_send = ql_els_send;
1286 		tran->fca_get_cap = ql_get_cap;
1287 		tran->fca_set_cap = ql_set_cap;
1288 		tran->fca_getmap = ql_getmap;
1289 		tran->fca_transport = ql_transport;
1290 		tran->fca_ub_alloc = ql_ub_alloc;
1291 		tran->fca_ub_free = ql_ub_free;
1292 		tran->fca_ub_release = ql_ub_release;
1293 		tran->fca_abort = ql_abort;
1294 		tran->fca_reset = ql_reset;
1295 		tran->fca_port_manage = ql_port_manage;
1296 		tran->fca_get_device = ql_get_device;
1297 
1298 		/* give it to the FC transport */
1299 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1300 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1301 			    instance);
1302 			goto attach_failed;
1303 		}
1304 		progress |= QL_FCA_ATTACH_DONE;
1305 
1306 		/* Stash the structure so it can be freed at detach */
1307 		ha->tran = tran;
1308 
1309 		/* Acquire global state lock. */
1310 		GLOBAL_STATE_LOCK();
1311 
1312 		/* Add adapter structure to link list. */
1313 		ql_add_link_b(&ql_hba, &ha->hba);
1314 
1315 		/* Start one second driver timer. */
1316 		if (ql_timer_timeout_id == NULL) {
1317 			ql_timer_ticks = drv_usectohz(1000000);
1318 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1319 			    ql_timer_ticks);
1320 		}
1321 
1322 		/* Release global state lock. */
1323 		GLOBAL_STATE_UNLOCK();
1324 
1325 		/* Determine and populate HBA fru info */
1326 		ql_setup_fruinfo(ha);
1327 
1328 		/* Setup task_daemon thread. */
1329 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1330 		    0, &p0, TS_RUN, minclsyspri);
1331 
1332 		progress |= QL_TASK_DAEMON_STARTED;
1333 
1334 		ddi_report_dev(dip);
1335 
1336 		/* Disable link reset in panic path */
1337 		ha->lip_on_panic = 1;
1338 
1339 		rval = DDI_SUCCESS;
1340 		break;
1341 
1342 attach_failed:
1343 		if (progress & QL_FCA_ATTACH_DONE) {
1344 			(void) fc_fca_detach(dip);
1345 			progress &= ~QL_FCA_ATTACH_DONE;
1346 		}
1347 
1348 		if (progress & QL_FCA_TRAN_ALLOCED) {
1349 			kmem_free(tran, sizeof (fc_fca_tran_t));
1350 			progress &= ~QL_FCA_TRAN_ALLOCED;
1351 		}
1352 
1353 		if (progress & QL_MINOR_NODE_CREATED) {
1354 			ddi_remove_minor_node(dip, "devctl");
1355 			progress &= ~QL_MINOR_NODE_CREATED;
1356 		}
1357 
1358 		if (progress & QL_KSTAT_CREATED) {
1359 			kstat_delete(ha->k_stats);
1360 			progress &= ~QL_KSTAT_CREATED;
1361 		}
1362 
1363 		if (progress & QL_N_PORT_INFO_CREATED) {
1364 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1365 			progress &= ~QL_N_PORT_INFO_CREATED;
1366 		}
1367 
1368 		if (progress & QL_TASK_DAEMON_STARTED) {
1369 			TASK_DAEMON_LOCK(ha);
1370 
1371 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1372 
1373 			cv_signal(&ha->cv_task_daemon);
1374 
1375 			/* Release task daemon lock. */
1376 			TASK_DAEMON_UNLOCK(ha);
1377 
			/* Wait for the task daemon to stop running. */
1379 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1380 				ql_delay(ha, 10000);
1381 			}
1382 			progress &= ~QL_TASK_DAEMON_STARTED;
1383 		}
1384 
1385 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1386 			ddi_regs_map_free(&ha->iomap_dev_handle);
1387 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1388 		}
1389 
1390 		if (progress & QL_CONFIG_SPACE_SETUP) {
1391 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1392 				ddi_regs_map_free(&ha->sbus_config_handle);
1393 			} else {
1394 				pci_config_teardown(&ha->pci_handle);
1395 			}
1396 			progress &= ~QL_CONFIG_SPACE_SETUP;
1397 		}
1398 
1399 		if (progress & QL_INTR_ADDED) {
1400 			ql_disable_intr(ha);
1401 			ql_release_intr(ha);
1402 			progress &= ~QL_INTR_ADDED;
1403 		}
1404 
1405 		if (progress & QL_MUTEX_CV_INITED) {
1406 			ql_destroy_mutex(ha);
1407 			progress &= ~QL_MUTEX_CV_INITED;
1408 		}
1409 
1410 		if (progress & QL_HBA_BUFFER_SETUP) {
1411 			ql_free_phys(ha, &ha->hba_buf);
1412 			progress &= ~QL_HBA_BUFFER_SETUP;
1413 		}
1414 
1415 		if (progress & QL_REGS_MAPPED) {
1416 			ddi_regs_map_free(&ha->dev_handle);
1417 			if (ha->sbus_fpga_iobase != NULL) {
1418 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1419 			}
1420 			progress &= ~QL_REGS_MAPPED;
1421 		}
1422 
1423 		if (progress & QL_SOFT_STATE_ALLOCED) {
1424 
1425 			ql_fcache_rel(ha->fcache);
1426 
1427 			kmem_free(ha->adapter_stats,
1428 			    sizeof (*ha->adapter_stats));
1429 
1430 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1431 			    QL_UB_LIMIT);
1432 
1433 			kmem_free(ha->outstanding_cmds,
1434 			    sizeof (*ha->outstanding_cmds) *
1435 			    MAX_OUTSTANDING_COMMANDS);
1436 
1437 			if (ha->devpath != NULL) {
1438 				kmem_free(ha->devpath,
1439 				    strlen(ha->devpath) + 1);
1440 			}
1441 
1442 			kmem_free(ha->dev, sizeof (*ha->dev) *
1443 			    DEVICE_HEAD_LIST_SIZE);
1444 
1445 			if (ha->xioctl != NULL) {
1446 				ql_free_xioctl_resource(ha);
1447 			}
1448 
1449 			if (ha->fw_module != NULL) {
1450 				(void) ddi_modclose(ha->fw_module);
1451 			}
1452 
1453 			ddi_soft_state_free(ql_state, instance);
1454 			progress &= ~QL_SOFT_STATE_ALLOCED;
1455 		}
1456 
1457 		ddi_prop_remove_all(dip);
1458 		rval = DDI_FAILURE;
1459 		break;
1460 
1461 	case DDI_RESUME:
1462 		rval = DDI_FAILURE;
1463 
1464 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1465 		if (ha == NULL) {
1466 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1467 			    QL_NAME, instance);
1468 			break;
1469 		}
1470 
1471 		ha->power_level = PM_LEVEL_D3;
1472 		if (ha->pm_capable) {
1473 			/*
1474 			 * Get ql_power to do power on initialization
1475 			 */
1476 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1477 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1478 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1479 				    " power", QL_NAME, instance);
1480 			}
1481 		}
1482 
1483 		/*
1484 		 * There is a bug in DR that prevents PM framework
1485 		 * from calling ql_power.
1486 		 */
1487 		if (ha->power_level == PM_LEVEL_D3) {
1488 			ha->power_level = PM_LEVEL_D0;
1489 
1490 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1491 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1492 				    " adapter", QL_NAME, instance);
1493 			}
1494 
1495 			/* Wake up task_daemon. */
1496 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1497 			    0);
1498 		}
1499 
1500 		/* Acquire global state lock. */
1501 		GLOBAL_STATE_LOCK();
1502 
1503 		/* Restart driver timer. */
1504 		if (ql_timer_timeout_id == NULL) {
1505 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1506 			    ql_timer_ticks);
1507 		}
1508 
1509 		/* Release global state lock. */
1510 		GLOBAL_STATE_UNLOCK();
1511 
1512 		/* Wake up command start routine. */
1513 		ADAPTER_STATE_LOCK(ha);
1514 		ha->flags &= ~ADAPTER_SUSPENDED;
1515 		ADAPTER_STATE_UNLOCK(ha);
1516 
1517 		/*
1518 		 * Transport doesn't make FC discovery in polled
1519 		 * mode; So we need the daemon thread's services
1520 		 * right here.
1521 		 */
1522 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1523 
1524 		rval = DDI_SUCCESS;
1525 
1526 		/* Restart IP if it was running. */
1527 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1528 			(void) ql_initialize_ip(ha);
1529 			ql_isp_rcvbuf(ha);
1530 		}
1531 		break;
1532 
1533 	default:
1534 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1535 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1536 		rval = DDI_FAILURE;
1537 		break;
1538 	}
1539 
1540 	kmem_free(buf, MAXPATHLEN);
1541 
1542 	if (rval != DDI_SUCCESS) {
1543 		/*EMPTY*/
1544 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1545 		    ddi_get_instance(dip), rval);
1546 	} else {
1547 		/*EMPTY*/
1548 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1549 	}
1550 
1551 	return (rval);
1552 }
1553 
1554 /*
1555  * ql_detach
1556  *	Used to remove all the states associated with a given
1557  *	instances of a device node prior to the removal of that
1558  *	instance from the system.
1559  *
1560  * Input:
1561  *	dip = pointer to device information structure.
1562  *	cmd = type of detach.
1563  *
1564  * Returns:
1565  *	DDI_SUCCESS or DDI_FAILURE.
1566  *
1567  * Context:
1568  *	Kernel context.
1569  */
1570 static int
1571 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1572 {
1573 	ql_adapter_state_t	*ha, *vha;
1574 	ql_tgt_t		*tq;
1575 	int			delay_cnt;
1576 	uint16_t		index;
1577 	ql_link_t		*link;
1578 	char			*buf;
1579 	timeout_id_t		timer_id = NULL;
1580 	int			suspend, rval = DDI_SUCCESS;
1581 
1582 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1583 	if (ha == NULL) {
1584 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1585 		    ddi_get_instance(dip));
1586 		return (DDI_FAILURE);
1587 	}
1588 
1589 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1590 
1591 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1592 
1593 	switch (cmd) {
1594 	case DDI_DETACH:
1595 		ADAPTER_STATE_LOCK(ha);
1596 		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1597 		ADAPTER_STATE_UNLOCK(ha);
1598 
1599 		TASK_DAEMON_LOCK(ha);
1600 
1601 		if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1602 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1603 			cv_signal(&ha->cv_task_daemon);
1604 
1605 			TASK_DAEMON_UNLOCK(ha);
1606 
1607 			(void) ql_wait_for_td_stop(ha);
1608 
1609 			TASK_DAEMON_LOCK(ha);
1610 			if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1611 				ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1612 				EL(ha, "failed, could not stop task daemon\n");
1613 			}
1614 		}
1615 		TASK_DAEMON_UNLOCK(ha);
1616 
1617 		GLOBAL_STATE_LOCK();
1618 
1619 		/* Disable driver timer if no adapters. */
1620 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1621 		    ql_hba.last == &ha->hba) {
1622 			timer_id = ql_timer_timeout_id;
1623 			ql_timer_timeout_id = NULL;
1624 		}
1625 		ql_remove_link(&ql_hba, &ha->hba);
1626 
1627 		GLOBAL_STATE_UNLOCK();
1628 
1629 		if (timer_id) {
1630 			(void) untimeout(timer_id);
1631 		}
1632 
1633 		if (ha->pm_capable) {
1634 			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1635 			    PM_LEVEL_D3) != DDI_SUCCESS) {
1636 				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1637 				    " power", QL_NAME, ha->instance);
1638 			}
1639 		}
1640 
1641 		/*
1642 		 * If pm_lower_power shutdown the adapter, there
1643 		 * isn't much else to do
1644 		 */
1645 		if (ha->power_level != PM_LEVEL_D3) {
1646 			ql_halt(ha, PM_LEVEL_D3);
1647 		}
1648 
1649 		/* Remove virtual ports. */
1650 		while ((vha = ha->vp_next) != NULL) {
1651 			ql_vport_destroy(vha);
1652 		}
1653 
1654 		/* Free target queues. */
1655 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1656 			link = ha->dev[index].first;
1657 			while (link != NULL) {
1658 				tq = link->base_address;
1659 				link = link->next;
1660 				ql_dev_free(ha, tq);
1661 			}
1662 		}
1663 
1664 		/*
1665 		 * Free unsolicited buffers.
1666 		 * If we are here then there are no ULPs still
1667 		 * alive that wish to talk to ql so free up
1668 		 * any SRB_IP_UB_UNUSED buffers that are
1669 		 * lingering around
1670 		 */
1671 		QL_UB_LOCK(ha);
1672 		for (index = 0; index < QL_UB_LIMIT; index++) {
1673 			fc_unsol_buf_t *ubp = ha->ub_array[index];
1674 
1675 			if (ubp != NULL) {
1676 				ql_srb_t *sp = ubp->ub_fca_private;
1677 
1678 				sp->flags |= SRB_UB_FREE_REQUESTED;
1679 
1680 				while (!(sp->flags & SRB_UB_IN_FCA) ||
1681 				    (sp->flags & (SRB_UB_CALLBACK |
1682 				    SRB_UB_ACQUIRED))) {
1683 					QL_UB_UNLOCK(ha);
1684 					delay(drv_usectohz(100000));
1685 					QL_UB_LOCK(ha);
1686 				}
1687 				ha->ub_array[index] = NULL;
1688 
1689 				QL_UB_UNLOCK(ha);
1690 				ql_free_unsolicited_buffer(ha, ubp);
1691 				QL_UB_LOCK(ha);
1692 			}
1693 		}
1694 		QL_UB_UNLOCK(ha);
1695 
1696 		/* Free any saved RISC code. */
1697 		if (ha->risc_code != NULL) {
1698 			kmem_free(ha->risc_code, ha->risc_code_size);
1699 			ha->risc_code = NULL;
1700 			ha->risc_code_size = 0;
1701 		}
1702 
1703 		if (ha->fw_module != NULL) {
1704 			(void) ddi_modclose(ha->fw_module);
1705 			ha->fw_module = NULL;
1706 		}
1707 
1708 		/* Free resources. */
1709 		ddi_prop_remove_all(dip);
1710 		(void) fc_fca_detach(dip);
1711 		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1712 		ddi_remove_minor_node(dip, "devctl");
1713 		if (ha->k_stats != NULL) {
1714 			kstat_delete(ha->k_stats);
1715 		}
1716 
1717 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1718 			ddi_regs_map_free(&ha->sbus_config_handle);
1719 		} else {
1720 			ddi_regs_map_free(&ha->iomap_dev_handle);
1721 			pci_config_teardown(&ha->pci_handle);
1722 		}
1723 
1724 		ql_disable_intr(ha);
1725 		ql_release_intr(ha);
1726 
1727 		ql_free_xioctl_resource(ha);
1728 
1729 		ql_destroy_mutex(ha);
1730 
1731 		ql_free_phys(ha, &ha->hba_buf);
1732 		ql_free_phys(ha, &ha->fwexttracebuf);
1733 		ql_free_phys(ha, &ha->fwfcetracebuf);
1734 
1735 		ddi_regs_map_free(&ha->dev_handle);
1736 		if (ha->sbus_fpga_iobase != NULL) {
1737 			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1738 		}
1739 
1740 		ql_fcache_rel(ha->fcache);
1741 		if (ha->vcache != NULL) {
1742 			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1743 		}
1744 
1745 		if (ha->pi_attrs != NULL) {
1746 			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1747 		}
1748 
1749 		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1750 
1751 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1752 
1753 		kmem_free(ha->outstanding_cmds,
1754 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1755 
1756 		if (ha->n_port != NULL) {
1757 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1758 		}
1759 
1760 		if (ha->devpath != NULL) {
1761 			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1762 		}
1763 
1764 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1765 
1766 		EL(ha, "detached\n");
1767 
1768 		ddi_soft_state_free(ql_state, (int)ha->instance);
1769 
1770 		break;
1771 
1772 	case DDI_SUSPEND:
1773 		ADAPTER_STATE_LOCK(ha);
1774 
1775 		delay_cnt = 0;
1776 		ha->flags |= ADAPTER_SUSPENDED;
1777 		while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1778 			ADAPTER_STATE_UNLOCK(ha);
1779 			delay(drv_usectohz(1000000));
1780 			ADAPTER_STATE_LOCK(ha);
1781 		}
1782 		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1783 			ha->flags &= ~ADAPTER_SUSPENDED;
1784 			ADAPTER_STATE_UNLOCK(ha);
1785 			rval = DDI_FAILURE;
1786 			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1787 			    " busy %xh flags %xh", QL_NAME, ha->instance,
1788 			    ha->busy, ha->flags);
1789 			break;
1790 		}
1791 
1792 		ADAPTER_STATE_UNLOCK(ha);
1793 
1794 		if (ha->flags & IP_INITIALIZED) {
1795 			(void) ql_shutdown_ip(ha);
1796 		}
1797 
1798 		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1799 			ADAPTER_STATE_LOCK(ha);
1800 			ha->flags &= ~ADAPTER_SUSPENDED;
1801 			ADAPTER_STATE_UNLOCK(ha);
1802 			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
1803 			    QL_NAME, ha->instance, suspend);
1804 
1805 			/* Restart IP if it was running. */
1806 			if (ha->flags & IP_ENABLED &&
1807 			    !(ha->flags & IP_INITIALIZED)) {
1808 				(void) ql_initialize_ip(ha);
1809 				ql_isp_rcvbuf(ha);
1810 			}
1811 			rval = DDI_FAILURE;
1812 			break;
1813 		}
1814 
1815 		/* Acquire global state lock. */
1816 		GLOBAL_STATE_LOCK();
1817 
1818 		/* Disable driver timer if last adapter. */
1819 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1820 		    ql_hba.last == &ha->hba) {
1821 			timer_id = ql_timer_timeout_id;
1822 			ql_timer_timeout_id = NULL;
1823 		}
1824 		GLOBAL_STATE_UNLOCK();
1825 
1826 		if (timer_id) {
1827 			(void) untimeout(timer_id);
1828 		}
1829 
1830 		EL(ha, "suspended\n");
1831 
1832 		break;
1833 
1834 	default:
1835 		rval = DDI_FAILURE;
1836 		break;
1837 	}
1838 
1839 	kmem_free(buf, MAXPATHLEN);
1840 
1841 	if (rval != DDI_SUCCESS) {
1842 		if (ha != NULL) {
1843 			EL(ha, "failed, rval = %xh\n", rval);
1844 		} else {
1845 			/*EMPTY*/
1846 			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1847 			    ddi_get_instance(dip), rval);
1848 		}
1849 	} else {
1850 		/*EMPTY*/
1851 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1852 	}
1853 
1854 	return (rval);
1855 }
1856 
1857 
1858 /*
1859  * ql_power
1860  *	Power a device attached to the system.
1861  *
1862  * Input:
1863  *	dip = pointer to device information structure.
1864  *	component = device.
1865  *	level = power level.
1866  *
1867  * Returns:
1868  *	DDI_SUCCESS or DDI_FAILURE.
1869  *
1870  * Context:
1871  *	Kernel context.
1872  */
1873 /* ARGSUSED */
1874 static int
1875 ql_power(dev_info_t *dip, int component, int level)
1876 {
1877 	int			rval = DDI_FAILURE;
1878 	off_t			csr;
1879 	uint8_t			saved_pm_val;
1880 	ql_adapter_state_t	*ha;
1881 	char			*buf;
1882 	char			*path;
1883 
1884 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1885 	if (ha == NULL || ha->pm_capable == 0) {
1886 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1887 		    ddi_get_instance(dip));
1888 		return (rval);
1889 	}
1890 
1891 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1892 
1893 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1894 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1895 
1896 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1897 	    level != PM_LEVEL_D3)) {
1898 		EL(ha, "invalid, component=%xh or level=%xh\n",
1899 		    component, level);
1900 		return (rval);
1901 	}
1902 
1903 	GLOBAL_HW_LOCK();
1904 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1905 	GLOBAL_HW_UNLOCK();
1906 
1907 	(void) snprintf(buf, sizeof (buf),
1908 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1909 	    ddi_pathname(dip, path));
1910 
1911 	switch (level) {
1912 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1913 
1914 		QL_PM_LOCK(ha);
1915 		if (ha->power_level == PM_LEVEL_D0) {
1916 			QL_PM_UNLOCK(ha);
1917 			rval = DDI_SUCCESS;
1918 			break;
1919 		}
1920 
1921 		/*
1922 		 * Enable interrupts now
1923 		 */
1924 		saved_pm_val = ha->power_level;
1925 		ha->power_level = PM_LEVEL_D0;
1926 		QL_PM_UNLOCK(ha);
1927 
1928 		GLOBAL_HW_LOCK();
1929 
1930 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1931 
1932 		/*
1933 		 * Delay after reset, for chip to recover.
1934 		 * Otherwise causes system PANIC
1935 		 */
1936 		drv_usecwait(200000);
1937 
1938 		GLOBAL_HW_UNLOCK();
1939 
1940 		if (ha->config_saved) {
1941 			ha->config_saved = 0;
1942 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1943 				QL_PM_LOCK(ha);
1944 				ha->power_level = saved_pm_val;
1945 				QL_PM_UNLOCK(ha);
1946 				cmn_err(CE_WARN, "%s failed to restore "
1947 				    "config regs", buf);
1948 				break;
1949 			}
1950 		}
1951 
1952 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1953 			cmn_err(CE_WARN, "%s adapter initialization failed",
1954 			    buf);
1955 		}
1956 
1957 		/* Wake up task_daemon. */
1958 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1959 		    TASK_DAEMON_SLEEPING_FLG, 0);
1960 
1961 		/* Restart IP if it was running. */
1962 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1963 			(void) ql_initialize_ip(ha);
1964 			ql_isp_rcvbuf(ha);
1965 		}
1966 
1967 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1968 		    ha->instance, QL_NAME);
1969 
1970 		rval = DDI_SUCCESS;
1971 		break;
1972 
1973 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1974 
1975 		QL_PM_LOCK(ha);
1976 
1977 		if (ha->busy || ((ha->task_daemon_flags &
1978 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1979 			QL_PM_UNLOCK(ha);
1980 			break;
1981 		}
1982 
1983 		if (ha->power_level == PM_LEVEL_D3) {
1984 			rval = DDI_SUCCESS;
1985 			QL_PM_UNLOCK(ha);
1986 			break;
1987 		}
1988 		QL_PM_UNLOCK(ha);
1989 
1990 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1991 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
1992 			    " config regs", QL_NAME, ha->instance, buf);
1993 			break;
1994 		}
1995 		ha->config_saved = 1;
1996 
1997 		/*
1998 		 * Don't enable interrupts. Running mailbox commands with
1999 		 * interrupts enabled could cause hangs since pm_run_scan()
2000 		 * runs out of a callout thread and on single cpu systems
2001 		 * cv_timedwait(), called from ql_mailbox_command(), would
2002 		 * not get to run.
2003 		 */
2004 		TASK_DAEMON_LOCK(ha);
2005 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2006 		TASK_DAEMON_UNLOCK(ha);
2007 
2008 		ql_halt(ha, PM_LEVEL_D3);
2009 
2010 		/*
2011 		 * Setup ql_intr to ignore interrupts from here on.
2012 		 */
2013 		QL_PM_LOCK(ha);
2014 		ha->power_level = PM_LEVEL_D3;
2015 		QL_PM_UNLOCK(ha);
2016 
2017 		/*
2018 		 * Wait for ISR to complete.
2019 		 */
2020 		INTR_LOCK(ha);
2021 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2022 		INTR_UNLOCK(ha);
2023 
2024 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2025 		    ha->instance, QL_NAME);
2026 
2027 		rval = DDI_SUCCESS;
2028 		break;
2029 	}
2030 
2031 	kmem_free(buf, MAXPATHLEN);
2032 	kmem_free(path, MAXPATHLEN);
2033 
2034 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2035 
2036 	return (rval);
2037 }
2038 
2039 /*
2040  * ql_quiesce
2041  *	quiesce a device attached to the system.
2042  *
2043  * Input:
2044  *	dip = pointer to device information structure.
2045  *
2046  * Returns:
2047  *	DDI_SUCCESS
2048  *
2049  * Context:
2050  *	Kernel context.
2051  */
2052 static int
2053 ql_quiesce(dev_info_t *dip)
2054 {
2055 	ql_adapter_state_t	*ha;
2056 	uint32_t		timer;
2057 	uint32_t		stat;
2058 
2059 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2060 	if (ha == NULL) {
2061 		/* Oh well.... */
2062 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2063 		    ddi_get_instance(dip));
2064 		return (DDI_SUCCESS);
2065 	}
2066 
2067 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2068 
2069 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2070 		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2071 		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
2072 		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
2073 		for (timer = 0; timer < 30000; timer++) {
2074 			stat = RD32_IO_REG(ha, intr_info_lo);
2075 			if (stat & BIT_15) {
2076 				if ((stat & 0xff) < 0x12) {
2077 					WRT32_IO_REG(ha, hccr,
2078 					    HC24_CLR_RISC_INT);
2079 					break;
2080 				}
2081 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2082 			}
2083 			drv_usecwait(100);
2084 		}
2085 		/* Reset the chip. */
2086 		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2087 		    MWB_4096_BYTES);
2088 		drv_usecwait(100);
2089 
2090 	} else {
2091 		/* Disable ISP interrupts. */
2092 		WRT16_IO_REG(ha, ictrl, 0);
2093 		/* Select RISC module registers. */
2094 		WRT16_IO_REG(ha, ctrl_status, 0);
2095 		/* Reset ISP semaphore. */
2096 		WRT16_IO_REG(ha, semaphore, 0);
2097 		/* Reset RISC module. */
2098 		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2099 		/* Release RISC module. */
2100 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2101 	}
2102 
2103 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2104 
2105 	return (DDI_SUCCESS);
2106 }
2107 
2108 /* ************************************************************************ */
2109 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2110 /* ************************************************************************ */
2111 
2112 /*
2113  * ql_bind_port
2114  *	Handling port binding. The FC Transport attempts to bind an FCA port
2115  *	when it is ready to start transactions on the port. The FC Transport
2116  *	will call the fca_bind_port() function specified in the fca_transport
2117  *	structure it receives. The FCA must fill in the port_info structure
2118  *	passed in the call and also stash the information for future calls.
2119  *
2120  * Input:
2121  *	dip = pointer to FCA information structure.
2122  *	port_info = pointer to port information structure.
2123  *	bind_info = pointer to bind information structure.
2124  *
2125  * Returns:
2126  *	NULL = failure
2127  *
2128  * Context:
2129  *	Kernel context.
2130  */
2131 static opaque_t
2132 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2133     fc_fca_bind_info_t *bind_info)
2134 {
2135 	ql_adapter_state_t	*ha, *vha;
2136 	opaque_t		fca_handle = NULL;
2137 	port_id_t		d_id;
2138 	int			port_npiv = bind_info->port_npiv;
2139 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2140 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2141 
2142 	/* get state info based on the dip */
2143 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2144 	if (ha == NULL) {
2145 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2146 		    ddi_get_instance(dip));
2147 		return (NULL);
2148 	}
2149 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2150 
2151 	/* Verify port number is supported. */
2152 	if (port_npiv != 0) {
2153 		if (!(ha->flags & VP_ENABLED)) {
2154 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2155 			    ha->instance);
2156 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2157 			return (NULL);
2158 		}
2159 		if (!(ha->flags & POINT_TO_POINT)) {
2160 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2161 			    ha->instance);
2162 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2163 			return (NULL);
2164 		}
2165 		if (!(ha->flags & FDISC_ENABLED)) {
2166 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2167 			    "FDISC\n", ha->instance);
2168 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2169 			return (NULL);
2170 		}
2171 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2172 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2173 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2174 			    "FC_OUTOFBOUNDS\n", ha->instance);
2175 			port_info->pi_error = FC_OUTOFBOUNDS;
2176 			return (NULL);
2177 		}
2178 	} else if (bind_info->port_num != 0) {
2179 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2180 		    "supported\n", ha->instance, bind_info->port_num);
2181 		port_info->pi_error = FC_OUTOFBOUNDS;
2182 		return (NULL);
2183 	}
2184 
2185 	/* Locate port context. */
2186 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2187 		if (vha->vp_index == bind_info->port_num) {
2188 			break;
2189 		}
2190 	}
2191 
2192 	/* If virtual port does not exist. */
2193 	if (vha == NULL) {
2194 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2195 	}
2196 
2197 	/* make sure this port isn't already bound */
2198 	if (vha->flags & FCA_BOUND) {
2199 		port_info->pi_error = FC_ALREADY;
2200 	} else {
2201 		if (vha->vp_index != 0) {
2202 			bcopy(port_nwwn,
2203 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2204 			bcopy(port_pwwn,
2205 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2206 		}
2207 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2208 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2209 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2210 				    "virtual port=%d\n", ha->instance,
2211 				    vha->vp_index);
2212 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2213 				return (NULL);
2214 			}
2215 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2216 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2217 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2218 			    QL_NAME, ha->instance, vha->vp_index,
2219 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2220 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2221 			    port_pwwn[6], port_pwwn[7],
2222 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2223 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2224 			    port_nwwn[6], port_nwwn[7]);
2225 		}
2226 
2227 		/* stash the bind_info supplied by the FC Transport */
2228 		vha->bind_info.port_handle = bind_info->port_handle;
2229 		vha->bind_info.port_statec_cb =
2230 		    bind_info->port_statec_cb;
2231 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2232 
2233 		/* Set port's source ID. */
2234 		port_info->pi_s_id.port_id = vha->d_id.b24;
2235 
2236 		/* copy out the default login parameters */
2237 		bcopy((void *)&vha->loginparams,
2238 		    (void *)&port_info->pi_login_params,
2239 		    sizeof (la_els_logi_t));
2240 
2241 		/* Set port's hard address if enabled. */
2242 		port_info->pi_hard_addr.hard_addr = 0;
2243 		if (bind_info->port_num == 0) {
2244 			d_id.b24 = ha->d_id.b24;
2245 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2246 				if (ha->init_ctrl_blk.cb24.
2247 				    firmware_options_1[0] & BIT_0) {
2248 					d_id.b.al_pa = ql_index_to_alpa[ha->
2249 					    init_ctrl_blk.cb24.
2250 					    hard_address[0]];
2251 					port_info->pi_hard_addr.hard_addr =
2252 					    d_id.b24;
2253 				}
2254 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2255 			    BIT_0) {
2256 				d_id.b.al_pa = ql_index_to_alpa[ha->
2257 				    init_ctrl_blk.cb.hard_address[0]];
2258 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2259 			}
2260 
2261 			/* Set the node id data */
2262 			if (ql_get_rnid_params(ha,
2263 			    sizeof (port_info->pi_rnid_params.params),
2264 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2265 			    QL_SUCCESS) {
2266 				port_info->pi_rnid_params.status = FC_SUCCESS;
2267 			} else {
2268 				port_info->pi_rnid_params.status = FC_FAILURE;
2269 			}
2270 
2271 			/* Populate T11 FC-HBA details */
2272 			ql_populate_hba_fru_details(ha, port_info);
2273 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2274 			    KM_SLEEP);
2275 			if (ha->pi_attrs != NULL) {
2276 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2277 				    sizeof (fca_port_attrs_t));
2278 			}
2279 		} else {
2280 			port_info->pi_rnid_params.status = FC_FAILURE;
2281 			if (ha->pi_attrs != NULL) {
2282 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2283 				    sizeof (fca_port_attrs_t));
2284 			}
2285 		}
2286 
2287 		/* Generate handle for this FCA. */
2288 		fca_handle = (opaque_t)vha;
2289 
2290 		ADAPTER_STATE_LOCK(ha);
2291 		vha->flags |= FCA_BOUND;
2292 		ADAPTER_STATE_UNLOCK(ha);
2293 		/* Set port's current state. */
2294 		port_info->pi_port_state = vha->state;
2295 	}
2296 
2297 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2298 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2299 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2300 
2301 	return (fca_handle);
2302 }
2303 
2304 /*
2305  * ql_unbind_port
2306  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2307  *
2308  * Input:
2309  *	fca_handle = handle setup by ql_bind_port().
2310  *
2311  * Context:
2312  *	Kernel context.
2313  */
2314 static void
2315 ql_unbind_port(opaque_t fca_handle)
2316 {
2317 	ql_adapter_state_t	*ha;
2318 	ql_tgt_t		*tq;
2319 	uint32_t		flgs;
2320 
2321 	ha = ql_fca_handle_to_state(fca_handle);
2322 	if (ha == NULL) {
2323 		/*EMPTY*/
2324 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2325 		    (void *)fca_handle);
2326 	} else {
2327 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2328 		    ha->vp_index);
2329 
2330 		if (!(ha->flags & FCA_BOUND)) {
2331 			/*EMPTY*/
2332 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2333 			    ha->instance, ha->vp_index);
2334 		} else {
2335 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2336 				if ((tq = ql_loop_id_to_queue(ha,
2337 				    FL_PORT_24XX_HDL)) != NULL) {
2338 					(void) ql_logout_fabric_port(ha, tq);
2339 				}
2340 				(void) ql_vport_control(ha, (uint8_t)
2341 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2342 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2343 				flgs = FCA_BOUND | VP_ENABLED;
2344 			} else {
2345 				flgs = FCA_BOUND;
2346 			}
2347 			ADAPTER_STATE_LOCK(ha);
2348 			ha->flags &= ~flgs;
2349 			ADAPTER_STATE_UNLOCK(ha);
2350 		}
2351 
2352 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2353 		    ha->vp_index);
2354 	}
2355 }
2356 
2357 /*
2358  * ql_init_pkt
2359  *	Initialize FCA portion of packet.
2360  *
2361  * Input:
2362  *	fca_handle = handle setup by ql_bind_port().
2363  *	pkt = pointer to fc_packet.
2364  *
2365  * Returns:
2366  *	FC_SUCCESS - the packet has successfully been initialized.
2367  *	FC_UNBOUND - the fca_handle specified is not bound.
2368  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2369  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2370  *
2371  * Context:
2372  *	Kernel context.
2373  */
2374 /* ARGSUSED */
2375 static int
2376 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2377 {
2378 	ql_adapter_state_t	*ha;
2379 	ql_srb_t		*sp;
2380 
2381 	ha = ql_fca_handle_to_state(fca_handle);
2382 	if (ha == NULL) {
2383 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2384 		    (void *)fca_handle);
2385 		return (FC_UNBOUND);
2386 	}
2387 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2388 
2389 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2390 	sp->flags = 0;
2391 
2392 	/* init cmd links */
2393 	sp->cmd.base_address = sp;
2394 	sp->cmd.prev = NULL;
2395 	sp->cmd.next = NULL;
2396 	sp->cmd.head = NULL;
2397 
2398 	/* init watchdog links */
2399 	sp->wdg.base_address = sp;
2400 	sp->wdg.prev = NULL;
2401 	sp->wdg.next = NULL;
2402 	sp->wdg.head = NULL;
2403 	sp->pkt = pkt;
2404 	sp->ha = ha;
2405 	sp->magic_number = QL_FCA_BRAND;
2406 
2407 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2408 
2409 	return (FC_SUCCESS);
2410 }
2411 
2412 /*
2413  * ql_un_init_pkt
2414  *	Release all local resources bound to packet.
2415  *
2416  * Input:
2417  *	fca_handle = handle setup by ql_bind_port().
2418  *	pkt = pointer to fc_packet.
2419  *
2420  * Returns:
2421  *	FC_SUCCESS - the packet has successfully been invalidated.
2422  *	FC_UNBOUND - the fca_handle specified is not bound.
2423  *	FC_BADPACKET - the packet has not been initialized or has
2424  *			already been freed by this FCA.
2425  *
2426  * Context:
2427  *	Kernel context.
2428  */
2429 static int
2430 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2431 {
2432 	ql_adapter_state_t *ha;
2433 	int rval;
2434 	ql_srb_t *sp;
2435 
2436 	ha = ql_fca_handle_to_state(fca_handle);
2437 	if (ha == NULL) {
2438 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2439 		    (void *)fca_handle);
2440 		return (FC_UNBOUND);
2441 	}
2442 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2443 
2444 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2445 
2446 	if (sp->magic_number != QL_FCA_BRAND) {
2447 		EL(ha, "failed, FC_BADPACKET\n");
2448 		rval = FC_BADPACKET;
2449 	} else {
2450 		sp->magic_number = NULL;
2451 
2452 		rval = FC_SUCCESS;
2453 	}
2454 
2455 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2456 
2457 	return (rval);
2458 }
2459 
2460 /*
2461  * ql_els_send
2462  *	Issue a extended link service request.
2463  *
2464  * Input:
2465  *	fca_handle = handle setup by ql_bind_port().
2466  *	pkt = pointer to fc_packet.
2467  *
2468  * Returns:
2469  *	FC_SUCCESS - the command was successful.
2470  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2471  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2472  *	FC_TRANSPORT_ERROR - a transport error occurred.
2473  *	FC_UNBOUND - the fca_handle specified is not bound.
2474  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2475  *
2476  * Context:
2477  *	Kernel context.
2478  */
2479 static int
2480 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2481 {
2482 	ql_adapter_state_t	*ha;
2483 	int			rval;
2484 	clock_t			timer = drv_usectohz(30000000);
2485 	ls_code_t		els;
2486 	la_els_rjt_t		rjt;
2487 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2488 
2489 	/* Verify proper command. */
2490 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2491 	if (ha == NULL) {
2492 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2493 		    rval, fca_handle);
2494 		return (FC_INVALID_REQUEST);
2495 	}
2496 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2497 
2498 	/* Wait for suspension to end. */
2499 	TASK_DAEMON_LOCK(ha);
2500 	while (ha->task_daemon_flags & QL_SUSPENDED) {
2501 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2502 
2503 		/* 30 seconds from now */
2504 		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
2505 		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
2506 			/*
2507 			 * The timeout time 'timer' was
2508 			 * reached without the condition
2509 			 * being signaled.
2510 			 */
2511 			pkt->pkt_state = FC_PKT_TRAN_BSY;
2512 			pkt->pkt_reason = FC_REASON_XCHG_BSY;
2513 
2514 			/* Release task daemon lock. */
2515 			TASK_DAEMON_UNLOCK(ha);
2516 
2517 			EL(ha, "QL_SUSPENDED failed=%xh\n",
2518 			    QL_FUNCTION_TIMEOUT);
2519 			return (FC_TRAN_BUSY);
2520 		}
2521 	}
2522 	/* Release task daemon lock. */
2523 	TASK_DAEMON_UNLOCK(ha);
2524 
2525 	/* Setup response header. */
2526 	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
2527 	    sizeof (fc_frame_hdr_t));
2528 
2529 	if (pkt->pkt_rsplen) {
2530 		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2531 	}
2532 
2533 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2534 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2535 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
2536 	    R_CTL_SOLICITED_CONTROL;
2537 	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
2538 	    F_CTL_END_SEQ;
2539 
2540 	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
2541 	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
2542 	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
2543 
2544 	sp->flags |= SRB_ELS_PKT;
2545 
2546 	/* map the type of ELS to a function */
2547 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
2548 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
2549 
2550 #if 0
2551 	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
2552 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2553 	    sizeof (fc_frame_hdr_t) / 4);
2554 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2555 	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
2556 #endif
2557 
2558 	sp->iocb = ha->els_cmd;
2559 	sp->req_cnt = 1;
2560 
2561 	switch (els.ls_code) {
2562 	case LA_ELS_RJT:
2563 	case LA_ELS_ACC:
2564 		EL(ha, "LA_ELS_RJT\n");
2565 		pkt->pkt_state = FC_PKT_SUCCESS;
2566 		rval = FC_SUCCESS;
2567 		break;
2568 	case LA_ELS_PLOGI:
2569 	case LA_ELS_PDISC:
2570 		rval = ql_els_plogi(ha, pkt);
2571 		break;
2572 	case LA_ELS_FLOGI:
2573 	case LA_ELS_FDISC:
2574 		rval = ql_els_flogi(ha, pkt);
2575 		break;
2576 	case LA_ELS_LOGO:
2577 		rval = ql_els_logo(ha, pkt);
2578 		break;
2579 	case LA_ELS_PRLI:
2580 		rval = ql_els_prli(ha, pkt);
2581 		break;
2582 	case LA_ELS_PRLO:
2583 		rval = ql_els_prlo(ha, pkt);
2584 		break;
2585 	case LA_ELS_ADISC:
2586 		rval = ql_els_adisc(ha, pkt);
2587 		break;
2588 	case LA_ELS_LINIT:
2589 		rval = ql_els_linit(ha, pkt);
2590 		break;
2591 	case LA_ELS_LPC:
2592 		rval = ql_els_lpc(ha, pkt);
2593 		break;
2594 	case LA_ELS_LSTS:
2595 		rval = ql_els_lsts(ha, pkt);
2596 		break;
2597 	case LA_ELS_SCR:
2598 		rval = ql_els_scr(ha, pkt);
2599 		break;
2600 	case LA_ELS_RSCN:
2601 		rval = ql_els_rscn(ha, pkt);
2602 		break;
2603 	case LA_ELS_FARP_REQ:
2604 		rval = ql_els_farp_req(ha, pkt);
2605 		break;
2606 	case LA_ELS_FARP_REPLY:
2607 		rval = ql_els_farp_reply(ha, pkt);
2608 		break;
2609 	case LA_ELS_RLS:
2610 		rval = ql_els_rls(ha, pkt);
2611 		break;
2612 	case LA_ELS_RNID:
2613 		rval = ql_els_rnid(ha, pkt);
2614 		break;
2615 	default:
2616 		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
2617 		    els.ls_code);
2618 		/* Build RJT. */
2619 		bzero(&rjt, sizeof (rjt));
2620 		rjt.ls_code.ls_code = LA_ELS_RJT;
2621 		rjt.reason = FC_REASON_CMD_UNSUPPORTED;
2622 
2623 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
2624 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
2625 
2626 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
2627 		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2628 		rval = FC_SUCCESS;
2629 		break;
2630 	}
2631 
2632 #if 0
2633 	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
2634 	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
2635 	    sizeof (fc_frame_hdr_t) / 4);
2636 #endif
2637 	/*
2638 	 * Return success if the srb was consumed by an iocb. The packet
2639 	 * completion callback will be invoked by the response handler.
2640 	 */
2641 	if (rval == QL_CONSUMED) {
2642 		rval = FC_SUCCESS;
2643 	} else if (rval == FC_SUCCESS &&
2644 	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
2645 		/* Do command callback only if no error */
2646 		ql_awaken_task_daemon(ha, sp, 0, 0);
2647 	}
2648 
2649 	if (rval != FC_SUCCESS) {
2650 		EL(ha, "failed, rval = %xh\n", rval);
2651 	} else {
2652 		/*EMPTY*/
2653 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2654 	}
2655 	return (rval);
2656 }
2657 
2658 /*
2659  * ql_get_cap
2660  *	Export FCA hardware and software capabilities.
2661  *
2662  * Input:
2663  *	fca_handle = handle setup by ql_bind_port().
2664  *	cap = pointer to the capabilities string.
2665  *	ptr = buffer pointer for return capability.
2666  *
2667  * Returns:
2668  *	FC_CAP_ERROR - no such capability
2669  *	FC_CAP_FOUND - the capability was returned and cannot be set
2670  *	FC_CAP_SETTABLE - the capability was returned and can be set
2671  *	FC_UNBOUND - the fca_handle specified is not bound.
2672  *
2673  * Context:
2674  *	Kernel context.
2675  */
2676 static int
2677 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2678 {
2679 	ql_adapter_state_t	*ha;
2680 	int			rval;
2681 	uint32_t		*rptr = (uint32_t *)ptr;
2682 
2683 	ha = ql_fca_handle_to_state(fca_handle);
2684 	if (ha == NULL) {
2685 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2686 		    (void *)fca_handle);
2687 		return (FC_UNBOUND);
2688 	}
2689 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2690 
2691 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2692 		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2693 		    ptr, 8);
2694 		rval = FC_CAP_FOUND;
2695 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2696 		bcopy((void *)&ha->loginparams, ptr,
2697 		    sizeof (la_els_logi_t));
2698 		rval = FC_CAP_FOUND;
2699 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2700 		*rptr = (uint32_t)QL_UB_LIMIT;
2701 		rval = FC_CAP_FOUND;
2702 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2703 
2704 		dev_info_t	*psydip = NULL;
2705 #ifdef __sparc
2706 		/*
2707 		 * Disable streaming for certain 2 chip adapters
2708 		 * below Psycho to handle Psycho byte hole issue.
2709 		 */
2710 		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2711 		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
2712 			for (psydip = ddi_get_parent(ha->dip); psydip;
2713 			    psydip = ddi_get_parent(psydip)) {
2714 				if (strcmp(ddi_driver_name(psydip),
2715 				    "pcipsy") == 0) {
2716 					break;
2717 				}
2718 			}
2719 		}
2720 #endif	/* __sparc */
2721 
2722 		if (psydip) {
2723 			*rptr = (uint32_t)FC_NO_STREAMING;
2724 			EL(ha, "No Streaming\n");
2725 		} else {
2726 			*rptr = (uint32_t)FC_ALLOW_STREAMING;
2727 			EL(ha, "Allow Streaming\n");
2728 		}
2729 		rval = FC_CAP_FOUND;
2730 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2731 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2732 			*rptr = (uint32_t)CHAR_TO_SHORT(
2733 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
2734 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
2735 		} else {
2736 			*rptr = (uint32_t)CHAR_TO_SHORT(
2737 			    ha->init_ctrl_blk.cb.max_frame_length[0],
2738 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
2739 		}
2740 		rval = FC_CAP_FOUND;
2741 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2742 		*rptr = FC_RESET_RETURN_ALL;
2743 		rval = FC_CAP_FOUND;
2744 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2745 		*rptr = FC_NO_DVMA_SPACE;
2746 		rval = FC_CAP_FOUND;
2747 	} else {
2748 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2749 		rval = FC_CAP_ERROR;
2750 	}
2751 
2752 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2753 
2754 	return (rval);
2755 }
2756 
2757 /*
2758  * ql_set_cap
2759  *	Allow the FC Transport to set FCA capabilities if possible.
2760  *
2761  * Input:
2762  *	fca_handle = handle setup by ql_bind_port().
2763  *	cap = pointer to the capabilities string.
2764  *	ptr = buffer pointer for capability.
2765  *
2766  * Returns:
2767  *	FC_CAP_ERROR - no such capability
2768  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2769  *	FC_CAP_SETTABLE - the capability was successfully set.
2770  *	FC_UNBOUND - the fca_handle specified is not bound.
2771  *
2772  * Context:
2773  *	Kernel context.
2774  */
2775 /* ARGSUSED */
2776 static int
2777 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2778 {
2779 	ql_adapter_state_t	*ha;
2780 	int			rval;
2781 
2782 	ha = ql_fca_handle_to_state(fca_handle);
2783 	if (ha == NULL) {
2784 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2785 		    (void *)fca_handle);
2786 		return (FC_UNBOUND);
2787 	}
2788 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2789 
2790 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2791 		rval = FC_CAP_FOUND;
2792 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2793 		rval = FC_CAP_FOUND;
2794 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2795 		rval = FC_CAP_FOUND;
2796 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2797 		rval = FC_CAP_FOUND;
2798 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2799 		rval = FC_CAP_FOUND;
2800 	} else {
2801 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2802 		rval = FC_CAP_ERROR;
2803 	}
2804 
2805 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2806 
2807 	return (rval);
2808 }
2809 
2810 /*
2811  * ql_getmap
2812  *	Request of Arbitrated Loop (AL-PA) map.
2813  *
2814  * Input:
2815  *	fca_handle = handle setup by ql_bind_port().
2816  *	mapbuf= buffer pointer for map.
2817  *
2818  * Returns:
2819  *	FC_OLDPORT - the specified port is not operating in loop mode.
2820  *	FC_OFFLINE - the specified port is not online.
2821  *	FC_NOMAP - there is no loop map available for this port.
2822  *	FC_UNBOUND - the fca_handle specified is not bound.
2823  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2824  *
2825  * Context:
2826  *	Kernel context.
2827  */
2828 static int
2829 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
2830 {
2831 	ql_adapter_state_t	*ha;
2832 	clock_t			timer = drv_usectohz(30000000);
2833 	int			rval = FC_SUCCESS;
2834 
2835 	ha = ql_fca_handle_to_state(fca_handle);
2836 	if (ha == NULL) {
2837 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2838 		    (void *)fca_handle);
2839 		return (FC_UNBOUND);
2840 	}
2841 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2842 
2843 	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
2844 	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
2845 
2846 	/* Wait for suspension to end. */
2847 	TASK_DAEMON_LOCK(ha);
2848 	while (ha->task_daemon_flags & QL_SUSPENDED) {
2849 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2850 
2851 		/* 30 seconds from now */
2852 		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
2853 		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
2854 			/*
2855 			 * The timeout time 'timer' was
2856 			 * reached without the condition
2857 			 * being signaled.
2858 			 */
2859 
2860 			/* Release task daemon lock. */
2861 			TASK_DAEMON_UNLOCK(ha);
2862 
2863 			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
2864 			return (FC_TRAN_BUSY);
2865 		}
2866 	}
2867 	/* Release task daemon lock. */
2868 	TASK_DAEMON_UNLOCK(ha);
2869 
2870 	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
2871 	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
2872 		/*
2873 		 * Now, since transport drivers cosider this as an
2874 		 * offline condition, let's wait for few seconds
2875 		 * for any loop transitions before we reset the.
2876 		 * chip and restart all over again.
2877 		 */
2878 		ql_delay(ha, 2000000);
2879 		EL(ha, "failed, FC_NOMAP\n");
2880 		rval = FC_NOMAP;
2881 	} else {
2882 		/*EMPTY*/
2883 		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
2884 		    "data %xh %xh %xh %xh\n", ha->instance,
2885 		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
2886 		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
2887 		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
2888 	}
2889 
2890 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2891 #if 0
2892 	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
2893 #endif
2894 	return (rval);
2895 }
2896 
2897 /*
2898  * ql_transport
2899  *	Issue an I/O request. Handles all regular requests.
2900  *
2901  * Input:
2902  *	fca_handle = handle setup by ql_bind_port().
2903  *	pkt = pointer to fc_packet.
2904  *
2905  * Returns:
2906  *	FC_SUCCESS - the packet was accepted for transport.
2907  *	FC_TRANSPORT_ERROR - a transport error occurred.
2908  *	FC_BADPACKET - the packet to be transported had not been
2909  *			initialized by this FCA.
2910  *	FC_UNBOUND - the fca_handle specified is not bound.
2911  *
2912  * Context:
2913  *	Kernel context.
2914  */
2915 static int
2916 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
2917 {
2918 	ql_adapter_state_t	*ha;
2919 	int			rval = FC_TRANSPORT_ERROR;
2920 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2921 
2922 	/* Verify proper command. */
2923 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2924 	if (ha == NULL) {
2925 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2926 		    rval, fca_handle);
2927 		return (rval);
2928 	}
2929 	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
2930 #if 0
2931 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2932 	    sizeof (fc_frame_hdr_t) / 4);
2933 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2934 	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
2935 #endif
2936 
2937 	/* Reset SRB flags. */
2938 	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
2939 	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
2940 	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
2941 	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
2942 	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
2943 	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
2944 	    SRB_MS_PKT | SRB_ELS_PKT);
2945 
2946 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2947 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
2948 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2949 	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
2950 	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
2951 
2952 	switch (pkt->pkt_cmd_fhdr.r_ctl) {
2953 	case R_CTL_COMMAND:
2954 		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2955 			sp->flags |= SRB_FCP_CMD_PKT;
2956 			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
2957 		}
2958 		break;
2959 
2960 	default:
2961 		/* Setup response header and buffer. */
2962 		if (pkt->pkt_rsplen) {
2963 			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2964 		}
2965 
2966 		switch (pkt->pkt_cmd_fhdr.r_ctl) {
2967 		case R_CTL_UNSOL_DATA:
2968 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
2969 				sp->flags |= SRB_IP_PKT;
2970 				rval = ql_fcp_ip_cmd(ha, pkt, sp);
2971 			}
2972 			break;
2973 
2974 		case R_CTL_UNSOL_CONTROL:
2975 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
2976 				sp->flags |= SRB_GENERIC_SERVICES_PKT;
2977 				rval = ql_fc_services(ha, pkt);
2978 			}
2979 			break;
2980 
2981 		case R_CTL_SOLICITED_DATA:
2982 		case R_CTL_STATUS:
2983 		default:
2984 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
2985 			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2986 			rval = FC_TRANSPORT_ERROR;
2987 			EL(ha, "unknown, r_ctl=%xh\n",
2988 			    pkt->pkt_cmd_fhdr.r_ctl);
2989 			break;
2990 		}
2991 	}
2992 
2993 	if (rval != FC_SUCCESS) {
2994 		EL(ha, "failed, rval = %xh\n", rval);
2995 	} else {
2996 		/*EMPTY*/
2997 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2998 	}
2999 
3000 	return (rval);
3001 }
3002 
3003 /*
3004  * ql_ub_alloc
3005  *	Allocate buffers for unsolicited exchanges.
3006  *
3007  * Input:
3008  *	fca_handle = handle setup by ql_bind_port().
3009  *	tokens = token array for each buffer.
3010  *	size = size of each buffer.
3011  *	count = pointer to number of buffers.
3012  *	type = the FC-4 type the buffers are reserved for.
3013  *		1 = Extended Link Services, 5 = LLC/SNAP
3014  *
3015  * Returns:
3016  *	FC_FAILURE - buffers could not be allocated.
3017  *	FC_TOOMANY - the FCA could not allocate the requested
3018  *			number of buffers.
3019  *	FC_SUCCESS - unsolicited buffers were allocated.
3020  *	FC_UNBOUND - the fca_handle specified is not bound.
3021  *
3022  * Context:
3023  *	Kernel context.
3024  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse allocation while the adapter is powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check the count; the total of already-allocated plus requested
	 * buffers must stay within the fixed ub_array capacity.
	 */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 * (loop is skipped entirely when rval is already FC_TOOMANY).
	 *
	 * NOTE(review): kmem_zalloc with KM_SLEEP does not return NULL on
	 * Solaris, so the NULL checks below appear to be defensive dead
	 * code; also these sleeping allocations (and ql_get_dma_mem) run
	 * while ADAPTER_STATE_LOCK is held — confirm that is intended.
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/* Allocate all memory needed. */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				if (type == FC_TYPE_IS8802_SNAP) {
					/*
					 * IP buffers are DMA memory so the
					 * ISP can deposit frames directly;
					 * endianness follows the platform.
					 */
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* ELS/services buffers: plain kmem. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/*
			 * Find next available slot; the QL_UB_LIMIT check
			 * above guarantees a free slot exists.
			 */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token the transport will use later. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * IP buffer: if any IP-over-FC buffers were registered, (re)program
	 * the IP init control block and hand receive buffers to the ISP.
	 */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/*
				 * Fill MTU/buffer size (and raise the
				 * completion count if too small) into the
				 * control block layout matching the chip
				 * family: cb24 for 24xx, cb otherwise.
				 */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3246 
3247 /*
3248  * ql_ub_free
3249  *	Free unsolicited buffers.
3250  *
3251  * Input:
3252  *	fca_handle = handle setup by ql_bind_port().
3253  *	count = number of buffers.
3254  *	tokens = token array for each buffer.
3255  *
3256  * Returns:
3257  *	FC_SUCCESS - the requested buffers have been freed.
3258  *	FC_UNBOUND - the fca_handle specified is not bound.
3259  *	FC_UB_BADTOKEN - an invalid token was encountered.
3260  *			 No buffers have been released.
3261  *
3262  * Context:
3263  *	Kernel context.
3264  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens. Validation and freeing are interleaved:
	 * a bad token aborts the loop, but buffers freed before it stay freed.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll until the buffer is back in the FCA and not busy
		 * with a callback. Both locks are dropped during the 100ms
		 * sleep so the completion path can make progress, then
		 * reacquired in the same order (state lock, then UB lock).
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		/* Releases the buffer memory and decrements ub_allocated. */
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3346 
3347 /*
3348  * ql_ub_release
3349  *	Release unsolicited buffers from FC Transport
3350  *	to FCA for future use.
3351  *
3352  * Input:
3353  *	fca_handle = handle setup by ql_bind_port().
3354  *	count = number of buffers.
3355  *	tokens = token array for each buffer.
3356  *
3357  * Returns:
3358  *	FC_SUCCESS - the requested buffers have been released.
3359  *	FC_UNBOUND - the fca_handle specified is not bound.
3360  *	FC_UB_BADTOKEN - an invalid token was encountered.
3361  *		No buffers have been released.
3362  *
3363  * Context:
3364  *	Kernel context.
3365  */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * First pass: check all returned tokens. Nothing is modified here,
	 * so a bad token rejects the whole request atomically (as the
	 * contract promises: "No buffers have been released").
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			/* Already owned by the FCA: double release. */
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers (second pass). */
	if (rval == FC_SUCCESS) {
		/* Check all returned tokens. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			/* Mark the buffer as back under FCA ownership. */
			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		/* Replenish the ISP receive buffer queue for IP frames. */
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3456 
3457 /*
3458  * ql_abort
3459  *	Abort a packet.
3460  *
3461  * Input:
3462  *	fca_handle = handle setup by ql_bind_port().
3463  *	pkt = pointer to fc_packet.
3464  *	flags = KM_SLEEP flag.
3465  *
3466  * Returns:
3467  *	FC_SUCCESS - the packet has successfully aborted.
3468  *	FC_ABORTED - the packet has successfully aborted.
3469  *	FC_ABORTING - the packet is being aborted.
3470  *	FC_ABORT_FAILED - the packet could not be aborted.
3471  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3472  *		to abort the packet.
3473  *	FC_BADEXCHANGE - no packet found.
3474  *	FC_UNBOUND - the fca_handle specified is not bound.
3475  *
3476  * Context:
3477  *	Kernel context.
3478  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	/* All command queues live on the physical adapter. */
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target queue, or link is down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/*
	 * Acquire target queue lock, then the request ring lock; this
	 * order must match the rest of the driver. Every branch below is
	 * responsible for releasing both.
	 */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started (not yet handed to the ISP). */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue: complete it as aborted. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			/* Not found anywhere: no such exchange. */
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Command already finished; too late to abort it. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command is with the firmware. Invalidate its request
		 * ring entry (if still present) so the ISP will not
		 * process it, then issue an explicit abort mailbox cmd.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3624 
3625 /*
3626  * ql_reset
3627  *	Reset link or hardware.
3628  *
3629  * Input:
3630  *	fca_handle = handle setup by ql_bind_port().
3631  *	cmd = reset type command.
3632  *
3633  * Returns:
3634  *	FC_SUCCESS - reset has successfully finished.
3635  *	FC_UNBOUND - the fca_handle specified is not bound.
3636  *	FC_FAILURE - reset failed.
3637  *
3638  * Context:
3639  *	Kernel context.
3640  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/*
		 * dump firmware core if specified; only the physical port
		 * (vp_index 0) owns the firmware, so vports do nothing.
		 */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only when the link is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* if dump firmware core if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			/* vports reset the loop; the physical port dumps. */
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/*
		 * Free up all unsolicited buffers: ask the transport (via
		 * the state-change callback) to release them before the
		 * hardware is reset.
		 */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits; state bits are rebuilt below. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon deliver the new state to the port. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3753 
3754 /*
3755  * ql_port_manage
3756  *	Perform port management or diagnostics.
3757  *
3758  * Input:
3759  *	fca_handle = handle setup by ql_bind_port().
3760  *	cmd = pointer to command structure.
3761  *
3762  * Returns:
3763  *	FC_SUCCESS - the request completed successfully.
3764  *	FC_FAILURE - the request did not complete successfully.
3765  *	FC_UNBOUND - the fca_handle specified is not bound.
3766  *
3767  * Context:
3768  *	Kernel context.
3769  */
3770 static int
3771 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3772 {
3773 	clock_t			timer;
3774 	uint16_t		index;
3775 	uint32_t		*bp;
3776 	port_id_t		d_id;
3777 	ql_link_t		*link;
3778 	ql_adapter_state_t	*ha, *pha;
3779 	ql_tgt_t		*tq;
3780 	dma_mem_t		buffer_xmt, buffer_rcv;
3781 	size_t			length;
3782 	uint32_t		cnt;
3783 	char			buf[80];
3784 	lbp_t			*lb;
3785 	ql_mbx_data_t		mr;
3786 	app_mbx_cmd_t		*mcp;
3787 	int			i0;
3788 	uint8_t			*bptr;
3789 	int			rval2, rval = FC_SUCCESS;
3790 	uint32_t		opcode;
3791 	uint32_t		set_flags = 0;
3792 
3793 	ha = ql_fca_handle_to_state(fca_handle);
3794 	if (ha == NULL) {
3795 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3796 		    (void *)fca_handle);
3797 		return (FC_UNBOUND);
3798 	}
3799 	pha = ha->pha;
3800 
3801 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3802 	    cmd->pm_cmd_code);
3803 
3804 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3805 
3806 	/*
3807 	 * Wait for all outstanding commands to complete
3808 	 */
3809 	index = (uint16_t)ql_wait_outstanding(ha);
3810 
3811 	if (index != MAX_OUTSTANDING_COMMANDS) {
3812 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3813 		ql_restart_queues(ha);
3814 		EL(ha, "failed, FC_TRAN_BUSY\n");
3815 		return (FC_TRAN_BUSY);
3816 	}
3817 
3818 	switch (cmd->pm_cmd_code) {
3819 	case FC_PORT_BYPASS:
3820 		d_id.b24 = *cmd->pm_cmd_buf;
3821 		tq = ql_d_id_to_queue(ha, d_id);
3822 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3823 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3824 			rval = FC_FAILURE;
3825 		}
3826 		break;
3827 	case FC_PORT_UNBYPASS:
3828 		d_id.b24 = *cmd->pm_cmd_buf;
3829 		tq = ql_d_id_to_queue(ha, d_id);
3830 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3831 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3832 			rval = FC_FAILURE;
3833 		}
3834 		break;
3835 	case FC_PORT_GET_FW_REV:
3836 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3837 		    pha->fw_minor_version, pha->fw_subminor_version);
3838 		length = strlen(buf) + 1;
3839 		if (cmd->pm_data_len < length) {
3840 			cmd->pm_data_len = length;
3841 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3842 			rval = FC_FAILURE;
3843 		} else {
3844 			(void) strcpy(cmd->pm_data_buf, buf);
3845 		}
3846 		break;
3847 
3848 	case FC_PORT_GET_FCODE_REV: {
3849 		caddr_t		fcode_ver_buf = NULL;
3850 
3851 		i0 = 0;
3852 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3853 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3854 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3855 		    (caddr_t)&fcode_ver_buf, &i0);
3856 		length = (uint_t)i0;
3857 
3858 		if (rval2 != DDI_PROP_SUCCESS) {
3859 			EL(ha, "failed, getting version = %xh\n", rval2);
3860 			length = 20;
3861 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3862 			if (fcode_ver_buf != NULL) {
3863 				(void) sprintf(fcode_ver_buf,
3864 				    "NO FCODE FOUND");
3865 			}
3866 		}
3867 
3868 		if (cmd->pm_data_len < length) {
3869 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3870 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3871 			cmd->pm_data_len = length;
3872 			rval = FC_FAILURE;
3873 		} else if (fcode_ver_buf != NULL) {
3874 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3875 			    length);
3876 		}
3877 
3878 		if (fcode_ver_buf != NULL) {
3879 			kmem_free(fcode_ver_buf, length);
3880 		}
3881 		break;
3882 	}
3883 
3884 	case FC_PORT_GET_DUMP:
3885 		QL_DUMP_LOCK(pha);
3886 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3887 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3888 			    "length=%lxh\n", cmd->pm_data_len);
3889 			cmd->pm_data_len = pha->risc_dump_size;
3890 			rval = FC_FAILURE;
3891 		} else if (pha->ql_dump_state & QL_DUMPING) {
3892 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3893 			rval = FC_TRAN_BUSY;
3894 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3895 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3896 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3897 		} else {
3898 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3899 			rval = FC_FAILURE;
3900 		}
3901 		QL_DUMP_UNLOCK(pha);
3902 		break;
3903 	case FC_PORT_FORCE_DUMP:
3904 		PORTMANAGE_LOCK(ha);
3905 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3906 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3907 			rval = FC_FAILURE;
3908 		}
3909 		PORTMANAGE_UNLOCK(ha);
3910 		break;
3911 	case FC_PORT_DOWNLOAD_FW:
3912 		PORTMANAGE_LOCK(ha);
3913 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3914 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3915 			    (uint32_t)cmd->pm_data_len,
3916 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
3917 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3918 				rval = FC_FAILURE;
3919 			}
3920 			ql_reset_chip(ha);
3921 			set_flags |= ISP_ABORT_NEEDED;
3922 		} else {
3923 			/* Save copy of the firmware. */
3924 			if (pha->risc_code != NULL) {
3925 				kmem_free(pha->risc_code, pha->risc_code_size);
3926 				pha->risc_code = NULL;
3927 				pha->risc_code_size = 0;
3928 			}
3929 
3930 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3931 			    KM_SLEEP);
3932 			if (pha->risc_code != NULL) {
3933 				pha->risc_code_size =
3934 				    (uint32_t)cmd->pm_data_len;
3935 				bcopy(cmd->pm_data_buf, pha->risc_code,
3936 				    cmd->pm_data_len);
3937 
3938 				/* Do abort to force reload. */
3939 				ql_reset_chip(ha);
3940 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3941 					kmem_free(pha->risc_code,
3942 					    pha->risc_code_size);
3943 					pha->risc_code = NULL;
3944 					pha->risc_code_size = 0;
3945 					ql_reset_chip(ha);
3946 					(void) ql_abort_isp(ha);
3947 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3948 					    " FC_FAILURE\n");
3949 					rval = FC_FAILURE;
3950 				}
3951 			}
3952 		}
3953 		PORTMANAGE_UNLOCK(ha);
3954 		break;
3955 	case FC_PORT_GET_DUMP_SIZE:
3956 		bp = (uint32_t *)cmd->pm_data_buf;
3957 		*bp = pha->risc_dump_size;
3958 		break;
3959 	case FC_PORT_DIAG:
3960 		/*
3961 		 * Prevents concurrent diags
3962 		 */
3963 		PORTMANAGE_LOCK(ha);
3964 
3965 		/* Wait for suspension to end. */
3966 		for (timer = 0; timer < 3000 &&
3967 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3968 			ql_delay(ha, 10000);
3969 		}
3970 
3971 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3972 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3973 			rval = FC_TRAN_BUSY;
3974 			PORTMANAGE_UNLOCK(ha);
3975 			break;
3976 		}
3977 
3978 		switch (cmd->pm_cmd_flags) {
3979 		case QL_DIAG_EXEFMW:
3980 			if (ql_start_firmware(ha) != QL_SUCCESS) {
3981 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
3982 				rval = FC_FAILURE;
3983 			}
3984 			break;
3985 		case QL_DIAG_CHKCMDQUE:
3986 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
3987 			    i0++) {
3988 				cnt += (pha->outstanding_cmds[i0] != NULL);
3989 			}
3990 			if (cnt != 0) {
3991 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
3992 				    "FC_FAILURE\n");
3993 				rval = FC_FAILURE;
3994 			}
3995 			break;
3996 		case QL_DIAG_FMWCHKSUM:
3997 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
3998 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
3999 				    "FC_FAILURE\n");
4000 				rval = FC_FAILURE;
4001 			}
4002 			break;
4003 		case QL_DIAG_SLFTST:
4004 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4005 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4006 				rval = FC_FAILURE;
4007 			}
4008 			ql_reset_chip(ha);
4009 			set_flags |= ISP_ABORT_NEEDED;
4010 			break;
4011 		case QL_DIAG_REVLVL:
4012 			if (cmd->pm_stat_len <
4013 			    sizeof (ql_adapter_revlvl_t)) {
4014 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4015 				    "slen=%lxh, rlvllen=%lxh\n",
4016 				    cmd->pm_stat_len,
4017 				    sizeof (ql_adapter_revlvl_t));
4018 				rval = FC_NOMEM;
4019 			} else {
4020 				bcopy((void *)&(pha->adapter_stats->revlvl),
4021 				    cmd->pm_stat_buf,
4022 				    (size_t)cmd->pm_stat_len);
4023 				cmd->pm_stat_len =
4024 				    sizeof (ql_adapter_revlvl_t);
4025 			}
4026 			break;
4027 		case QL_DIAG_LPBMBX:
4028 
4029 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4030 				EL(ha, "failed, QL_DIAG_LPBMBX "
4031 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4032 				    "reqd=%lxh\n", cmd->pm_data_len,
4033 				    sizeof (struct app_mbx_cmd));
4034 				rval = FC_INVALID_REQUEST;
4035 				break;
4036 			}
4037 			/*
4038 			 * Don't do the wrap test on a 2200 when the
4039 			 * firmware is running.
4040 			 */
4041 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4042 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4043 				mr.mb[1] = mcp->mb[1];
4044 				mr.mb[2] = mcp->mb[2];
4045 				mr.mb[3] = mcp->mb[3];
4046 				mr.mb[4] = mcp->mb[4];
4047 				mr.mb[5] = mcp->mb[5];
4048 				mr.mb[6] = mcp->mb[6];
4049 				mr.mb[7] = mcp->mb[7];
4050 
4051 				bcopy(&mr.mb[0], &mr.mb[10],
4052 				    sizeof (uint16_t) * 8);
4053 
4054 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4055 					EL(ha, "failed, QL_DIAG_LPBMBX "
4056 					    "FC_FAILURE\n");
4057 					rval = FC_FAILURE;
4058 					break;
4059 				} else {
4060 					for (i0 = 1; i0 < 8; i0++) {
4061 						if (mr.mb[i0] !=
4062 						    mr.mb[i0 + 10]) {
4063 							EL(ha, "failed, "
4064 							    "QL_DIAG_LPBMBX "
4065 							    "FC_FAILURE-2\n");
4066 							rval = FC_FAILURE;
4067 							break;
4068 						}
4069 					}
4070 				}
4071 
4072 				if (rval == FC_FAILURE) {
4073 					(void) ql_flash_errlog(ha,
4074 					    FLASH_ERRLOG_ISP_ERR, 0,
4075 					    RD16_IO_REG(ha, hccr),
4076 					    RD16_IO_REG(ha, istatus));
4077 					set_flags |= ISP_ABORT_NEEDED;
4078 				}
4079 			}
4080 			break;
4081 		case QL_DIAG_LPBDTA:
4082 			/*
4083 			 * For loopback data, we receive the
4084 			 * data back in pm_stat_buf. This provides
4085 			 * the user an opportunity to compare the
4086 			 * transmitted and received data.
4087 			 *
4088 			 * NB: lb->options are:
4089 			 *	0 --> Ten bit loopback
4090 			 *	1 --> One bit loopback
4091 			 *	2 --> External loopback
4092 			 */
4093 			if (cmd->pm_data_len > 65536) {
4094 				rval = FC_TOOMANY;
4095 				EL(ha, "failed, QL_DIAG_LPBDTA "
4096 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4097 				break;
4098 			}
4099 			if (ql_get_dma_mem(ha, &buffer_xmt,
4100 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4101 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4102 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4103 				rval = FC_NOMEM;
4104 				break;
4105 			}
4106 			if (ql_get_dma_mem(ha, &buffer_rcv,
4107 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4108 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4109 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4110 				rval = FC_NOMEM;
4111 				break;
4112 			}
4113 			ddi_rep_put8(buffer_xmt.acc_handle,
4114 			    (uint8_t *)cmd->pm_data_buf,
4115 			    (uint8_t *)buffer_xmt.bp,
4116 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4117 
4118 			/* 22xx's adapter must be in loop mode for test. */
4119 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4120 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4121 				if (ha->flags & POINT_TO_POINT ||
4122 				    (ha->task_daemon_flags & LOOP_DOWN &&
4123 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4124 					cnt = *bptr;
4125 					*bptr = (uint8_t)
4126 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4127 					(void) ql_abort_isp(ha);
4128 					*bptr = (uint8_t)cnt;
4129 				}
4130 			}
4131 
4132 			/* Shutdown IP. */
4133 			if (pha->flags & IP_INITIALIZED) {
4134 				(void) ql_shutdown_ip(pha);
4135 			}
4136 
4137 			lb = (lbp_t *)cmd->pm_cmd_buf;
4138 			lb->transfer_count =
4139 			    (uint32_t)cmd->pm_data_len;
4140 			lb->transfer_segment_count = 0;
4141 			lb->receive_segment_count = 0;
4142 			lb->transfer_data_address =
4143 			    buffer_xmt.cookie.dmac_address;
4144 			lb->receive_data_address =
4145 			    buffer_rcv.cookie.dmac_address;
4146 
4147 			if ((lb->options & 7) == 2 &&
4148 			    pha->task_daemon_flags &
4149 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4150 				/* Loop must be up for external */
4151 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4152 				rval = FC_TRAN_BUSY;
4153 			} else if (ql_loop_back(ha, 0, lb,
4154 			    buffer_xmt.cookie.dmac_notused,
4155 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4156 				bzero((void *)cmd->pm_stat_buf,
4157 				    cmd->pm_stat_len);
4158 				ddi_rep_get8(buffer_rcv.acc_handle,
4159 				    (uint8_t *)cmd->pm_stat_buf,
4160 				    (uint8_t *)buffer_rcv.bp,
4161 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4162 			} else {
4163 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4164 				rval = FC_FAILURE;
4165 			}
4166 
4167 			ql_free_phys(ha, &buffer_xmt);
4168 			ql_free_phys(ha, &buffer_rcv);
4169 
4170 			/* Needed to recover the f/w */
4171 			set_flags |= ISP_ABORT_NEEDED;
4172 
4173 			/* Restart IP if it was shutdown. */
4174 			if (pha->flags & IP_ENABLED &&
4175 			    !(pha->flags & IP_INITIALIZED)) {
4176 				(void) ql_initialize_ip(pha);
4177 				ql_isp_rcvbuf(pha);
4178 			}
4179 
4180 			break;
4181 		case QL_DIAG_ECHO: {
4182 			/*
4183 			 * issue an echo command with a user supplied
4184 			 * data pattern and destination address
4185 			 */
4186 			echo_t		echo;		/* temp echo struct */
4187 
4188 			/* Setup echo cmd & adjust for platform */
4189 			opcode = QL_ECHO_CMD;
4190 			BIG_ENDIAN_32(&opcode);
4191 
4192 			/*
			 * Due to limitations in the QLogic
			 * firmware, the echo data field is
			 * limited to 220 bytes.
4196 			 */
4197 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4198 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4199 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4200 				    "cmdl1=%lxh, statl2=%lxh\n",
4201 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4202 				rval = FC_TOOMANY;
4203 				break;
4204 			}
4205 
4206 			/*
4207 			 * the input data buffer has the user
4208 			 * supplied data pattern.  The "echoed"
4209 			 * data will be DMAed into the output
4210 			 * data buffer.  Therefore the length
4211 			 * of the output buffer must be equal
4212 			 * to or greater then the input buffer
4213 			 * length
4214 			 */
4215 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4216 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4217 				    " cmdl1=%lxh, statl2=%lxh\n",
4218 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4219 				rval = FC_TOOMANY;
4220 				break;
4221 			}
4222 			/* add four bytes for the opcode */
4223 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4224 
4225 			/*
4226 			 * are we 32 or 64 bit addressed???
4227 			 * We need to get the appropriate
4228 			 * DMA and set the command options;
4229 			 * 64 bit (bit 6) or 32 bit
4230 			 * (no bit 6) addressing.
4231 			 * while we are at it lets ask for
4232 			 * real echo (bit 15)
4233 			 */
4234 			echo.options = BIT_15;
4235 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4236 			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
4237 				echo.options = (uint16_t)
4238 				    (echo.options | BIT_6);
4239 			}
4240 
4241 			/*
4242 			 * Set up the DMA mappings for the
4243 			 * output and input data buffers.
4244 			 * First the output buffer
4245 			 */
4246 			if (ql_get_dma_mem(ha, &buffer_xmt,
4247 			    (uint32_t)(cmd->pm_data_len + 4),
4248 			    LITTLE_ENDIAN_DMA,
4249 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4250 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4251 				rval = FC_NOMEM;
4252 				break;
4253 			}
4254 			echo.transfer_data_address = buffer_xmt.cookie;
4255 
4256 			/* Next the input buffer */
4257 			if (ql_get_dma_mem(ha, &buffer_rcv,
4258 			    (uint32_t)(cmd->pm_data_len + 4),
4259 			    LITTLE_ENDIAN_DMA,
4260 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4261 				/*
4262 				 * since we could not allocate
4263 				 * DMA space for the input
4264 				 * buffer we need to clean up
4265 				 * by freeing the DMA space
4266 				 * we allocated for the output
4267 				 * buffer
4268 				 */
4269 				ql_free_phys(ha, &buffer_xmt);
4270 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4271 				rval = FC_NOMEM;
4272 				break;
4273 			}
4274 			echo.receive_data_address = buffer_rcv.cookie;
4275 
4276 			/*
4277 			 * copy the 4 byte ECHO op code to the
4278 			 * allocated DMA space
4279 			 */
4280 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4281 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4282 
4283 			/*
4284 			 * copy the user supplied data to the
4285 			 * allocated DMA space
4286 			 */
4287 			ddi_rep_put8(buffer_xmt.acc_handle,
4288 			    (uint8_t *)cmd->pm_cmd_buf,
4289 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4290 			    DDI_DEV_AUTOINCR);
4291 
4292 			/* Shutdown IP. */
4293 			if (pha->flags & IP_INITIALIZED) {
4294 				(void) ql_shutdown_ip(pha);
4295 			}
4296 
4297 			/* send the echo */
4298 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4299 				ddi_rep_put8(buffer_rcv.acc_handle,
4300 				    (uint8_t *)buffer_rcv.bp + 4,
4301 				    (uint8_t *)cmd->pm_stat_buf,
4302 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4303 			} else {
4304 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4305 				rval = FC_FAILURE;
4306 			}
4307 
4308 			/* Restart IP if it was shutdown. */
4309 			if (pha->flags & IP_ENABLED &&
4310 			    !(pha->flags & IP_INITIALIZED)) {
4311 				(void) ql_initialize_ip(pha);
4312 				ql_isp_rcvbuf(pha);
4313 			}
4314 			/* free up our DMA buffers */
4315 			ql_free_phys(ha, &buffer_xmt);
4316 			ql_free_phys(ha, &buffer_rcv);
4317 			break;
4318 		}
4319 		default:
4320 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4321 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4322 			rval = FC_INVALID_REQUEST;
4323 			break;
4324 		}
4325 		PORTMANAGE_UNLOCK(ha);
4326 		break;
4327 	case FC_PORT_LINK_STATE:
4328 		/* Check for name equal to null. */
4329 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4330 		    index++) {
4331 			if (cmd->pm_cmd_buf[index] != 0) {
4332 				break;
4333 			}
4334 		}
4335 
4336 		/* If name not null. */
4337 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4338 			/* Locate device queue. */
4339 			tq = NULL;
4340 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4341 			    tq == NULL; index++) {
4342 				for (link = ha->dev[index].first; link != NULL;
4343 				    link = link->next) {
4344 					tq = link->base_address;
4345 
4346 					if (bcmp((void *)&tq->port_name[0],
4347 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4348 						break;
4349 					} else {
4350 						tq = NULL;
4351 					}
4352 				}
4353 			}
4354 
4355 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4356 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4357 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4358 			} else {
4359 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4360 				    FC_STATE_OFFLINE;
4361 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4362 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4363 			}
4364 		} else {
4365 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4366 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4367 		}
4368 		break;
4369 	case FC_PORT_INITIALIZE:
4370 		if (cmd->pm_cmd_len >= 8) {
4371 			tq = NULL;
4372 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4373 			    tq == NULL; index++) {
4374 				for (link = ha->dev[index].first; link != NULL;
4375 				    link = link->next) {
4376 					tq = link->base_address;
4377 
4378 					if (bcmp((void *)&tq->port_name[0],
4379 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4380 						if (!VALID_DEVICE_ID(ha,
4381 						    tq->loop_id)) {
4382 							tq = NULL;
4383 						}
4384 						break;
4385 					} else {
4386 						tq = NULL;
4387 					}
4388 				}
4389 			}
4390 
4391 			if (tq == NULL || ql_target_reset(ha, tq,
4392 			    ha->loop_reset_delay) != QL_SUCCESS) {
4393 				EL(ha, "failed, FC_PORT_INITIALIZE "
4394 				    "FC_FAILURE\n");
4395 				rval = FC_FAILURE;
4396 			}
4397 		} else {
4398 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4399 			    "clen=%lxh\n", cmd->pm_cmd_len);
4400 
4401 			rval = FC_FAILURE;
4402 		}
4403 		break;
4404 	case FC_PORT_RLS:
4405 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4406 			EL(ha, "failed, buffer size passed: %lxh, "
4407 			    "req: %lxh\n", cmd->pm_data_len,
4408 			    (sizeof (fc_rls_acc_t)));
4409 			rval = FC_FAILURE;
4410 		} else if (LOOP_NOT_READY(pha)) {
4411 			EL(ha, "loop NOT ready\n");
4412 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4413 		} else if (ql_get_link_status(ha, ha->loop_id,
4414 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4415 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4416 			rval = FC_FAILURE;
4417 #ifdef _BIG_ENDIAN
4418 		} else {
4419 			fc_rls_acc_t		*rls;
4420 
4421 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4422 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4423 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4424 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4425 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4426 #endif /* _BIG_ENDIAN */
4427 		}
4428 		break;
4429 	case FC_PORT_GET_NODE_ID:
4430 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4431 		    cmd->pm_data_buf) != QL_SUCCESS) {
4432 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4433 			rval = FC_FAILURE;
4434 		}
4435 		break;
4436 	case FC_PORT_SET_NODE_ID:
4437 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4438 		    cmd->pm_data_buf) != QL_SUCCESS) {
4439 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4440 			rval = FC_FAILURE;
4441 		}
4442 		break;
4443 	case FC_PORT_DOWNLOAD_FCODE:
4444 		PORTMANAGE_LOCK(ha);
4445 		if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
4446 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4447 			    (uint32_t)cmd->pm_data_len);
4448 		} else {
4449 			if (cmd->pm_data_buf[0] == 4 &&
4450 			    cmd->pm_data_buf[8] == 0 &&
4451 			    cmd->pm_data_buf[9] == 0x10 &&
4452 			    cmd->pm_data_buf[10] == 0 &&
4453 			    cmd->pm_data_buf[11] == 0) {
4454 				rval = ql_24xx_load_flash(ha,
4455 				    (uint8_t *)cmd->pm_data_buf,
4456 				    (uint32_t)cmd->pm_data_len,
4457 				    ha->flash_fw_addr << 2);
4458 			} else {
4459 				rval = ql_24xx_load_flash(ha,
4460 				    (uint8_t *)cmd->pm_data_buf,
4461 				    (uint32_t)cmd->pm_data_len, 0);
4462 			}
4463 		}
4464 
4465 		if (rval != QL_SUCCESS) {
4466 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4467 			rval = FC_FAILURE;
4468 		} else {
4469 			rval = FC_SUCCESS;
4470 		}
4471 		ql_reset_chip(ha);
4472 		set_flags |= ISP_ABORT_NEEDED;
4473 		PORTMANAGE_UNLOCK(ha);
4474 		break;
4475 	default:
4476 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4477 		rval = FC_BADCMD;
4478 		break;
4479 	}
4480 
4481 	/* Wait for suspension to end. */
4482 	ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4483 	timer = 0;
4484 
4485 	while (timer++ < 3000 &&
4486 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4487 		ql_delay(ha, 10000);
4488 	}
4489 
4490 	ql_restart_queues(ha);
4491 
4492 	if (rval != FC_SUCCESS) {
4493 		EL(ha, "failed, rval = %xh\n", rval);
4494 	} else {
4495 		/*EMPTY*/
4496 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4497 	}
4498 
4499 	return (rval);
4500 }
4501 
4502 static opaque_t
4503 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4504 {
4505 	port_id_t		id;
4506 	ql_adapter_state_t	*ha;
4507 	ql_tgt_t		*tq;
4508 
4509 	id.r.rsvd_1 = 0;
4510 	id.b24 = d_id.port_id;
4511 
4512 	ha = ql_fca_handle_to_state(fca_handle);
4513 	if (ha == NULL) {
4514 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4515 		    (void *)fca_handle);
4516 		return (NULL);
4517 	}
4518 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4519 
4520 	tq = ql_d_id_to_queue(ha, id);
4521 
4522 	if (tq == NULL) {
4523 		EL(ha, "failed, tq=NULL\n");
4524 	} else {
4525 		/*EMPTY*/
4526 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4527 	}
4528 	return (tq);
4529 }
4530 
4531 /* ************************************************************************ */
4532 /*			FCA Driver Local Support Functions.		    */
4533 /* ************************************************************************ */
4534 
4535 /*
4536  * ql_cmd_setup
4537  *	Verifies proper command.
4538  *
4539  * Input:
4540  *	fca_handle = handle setup by ql_bind_port().
4541  *	pkt = pointer to fc_packet.
4542  *	rval = pointer for return value.
4543  *
4544  * Returns:
4545  *	Adapter state pointer, NULL = failure.
4546  *
4547  * Context:
4548  *	Kernel context.
4549  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* No residuals have been accounted for yet. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * While panicking or crash dumping, skip all sanity checks
	 * below and accept the command as-is.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Reject commands while the adapter is not online. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP SCSI commands: resolve (and cache in the packet) the
	 * target device queue, and refuse the command while the target
	 * has a pending state change.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			/* Cached queue is missing or stale; look up by D_ID. */
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				/* RSCN/re-auth pending; caller should retry. */
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 * Each of the command, response and data mappings (when present)
	 * is verified via its DMA handle and then its access handle.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * The bitwise '&' of the three tests is deliberate: it evaluates
	 * all of them without short-circuit branches.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this driver's brand mark. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4678 
4679 /*
4680  * ql_els_plogi
 *	Issue an extended link service port login request.
4682  *
4683  * Input:
4684  *	ha = adapter state pointer.
4685  *	pkt = pointer to fc_packet.
4686  *
4687  * Returns:
4688  *	FC_SUCCESS - the packet was accepted for transport.
4689  *	FC_TRANSPORT_ERROR - a transport error occurred.
4690  *
4691  * Context:
4692  *	Kernel context.
4693  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;	/* ACC/RJT payload built below */
	class_svc_param_t	*class3_param;
	int			ret;	/* QL_* status from firmware calls */
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* No logins while the link state is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology he sends a PLOGI after determining
		 * he has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	if (ret == QL_CONSUMED) {
		/* PLOGI was queued as an ELS passthru IOCB; done here. */
		return (ret);
	}

	/* Log the remote port into the firmware. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* Loop ID was in use; retry the login once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database entry for this target. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		/* Receive buffer size comes from the init control block. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Target is busy; convert the ACC into a reject. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				/* Mark that iiDMA setup is needed. */
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware status onto an FC packet state. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			/* Hold further commands until re-authenticated. */
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4868 
4869 /*
4870  * ql_p2p_plogi
4871  *	Start an extended link service port login request using
4872  *	an ELS Passthru iocb.
4873  *
4874  * Input:
4875  *	ha = adapter state pointer.
4876  *	pkt = pointer to fc_packet.
4877  *
4878  * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
4880  *
4881  * Context:
4882  *	Kernel context.
4883  */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;
	ql_tgt_t	*tq = &tmp;	/* scratch entry used only for lookups */
	int		rval;

	/* Clear the D_ID in the scratch entry for the database scan. */
	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				/* A pending PLOGI entry is the one to use. */
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * The saved handle points at an
					 * unusable entry; advance it.  The
					 * temporary avoids a lint error.
					 * NOTE(review): the handle is advanced
					 * by 2, not 1 -- confirm this skip
					 * distance is intentional.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* A logged-in non-initiator entry is the N_Port. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the command buffer to the device before queuing the IOCB. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);

	return (QL_CONSUMED);
}
4960 
4961 
4962 /*
4963  * ql_els_flogi
 *	Issue an extended link service fabric login request.
4965  *
4966  * Input:
4967  *	ha = adapter state pointer.
4968  *	pkt = pointer to fc_packet.
4969  *
4970  * Returns:
4971  *	FC_SUCCESS - the packet was accepted for transport.
4972  *	FC_TRANSPORT_ERROR - a transport error occurred.
4973  *
4974  * Context:
4975  *	Kernel context.
4976  */
4977 static int
4978 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4979 {
4980 	ql_tgt_t		*tq = NULL;
4981 	port_id_t		d_id;
4982 	la_els_logi_t		acc;
4983 	class_svc_param_t	*class3_param;
4984 	int			rval = FC_SUCCESS;
4985 	int			accept = 0;
4986 
4987 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4988 	    pkt->pkt_cmd_fhdr.d_id);
4989 
4990 	bzero(&acc, sizeof (acc));
4991 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4992 
4993 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
4994 		/*
4995 		 * d_id of zero in a FLOGI accept response in a point to point
4996 		 * topology triggers evaluation of N Port login initiative.
4997 		 */
4998 		pkt->pkt_resp_fhdr.d_id = 0;
4999 		/*
5000 		 * An N_Port already logged in with the firmware
5001 		 * will have the only database entry.
5002 		 */
5003 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5004 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5005 		}
5006 
5007 		if (tq != NULL) {
5008 			/*
5009 			 * If the target port has initiative send
5010 			 * up a PLOGI about the new device.
5011 			 */
5012 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5013 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5014 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5015 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5016 				ha->send_plogi_timer = 3;
5017 			} else {
5018 				ha->send_plogi_timer = 0;
5019 			}
5020 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5021 		} else {
5022 			/*
5023 			 * An N_Port not logged in with the firmware will not
5024 			 * have a database entry.  We accept anyway and rely
5025 			 * on a PLOGI from the upper layers to set the d_id
5026 			 * and s_id.
5027 			 */
5028 			accept = 1;
5029 		}
5030 	} else {
5031 		tq = ql_d_id_to_queue(ha, d_id);
5032 	}
5033 	if ((tq != NULL) || (accept != NULL)) {
5034 		/* Build ACC. */
5035 		pkt->pkt_state = FC_PKT_SUCCESS;
5036 		class3_param = (class_svc_param_t *)&acc.class_3;
5037 
5038 		acc.ls_code.ls_code = LA_ELS_ACC;
5039 		acc.common_service.fcph_version = 0x2006;
5040 		if (ha->topology & QL_N_PORT) {
5041 			/* clear F_Port indicator */
5042 			acc.common_service.cmn_features = 0x0800;
5043 		} else {
5044 			acc.common_service.cmn_features = 0x1b00;
5045 		}
5046 		CFG_IST(ha, CFG_CTRL_242581) ?
5047 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5048 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5049 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5050 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5051 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5052 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5053 		acc.common_service.conc_sequences = 0xff;
5054 		acc.common_service.relative_offset = 0x03;
5055 		acc.common_service.e_d_tov = 0x7d0;
5056 		if (accept) {
5057 			/* Use the saved N_Port WWNN and WWPN */
5058 			if (ha->n_port != NULL) {
5059 				bcopy((void *)&ha->n_port->port_name[0],
5060 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5061 				bcopy((void *)&ha->n_port->node_name[0],
5062 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5063 				/* mark service options invalid */
5064 				class3_param->class_valid_svc_opt = 0x0800;
5065 			} else {
5066 				EL(ha, "ha->n_port is NULL\n");
5067 				/* Build RJT. */
5068 				acc.ls_code.ls_code = LA_ELS_RJT;
5069 
5070 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5071 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5072 			}
5073 		} else {
5074 			bcopy((void *)&tq->port_name[0],
5075 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5076 			bcopy((void *)&tq->node_name[0],
5077 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5078 
5079 			class3_param = (class_svc_param_t *)&acc.class_3;
5080 			class3_param->class_valid_svc_opt = 0x8800;
5081 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5082 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5083 			class3_param->conc_sequences =
5084 			    tq->class3_conc_sequences;
5085 			class3_param->open_sequences_per_exch =
5086 			    tq->class3_open_sequences_per_exch;
5087 		}
5088 	} else {
5089 		/* Build RJT. */
5090 		acc.ls_code.ls_code = LA_ELS_RJT;
5091 
5092 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5093 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5094 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5095 	}
5096 
5097 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5098 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5099 
5100 	if (rval != FC_SUCCESS) {
5101 		EL(ha, "failed, rval = %xh\n", rval);
5102 	} else {
5103 		/*EMPTY*/
5104 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5105 	}
5106 	return (rval);
5107 }
5108 
5109 /*
5110  * ql_els_logo
 *	Issue an extended link service logout request.
5112  *
5113  * Input:
5114  *	ha = adapter state pointer.
5115  *	pkt = pointer to fc_packet.
5116  *
5117  * Returns:
5118  *	FC_SUCCESS - the packet was accepted for transport.
5119  *	FC_TRANSPORT_ERROR - a transport error occurred.
5120  *
5121  * Context:
5122  *	Kernel context.
5123  */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;		/* ELS response payload (ACC or RJT) */
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* A LOGO for the broadcast address requires no action. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Flag the port as needing re-authentication. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		/*
		 * Repeatedly abort and wait until no commands remain
		 * outstanding on this target queue.  The lock must be
		 * dropped around the abort/delay calls.
		 */
		do {
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Log the port out of the firmware, then build the ELS response. */
	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the response out through the caller's DMA access handle. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5189 
5190 /*
5191  * ql_els_prli
 *	Issue an extended link service process login request.
5193  *
5194  * Input:
5195  *	ha = adapter state pointer.
5196  *	pkt = pointer to fc_packet.
5197  *
5198  * Returns:
5199  *	FC_SUCCESS - the packet was accepted for transport.
5200  *	FC_TRANSPORT_ERROR - a transport error occurred.
5201  *
5202  * Context:
5203  *	Kernel context.
5204  */
5205 static int
5206 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5207 {
5208 	ql_tgt_t		*tq;
5209 	port_id_t		d_id;
5210 	la_els_prli_t		acc;
5211 	prli_svc_param_t	*param;
5212 	int			rval = FC_SUCCESS;
5213 
5214 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5215 	    pkt->pkt_cmd_fhdr.d_id);
5216 
5217 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5218 
5219 	tq = ql_d_id_to_queue(ha, d_id);
5220 	if (tq != NULL) {
5221 		(void) ql_get_port_database(ha, tq, PDF_NONE);
5222 
5223 		if ((ha->topology & QL_N_PORT) &&
5224 		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5225 			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
5226 			rval = QL_CONSUMED;
5227 		} else {
5228 			/* Build ACC. */
5229 			bzero(&acc, sizeof (acc));
5230 			acc.ls_code = LA_ELS_ACC;
5231 			acc.page_length = 0x10;
5232 			acc.payload_length = tq->prli_payload_length;
5233 
5234 			param = (prli_svc_param_t *)&acc.service_params[0];
5235 			param->type = 0x08;
5236 			param->rsvd = 0x00;
5237 			param->process_assoc_flags = tq->prli_svc_param_word_0;
5238 			param->process_flags = tq->prli_svc_param_word_3;
5239 
5240 			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5241 			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5242 			    DDI_DEV_AUTOINCR);
5243 
5244 			pkt->pkt_state = FC_PKT_SUCCESS;
5245 		}
5246 	} else {
5247 		la_els_rjt_t rjt;
5248 
5249 		/* Build RJT. */
5250 		bzero(&rjt, sizeof (rjt));
5251 		rjt.ls_code.ls_code = LA_ELS_RJT;
5252 
5253 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5254 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5255 
5256 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5257 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5258 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5259 	}
5260 
5261 	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5262 		EL(ha, "failed, rval = %xh\n", rval);
5263 	} else {
5264 		/*EMPTY*/
5265 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5266 	}
5267 	return (rval);
5268 }
5269 
5270 /*
5271  * ql_els_prlo
 *	Issue an extended link service process logout request.
5273  *
5274  * Input:
5275  *	ha = adapter state pointer.
5276  *	pkt = pointer to fc_packet.
5277  *
5278  * Returns:
5279  *	FC_SUCCESS - the packet was accepted for transport.
5280  *	FC_TRANSPORT_ERROR - a transport error occurred.
5281  *
5282  * Context:
5283  *	Kernel context.
5284  */
5285 /* ARGSUSED */
5286 static int
5287 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5288 {
5289 	la_els_prli_t	acc;
5290 	int		rval = FC_SUCCESS;
5291 
5292 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5293 	    pkt->pkt_cmd_fhdr.d_id);
5294 
5295 	/* Build ACC. */
5296 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5297 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5298 
5299 	acc.ls_code = LA_ELS_ACC;
5300 	acc.service_params[2] = 1;
5301 
5302 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5303 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5304 
5305 	pkt->pkt_state = FC_PKT_SUCCESS;
5306 
5307 	if (rval != FC_SUCCESS) {
5308 		EL(ha, "failed, rval = %xh\n", rval);
5309 	} else {
5310 		/*EMPTY*/
5311 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5312 	}
5313 	return (rval);
5314 }
5315 
5316 /*
5317  * ql_els_adisc
 *	Issue an extended link service address discovery request.
5319  *
5320  * Input:
5321  *	ha = adapter state pointer.
5322  *	pkt = pointer to fc_packet.
5323  *
5324  * Returns:
5325  *	FC_SUCCESS - the packet was accepted for transport.
5326  *	FC_TRANSPORT_ERROR - a transport error occurred.
5327  *
5328  * Context:
5329  *	Kernel context.
5330  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Locate the device queue for the destination port ID. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * Loop ID is not currently valid: try to refresh it from the
	 * firmware's device ID list.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the number of entries returned. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no usable loop ID: give up on this device. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Kick-start any LUN queues with pending commands. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * NOTE(review): the device queue lock
					 * is re-acquired after ql_next(),
					 * implying ql_next() drops it --
					 * confirm against ql_next().
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5448 
5449 /*
5450  * ql_els_linit
 *	Issue an extended link service loop initialize request.
5452  *
5453  * Input:
5454  *	ha = adapter state pointer.
5455  *	pkt = pointer to fc_packet.
5456  *
5457  * Returns:
5458  *	FC_SUCCESS - the packet was accepted for transport.
5459  *	FC_TRANSPORT_ERROR - a transport error occurred.
5460  *
5461  * Context:
5462  *	Kernel context.
5463  */
5464 static int
5465 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5466 {
5467 	ddi_dma_cookie_t	*cp;
5468 	uint32_t		cnt;
5469 	conv_num_t		n;
5470 	port_id_t		d_id;
5471 	int			rval = FC_SUCCESS;
5472 
5473 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5474 
5475 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5476 	if (ha->topology & QL_SNS_CONNECTION) {
5477 		fc_linit_req_t els;
5478 		lfa_cmd_t lfa;
5479 
5480 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5481 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5482 
5483 		/* Setup LFA mailbox command data. */
5484 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5485 
5486 		lfa.resp_buffer_length[0] = 4;
5487 
5488 		cp = pkt->pkt_resp_cookie;
5489 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5490 			n.size64 = (uint64_t)cp->dmac_laddress;
5491 			LITTLE_ENDIAN_64(&n.size64);
5492 		} else {
5493 			n.size32[0] = LSD(cp->dmac_laddress);
5494 			LITTLE_ENDIAN_32(&n.size32[0]);
5495 			n.size32[1] = MSD(cp->dmac_laddress);
5496 			LITTLE_ENDIAN_32(&n.size32[1]);
5497 		}
5498 
5499 		/* Set buffer address. */
5500 		for (cnt = 0; cnt < 8; cnt++) {
5501 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5502 		}
5503 
5504 		lfa.subcommand_length[0] = 4;
5505 		n.size32[0] = d_id.b24;
5506 		LITTLE_ENDIAN_32(&n.size32[0]);
5507 		lfa.addr[0] = n.size8[0];
5508 		lfa.addr[1] = n.size8[1];
5509 		lfa.addr[2] = n.size8[2];
5510 		lfa.subcommand[1] = 0x70;
5511 		lfa.payload[2] = els.func;
5512 		lfa.payload[4] = els.lip_b3;
5513 		lfa.payload[5] = els.lip_b4;
5514 
5515 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5516 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5517 		} else {
5518 			pkt->pkt_state = FC_PKT_SUCCESS;
5519 		}
5520 	} else {
5521 		fc_linit_resp_t rjt;
5522 
5523 		/* Build RJT. */
5524 		bzero(&rjt, sizeof (rjt));
5525 		rjt.ls_code.ls_code = LA_ELS_RJT;
5526 
5527 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5528 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5529 
5530 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5531 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5532 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5533 	}
5534 
5535 	if (rval != FC_SUCCESS) {
5536 		EL(ha, "failed, rval = %xh\n", rval);
5537 	} else {
5538 		/*EMPTY*/
5539 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5540 	}
5541 	return (rval);
5542 }
5543 
5544 /*
5545  * ql_els_lpc
 *	Issue an extended link service loop control request.
5547  *
5548  * Input:
5549  *	ha = adapter state pointer.
5550  *	pkt = pointer to fc_packet.
5551  *
5552  * Returns:
5553  *	FC_SUCCESS - the packet was accepted for transport.
5554  *	FC_TRANSPORT_ERROR - a transport error occurred.
5555  *
5556  * Context:
5557  *	Kernel context.
5558  */
5559 static int
5560 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5561 {
5562 	ddi_dma_cookie_t	*cp;
5563 	uint32_t		cnt;
5564 	conv_num_t		n;
5565 	port_id_t		d_id;
5566 	int			rval = FC_SUCCESS;
5567 
5568 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5569 
5570 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5571 	if (ha->topology & QL_SNS_CONNECTION) {
5572 		ql_lpc_t els;
5573 		lfa_cmd_t lfa;
5574 
5575 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5576 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5577 
5578 		/* Setup LFA mailbox command data. */
5579 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5580 
5581 		lfa.resp_buffer_length[0] = 4;
5582 
5583 		cp = pkt->pkt_resp_cookie;
5584 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5585 			n.size64 = (uint64_t)(cp->dmac_laddress);
5586 			LITTLE_ENDIAN_64(&n.size64);
5587 		} else {
5588 			n.size32[0] = cp->dmac_address;
5589 			LITTLE_ENDIAN_32(&n.size32[0]);
5590 			n.size32[1] = 0;
5591 		}
5592 
5593 		/* Set buffer address. */
5594 		for (cnt = 0; cnt < 8; cnt++) {
5595 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5596 		}
5597 
5598 		lfa.subcommand_length[0] = 20;
5599 		n.size32[0] = d_id.b24;
5600 		LITTLE_ENDIAN_32(&n.size32[0]);
5601 		lfa.addr[0] = n.size8[0];
5602 		lfa.addr[1] = n.size8[1];
5603 		lfa.addr[2] = n.size8[2];
5604 		lfa.subcommand[1] = 0x71;
5605 		lfa.payload[4] = els.port_control;
5606 		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5607 
5608 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5609 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5610 		} else {
5611 			pkt->pkt_state = FC_PKT_SUCCESS;
5612 		}
5613 	} else {
5614 		ql_lpc_resp_t rjt;
5615 
5616 		/* Build RJT. */
5617 		bzero(&rjt, sizeof (rjt));
5618 		rjt.ls_code.ls_code = LA_ELS_RJT;
5619 
5620 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5621 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5622 
5623 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5624 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5625 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5626 	}
5627 
5628 	if (rval != FC_SUCCESS) {
5629 		EL(ha, "failed, rval = %xh\n", rval);
5630 	} else {
5631 		/*EMPTY*/
5632 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5633 	}
5634 	return (rval);
5635 }
5636 
5637 /*
5638  * ql_els_lsts
 *	Issue an extended link service loop status request.
5640  *
5641  * Input:
5642  *	ha = adapter state pointer.
5643  *	pkt = pointer to fc_packet.
5644  *
5645  * Returns:
5646  *	FC_SUCCESS - the packet was accepted for transport.
5647  *	FC_TRANSPORT_ERROR - a transport error occurred.
5648  *
5649  * Context:
5650  *	Kernel context.
5651  */
5652 static int
5653 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5654 {
5655 	ddi_dma_cookie_t	*cp;
5656 	uint32_t		cnt;
5657 	conv_num_t		n;
5658 	port_id_t		d_id;
5659 	int			rval = FC_SUCCESS;
5660 
5661 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5662 
5663 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5664 	if (ha->topology & QL_SNS_CONNECTION) {
5665 		fc_lsts_req_t els;
5666 		lfa_cmd_t lfa;
5667 
5668 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5669 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5670 
5671 		/* Setup LFA mailbox command data. */
5672 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
5673 
5674 		lfa.resp_buffer_length[0] = 84;
5675 
5676 		cp = pkt->pkt_resp_cookie;
5677 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5678 			n.size64 = cp->dmac_laddress;
5679 			LITTLE_ENDIAN_64(&n.size64);
5680 		} else {
5681 			n.size32[0] = cp->dmac_address;
5682 			LITTLE_ENDIAN_32(&n.size32[0]);
5683 			n.size32[1] = 0;
5684 		}
5685 
5686 		/* Set buffer address. */
5687 		for (cnt = 0; cnt < 8; cnt++) {
5688 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
5689 		}
5690 
5691 		lfa.subcommand_length[0] = 2;
5692 		n.size32[0] = d_id.b24;
5693 		LITTLE_ENDIAN_32(&n.size32[0]);
5694 		lfa.addr[0] = n.size8[0];
5695 		lfa.addr[1] = n.size8[1];
5696 		lfa.addr[2] = n.size8[2];
5697 		lfa.subcommand[1] = 0x72;
5698 
5699 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5700 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5701 		} else {
5702 			pkt->pkt_state = FC_PKT_SUCCESS;
5703 		}
5704 	} else {
5705 		fc_lsts_resp_t rjt;
5706 
5707 		/* Build RJT. */
5708 		bzero(&rjt, sizeof (rjt));
5709 		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5710 
5711 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5712 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5713 
5714 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5715 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5716 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5717 	}
5718 
5719 	if (rval != FC_SUCCESS) {
5720 		EL(ha, "failed=%xh\n", rval);
5721 	} else {
5722 		/*EMPTY*/
5723 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5724 	}
5725 	return (rval);
5726 }
5727 
5728 /*
5729  * ql_els_scr
 *	Issue an extended link service state change registration request.
5731  *
5732  * Input:
5733  *	ha = adapter state pointer.
5734  *	pkt = pointer to fc_packet.
5735  *
5736  * Returns:
5737  *	FC_SUCCESS - the packet was accepted for transport.
5738  *	FC_TRANSPORT_ERROR - a transport error occurred.
5739  *
5740  * Context:
5741  *	Kernel context.
5742  */
5743 static int
5744 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5745 {
5746 	fc_scr_resp_t	acc;
5747 	int		rval = FC_SUCCESS;
5748 
5749 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5750 
5751 	bzero(&acc, sizeof (acc));
5752 	if (ha->topology & QL_SNS_CONNECTION) {
5753 		fc_scr_req_t els;
5754 
5755 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5756 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5757 
5758 		if (ql_send_change_request(ha, els.scr_func) ==
5759 		    QL_SUCCESS) {
5760 			/* Build ACC. */
5761 			acc.scr_acc = LA_ELS_ACC;
5762 
5763 			pkt->pkt_state = FC_PKT_SUCCESS;
5764 		} else {
5765 			/* Build RJT. */
5766 			acc.scr_acc = LA_ELS_RJT;
5767 
5768 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5769 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5770 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5771 		}
5772 	} else {
5773 		/* Build RJT. */
5774 		acc.scr_acc = LA_ELS_RJT;
5775 
5776 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5777 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5778 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5779 	}
5780 
5781 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5782 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5783 
5784 	if (rval != FC_SUCCESS) {
5785 		EL(ha, "failed, rval = %xh\n", rval);
5786 	} else {
5787 		/*EMPTY*/
5788 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5789 	}
5790 	return (rval);
5791 }
5792 
5793 /*
5794  * ql_els_rscn
 *	Issue an extended link service register state
5796  *	change notification request.
5797  *
5798  * Input:
5799  *	ha = adapter state pointer.
5800  *	pkt = pointer to fc_packet.
5801  *
5802  * Returns:
5803  *	FC_SUCCESS - the packet was accepted for transport.
5804  *	FC_TRANSPORT_ERROR - a transport error occurred.
5805  *
5806  * Context:
5807  *	Kernel context.
5808  */
5809 static int
5810 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5811 {
5812 	ql_rscn_resp_t	acc;
5813 	int		rval = FC_SUCCESS;
5814 
5815 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5816 
5817 	bzero(&acc, sizeof (acc));
5818 	if (ha->topology & QL_SNS_CONNECTION) {
5819 		/* Build ACC. */
5820 		acc.scr_acc = LA_ELS_ACC;
5821 
5822 		pkt->pkt_state = FC_PKT_SUCCESS;
5823 	} else {
5824 		/* Build RJT. */
5825 		acc.scr_acc = LA_ELS_RJT;
5826 
5827 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5828 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5829 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5830 	}
5831 
5832 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5833 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5834 
5835 	if (rval != FC_SUCCESS) {
5836 		EL(ha, "failed, rval = %xh\n", rval);
5837 	} else {
5838 		/*EMPTY*/
5839 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5840 	}
5841 	return (rval);
5842 }
5843 
5844 /*
5845  * ql_els_farp_req
5846  *	Issue FC Address Resolution Protocol (FARP)
5847  *	extended link service request.
5848  *
5849  *	Note: not supported.
5850  *
5851  * Input:
5852  *	ha = adapter state pointer.
5853  *	pkt = pointer to fc_packet.
5854  *
5855  * Returns:
5856  *	FC_SUCCESS - the packet was accepted for transport.
5857  *	FC_TRANSPORT_ERROR - a transport error occurred.
5858  *
5859  * Context:
5860  *	Kernel context.
5861  */
5862 static int
5863 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5864 {
5865 	ql_acc_rjt_t	acc;
5866 	int		rval = FC_SUCCESS;
5867 
5868 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5869 
5870 	bzero(&acc, sizeof (acc));
5871 
5872 	/* Build ACC. */
5873 	acc.ls_code.ls_code = LA_ELS_ACC;
5874 
5875 	pkt->pkt_state = FC_PKT_SUCCESS;
5876 
5877 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5878 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5879 
5880 	if (rval != FC_SUCCESS) {
5881 		EL(ha, "failed, rval = %xh\n", rval);
5882 	} else {
5883 		/*EMPTY*/
5884 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5885 	}
5886 	return (rval);
5887 }
5888 
5889 /*
5890  * ql_els_farp_reply
5891  *	Issue FC Address Resolution Protocol (FARP)
5892  *	extended link service reply.
5893  *
5894  *	Note: not supported.
5895  *
5896  * Input:
5897  *	ha = adapter state pointer.
5898  *	pkt = pointer to fc_packet.
5899  *
5900  * Returns:
5901  *	FC_SUCCESS - the packet was accepted for transport.
5902  *	FC_TRANSPORT_ERROR - a transport error occurred.
5903  *
5904  * Context:
5905  *	Kernel context.
5906  */
5907 /* ARGSUSED */
5908 static int
5909 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5910 {
5911 	ql_acc_rjt_t	acc;
5912 	int		rval = FC_SUCCESS;
5913 
5914 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5915 
5916 	bzero(&acc, sizeof (acc));
5917 
5918 	/* Build ACC. */
5919 	acc.ls_code.ls_code = LA_ELS_ACC;
5920 
5921 	pkt->pkt_state = FC_PKT_SUCCESS;
5922 
5923 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5924 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5925 
5926 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5927 
5928 	return (rval);
5929 }
5930 
/*
 * ql_els_rnid
 *	Process an extended link service request node
 *	identification data (RNID) request.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the RNID was processed and an ACC returned.
 *	FC_FAILURE - the device is unknown or the RNID ELS failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;	/* raw RNID response buffer */
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Locate the device queue for the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	/*
	 * NOTE(review): the request is read directly from pkt->pkt_cmd
	 * rather than through ddi_rep_get8() as the other ELS handlers
	 * do -- confirm this is safe for all access attributes.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		/* Unknown device or ELS failure: build RJT. */
		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/*
	 * Build ACC from the firmware response.  Assumes acc.hdr has
	 * room for req_len bytes -- TODO confirm against the
	 * la_els_rnid_acc_t definition.
	 */
	acc.ls_code.ls_code = LA_ELS_ACC;
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
5994 
5995 static int
5996 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
5997 {
5998 	fc_rls_acc_t		*rls_acc;
5999 	port_id_t		d_id;
6000 	ql_link_t		*link;
6001 	ql_tgt_t		*tq;
6002 	uint16_t		index;
6003 	la_els_rls_acc_t	acc;
6004 
6005 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6006 
6007 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6008 	index = ql_alpa_to_index[d_id.b.al_pa];
6009 
6010 	tq = NULL;
6011 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6012 		tq = link->base_address;
6013 		if (tq->d_id.b24 == d_id.b24) {
6014 			break;
6015 		} else {
6016 			tq = NULL;
6017 		}
6018 	}
6019 
6020 	/* Allocate memory for link error status block */
6021 	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6022 
6023 	bzero(&acc, sizeof (la_els_rls_acc_t));
6024 
6025 	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6026 	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
6027 	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
6028 
6029 		kmem_free(rls_acc, sizeof (*rls_acc));
6030 		acc.ls_code.ls_code = LA_ELS_RJT;
6031 
6032 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6033 		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6034 
6035 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6036 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6037 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6038 
6039 		return (FC_FAILURE);
6040 	}
6041 
6042 	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6043 	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6044 	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6045 	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6046 	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6047 
6048 	acc.ls_code.ls_code = LA_ELS_ACC;
6049 	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6050 	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6051 	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
6052 	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6053 	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6054 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6055 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6056 
6057 	kmem_free(rls_acc, sizeof (*rls_acc));
6058 	pkt->pkt_state = FC_PKT_SUCCESS;
6059 
6060 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6061 
6062 	return (FC_SUCCESS);
6063 }
6064 
/*
 * ql_busy_plogi
 *	Ensures all outstanding commands and queued callbacks for a
 *	target have drained before a PLOGI is processed.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - no commands or callbacks remain outstanding.
 *	FC_TRAN_BUSY - the target still has work pending; the
 *		transport is expected to retry the PLOGI.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;	/* drain attempts before giving up */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock across the delay. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	/* Scan the callback queue for work still destined for this port. */
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer callback: match on frame s_id. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6148 
6149 /*
6150  * ql_login_port
6151  *	Logs in a device if not already logged in.
6152  *
6153  * Input:
6154  *	ha = adapter state pointer.
6155  *	d_id = 24 bit port ID.
6156  *	DEVICE_QUEUE_LOCK must be released.
6157  *
6158  * Returns:
6159  *	QL local function return status code.
6160  *
6161  * Context:
6162  *	Kernel context.
6163  */
6164 static int
6165 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6166 {
6167 	ql_adapter_state_t	*vha;
6168 	ql_link_t		*link;
6169 	uint16_t		index;
6170 	ql_tgt_t		*tq, *tq2;
6171 	uint16_t		loop_id, first_loop_id, last_loop_id;
6172 	int			rval = QL_SUCCESS;
6173 
6174 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6175 	    d_id.b24);
6176 
6177 	/* Get head queue index. */
6178 	index = ql_alpa_to_index[d_id.b.al_pa];
6179 
6180 	/* Check for device already has a queue. */
6181 	tq = NULL;
6182 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6183 		tq = link->base_address;
6184 		if (tq->d_id.b24 == d_id.b24) {
6185 			loop_id = tq->loop_id;
6186 			break;
6187 		} else {
6188 			tq = NULL;
6189 		}
6190 	}
6191 
6192 	/* Let's stop issuing any IO and unsolicited logo */
6193 	if ((tq != NULL) && (!(ddi_in_panic()))) {
6194 		DEVICE_QUEUE_LOCK(tq);
6195 		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6196 		tq->flags &= ~TQF_RSCN_RCVD;
6197 		DEVICE_QUEUE_UNLOCK(tq);
6198 	}
6199 	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6200 	    !(tq->flags & TQF_FABRIC_DEVICE)) {
6201 		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6202 	}
6203 
6204 	/* Special case for Nameserver */
6205 	if (d_id.b24 == 0xFFFFFC) {
6206 		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
6207 		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6208 		if (tq == NULL) {
6209 			ADAPTER_STATE_LOCK(ha);
6210 			tq = ql_dev_init(ha, d_id, loop_id);
6211 			ADAPTER_STATE_UNLOCK(ha);
6212 			if (tq == NULL) {
6213 				EL(ha, "failed=%xh, d_id=%xh\n",
6214 				    QL_FUNCTION_FAILED, d_id.b24);
6215 				return (QL_FUNCTION_FAILED);
6216 			}
6217 		}
6218 		rval = ql_login_fabric_port(ha, tq, loop_id);
6219 		if (rval == QL_SUCCESS) {
6220 			tq->loop_id = loop_id;
6221 			tq->flags |= TQF_FABRIC_DEVICE;
6222 			(void) ql_get_port_database(ha, tq, PDF_NONE);
6223 			ha->topology = (uint8_t)
6224 			    (ha->topology | QL_SNS_CONNECTION);
6225 		}
6226 	/* Check for device already logged in. */
6227 	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6228 		if (tq->flags & TQF_FABRIC_DEVICE) {
6229 			rval = ql_login_fabric_port(ha, tq, loop_id);
6230 			if (rval == QL_PORT_ID_USED) {
6231 				rval = QL_SUCCESS;
6232 			}
6233 		} else if (LOCAL_LOOP_ID(loop_id)) {
6234 			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6235 			    (tq->flags & TQF_INITIATOR_DEVICE ?
6236 			    LLF_NONE : LLF_PLOGI));
6237 			if (rval == QL_SUCCESS) {
6238 				DEVICE_QUEUE_LOCK(tq);
6239 				tq->loop_id = loop_id;
6240 				DEVICE_QUEUE_UNLOCK(tq);
6241 			}
6242 		}
6243 	} else if (ha->topology & QL_SNS_CONNECTION) {
6244 		/* Locate unused loop ID. */
6245 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6246 			first_loop_id = 0;
6247 			last_loop_id = LAST_N_PORT_HDL;
6248 		} else if (ha->topology & QL_F_PORT) {
6249 			first_loop_id = 0;
6250 			last_loop_id = SNS_LAST_LOOP_ID;
6251 		} else {
6252 			first_loop_id = SNS_FIRST_LOOP_ID;
6253 			last_loop_id = SNS_LAST_LOOP_ID;
6254 		}
6255 
6256 		/* Acquire adapter state lock. */
6257 		ADAPTER_STATE_LOCK(ha);
6258 
6259 		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6260 		if (tq == NULL) {
6261 			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6262 			    d_id.b24);
6263 
6264 			ADAPTER_STATE_UNLOCK(ha);
6265 
6266 			return (QL_FUNCTION_FAILED);
6267 		}
6268 
6269 		rval = QL_FUNCTION_FAILED;
6270 		loop_id = ha->pha->free_loop_id++;
6271 		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6272 		    index--) {
6273 			if (loop_id < first_loop_id ||
6274 			    loop_id > last_loop_id) {
6275 				loop_id = first_loop_id;
6276 				ha->pha->free_loop_id = (uint16_t)
6277 				    (loop_id + 1);
6278 			}
6279 
6280 			/* Bypass if loop ID used. */
6281 			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6282 				tq2 = ql_loop_id_to_queue(vha, loop_id);
6283 				if (tq2 != NULL && tq2 != tq) {
6284 					break;
6285 				}
6286 			}
6287 			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6288 			    loop_id == ha->loop_id) {
6289 				loop_id = ha->pha->free_loop_id++;
6290 				continue;
6291 			}
6292 
6293 			ADAPTER_STATE_UNLOCK(ha);
6294 			rval = ql_login_fabric_port(ha, tq, loop_id);
6295 
6296 			/*
6297 			 * If PORT_ID_USED is returned
6298 			 * the login_fabric_port() updates
6299 			 * with the correct loop ID
6300 			 */
6301 			switch (rval) {
6302 			case QL_PORT_ID_USED:
6303 				/*
6304 				 * use f/w handle and try to
6305 				 * login again.
6306 				 */
6307 				ADAPTER_STATE_LOCK(ha);
6308 				ha->pha->free_loop_id--;
6309 				ADAPTER_STATE_UNLOCK(ha);
6310 				loop_id = tq->loop_id;
6311 				break;
6312 
6313 			case QL_SUCCESS:
6314 				tq->flags |= TQF_FABRIC_DEVICE;
6315 				(void) ql_get_port_database(ha,
6316 				    tq, PDF_NONE);
6317 				index = 1;
6318 				break;
6319 
6320 			case QL_LOOP_ID_USED:
6321 				tq->loop_id = PORT_NO_LOOP_ID;
6322 				loop_id = ha->pha->free_loop_id++;
6323 				break;
6324 
6325 			case QL_ALL_IDS_IN_USE:
6326 				tq->loop_id = PORT_NO_LOOP_ID;
6327 				index = 1;
6328 				break;
6329 
6330 			default:
6331 				tq->loop_id = PORT_NO_LOOP_ID;
6332 				index = 1;
6333 				break;
6334 			}
6335 
6336 			ADAPTER_STATE_LOCK(ha);
6337 		}
6338 
6339 		ADAPTER_STATE_UNLOCK(ha);
6340 	} else {
6341 		rval = QL_FUNCTION_FAILED;
6342 	}
6343 
6344 	if (rval != QL_SUCCESS) {
6345 		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6346 	} else {
6347 		EL(ha, "d_id=%xh, loop_id=%xh, "
6348 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6349 		    tq->loop_id, tq->port_name[0], tq->port_name[1],
6350 		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
6351 		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6352 	}
6353 	return (rval);
6354 }
6355 
6356 /*
6357  * ql_login_fabric_port
6358  *	Issue login fabric port mailbox command.
6359  *
6360  * Input:
6361  *	ha:		adapter state pointer.
6362  *	tq:		target queue pointer.
6363  *	loop_id:	FC Loop ID.
6364  *
6365  * Returns:
6366  *	ql local function return status code.
6367  *
6368  * Context:
6369  *	Kernel context.
6370  */
6371 static int
6372 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6373 {
6374 	int		rval;
6375 	int		index;
6376 	int		retry = 0;
6377 	port_id_t	d_id;
6378 	ql_tgt_t	*newq;
6379 	ql_mbx_data_t	mr;
6380 
6381 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6382 	    tq->d_id.b24);
6383 
6384 	/*
6385 	 * QL_PARAMETER_ERROR also means the firmware is
6386 	 * not able to allocate PCB entry due to resource
6387 	 * issues, or collision.
6388 	 */
6389 	do {
6390 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6391 		if ((rval == QL_PARAMETER_ERROR) ||
6392 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6393 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6394 			retry++;
6395 			drv_usecwait(10 * MILLISEC);
6396 		} else {
6397 			break;
6398 		}
6399 	} while (retry < 5);
6400 
6401 	switch (rval) {
6402 	case QL_SUCCESS:
6403 		tq->loop_id = loop_id;
6404 		break;
6405 
6406 	case QL_PORT_ID_USED:
6407 		/*
6408 		 * This Loop ID should NOT be in use in drivers
6409 		 */
6410 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6411 
6412 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6413 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6414 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6415 			    newq->loop_id, newq->d_id.b24);
6416 			ql_send_logo(ha, newq, NULL);
6417 		}
6418 
6419 		tq->loop_id = mr.mb[1];
6420 		break;
6421 
6422 	case QL_LOOP_ID_USED:
6423 		d_id.b.al_pa = LSB(mr.mb[2]);
6424 		d_id.b.area = MSB(mr.mb[2]);
6425 		d_id.b.domain = LSB(mr.mb[1]);
6426 
6427 		newq = ql_d_id_to_queue(ha, d_id);
6428 		if (newq && (newq->loop_id != loop_id)) {
6429 			/*
6430 			 * This should NEVER ever happen; but this
6431 			 * code is needed to bail out when the worst
6432 			 * case happens - or as used to happen before
6433 			 */
6434 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6435 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6436 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6437 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6438 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6439 			    newq->d_id.b24, loop_id);
6440 
6441 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6442 				ADAPTER_STATE_LOCK(ha);
6443 
6444 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6445 				ql_add_link_b(&ha->dev[index], &newq->device);
6446 
6447 				newq->d_id.b24 = d_id.b24;
6448 
6449 				index = ql_alpa_to_index[d_id.b.al_pa];
6450 				ql_add_link_b(&ha->dev[index], &newq->device);
6451 
6452 				ADAPTER_STATE_UNLOCK(ha);
6453 			}
6454 
6455 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6456 
6457 		}
6458 
6459 		/*
6460 		 * Invalidate the loop ID for the
6461 		 * us to obtain a new one.
6462 		 */
6463 		tq->loop_id = PORT_NO_LOOP_ID;
6464 		break;
6465 
6466 	case QL_ALL_IDS_IN_USE:
6467 		rval = QL_FUNCTION_FAILED;
6468 		EL(ha, "no loop id's available\n");
6469 		break;
6470 
6471 	default:
6472 		if (rval == QL_COMMAND_ERROR) {
6473 			switch (mr.mb[1]) {
6474 			case 2:
6475 			case 3:
6476 				rval = QL_MEMORY_ALLOC_FAILED;
6477 				break;
6478 
6479 			case 4:
6480 				rval = QL_FUNCTION_TIMEOUT;
6481 				break;
6482 			case 7:
6483 				rval = QL_FABRIC_NOT_INITIALIZED;
6484 				break;
6485 			default:
6486 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6487 				break;
6488 			}
6489 		} else {
6490 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6491 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6492 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6493 		}
6494 		break;
6495 	}
6496 
6497 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6498 	    rval != QL_LOOP_ID_USED) {
6499 		EL(ha, "failed=%xh\n", rval);
6500 	} else {
6501 		/*EMPTY*/
6502 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6503 	}
6504 	return (rval);
6505 }
6506 
6507 /*
6508  * ql_logout_port
6509  *	Logs out a device if possible.
6510  *
6511  * Input:
6512  *	ha:	adapter state pointer.
6513  *	d_id:	24 bit port ID.
6514  *
6515  * Returns:
6516  *	QL local function return status code.
6517  *
6518  * Context:
6519  *	Kernel context.
6520  */
6521 static int
6522 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6523 {
6524 	ql_link_t	*link;
6525 	ql_tgt_t	*tq;
6526 	uint16_t	index;
6527 
6528 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6529 
6530 	/* Get head queue index. */
6531 	index = ql_alpa_to_index[d_id.b.al_pa];
6532 
6533 	/* Get device queue. */
6534 	tq = NULL;
6535 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6536 		tq = link->base_address;
6537 		if (tq->d_id.b24 == d_id.b24) {
6538 			break;
6539 		} else {
6540 			tq = NULL;
6541 		}
6542 	}
6543 
6544 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6545 		(void) ql_logout_fabric_port(ha, tq);
6546 		tq->loop_id = PORT_NO_LOOP_ID;
6547 	}
6548 
6549 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6550 
6551 	return (QL_SUCCESS);
6552 }
6553 
6554 /*
6555  * ql_dev_init
6556  *	Initialize/allocate device queue.
6557  *
6558  * Input:
6559  *	ha:		adapter state pointer.
6560  *	d_id:		device destination ID
6561  *	loop_id:	device loop ID
6562  *	ADAPTER_STATE_LOCK must be already obtained.
6563  *
6564  * Returns:
6565  *	NULL = failure
6566  *
6567  * Context:
6568  *	Kernel context.
6569  */
6570 ql_tgt_t *
6571 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6572 {
6573 	ql_link_t	*link;
6574 	uint16_t	index;
6575 	ql_tgt_t	*tq;
6576 
6577 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6578 	    ha->instance, d_id.b24, loop_id);
6579 
6580 	index = ql_alpa_to_index[d_id.b.al_pa];
6581 
6582 	/* If device queue exists, set proper loop ID. */
6583 	tq = NULL;
6584 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6585 		tq = link->base_address;
6586 		if (tq->d_id.b24 == d_id.b24) {
6587 			tq->loop_id = loop_id;
6588 
6589 			/* Reset port down retry count. */
6590 			tq->port_down_retry_count = ha->port_down_retry_count;
6591 			tq->qfull_retry_count = ha->qfull_retry_count;
6592 
6593 			break;
6594 		} else {
6595 			tq = NULL;
6596 		}
6597 	}
6598 
6599 	/* If device does not have queue. */
6600 	if (tq == NULL) {
6601 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6602 		if (tq != NULL) {
6603 			/*
6604 			 * mutex to protect the device queue,
6605 			 * does not block interrupts.
6606 			 */
6607 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6608 			    (ha->iflags & IFLG_INTR_AIF) ?
6609 			    (void *)(uintptr_t)ha->intr_pri :
6610 			    (void *)(uintptr_t)ha->iblock_cookie);
6611 
6612 			tq->d_id.b24 = d_id.b24;
6613 			tq->loop_id = loop_id;
6614 			tq->device.base_address = tq;
6615 			tq->iidma_rate = IIDMA_RATE_INIT;
6616 
6617 			/* Reset port down retry count. */
6618 			tq->port_down_retry_count = ha->port_down_retry_count;
6619 			tq->qfull_retry_count = ha->qfull_retry_count;
6620 
6621 			/* Add device to device queue. */
6622 			ql_add_link_b(&ha->dev[index], &tq->device);
6623 		}
6624 	}
6625 
6626 	if (tq == NULL) {
6627 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6628 	} else {
6629 		/*EMPTY*/
6630 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6631 	}
6632 	return (tq);
6633 }
6634 
6635 /*
6636  * ql_dev_free
6637  *	Remove queue from device list and frees resources used by queue.
6638  *
6639  * Input:
6640  *	ha:	adapter state pointer.
6641  *	tq:	target queue pointer.
6642  *	ADAPTER_STATE_LOCK must be already obtained.
6643  *
6644  * Context:
6645  *	Kernel context.
6646  */
6647 void
6648 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6649 {
6650 	ql_link_t	*link;
6651 	uint16_t	index;
6652 	ql_lun_t	*lq;
6653 
6654 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6655 
6656 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6657 		lq = link->base_address;
6658 		if (lq->cmd.first != NULL) {
6659 			return;
6660 		}
6661 	}
6662 
6663 	if (tq->outcnt == 0) {
6664 		/* Get head queue index. */
6665 		index = ql_alpa_to_index[tq->d_id.b.al_pa];
6666 		for (link = ha->dev[index].first; link != NULL;
6667 		    link = link->next) {
6668 			if (link->base_address == tq) {
6669 				ql_remove_link(&ha->dev[index], link);
6670 
6671 				link = tq->lun_queues.first;
6672 				while (link != NULL) {
6673 					lq = link->base_address;
6674 					link = link->next;
6675 
6676 					ql_remove_link(&tq->lun_queues,
6677 					    &lq->link);
6678 					kmem_free(lq, sizeof (ql_lun_t));
6679 				}
6680 
6681 				mutex_destroy(&tq->mutex);
6682 				kmem_free(tq, sizeof (ql_tgt_t));
6683 				break;
6684 			}
6685 		}
6686 	}
6687 
6688 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6689 }
6690 
6691 /*
6692  * ql_lun_queue
6693  *	Allocate LUN queue if does not exists.
6694  *
6695  * Input:
6696  *	ha:	adapter state pointer.
6697  *	tq:	target queue.
6698  *	lun:	LUN number.
6699  *
6700  * Returns:
6701  *	NULL = failure
6702  *
6703  * Context:
6704  *	Kernel context.
6705  */
6706 static ql_lun_t *
6707 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6708 {
6709 	ql_lun_t	*lq;
6710 	ql_link_t	*link;
6711 
6712 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6713 
6714 	/* Fast path. */
6715 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6716 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6717 		return (tq->last_lun_queue);
6718 	}
6719 
6720 	if (lun >= MAX_LUNS) {
6721 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6722 		return (NULL);
6723 	}
6724 	/* If device queue exists, set proper loop ID. */
6725 	lq = NULL;
6726 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6727 		lq = link->base_address;
6728 		if (lq->lun_no == lun) {
6729 			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6730 			tq->last_lun_queue = lq;
6731 			return (lq);
6732 		}
6733 	}
6734 
6735 	/* If queue does exist. */
6736 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6737 
6738 	/* Initialize LUN queue. */
6739 	if (lq != NULL) {
6740 		lq->link.base_address = lq;
6741 
6742 		lq->lun_no = lun;
6743 		lq->target_queue = tq;
6744 
6745 		DEVICE_QUEUE_LOCK(tq);
6746 		ql_add_link_b(&tq->lun_queues, &lq->link);
6747 		DEVICE_QUEUE_UNLOCK(tq);
6748 		tq->last_lun_queue = lq;
6749 	}
6750 
6751 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6752 
6753 	return (lq);
6754 }
6755 
6756 /*
6757  * ql_fcp_scsi_cmd
6758  *	Process fibre channel (FCP) SCSI protocol commands.
6759  *
6760  * Input:
6761  *	ha = adapter state pointer.
6762  *	pkt = pointer to fc_packet.
6763  *	sp = srb pointer.
6764  *
6765  * Returns:
6766  *	FC_SUCCESS - the packet was accepted for transport.
6767  *	FC_TRANSPORT_ERROR - a transport error occurred.
6768  *
6769  * Context:
6770  *	Kernel context.
6771  */
6772 static int
6773 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6774 {
6775 	port_id_t	d_id;
6776 	ql_tgt_t	*tq;
6777 	uint64_t	*ptr;
6778 	uint16_t	lun;
6779 
6780 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6781 
6782 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6783 	if (tq == NULL) {
6784 		d_id.r.rsvd_1 = 0;
6785 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6786 		tq = ql_d_id_to_queue(ha, d_id);
6787 	}
6788 
6789 	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
6790 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6791 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6792 
6793 	if (tq != NULL &&
6794 	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
6795 
6796 		/*
6797 		 * zero out FCP response; 24 Bytes
6798 		 */
6799 		ptr = (uint64_t *)pkt->pkt_resp;
6800 		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
6801 
6802 		/* Handle task management function. */
6803 		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
6804 		    sp->fcp->fcp_cntl.cntl_clr_aca |
6805 		    sp->fcp->fcp_cntl.cntl_reset_tgt |
6806 		    sp->fcp->fcp_cntl.cntl_reset_lun |
6807 		    sp->fcp->fcp_cntl.cntl_clr_tsk |
6808 		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
6809 			ql_task_mgmt(ha, tq, pkt, sp);
6810 		} else {
6811 			ha->pha->xioctl->IosRequested++;
6812 			ha->pha->xioctl->BytesRequested += (uint32_t)
6813 			    sp->fcp->fcp_data_len;
6814 
6815 			/*
6816 			 * Setup for commands with data transfer
6817 			 */
6818 			sp->iocb = ha->fcp_cmd;
6819 			if (sp->fcp->fcp_data_len != 0) {
6820 				/*
6821 				 * FCP data is bound to pkt_data_dma
6822 				 */
6823 				if (sp->fcp->fcp_cntl.cntl_write_data) {
6824 					(void) ddi_dma_sync(pkt->pkt_data_dma,
6825 					    0, 0, DDI_DMA_SYNC_FORDEV);
6826 				}
6827 
6828 				/* Setup IOCB count. */
6829 				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
6830 					uint32_t	cnt;
6831 
6832 					cnt = pkt->pkt_data_cookie_cnt -
6833 					    ha->cmd_segs;
6834 					sp->req_cnt = (uint16_t)
6835 					    (cnt / ha->cmd_cont_segs);
6836 					if (cnt % ha->cmd_cont_segs) {
6837 						sp->req_cnt = (uint16_t)
6838 						    (sp->req_cnt + 2);
6839 					} else {
6840 						sp->req_cnt++;
6841 					}
6842 				} else {
6843 					sp->req_cnt = 1;
6844 				}
6845 			} else {
6846 				sp->req_cnt = 1;
6847 			}
6848 			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6849 
6850 			return (ql_start_cmd(ha, tq, pkt, sp));
6851 		}
6852 	} else {
6853 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6854 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6855 
6856 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6857 			ql_awaken_task_daemon(ha, sp, 0, 0);
6858 	}
6859 
6860 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6861 
6862 	return (FC_SUCCESS);
6863 }
6864 
6865 /*
6866  * ql_task_mgmt
6867  *	Task management function processor.
6868  *
6869  * Input:
6870  *	ha:	adapter state pointer.
6871  *	tq:	target queue pointer.
6872  *	pkt:	pointer to fc_packet.
6873  *	sp:	SRB pointer.
6874  *
6875  * Context:
6876  *	Kernel context.
6877  */
6878 static void
6879 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6880     ql_srb_t *sp)
6881 {
6882 	fcp_rsp_t		*fcpr;
6883 	struct fcp_rsp_info	*rsp;
6884 	uint16_t		lun;
6885 
6886 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6887 
6888 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6889 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6890 
6891 	bzero(fcpr, pkt->pkt_rsplen);
6892 
6893 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6894 	fcpr->fcp_response_len = 8;
6895 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6896 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6897 
6898 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6899 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6900 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6901 		}
6902 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6903 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6904 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6905 		}
6906 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6907 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6908 		    QL_SUCCESS) {
6909 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6910 		}
6911 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6912 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6913 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6914 		}
6915 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6916 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6917 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6918 		}
6919 	} else {
6920 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6921 	}
6922 
6923 	pkt->pkt_state = FC_PKT_SUCCESS;
6924 
6925 	/* Do command callback. */
6926 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6927 		ql_awaken_task_daemon(ha, sp, 0, 0);
6928 	}
6929 
6930 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6931 }
6932 
6933 /*
6934  * ql_fcp_ip_cmd
6935  *	Process fibre channel (FCP) Internet (IP) protocols commands.
6936  *
6937  * Input:
6938  *	ha:	adapter state pointer.
6939  *	pkt:	pointer to fc_packet.
6940  *	sp:	SRB pointer.
6941  *
6942  * Returns:
6943  *	FC_SUCCESS - the packet was accepted for transport.
6944  *	FC_TRANSPORT_ERROR - a transport error occurred.
6945  *
6946  * Context:
6947  *	Kernel context.
6948  */
6949 static int
6950 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6951 {
6952 	port_id_t	d_id;
6953 	ql_tgt_t	*tq;
6954 
6955 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6956 
6957 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6958 	if (tq == NULL) {
6959 		d_id.r.rsvd_1 = 0;
6960 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6961 		tq = ql_d_id_to_queue(ha, d_id);
6962 	}
6963 
6964 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6965 		/*
6966 		 * IP data is bound to pkt_cmd_dma
6967 		 */
6968 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6969 		    0, 0, DDI_DMA_SYNC_FORDEV);
6970 
6971 		/* Setup IOCB count. */
6972 		sp->iocb = ha->ip_cmd;
6973 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6974 			uint32_t	cnt;
6975 
6976 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6977 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6978 			if (cnt % ha->cmd_cont_segs) {
6979 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6980 			} else {
6981 				sp->req_cnt++;
6982 			}
6983 		} else {
6984 			sp->req_cnt = 1;
6985 		}
6986 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6987 
6988 		return (ql_start_cmd(ha, tq, pkt, sp));
6989 	} else {
6990 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6991 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6992 
6993 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6994 			ql_awaken_task_daemon(ha, sp, 0, 0);
6995 	}
6996 
6997 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6998 
6999 	return (FC_SUCCESS);
7000 }
7001 
7002 /*
7003  * ql_fc_services
7004  *	Process fibre channel services (name server).
7005  *
7006  * Input:
7007  *	ha:	adapter state pointer.
7008  *	pkt:	pointer to fc_packet.
7009  *
7010  * Returns:
7011  *	FC_SUCCESS - the packet was accepted for transport.
7012  *	FC_TRANSPORT_ERROR - a transport error occurred.
7013  *
7014  * Context:
7015  *	Kernel context.
7016  */
7017 static int
7018 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7019 {
7020 	uint32_t	cnt;
7021 	fc_ct_header_t	hdr;
7022 	la_els_rjt_t	rjt;
7023 	port_id_t	d_id;
7024 	ql_tgt_t	*tq;
7025 	ql_srb_t	*sp;
7026 	int		rval;
7027 
7028 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7029 
7030 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7031 	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7032 
7033 	bzero(&rjt, sizeof (rjt));
7034 
7035 	/* Do some sanity checks */
7036 	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7037 	    sizeof (fc_ct_header_t));
7038 	if (cnt > (uint32_t)pkt->pkt_rsplen) {
7039 		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7040 		    pkt->pkt_rsplen);
7041 		return (FC_ELS_MALFORMED);
7042 	}
7043 
7044 	switch (hdr.ct_fcstype) {
7045 	case FCSTYPE_DIRECTORY:
7046 	case FCSTYPE_MGMTSERVICE:
7047 		/* An FCA must make sure that the header is in big endian */
7048 		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7049 
7050 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7051 		tq = ql_d_id_to_queue(ha, d_id);
7052 		sp = (ql_srb_t *)pkt->pkt_fca_private;
7053 		if (tq == NULL ||
7054 		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7055 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
7056 			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7057 			rval = QL_SUCCESS;
7058 			break;
7059 		}
7060 
7061 		/*
7062 		 * Services data is bound to pkt_cmd_dma
7063 		 */
7064 		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7065 		    DDI_DMA_SYNC_FORDEV);
7066 
7067 		sp->flags |= SRB_MS_PKT;
7068 		sp->retry_count = 32;
7069 
7070 		/* Setup IOCB count. */
7071 		sp->iocb = ha->ms_cmd;
7072 		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7073 			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7074 			sp->req_cnt =
7075 			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7076 			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7077 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7078 			} else {
7079 				sp->req_cnt++;
7080 			}
7081 		} else {
7082 			sp->req_cnt = 1;
7083 		}
7084 		rval = ql_start_cmd(ha, tq, pkt, sp);
7085 
7086 		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7087 		    ha->instance, rval);
7088 
7089 		return (rval);
7090 
7091 	default:
7092 		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7093 		rval = QL_FUNCTION_PARAMETER_ERROR;
7094 		break;
7095 	}
7096 
7097 	if (rval != QL_SUCCESS) {
7098 		/* Build RJT. */
7099 		rjt.ls_code.ls_code = LA_ELS_RJT;
7100 		rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7101 
7102 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7103 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7104 
7105 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7106 		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7107 		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7108 	}
7109 
7110 	/* Do command callback. */
7111 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7112 		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7113 		    0, 0);
7114 	}
7115 
7116 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7117 
7118 	return (FC_SUCCESS);
7119 }
7120 
7121 /*
7122  * ql_cthdr_endian
7123  *	Change endianess of ct passthrough header and payload.
7124  *
7125  * Input:
7126  *	acc_handle:	DMA buffer access handle.
7127  *	ct_hdr:		Pointer to header.
7128  *	restore:	Restore first flag.
7129  *
7130  * Context:
7131  *	Interrupt or Kernel context, no mailbox commands allowed.
7132  */
7133 void
7134 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7135     boolean_t restore)
7136 {
7137 	uint8_t		i, *bp;
7138 	fc_ct_header_t	hdr;
7139 	uint32_t	*hdrp = (uint32_t *)&hdr;
7140 
7141 	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7142 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7143 
7144 	if (restore) {
7145 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7146 			*hdrp = BE_32(*hdrp);
7147 			hdrp++;
7148 		}
7149 	}
7150 
7151 	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7152 		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7153 
7154 		switch (hdr.ct_cmdrsp) {
7155 		case NS_GA_NXT:
7156 		case NS_GPN_ID:
7157 		case NS_GNN_ID:
7158 		case NS_GCS_ID:
7159 		case NS_GFT_ID:
7160 		case NS_GSPN_ID:
7161 		case NS_GPT_ID:
7162 		case NS_GID_FT:
7163 		case NS_GID_PT:
7164 		case NS_RPN_ID:
7165 		case NS_RNN_ID:
7166 		case NS_RSPN_ID:
7167 		case NS_DA_ID:
7168 			BIG_ENDIAN_32(bp);
7169 			break;
7170 		case NS_RFT_ID:
7171 		case NS_RCS_ID:
7172 		case NS_RPT_ID:
7173 			BIG_ENDIAN_32(bp);
7174 			bp += 4;
7175 			BIG_ENDIAN_32(bp);
7176 			break;
7177 		case NS_GNN_IP:
7178 		case NS_GIPA_IP:
7179 			BIG_ENDIAN(bp, 16);
7180 			break;
7181 		case NS_RIP_NN:
7182 			bp += 8;
7183 			BIG_ENDIAN(bp, 16);
7184 			break;
7185 		case NS_RIPA_NN:
7186 			bp += 8;
7187 			BIG_ENDIAN_64(bp);
7188 			break;
7189 		default:
7190 			break;
7191 		}
7192 	}
7193 
7194 	if (restore == B_FALSE) {
7195 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7196 			*hdrp = BE_32(*hdrp);
7197 			hdrp++;
7198 		}
7199 	}
7200 
7201 	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7202 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7203 }
7204 
7205 /*
7206  * ql_start_cmd
7207  *	Finishes starting fibre channel protocol (FCP) command.
7208  *
7209  * Input:
7210  *	ha:	adapter state pointer.
7211  *	tq:	target queue pointer.
7212  *	pkt:	pointer to fc_packet.
7213  *	sp:	SRB pointer.
7214  *
7215  * Context:
7216  *	Kernel context.
7217  */
7218 static int
7219 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7220     ql_srb_t *sp)
7221 {
7222 	int		rval = FC_SUCCESS;
7223 	time_t		poll_wait = 0;
7224 	ql_lun_t	*lq = sp->lun_queue;
7225 
7226 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7227 
7228 	sp->handle = 0;
7229 
7230 	/* Set poll for finish. */
7231 	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7232 		sp->flags |= SRB_POLL;
7233 		if (pkt->pkt_timeout == 0) {
7234 			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7235 		}
7236 	}
7237 
7238 	/* Acquire device queue lock. */
7239 	DEVICE_QUEUE_LOCK(tq);
7240 
7241 	/*
7242 	 * If we need authentication, report device busy to
7243 	 * upper layers to retry later
7244 	 */
7245 	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7246 		DEVICE_QUEUE_UNLOCK(tq);
7247 		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
7248 		    tq->d_id.b24);
7249 		return (FC_DEVICE_BUSY);
7250 	}
7251 
7252 	/* Insert command onto watchdog queue. */
7253 	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
7254 		ql_timeout_insert(ha, tq, sp);
7255 	} else {
7256 		/*
7257 		 * Run dump requests in polled mode as kernel threads
7258 		 * and interrupts may have been disabled.
7259 		 */
7260 		sp->flags |= SRB_POLL;
7261 		sp->init_wdg_q_time = 0;
7262 		sp->isp_timeout = 0;
7263 	}
7264 
7265 	/* If a polling command setup wait time. */
7266 	if (sp->flags & SRB_POLL) {
7267 		if (sp->flags & SRB_WATCHDOG_ENABLED) {
7268 			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7269 		} else {
7270 			poll_wait = pkt->pkt_timeout;
7271 		}
7272 	}
7273 
7274 	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7275 	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7276 		/* Set ending status. */
7277 		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7278 
7279 		/* Call done routine to handle completions. */
7280 		sp->cmd.next = NULL;
7281 		DEVICE_QUEUE_UNLOCK(tq);
7282 		ql_done(&sp->cmd);
7283 	} else {
7284 		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7285 			int do_lip = 0;
7286 
7287 			DEVICE_QUEUE_UNLOCK(tq);
7288 
7289 			ADAPTER_STATE_LOCK(ha);
7290 			if ((do_lip = ha->pha->lip_on_panic) == 0) {
7291 				ha->pha->lip_on_panic++;
7292 			}
7293 			ADAPTER_STATE_UNLOCK(ha);
7294 
7295 			if (!do_lip) {
7296 
7297 				/*
7298 				 * That Qlogic F/W performs PLOGI, PRLI, etc
7299 				 * is helpful here. If a PLOGI fails for some
7300 				 * reason, you would get CS_PORT_LOGGED_OUT
7301 				 * or some such error; and we should get a
7302 				 * careful polled mode login kicked off inside
7303 				 * of this driver itself. You don't have FC
7304 				 * transport's services as all threads are
7305 				 * suspended, interrupts disabled, and so
7306 				 * on. Right now we do re-login if the packet
7307 				 * state isn't FC_PKT_SUCCESS.
7308 				 */
7309 				(void) ql_abort_isp(ha);
7310 			}
7311 
7312 			ql_start_iocb(ha, sp);
7313 		} else {
7314 			/* Add the command to the device queue */
7315 			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7316 				ql_add_link_t(&lq->cmd, &sp->cmd);
7317 			} else {
7318 				ql_add_link_b(&lq->cmd, &sp->cmd);
7319 			}
7320 
7321 			sp->flags |= SRB_IN_DEVICE_QUEUE;
7322 
7323 			/* Check whether next message can be processed */
7324 			ql_next(ha, lq);
7325 		}
7326 	}
7327 
7328 	/* If polling, wait for finish. */
7329 	if (poll_wait) {
7330 		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
7331 			int	res;
7332 
7333 			res = ql_abort((opaque_t)ha, pkt, 0);
7334 			if (res != FC_SUCCESS && res != FC_ABORTED) {
7335 				DEVICE_QUEUE_LOCK(tq);
7336 				ql_remove_link(&lq->cmd, &sp->cmd);
7337 				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7338 				DEVICE_QUEUE_UNLOCK(tq);
7339 			}
7340 		}
7341 
7342 		if (pkt->pkt_state != FC_PKT_SUCCESS) {
7343 			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7344 			rval = FC_TRANSPORT_ERROR;
7345 		}
7346 
7347 		if (ddi_in_panic()) {
7348 			if (pkt->pkt_state != FC_PKT_SUCCESS) {
7349 				port_id_t d_id;
7350 
7351 				/*
7352 				 * successful LOGIN implies by design
7353 				 * that PRLI also succeeded for disks
7354 				 * Note also that there is no special
7355 				 * mailbox command to send PRLI.
7356 				 */
7357 				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7358 				(void) ql_login_port(ha, d_id);
7359 			}
7360 		}
7361 
7362 		/*
7363 		 * This should only happen during CPR dumping
7364 		 */
7365 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7366 		    pkt->pkt_comp) {
7367 			sp->flags &= ~SRB_POLL;
7368 			(*pkt->pkt_comp)(pkt);
7369 		}
7370 	}
7371 
7372 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7373 
7374 	return (rval);
7375 }
7376 
7377 /*
7378  * ql_poll_cmd
7379  *	Polls commands for completion.
7380  *
7381  * Input:
7382  *	ha = adapter state pointer.
7383  *	sp = SRB command pointer.
7384  *	poll_wait = poll wait time in seconds.
7385  *
7386  * Returns:
7387  *	QL local function return status code.
7388  *
7389  * Context:
7390  *	Kernel context.
7391  */
7392 static int
7393 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7394 {
7395 	int			rval = QL_SUCCESS;
7396 	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
7397 	ql_adapter_state_t	*ha = vha->pha;
7398 
7399 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7400 
7401 	while (sp->flags & SRB_POLL) {
7402 
7403 		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7404 		    ha->idle_timer >= 15 || ddi_in_panic()) {
7405 
7406 			/* If waiting for restart, do it now. */
7407 			if (ha->port_retry_timer != 0) {
7408 				ADAPTER_STATE_LOCK(ha);
7409 				ha->port_retry_timer = 0;
7410 				ADAPTER_STATE_UNLOCK(ha);
7411 
7412 				TASK_DAEMON_LOCK(ha);
7413 				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7414 				TASK_DAEMON_UNLOCK(ha);
7415 			}
7416 
7417 			if ((CFG_IST(ha, CFG_CTRL_242581) ?
7418 			    RD32_IO_REG(ha, istatus) :
7419 			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
7420 				(void) ql_isr((caddr_t)ha);
7421 				INTR_LOCK(ha);
7422 				ha->intr_claimed = TRUE;
7423 				INTR_UNLOCK(ha);
7424 			}
7425 
7426 			/*
7427 			 * Call task thread function in case the
7428 			 * daemon is not running.
7429 			 */
7430 			TASK_DAEMON_LOCK(ha);
7431 
7432 			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7433 			    QL_TASK_PENDING(ha)) {
7434 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7435 				ql_task_thread(ha);
7436 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7437 			}
7438 
7439 			TASK_DAEMON_UNLOCK(ha);
7440 		}
7441 
7442 		if (msecs_left < 10) {
7443 			rval = QL_FUNCTION_TIMEOUT;
7444 			break;
7445 		}
7446 
7447 		/*
7448 		 * Polling interval is 10 milli seconds; Increasing
7449 		 * the polling interval to seconds since disk IO
7450 		 * timeout values are ~60 seconds is tempting enough,
7451 		 * but CPR dump time increases, and so will the crash
7452 		 * dump time; Don't toy with the settings without due
7453 		 * consideration for all the scenarios that will be
7454 		 * impacted.
7455 		 */
7456 		ql_delay(ha, 10000);
7457 		msecs_left -= 10;
7458 	}
7459 
7460 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7461 
7462 	return (rval);
7463 }
7464 
7465 /*
7466  * ql_next
7467  *	Retrieve and process next job in the device queue.
7468  *
7469  * Input:
7470  *	ha:	adapter state pointer.
7471  *	lq:	LUN queue pointer.
7472  *	DEVICE_QUEUE_LOCK must be already obtained.
7473  *
7474  * Output:
7475  *	Releases DEVICE_QUEUE_LOCK upon exit.
7476  *
7477  * Context:
7478  *	Interrupt or Kernel context, no mailbox commands allowed.
7479  */
7480 void
7481 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7482 {
7483 	ql_srb_t		*sp;
7484 	ql_link_t		*link;
7485 	ql_tgt_t		*tq = lq->target_queue;
7486 	ql_adapter_state_t	*ha = vha->pha;
7487 
7488 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7489 
7490 	if (ddi_in_panic()) {
7491 		DEVICE_QUEUE_UNLOCK(tq);
7492 		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7493 		    ha->instance);
7494 		return;
7495 	}
7496 
7497 	while ((link = lq->cmd.first) != NULL) {
7498 		sp = link->base_address;
7499 
7500 		/* Exit if can not start commands. */
7501 		if (DRIVER_SUSPENDED(ha) ||
7502 		    (ha->flags & ONLINE) == 0 ||
7503 		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
7504 		    sp->flags & SRB_ABORT ||
7505 		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7506 		    TQF_QUEUE_SUSPENDED)) {
7507 			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7508 			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7509 			    ha->task_daemon_flags, tq->flags, sp->flags,
7510 			    ha->flags, tq->loop_id);
7511 			break;
7512 		}
7513 
7514 		/*
7515 		 * Find out the LUN number for untagged command use.
7516 		 * If there is an untagged command pending for the LUN,
7517 		 * we would not submit another untagged command
7518 		 * or if reached LUN execution throttle.
7519 		 */
7520 		if (sp->flags & SRB_FCP_CMD_PKT) {
7521 			if (lq->flags & LQF_UNTAGGED_PENDING ||
7522 			    lq->lun_outcnt >= ha->execution_throttle) {
7523 				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7524 				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7525 				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
7526 				break;
7527 			}
7528 			if (sp->fcp->fcp_cntl.cntl_qtype ==
7529 			    FCP_QTYPE_UNTAGGED) {
7530 				/*
7531 				 * Set the untagged-flag for the LUN
7532 				 * so that no more untagged commands
7533 				 * can be submitted for this LUN.
7534 				 */
7535 				lq->flags |= LQF_UNTAGGED_PENDING;
7536 			}
7537 
7538 			/* Count command as sent. */
7539 			lq->lun_outcnt++;
7540 		}
7541 
7542 		/* Remove srb from device queue. */
7543 		ql_remove_link(&lq->cmd, &sp->cmd);
7544 		sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7545 
7546 		tq->outcnt++;
7547 
7548 		ql_start_iocb(vha, sp);
7549 	}
7550 
7551 	/* Release device queue lock. */
7552 	DEVICE_QUEUE_UNLOCK(tq);
7553 
7554 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7555 }
7556 
7557 /*
7558  * ql_done
7559  *	Process completed commands.
7560  *
7561  * Input:
7562  *	link:	first command link in chain.
7563  *
7564  * Context:
7565  *	Interrupt or Kernel context, no mailbox commands allowed.
7566  */
7567 void
7568 ql_done(ql_link_t *link)
7569 {
7570 	ql_adapter_state_t	*ha;
7571 	ql_link_t		*next_link;
7572 	ql_srb_t		*sp;
7573 	ql_tgt_t		*tq;
7574 	ql_lun_t		*lq;
7575 
7576 	QL_PRINT_3(CE_CONT, "started\n");
7577 
7578 	for (; link != NULL; link = next_link) {
7579 		next_link = link->next;
7580 		sp = link->base_address;
7581 		ha = sp->ha;
7582 
7583 		if (sp->flags & SRB_UB_CALLBACK) {
7584 			QL_UB_LOCK(ha);
7585 			if (sp->flags & SRB_UB_IN_ISP) {
7586 				if (ha->ub_outcnt != 0) {
7587 					ha->ub_outcnt--;
7588 				}
7589 				QL_UB_UNLOCK(ha);
7590 				ql_isp_rcvbuf(ha);
7591 				QL_UB_LOCK(ha);
7592 			}
7593 			QL_UB_UNLOCK(ha);
7594 			ql_awaken_task_daemon(ha, sp, 0, 0);
7595 		} else {
7596 			/* Free outstanding command slot. */
7597 			if (sp->handle != 0) {
7598 				ha->outstanding_cmds[
7599 				    sp->handle & OSC_INDEX_MASK] = NULL;
7600 				sp->handle = 0;
7601 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7602 			}
7603 
7604 			/* Acquire device queue lock. */
7605 			lq = sp->lun_queue;
7606 			tq = lq->target_queue;
7607 			DEVICE_QUEUE_LOCK(tq);
7608 
7609 			/* Decrement outstanding commands on device. */
7610 			if (tq->outcnt != 0) {
7611 				tq->outcnt--;
7612 			}
7613 
7614 			if (sp->flags & SRB_FCP_CMD_PKT) {
7615 				if (sp->fcp->fcp_cntl.cntl_qtype ==
7616 				    FCP_QTYPE_UNTAGGED) {
7617 					/*
7618 					 * Clear the flag for this LUN so that
7619 					 * untagged commands can be submitted
7620 					 * for it.
7621 					 */
7622 					lq->flags &= ~LQF_UNTAGGED_PENDING;
7623 				}
7624 
7625 				if (lq->lun_outcnt != 0) {
7626 					lq->lun_outcnt--;
7627 				}
7628 			}
7629 
7630 			/* Reset port down retry count on good completion. */
7631 			if (sp->pkt->pkt_reason == CS_COMPLETE) {
7632 				tq->port_down_retry_count =
7633 				    ha->port_down_retry_count;
7634 				tq->qfull_retry_count = ha->qfull_retry_count;
7635 			}
7636 
7637 			/* Place request back on top of target command queue */
7638 			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7639 			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7640 			    sp->flags & SRB_RETRY &&
7641 			    (sp->flags & SRB_WATCHDOG_ENABLED &&
7642 			    sp->wdg_q_time > 1)) {
7643 				sp->flags &= ~(SRB_ISP_STARTED |
7644 				    SRB_ISP_COMPLETED | SRB_RETRY);
7645 
7646 				/* Reset watchdog timer */
7647 				sp->wdg_q_time = sp->init_wdg_q_time;
7648 
7649 				/* Issue marker command on reset status. */
7650 				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7651 				    (sp->pkt->pkt_reason == CS_RESET ||
7652 				    (CFG_IST(ha, CFG_CTRL_242581) &&
7653 				    sp->pkt->pkt_reason == CS_ABORTED))) {
7654 					(void) ql_marker(ha, tq->loop_id, 0,
7655 					    MK_SYNC_ID);
7656 				}
7657 
7658 				ql_add_link_t(&lq->cmd, &sp->cmd);
7659 				sp->flags |= SRB_IN_DEVICE_QUEUE;
7660 				ql_next(ha, lq);
7661 			} else {
7662 				/* Remove command from watchdog queue. */
7663 				if (sp->flags & SRB_WATCHDOG_ENABLED) {
7664 					ql_remove_link(&tq->wdg, &sp->wdg);
7665 					sp->flags &= ~SRB_WATCHDOG_ENABLED;
7666 				}
7667 
7668 				if (lq->cmd.first != NULL) {
7669 					ql_next(ha, lq);
7670 				} else {
7671 					/* Release LU queue specific lock. */
7672 					DEVICE_QUEUE_UNLOCK(tq);
7673 					if (ha->pha->pending_cmds.first !=
7674 					    NULL) {
7675 						ql_start_iocb(ha, NULL);
7676 					}
7677 				}
7678 
7679 				/* Sync buffers if required.  */
7680 				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7681 					(void) ddi_dma_sync(
7682 					    sp->pkt->pkt_resp_dma,
7683 					    0, 0, DDI_DMA_SYNC_FORCPU);
7684 				}
7685 
7686 				/* Map ISP completion codes. */
7687 				sp->pkt->pkt_expln = FC_EXPLN_NONE;
7688 				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7689 				switch (sp->pkt->pkt_reason) {
7690 				case CS_COMPLETE:
7691 					sp->pkt->pkt_state = FC_PKT_SUCCESS;
7692 					break;
7693 				case CS_RESET:
7694 					/* Issue marker command. */
7695 					if (!(ha->task_daemon_flags &
7696 					    LOOP_DOWN)) {
7697 						(void) ql_marker(ha,
7698 						    tq->loop_id, 0,
7699 						    MK_SYNC_ID);
7700 					}
7701 					sp->pkt->pkt_state =
7702 					    FC_PKT_PORT_OFFLINE;
7703 					sp->pkt->pkt_reason =
7704 					    FC_REASON_ABORTED;
7705 					break;
7706 				case CS_RESOUCE_UNAVAILABLE:
7707 					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7708 					sp->pkt->pkt_reason =
7709 					    FC_REASON_PKT_BUSY;
7710 					break;
7711 
7712 				case CS_TIMEOUT:
7713 					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7714 					sp->pkt->pkt_reason =
7715 					    FC_REASON_HW_ERROR;
7716 					break;
7717 				case CS_DATA_OVERRUN:
7718 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7719 					sp->pkt->pkt_reason =
7720 					    FC_REASON_OVERRUN;
7721 					break;
7722 				case CS_PORT_UNAVAILABLE:
7723 				case CS_PORT_LOGGED_OUT:
7724 					sp->pkt->pkt_state =
7725 					    FC_PKT_PORT_OFFLINE;
7726 					sp->pkt->pkt_reason =
7727 					    FC_REASON_LOGIN_REQUIRED;
7728 					ql_send_logo(ha, tq, NULL);
7729 					break;
7730 				case CS_PORT_CONFIG_CHG:
7731 					sp->pkt->pkt_state =
7732 					    FC_PKT_PORT_OFFLINE;
7733 					sp->pkt->pkt_reason =
7734 					    FC_REASON_OFFLINE;
7735 					break;
7736 				case CS_QUEUE_FULL:
7737 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7738 					sp->pkt->pkt_reason = FC_REASON_QFULL;
7739 					break;
7740 
7741 				case CS_ABORTED:
7742 					DEVICE_QUEUE_LOCK(tq);
7743 					if (tq->flags & (TQF_RSCN_RCVD |
7744 					    TQF_NEED_AUTHENTICATION)) {
7745 						sp->pkt->pkt_state =
7746 						    FC_PKT_PORT_OFFLINE;
7747 						sp->pkt->pkt_reason =
7748 						    FC_REASON_LOGIN_REQUIRED;
7749 					} else {
7750 						sp->pkt->pkt_state =
7751 						    FC_PKT_LOCAL_RJT;
7752 						sp->pkt->pkt_reason =
7753 						    FC_REASON_ABORTED;
7754 					}
7755 					DEVICE_QUEUE_UNLOCK(tq);
7756 					break;
7757 
7758 				case CS_TRANSPORT:
7759 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7760 					sp->pkt->pkt_reason =
7761 					    FC_PKT_TRAN_ERROR;
7762 					break;
7763 
7764 				case CS_DATA_UNDERRUN:
7765 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7766 					sp->pkt->pkt_reason =
7767 					    FC_REASON_UNDERRUN;
7768 					break;
7769 				case CS_DMA_ERROR:
7770 				case CS_BAD_PAYLOAD:
7771 				case CS_UNKNOWN:
7772 				case CS_CMD_FAILED:
7773 				default:
7774 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7775 					sp->pkt->pkt_reason =
7776 					    FC_REASON_HW_ERROR;
7777 					break;
7778 				}
7779 
7780 				/* Now call the pkt completion callback */
7781 				if (sp->flags & SRB_POLL) {
7782 					sp->flags &= ~SRB_POLL;
7783 				} else if (sp->pkt->pkt_comp) {
7784 					if (sp->pkt->pkt_tran_flags &
7785 					    FC_TRAN_IMMEDIATE_CB) {
7786 						(*sp->pkt->pkt_comp)(sp->pkt);
7787 					} else {
7788 						ql_awaken_task_daemon(ha, sp,
7789 						    0, 0);
7790 					}
7791 				}
7792 			}
7793 		}
7794 	}
7795 
7796 	QL_PRINT_3(CE_CONT, "done\n");
7797 }
7798 
7799 /*
7800  * ql_awaken_task_daemon
7801  *	Adds command completion callback to callback queue and/or
7802  *	awakens task daemon thread.
7803  *
7804  * Input:
7805  *	ha:		adapter state pointer.
7806  *	sp:		srb pointer.
7807  *	set_flags:	task daemon flags to set.
7808  *	reset_flags:	task daemon flags to reset.
7809  *
7810  * Context:
7811  *	Interrupt or Kernel context, no mailbox commands allowed.
7812  */
7813 void
7814 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
7815     uint32_t set_flags, uint32_t reset_flags)
7816 {
7817 	ql_adapter_state_t	*ha = vha->pha;
7818 
7819 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7820 
7821 	/* Acquire task daemon lock. */
7822 	TASK_DAEMON_LOCK(ha);
7823 
7824 	if (set_flags & ISP_ABORT_NEEDED) {
7825 		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
7826 			set_flags &= ~ISP_ABORT_NEEDED;
7827 		}
7828 	}
7829 
7830 	ha->task_daemon_flags |= set_flags;
7831 	ha->task_daemon_flags &= ~reset_flags;
7832 
7833 	if (QL_DAEMON_SUSPENDED(ha)) {
7834 		if (sp != NULL) {
7835 			TASK_DAEMON_UNLOCK(ha);
7836 
7837 			/* Do callback. */
7838 			if (sp->flags & SRB_UB_CALLBACK) {
7839 				ql_unsol_callback(sp);
7840 			} else {
7841 				(*sp->pkt->pkt_comp)(sp->pkt);
7842 			}
7843 		} else {
7844 			if (!(curthread->t_flag & T_INTR_THREAD) &&
7845 			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
7846 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7847 				ql_task_thread(ha);
7848 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7849 			}
7850 
7851 			TASK_DAEMON_UNLOCK(ha);
7852 		}
7853 	} else {
7854 		if (sp != NULL) {
7855 			ql_add_link_b(&ha->callback_queue, &sp->cmd);
7856 		}
7857 
7858 		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
7859 			cv_broadcast(&ha->cv_task_daemon);
7860 		}
7861 		TASK_DAEMON_UNLOCK(ha);
7862 	}
7863 
7864 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7865 }
7866 
7867 /*
7868  * ql_task_daemon
7869  *	Thread that is awaken by the driver when a
7870  *	background needs to be done.
7871  *
7872  * Input:
7873  *	arg = adapter state pointer.
7874  *
7875  * Context:
7876  *	Kernel context.
7877  */
7878 static void
7879 ql_task_daemon(void *arg)
7880 {
7881 	ql_adapter_state_t	*ha = (void *)arg;
7882 
7883 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7884 
7885 	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
7886 	    "ql_task_daemon");
7887 
7888 	/* Acquire task daemon lock. */
7889 	TASK_DAEMON_LOCK(ha);
7890 
7891 	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
7892 
7893 	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
7894 		ql_task_thread(ha);
7895 
7896 		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
7897 
7898 		/*
7899 		 * Before we wait on the conditional variable, we
7900 		 * need to check if STOP_FLG is set for us to terminate
7901 		 */
7902 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
7903 			break;
7904 		}
7905 
7906 		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
7907 		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
7908 
7909 		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
7910 
7911 		/* If killed, stop task daemon */
7912 		if (cv_wait_sig(&ha->cv_task_daemon,
7913 		    &ha->task_daemon_mutex) == 0) {
7914 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
7915 		}
7916 
7917 		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
7918 
7919 		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
7920 		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
7921 
7922 		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
7923 	}
7924 
7925 	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
7926 	    TASK_DAEMON_ALIVE_FLG);
7927 
7928 	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
7929 	CALLB_CPR_EXIT(&ha->cprinfo);
7930 
7931 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7932 
7933 	thread_exit();
7934 }
7935 
7936 /*
7937  * ql_task_thread
7938  *	Thread run by daemon.
7939  *
7940  * Input:
7941  *	ha = adapter state pointer.
7942  *	TASK_DAEMON_LOCK must be acquired prior to call.
7943  *
7944  * Context:
7945  *	Kernel context.
7946  */
7947 static void
7948 ql_task_thread(ql_adapter_state_t *ha)
7949 {
7950 	int			loop_again, rval;
7951 	ql_srb_t		*sp;
7952 	ql_head_t		*head;
7953 	ql_link_t		*link;
7954 	caddr_t			msg;
7955 	ql_adapter_state_t	*vha;
7956 
7957 	do {
7958 		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
7959 		    ha->instance, ha->task_daemon_flags);
7960 
7961 		loop_again = FALSE;
7962 
7963 		QL_PM_LOCK(ha);
7964 		if (ha->power_level != PM_LEVEL_D0) {
7965 			QL_PM_UNLOCK(ha);
7966 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
7967 			break;
7968 		}
7969 		QL_PM_UNLOCK(ha);
7970 
7971 		/* IDC acknowledge needed. */
7972 		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
7973 			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
7974 			ADAPTER_STATE_LOCK(ha);
7975 			switch (ha->idc_mb[2]) {
7976 			case IDC_OPC_DRV_START:
7977 				if (ha->idc_restart_mpi != 0) {
7978 					ha->idc_restart_mpi--;
7979 					if (ha->idc_restart_mpi == 0) {
7980 						ha->restart_mpi_timer = 0;
7981 						ha->task_daemon_flags &=
7982 						    ~TASK_DAEMON_STALLED_FLG;
7983 					}
7984 				}
7985 				if (ha->idc_flash_acc != 0) {
7986 					ha->idc_flash_acc--;
7987 					if (ha->idc_flash_acc == 0) {
7988 						ha->flash_acc_timer = 0;
7989 						GLOBAL_HW_LOCK();
7990 					}
7991 				}
7992 				break;
7993 			case IDC_OPC_FLASH_ACC:
7994 				ha->flash_acc_timer = 30;
7995 				if (ha->idc_flash_acc == 0) {
7996 					GLOBAL_HW_UNLOCK();
7997 				}
7998 				ha->idc_flash_acc++;
7999 				break;
8000 			case IDC_OPC_RESTART_MPI:
8001 				ha->restart_mpi_timer = 30;
8002 				ha->idc_restart_mpi++;
8003 				ha->task_daemon_flags |=
8004 				    TASK_DAEMON_STALLED_FLG;
8005 				break;
8006 			default:
8007 				EL(ha, "Unknown IDC opcode=%xh\n",
8008 				    ha->idc_mb[2]);
8009 				break;
8010 			}
8011 			ADAPTER_STATE_UNLOCK(ha);
8012 
8013 			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
8014 				TASK_DAEMON_UNLOCK(ha);
8015 				rval = ql_idc_ack(ha);
8016 				if (rval != QL_SUCCESS) {
8017 					EL(ha, "idc_ack status=%xh\n", rval);
8018 				}
8019 				TASK_DAEMON_LOCK(ha);
8020 				loop_again = TRUE;
8021 			}
8022 		}
8023 
8024 		if (ha->flags & ADAPTER_SUSPENDED ||
8025 		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
8026 		    DRIVER_STALL) ||
8027 		    (ha->flags & ONLINE) == 0) {
8028 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8029 			break;
8030 		}
8031 		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8032 
8033 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8034 			TASK_DAEMON_UNLOCK(ha);
8035 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8036 			TASK_DAEMON_LOCK(ha);
8037 			loop_again = TRUE;
8038 		}
8039 
8040 		/* Idle Check. */
8041 		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8042 			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8043 			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8044 				TASK_DAEMON_UNLOCK(ha);
8045 				ql_idle_check(ha);
8046 				TASK_DAEMON_LOCK(ha);
8047 				loop_again = TRUE;
8048 			}
8049 		}
8050 
8051 		/* Crystal+ port#0 bypass transition */
8052 		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8053 			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8054 			TASK_DAEMON_UNLOCK(ha);
8055 			(void) ql_initiate_lip(ha);
8056 			TASK_DAEMON_LOCK(ha);
8057 			loop_again = TRUE;
8058 		}
8059 
8060 		/* Abort queues needed. */
8061 		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8062 			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8063 			TASK_DAEMON_UNLOCK(ha);
8064 			ql_abort_queues(ha);
8065 			TASK_DAEMON_LOCK(ha);
8066 		}
8067 
8068 		/* Not suspended, awaken waiting routines. */
8069 		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8070 		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8071 			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8072 			cv_broadcast(&ha->cv_dr_suspended);
8073 			loop_again = TRUE;
8074 		}
8075 
8076 		/* Handle RSCN changes. */
8077 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8078 			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8079 				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8080 				TASK_DAEMON_UNLOCK(ha);
8081 				(void) ql_handle_rscn_update(vha);
8082 				TASK_DAEMON_LOCK(ha);
8083 				loop_again = TRUE;
8084 			}
8085 		}
8086 
8087 		/* Handle state changes. */
8088 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8089 			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8090 			    !(ha->task_daemon_flags &
8091 			    TASK_DAEMON_POWERING_DOWN)) {
8092 				/* Report state change. */
8093 				EL(vha, "state change = %xh\n", vha->state);
8094 				vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8095 
8096 				if (vha->task_daemon_flags &
8097 				    COMMAND_WAIT_NEEDED) {
8098 					vha->task_daemon_flags &=
8099 					    ~COMMAND_WAIT_NEEDED;
8100 					if (!(ha->task_daemon_flags &
8101 					    COMMAND_WAIT_ACTIVE)) {
8102 						ha->task_daemon_flags |=
8103 						    COMMAND_WAIT_ACTIVE;
8104 						TASK_DAEMON_UNLOCK(ha);
8105 						ql_cmd_wait(ha);
8106 						TASK_DAEMON_LOCK(ha);
8107 						ha->task_daemon_flags &=
8108 						    ~COMMAND_WAIT_ACTIVE;
8109 					}
8110 				}
8111 
8112 				msg = NULL;
8113 				if (FC_PORT_STATE_MASK(vha->state) ==
8114 				    FC_STATE_OFFLINE) {
8115 					if (vha->task_daemon_flags &
8116 					    STATE_ONLINE) {
8117 						if (ha->topology &
8118 						    QL_LOOP_CONNECTION) {
8119 							msg = "Loop OFFLINE";
8120 						} else {
8121 							msg = "Link OFFLINE";
8122 						}
8123 					}
8124 					vha->task_daemon_flags &=
8125 					    ~STATE_ONLINE;
8126 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8127 				    FC_STATE_LOOP) {
8128 					if (!(vha->task_daemon_flags &
8129 					    STATE_ONLINE)) {
8130 						msg = "Loop ONLINE";
8131 					}
8132 					vha->task_daemon_flags |= STATE_ONLINE;
8133 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8134 				    FC_STATE_ONLINE) {
8135 					if (!(vha->task_daemon_flags &
8136 					    STATE_ONLINE)) {
8137 						msg = "Link ONLINE";
8138 					}
8139 					vha->task_daemon_flags |= STATE_ONLINE;
8140 				} else {
8141 					msg = "Unknown Link state";
8142 				}
8143 
8144 				if (msg != NULL) {
8145 					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8146 					    "%s", QL_NAME, ha->instance,
8147 					    vha->vp_index, msg);
8148 				}
8149 
8150 				if (vha->flags & FCA_BOUND) {
8151 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8152 					    "cb state=%xh\n", ha->instance,
8153 					    vha->vp_index, vha->state);
8154 					TASK_DAEMON_UNLOCK(ha);
8155 					(vha->bind_info.port_statec_cb)
8156 					    (vha->bind_info.port_handle,
8157 					    vha->state);
8158 					TASK_DAEMON_LOCK(ha);
8159 				}
8160 				loop_again = TRUE;
8161 			}
8162 		}
8163 
8164 		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8165 		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8166 			EL(ha, "processing LIP reset\n");
8167 			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8168 			TASK_DAEMON_UNLOCK(ha);
8169 			for (vha = ha; vha != NULL; vha = vha->vp_next) {
8170 				if (vha->flags & FCA_BOUND) {
8171 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8172 					    "cb reset\n", ha->instance,
8173 					    vha->vp_index);
8174 					(vha->bind_info.port_statec_cb)
8175 					    (vha->bind_info.port_handle,
8176 					    FC_STATE_TARGET_PORT_RESET);
8177 				}
8178 			}
8179 			TASK_DAEMON_LOCK(ha);
8180 			loop_again = TRUE;
8181 		}
8182 
8183 		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8184 		    FIRMWARE_UP)) {
8185 			/*
8186 			 * The firmware needs more unsolicited
8187 			 * buffers. We cannot allocate any new
8188 			 * buffers unless the ULP module requests
8189 			 * for new buffers. All we can do here is
8190 			 * to give received buffers from the pool
8191 			 * that is already allocated
8192 			 */
8193 			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8194 			TASK_DAEMON_UNLOCK(ha);
8195 			ql_isp_rcvbuf(ha);
8196 			TASK_DAEMON_LOCK(ha);
8197 			loop_again = TRUE;
8198 		}
8199 
8200 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8201 			TASK_DAEMON_UNLOCK(ha);
8202 			(void) ql_abort_isp(ha);
8203 			TASK_DAEMON_LOCK(ha);
8204 			loop_again = TRUE;
8205 		}
8206 
8207 		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8208 		    COMMAND_WAIT_NEEDED))) {
8209 			if (QL_IS_SET(ha->task_daemon_flags,
8210 			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8211 				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8212 				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8213 					ha->task_daemon_flags |= RESET_ACTIVE;
8214 					TASK_DAEMON_UNLOCK(ha);
8215 					for (vha = ha; vha != NULL;
8216 					    vha = vha->vp_next) {
8217 						ql_rst_aen(vha);
8218 					}
8219 					TASK_DAEMON_LOCK(ha);
8220 					ha->task_daemon_flags &= ~RESET_ACTIVE;
8221 					loop_again = TRUE;
8222 				}
8223 			}
8224 
8225 			if (QL_IS_SET(ha->task_daemon_flags,
8226 			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8227 				if (!(ha->task_daemon_flags &
8228 				    LOOP_RESYNC_ACTIVE)) {
8229 					ha->task_daemon_flags |=
8230 					    LOOP_RESYNC_ACTIVE;
8231 					TASK_DAEMON_UNLOCK(ha);
8232 					(void) ql_loop_resync(ha);
8233 					TASK_DAEMON_LOCK(ha);
8234 					loop_again = TRUE;
8235 				}
8236 			}
8237 		}
8238 
8239 		/* Port retry needed. */
8240 		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8241 			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8242 			ADAPTER_STATE_LOCK(ha);
8243 			ha->port_retry_timer = 0;
8244 			ADAPTER_STATE_UNLOCK(ha);
8245 
8246 			TASK_DAEMON_UNLOCK(ha);
8247 			ql_restart_queues(ha);
8248 			TASK_DAEMON_LOCK(ha);
8249 			loop_again = B_TRUE;
8250 		}
8251 
8252 		/* iiDMA setting needed? */
8253 		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8254 			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8255 
8256 			TASK_DAEMON_UNLOCK(ha);
8257 			ql_iidma(ha);
8258 			TASK_DAEMON_LOCK(ha);
8259 			loop_again = B_TRUE;
8260 		}
8261 
8262 		if (ha->task_daemon_flags & SEND_PLOGI) {
8263 			ha->task_daemon_flags &= ~SEND_PLOGI;
8264 			TASK_DAEMON_UNLOCK(ha);
8265 			(void) ql_n_port_plogi(ha);
8266 			TASK_DAEMON_LOCK(ha);
8267 		}
8268 
8269 		head = &ha->callback_queue;
8270 		if (head->first != NULL) {
8271 			sp = head->first->base_address;
8272 			link = &sp->cmd;
8273 
8274 			/* Dequeue command. */
8275 			ql_remove_link(head, link);
8276 
8277 			/* Release task daemon lock. */
8278 			TASK_DAEMON_UNLOCK(ha);
8279 
8280 			/* Do callback. */
8281 			if (sp->flags & SRB_UB_CALLBACK) {
8282 				ql_unsol_callback(sp);
8283 			} else {
8284 				(*sp->pkt->pkt_comp)(sp->pkt);
8285 			}
8286 
8287 			/* Acquire task daemon lock. */
8288 			TASK_DAEMON_LOCK(ha);
8289 
8290 			loop_again = TRUE;
8291 		}
8292 
8293 	} while (loop_again);
8294 }
8295 
8296 /*
8297  * ql_idle_check
8298  *	Test for adapter is alive and well.
8299  *
8300  * Input:
8301  *	ha:	adapter state pointer.
8302  *
8303  * Context:
8304  *	Kernel context.
8305  */
8306 static void
8307 ql_idle_check(ql_adapter_state_t *ha)
8308 {
8309 	ddi_devstate_t	state;
8310 	int		rval;
8311 	ql_mbx_data_t	mr;
8312 
8313 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8314 
8315 	/* Firmware Ready Test. */
8316 	rval = ql_get_firmware_state(ha, &mr);
8317 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8318 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8319 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8320 		state = ddi_get_devstate(ha->dip);
8321 		if (state == DDI_DEVSTATE_UP) {
8322 			/*EMPTY*/
8323 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8324 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8325 		}
8326 		TASK_DAEMON_LOCK(ha);
8327 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8328 			EL(ha, "fstate_ready, isp_abort_needed\n");
8329 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8330 		}
8331 		TASK_DAEMON_UNLOCK(ha);
8332 	}
8333 
8334 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8335 }
8336 
8337 /*
8338  * ql_unsol_callback
8339  *	Handle unsolicited buffer callbacks.
8340  *
8341  * Input:
8342  *	ha = adapter state pointer.
8343  *	sp = srb pointer.
8344  *
8345  * Context:
8346  *	Kernel context.
8347  */
8348 static void
8349 ql_unsol_callback(ql_srb_t *sp)
8350 {
8351 	fc_affected_id_t	*af;
8352 	fc_unsol_buf_t		*ubp;
8353 	uchar_t			r_ctl;
8354 	uchar_t			ls_code;
8355 	ql_tgt_t		*tq;
8356 	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;
8357 
8358 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8359 
8360 	ubp = ha->ub_array[sp->handle];
8361 	r_ctl = ubp->ub_frame.r_ctl;
8362 	ls_code = ubp->ub_buffer[0];
8363 
8364 	if (sp->lun_queue == NULL) {
8365 		tq = NULL;
8366 	} else {
8367 		tq = sp->lun_queue->target_queue;
8368 	}
8369 
8370 	QL_UB_LOCK(ha);
8371 	if (sp->flags & SRB_UB_FREE_REQUESTED ||
8372 	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8373 		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8374 		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8375 		sp->flags |= SRB_UB_IN_FCA;
8376 		QL_UB_UNLOCK(ha);
8377 		return;
8378 	}
8379 
8380 	/* Process RSCN */
8381 	if (sp->flags & SRB_UB_RSCN) {
8382 		int sendup = 1;
8383 
8384 		/*
8385 		 * Defer RSCN posting until commands return
8386 		 */
8387 		QL_UB_UNLOCK(ha);
8388 
8389 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8390 
8391 		/* Abort outstanding commands */
8392 		sendup = ql_process_rscn(ha, af);
8393 		if (sendup == 0) {
8394 
8395 			TASK_DAEMON_LOCK(ha);
8396 			ql_add_link_b(&pha->callback_queue, &sp->cmd);
8397 			TASK_DAEMON_UNLOCK(ha);
8398 
8399 			/*
8400 			 * Wait for commands to drain in F/W (doesn't take
8401 			 * more than a few milliseconds)
8402 			 */
8403 			ql_delay(ha, 10000);
8404 
8405 			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8406 			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
8407 			    af->aff_format, af->aff_d_id);
8408 			return;
8409 		}
8410 
8411 		QL_UB_LOCK(ha);
8412 
8413 		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8414 		    af->aff_format, af->aff_d_id);
8415 	}
8416 
8417 	/* Process UNSOL LOGO */
8418 	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8419 		QL_UB_UNLOCK(ha);
8420 
8421 		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8422 			TASK_DAEMON_LOCK(ha);
8423 			ql_add_link_b(&pha->callback_queue, &sp->cmd);
8424 			TASK_DAEMON_UNLOCK(ha);
8425 			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8426 			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
8427 			return;
8428 		}
8429 
8430 		QL_UB_LOCK(ha);
8431 		EL(ha, "sending unsol logout for %xh to transport\n",
8432 		    ubp->ub_frame.s_id);
8433 	}
8434 
8435 	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8436 	    SRB_UB_FCP);
8437 
8438 	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8439 		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8440 		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8441 	}
8442 	QL_UB_UNLOCK(ha);
8443 
8444 	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8445 	    ubp, sp->ub_type);
8446 
8447 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8448 }
8449 
8450 /*
8451  * ql_send_logo
8452  *
8453  * Input:
8454  *	ha:	adapter state pointer.
8455  *	tq:	target queue pointer.
8456  *	done_q:	done queue pointer.
8457  *
8458  * Context:
8459  *	Interrupt or Kernel context, no mailbox commands allowed.
8460  */
8461 void
8462 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
8463 {
8464 	fc_unsol_buf_t		*ubp;
8465 	ql_srb_t		*sp;
8466 	la_els_logo_t		*payload;
8467 	ql_adapter_state_t	*ha = vha->pha;
8468 
8469 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8470 	    tq->d_id.b24);
8471 
8472 	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
8473 		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8474 		return;
8475 	}
8476 
8477 	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8478 	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8479 
8480 		/* Locate a buffer to use. */
8481 		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8482 		if (ubp == NULL) {
8483 			EL(vha, "Failed, get_unsolicited_buffer\n");
8484 			return;
8485 		}
8486 
8487 		DEVICE_QUEUE_LOCK(tq);
8488 		tq->flags |= TQF_NEED_AUTHENTICATION;
8489 		tq->logout_sent++;
8490 		DEVICE_QUEUE_UNLOCK(tq);
8491 
8492 		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);
8493 
8494 		sp = ubp->ub_fca_private;
8495 
8496 		/* Set header. */
8497 		ubp->ub_frame.d_id = vha->d_id.b24;
8498 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8499 		ubp->ub_frame.s_id = tq->d_id.b24;
8500 		ubp->ub_frame.rsvd = 0;
8501 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8502 		    F_CTL_SEQ_INITIATIVE;
8503 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8504 		ubp->ub_frame.seq_cnt = 0;
8505 		ubp->ub_frame.df_ctl = 0;
8506 		ubp->ub_frame.seq_id = 0;
8507 		ubp->ub_frame.rx_id = 0xffff;
8508 		ubp->ub_frame.ox_id = 0xffff;
8509 
8510 		/* set payload. */
8511 		payload = (la_els_logo_t *)ubp->ub_buffer;
8512 		bzero(payload, sizeof (la_els_logo_t));
8513 		/* Make sure ls_code in payload is always big endian */
8514 		ubp->ub_buffer[0] = LA_ELS_LOGO;
8515 		ubp->ub_buffer[1] = 0;
8516 		ubp->ub_buffer[2] = 0;
8517 		ubp->ub_buffer[3] = 0;
8518 		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8519 		    &payload->nport_ww_name.raw_wwn[0], 8);
8520 		payload->nport_id.port_id = tq->d_id.b24;
8521 
8522 		QL_UB_LOCK(ha);
8523 		sp->flags |= SRB_UB_CALLBACK;
8524 		QL_UB_UNLOCK(ha);
8525 		if (tq->lun_queues.first != NULL) {
8526 			sp->lun_queue = (tq->lun_queues.first)->base_address;
8527 		} else {
8528 			sp->lun_queue = ql_lun_queue(vha, tq, 0);
8529 		}
8530 		if (done_q) {
8531 			ql_add_link_b(done_q, &sp->cmd);
8532 		} else {
8533 			ql_awaken_task_daemon(ha, sp, 0, 0);
8534 		}
8535 	}
8536 
8537 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8538 }
8539 
/*
 * ql_process_logo_for_device
 *	Determines whether a received LOGO for the target can be posted
 *	to the transport now, or must be deferred because commands are
 *	still outstanding.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *
 * Returns:
 *	1 = send the LOGO up now, 0 = posting deferred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	ql_link_t	*link;
	int		sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);
	if (tq->outcnt) {
		/* Commands outstanding in the firmware; abort and defer. */
		DEVICE_QUEUE_UNLOCK(tq);
		sendup = 0;
		(void) ql_abort_device(ha, tq, 1);
		ql_delay(ha, 10000);
	} else {
		DEVICE_QUEUE_UNLOCK(tq);
		TASK_DAEMON_LOCK(ha);

		/*
		 * Defer while a non-unsolicited command for this d_id is
		 * still waiting on the callback queue.
		 */
		for (link = ha->pha->callback_queue.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp->flags & SRB_UB_CALLBACK) {
				continue;
			}
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;

			if (tq->d_id.b24 == d_id.b24) {
				sendup = 0;
				break;
			}
		}

		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
8581 
8582 static int
8583 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8584 {
8585 	fc_unsol_buf_t		*ubp;
8586 	ql_srb_t		*sp;
8587 	la_els_logi_t		*payload;
8588 	class_svc_param_t	*class3_param;
8589 
8590 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8591 
8592 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8593 	    LOOP_DOWN)) {
8594 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8595 		return (QL_FUNCTION_FAILED);
8596 	}
8597 
8598 	/* Locate a buffer to use. */
8599 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8600 	if (ubp == NULL) {
8601 		EL(ha, "Failed\n");
8602 		return (QL_FUNCTION_FAILED);
8603 	}
8604 
8605 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8606 	    ha->instance, tq->d_id.b24);
8607 
8608 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8609 
8610 	sp = ubp->ub_fca_private;
8611 
8612 	/* Set header. */
8613 	ubp->ub_frame.d_id = ha->d_id.b24;
8614 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8615 	ubp->ub_frame.s_id = tq->d_id.b24;
8616 	ubp->ub_frame.rsvd = 0;
8617 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8618 	    F_CTL_SEQ_INITIATIVE;
8619 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8620 	ubp->ub_frame.seq_cnt = 0;
8621 	ubp->ub_frame.df_ctl = 0;
8622 	ubp->ub_frame.seq_id = 0;
8623 	ubp->ub_frame.rx_id = 0xffff;
8624 	ubp->ub_frame.ox_id = 0xffff;
8625 
8626 	/* set payload. */
8627 	payload = (la_els_logi_t *)ubp->ub_buffer;
8628 	bzero(payload, sizeof (payload));
8629 
8630 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8631 	payload->common_service.fcph_version = 0x2006;
8632 	payload->common_service.cmn_features = 0x8800;
8633 
8634 	CFG_IST(ha, CFG_CTRL_242581) ?
8635 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8636 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8637 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8638 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8639 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8640 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8641 
8642 	payload->common_service.conc_sequences = 0xff;
8643 	payload->common_service.relative_offset = 0x03;
8644 	payload->common_service.e_d_tov = 0x7d0;
8645 
8646 	bcopy((void *)&tq->port_name[0],
8647 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8648 
8649 	bcopy((void *)&tq->node_name[0],
8650 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8651 
8652 	class3_param = (class_svc_param_t *)&payload->class_3;
8653 	class3_param->class_valid_svc_opt = 0x8000;
8654 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8655 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8656 	class3_param->conc_sequences = tq->class3_conc_sequences;
8657 	class3_param->open_sequences_per_exch =
8658 	    tq->class3_open_sequences_per_exch;
8659 
8660 	QL_UB_LOCK(ha);
8661 	sp->flags |= SRB_UB_CALLBACK;
8662 	QL_UB_UNLOCK(ha);
8663 
8664 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8665 
8666 	if (done_q) {
8667 		ql_add_link_b(done_q, &sp->cmd);
8668 	} else {
8669 		ql_awaken_task_daemon(ha, sp, 0, 0);
8670 	}
8671 
8672 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8673 
8674 	return (QL_SUCCESS);
8675 }
8676 
8677 /*
8678  * Abort outstanding commands in the Firmware, clear internally
8679  * queued commands in the driver, Synchronize the target with
8680  * the Firmware
8681  */
8682 int
8683 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8684 {
8685 	ql_link_t	*link, *link2;
8686 	ql_lun_t	*lq;
8687 	int		rval = QL_SUCCESS;
8688 	ql_srb_t	*sp;
8689 	ql_head_t	done_q = { NULL, NULL };
8690 
8691 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8692 
8693 	/*
8694 	 * First clear, internally queued commands
8695 	 */
8696 	DEVICE_QUEUE_LOCK(tq);
8697 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8698 		lq = link->base_address;
8699 
8700 		link2 = lq->cmd.first;
8701 		while (link2 != NULL) {
8702 			sp = link2->base_address;
8703 			link2 = link2->next;
8704 
8705 			if (sp->flags & SRB_ABORT) {
8706 				continue;
8707 			}
8708 
8709 			/* Remove srb from device command queue. */
8710 			ql_remove_link(&lq->cmd, &sp->cmd);
8711 			sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8712 
8713 			/* Set ending status. */
8714 			sp->pkt->pkt_reason = CS_ABORTED;
8715 
8716 			/* Call done routine to handle completions. */
8717 			ql_add_link_b(&done_q, &sp->cmd);
8718 		}
8719 	}
8720 	DEVICE_QUEUE_UNLOCK(tq);
8721 
8722 	if (done_q.first != NULL) {
8723 		ql_done(done_q.first);
8724 	}
8725 
8726 	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8727 		rval = ql_abort_target(ha, tq, 0);
8728 	}
8729 
8730 	if (rval != QL_SUCCESS) {
8731 		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8732 	} else {
8733 		/*EMPTY*/
8734 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8735 		    ha->vp_index);
8736 	}
8737 
8738 	return (rval);
8739 }
8740 
8741 /*
8742  * ql_rcv_rscn_els
8743  *	Processes received RSCN extended link service.
8744  *
8745  * Input:
8746  *	ha:	adapter state pointer.
8747  *	mb:	array containing input mailbox registers.
8748  *	done_q:	done queue pointer.
8749  *
8750  * Context:
8751  *	Interrupt or Kernel context, no mailbox commands allowed.
8752  */
8753 void
8754 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8755 {
8756 	fc_unsol_buf_t		*ubp;
8757 	ql_srb_t		*sp;
8758 	fc_rscn_t		*rn;
8759 	fc_affected_id_t	*af;
8760 	port_id_t		d_id;
8761 
8762 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8763 
8764 	/* Locate a buffer to use. */
8765 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8766 	if (ubp != NULL) {
8767 		sp = ubp->ub_fca_private;
8768 
8769 		/* Set header. */
8770 		ubp->ub_frame.d_id = ha->d_id.b24;
8771 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8772 		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8773 		ubp->ub_frame.rsvd = 0;
8774 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8775 		    F_CTL_SEQ_INITIATIVE;
8776 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8777 		ubp->ub_frame.seq_cnt = 0;
8778 		ubp->ub_frame.df_ctl = 0;
8779 		ubp->ub_frame.seq_id = 0;
8780 		ubp->ub_frame.rx_id = 0xffff;
8781 		ubp->ub_frame.ox_id = 0xffff;
8782 
8783 		/* set payload. */
8784 		rn = (fc_rscn_t *)ubp->ub_buffer;
8785 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8786 
8787 		rn->rscn_code = LA_ELS_RSCN;
8788 		rn->rscn_len = 4;
8789 		rn->rscn_payload_len = 8;
8790 		d_id.b.al_pa = LSB(mb[2]);
8791 		d_id.b.area = MSB(mb[2]);
8792 		d_id.b.domain =	LSB(mb[1]);
8793 		af->aff_d_id = d_id.b24;
8794 		af->aff_format = MSB(mb[1]);
8795 
8796 		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8797 		    af->aff_d_id);
8798 
8799 		ql_update_rscn(ha, af);
8800 
8801 		QL_UB_LOCK(ha);
8802 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8803 		QL_UB_UNLOCK(ha);
8804 		ql_add_link_b(done_q, &sp->cmd);
8805 	}
8806 
8807 	if (ubp == NULL) {
8808 		EL(ha, "Failed, get_unsolicited_buffer\n");
8809 	} else {
8810 		/*EMPTY*/
8811 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8812 	}
8813 }
8814 
8815 /*
8816  * ql_update_rscn
8817  *	Update devices from received RSCN.
8818  *
8819  * Input:
8820  *	ha:	adapter state pointer.
8821  *	af:	pointer to RSCN data.
8822  *
8823  * Context:
8824  *	Interrupt or Kernel context, no mailbox commands allowed.
8825  */
8826 static void
8827 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8828 {
8829 	ql_link_t	*link;
8830 	uint16_t	index;
8831 	ql_tgt_t	*tq;
8832 
8833 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8834 
8835 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8836 		port_id_t d_id;
8837 
8838 		d_id.r.rsvd_1 = 0;
8839 		d_id.b24 = af->aff_d_id;
8840 
8841 		tq = ql_d_id_to_queue(ha, d_id);
8842 		if (tq) {
8843 			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
8844 			DEVICE_QUEUE_LOCK(tq);
8845 			tq->flags |= TQF_RSCN_RCVD;
8846 			DEVICE_QUEUE_UNLOCK(tq);
8847 		}
8848 		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
8849 		    ha->instance);
8850 
8851 		return;
8852 	}
8853 
8854 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8855 		for (link = ha->dev[index].first; link != NULL;
8856 		    link = link->next) {
8857 			tq = link->base_address;
8858 
8859 			switch (af->aff_format) {
8860 			case FC_RSCN_FABRIC_ADDRESS:
8861 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8862 					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
8863 					    tq->d_id.b24);
8864 					DEVICE_QUEUE_LOCK(tq);
8865 					tq->flags |= TQF_RSCN_RCVD;
8866 					DEVICE_QUEUE_UNLOCK(tq);
8867 				}
8868 				break;
8869 
8870 			case FC_RSCN_AREA_ADDRESS:
8871 				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
8872 					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
8873 					    tq->d_id.b24);
8874 					DEVICE_QUEUE_LOCK(tq);
8875 					tq->flags |= TQF_RSCN_RCVD;
8876 					DEVICE_QUEUE_UNLOCK(tq);
8877 				}
8878 				break;
8879 
8880 			case FC_RSCN_DOMAIN_ADDRESS:
8881 				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
8882 					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
8883 					    tq->d_id.b24);
8884 					DEVICE_QUEUE_LOCK(tq);
8885 					tq->flags |= TQF_RSCN_RCVD;
8886 					DEVICE_QUEUE_UNLOCK(tq);
8887 				}
8888 				break;
8889 
8890 			default:
8891 				break;
8892 			}
8893 		}
8894 	}
8895 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8896 }
8897 
8898 /*
8899  * ql_process_rscn
8900  *
8901  * Input:
8902  *	ha:	adapter state pointer.
8903  *	af:	RSCN payload pointer.
8904  *
8905  * Context:
8906  *	Kernel context.
8907  */
8908 static int
8909 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8910 {
8911 	int		sendit;
8912 	int		sendup = 1;
8913 	ql_link_t	*link;
8914 	uint16_t	index;
8915 	ql_tgt_t	*tq;
8916 
8917 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8918 
8919 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8920 		port_id_t d_id;
8921 
8922 		d_id.r.rsvd_1 = 0;
8923 		d_id.b24 = af->aff_d_id;
8924 
8925 		tq = ql_d_id_to_queue(ha, d_id);
8926 		if (tq) {
8927 			sendup = ql_process_rscn_for_device(ha, tq);
8928 		}
8929 
8930 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8931 
8932 		return (sendup);
8933 	}
8934 
8935 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8936 		for (link = ha->dev[index].first; link != NULL;
8937 		    link = link->next) {
8938 
8939 			tq = link->base_address;
8940 			if (tq == NULL) {
8941 				continue;
8942 			}
8943 
8944 			switch (af->aff_format) {
8945 			case FC_RSCN_FABRIC_ADDRESS:
8946 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8947 					sendit = ql_process_rscn_for_device(
8948 					    ha, tq);
8949 					if (sendup) {
8950 						sendup = sendit;
8951 					}
8952 				}
8953 				break;
8954 
8955 			case FC_RSCN_AREA_ADDRESS:
8956 				if ((tq->d_id.b24 & 0xffff00) ==
8957 				    af->aff_d_id) {
8958 					sendit = ql_process_rscn_for_device(
8959 					    ha, tq);
8960 
8961 					if (sendup) {
8962 						sendup = sendit;
8963 					}
8964 				}
8965 				break;
8966 
8967 			case FC_RSCN_DOMAIN_ADDRESS:
8968 				if ((tq->d_id.b24 & 0xff0000) ==
8969 				    af->aff_d_id) {
8970 					sendit = ql_process_rscn_for_device(
8971 					    ha, tq);
8972 
8973 					if (sendup) {
8974 						sendup = sendit;
8975 					}
8976 				}
8977 				break;
8978 
8979 			default:
8980 				break;
8981 			}
8982 		}
8983 	}
8984 
8985 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8986 
8987 	return (sendup);
8988 }
8989 
8990 /*
8991  * ql_process_rscn_for_device
8992  *
8993  * Input:
8994  *	ha:	adapter state pointer.
8995  *	tq:	target queue pointer.
8996  *
8997  * Context:
8998  *	Kernel context.
8999  */
9000 static int
9001 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9002 {
9003 	int sendup = 1;
9004 
9005 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9006 
9007 	DEVICE_QUEUE_LOCK(tq);
9008 
9009 	/*
9010 	 * Let FCP-2 compliant devices continue I/Os
9011 	 * with their low level recoveries.
9012 	 */
9013 	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9014 	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9015 		/*
9016 		 * Cause ADISC to go out
9017 		 */
9018 		DEVICE_QUEUE_UNLOCK(tq);
9019 
9020 		(void) ql_get_port_database(ha, tq, PDF_NONE);
9021 
9022 		DEVICE_QUEUE_LOCK(tq);
9023 		tq->flags &= ~TQF_RSCN_RCVD;
9024 
9025 	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
9026 		if (tq->d_id.b24 != BROADCAST_ADDR) {
9027 			tq->flags |= TQF_NEED_AUTHENTICATION;
9028 		}
9029 
9030 		DEVICE_QUEUE_UNLOCK(tq);
9031 
9032 		(void) ql_abort_device(ha, tq, 1);
9033 
9034 		DEVICE_QUEUE_LOCK(tq);
9035 
9036 		if (tq->outcnt) {
9037 			sendup = 0;
9038 		} else {
9039 			tq->flags &= ~TQF_RSCN_RCVD;
9040 		}
9041 	} else {
9042 		tq->flags &= ~TQF_RSCN_RCVD;
9043 	}
9044 
9045 	if (sendup) {
9046 		if (tq->d_id.b24 != BROADCAST_ADDR) {
9047 			tq->flags |= TQF_NEED_AUTHENTICATION;
9048 		}
9049 	}
9050 
9051 	DEVICE_QUEUE_UNLOCK(tq);
9052 
9053 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9054 
9055 	return (sendup);
9056 }
9057 
/*
 * ql_handle_rscn_update
 *	Fetches the firmware's d_id list, initializes a device queue for
 *	each newly arrived device, and sends up an emulated PLOGI for it.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context (KM_SLEEP allocation, mailbox commands issued).
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL; check is defensive. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices; mr.mb[1] is the entry count. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Skip devices that already have a queue. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Lock dropped around the mailbox-based DB fetch. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9147 
9148 /*
9149  * ql_free_unsolicited_buffer
9150  *	Frees allocated buffer.
9151  *
9152  * Input:
9153  *	ha = adapter state pointer.
9154  *	index = buffer array index.
9155  *	ADAPTER_STATE_LOCK must be already obtained.
9156  *
9157  * Context:
9158  *	Kernel context.
9159  */
9160 static void
9161 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
9162 {
9163 	ql_srb_t	*sp;
9164 	int		status;
9165 
9166 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9167 
9168 	sp = ubp->ub_fca_private;
9169 	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9170 		/* Disconnect IP from system buffers. */
9171 		if (ha->flags & IP_INITIALIZED) {
9172 			ADAPTER_STATE_UNLOCK(ha);
9173 			status = ql_shutdown_ip(ha);
9174 			ADAPTER_STATE_LOCK(ha);
9175 			if (status != QL_SUCCESS) {
9176 				cmn_err(CE_WARN,
9177 				    "!Qlogic %s(%d): Failed to shutdown IP",
9178 				    QL_NAME, ha->instance);
9179 				return;
9180 			}
9181 
9182 			ha->flags &= ~IP_ENABLED;
9183 		}
9184 
9185 		ql_free_phys(ha, &sp->ub_buffer);
9186 	} else {
9187 		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9188 	}
9189 
9190 	kmem_free(sp, sizeof (ql_srb_t));
9191 	kmem_free(ubp, sizeof (fc_unsol_buf_t));
9192 
9193 	if (ha->ub_allocated != 0) {
9194 		ha->ub_allocated--;
9195 	}
9196 
9197 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9198 }
9199 
9200 /*
9201  * ql_get_unsolicited_buffer
9202  *	Locates a free unsolicited buffer.
9203  *
9204  * Input:
9205  *	ha = adapter state pointer.
9206  *	type = buffer type.
9207  *
9208  * Returns:
9209  *	Unsolicited buffer pointer.
9210  *
9211  * Context:
9212  *	Interrupt or Kernel context, no mailbox commands allowed.
9213  */
9214 fc_unsol_buf_t *
9215 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9216 {
9217 	fc_unsol_buf_t	*ubp;
9218 	ql_srb_t	*sp;
9219 	uint16_t	index;
9220 
9221 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9222 
9223 	/* Locate a buffer to use. */
9224 	ubp = NULL;
9225 
9226 	QL_UB_LOCK(ha);
9227 	for (index = 0; index < QL_UB_LIMIT; index++) {
9228 		ubp = ha->ub_array[index];
9229 		if (ubp != NULL) {
9230 			sp = ubp->ub_fca_private;
9231 			if ((sp->ub_type == type) &&
9232 			    (sp->flags & SRB_UB_IN_FCA) &&
9233 			    (!(sp->flags & (SRB_UB_CALLBACK |
9234 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9235 				sp->flags |= SRB_UB_ACQUIRED;
9236 				ubp->ub_resp_flags = 0;
9237 				break;
9238 			}
9239 			ubp = NULL;
9240 		}
9241 	}
9242 	QL_UB_UNLOCK(ha);
9243 
9244 	if (ubp) {
9245 		ubp->ub_resp_token = NULL;
9246 		ubp->ub_class = FC_TRAN_CLASS3;
9247 	}
9248 
9249 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9250 
9251 	return (ubp);
9252 }
9253 
9254 /*
9255  * ql_ub_frame_hdr
9256  *	Processes received unsolicited buffers from ISP.
9257  *
9258  * Input:
9259  *	ha:	adapter state pointer.
9260  *	tq:	target queue pointer.
9261  *	index:	unsolicited buffer array index.
9262  *	done_q:	done queue pointer.
9263  *
9264  * Returns:
9265  *	ql local function return status code.
9266  *
9267  * Context:
9268  *	Interrupt or Kernel context, no mailbox commands allowed.
9269  */
9270 int
9271 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9272     ql_head_t *done_q)
9273 {
9274 	fc_unsol_buf_t	*ubp;
9275 	ql_srb_t	*sp;
9276 	uint16_t	loop_id;
9277 	int		rval = QL_FUNCTION_FAILED;
9278 
9279 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9280 
9281 	QL_UB_LOCK(ha);
9282 	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9283 		EL(ha, "Invalid buffer index=%xh\n", index);
9284 		QL_UB_UNLOCK(ha);
9285 		return (rval);
9286 	}
9287 
9288 	sp = ubp->ub_fca_private;
9289 	if (sp->flags & SRB_UB_FREE_REQUESTED) {
9290 		EL(ha, "buffer freed index=%xh\n", index);
9291 		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9292 		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9293 
9294 		sp->flags |= SRB_UB_IN_FCA;
9295 
9296 		QL_UB_UNLOCK(ha);
9297 		return (rval);
9298 	}
9299 
9300 	if ((sp->handle == index) &&
9301 	    (sp->flags & SRB_UB_IN_ISP) &&
9302 	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9303 	    (!(sp->flags & SRB_UB_ACQUIRED))) {
9304 		/* set broadcast D_ID */
9305 		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
9306 		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9307 		if (tq->ub_loop_id == loop_id) {
9308 			if (ha->topology & QL_FL_PORT) {
9309 				ubp->ub_frame.d_id = 0x000000;
9310 			} else {
9311 				ubp->ub_frame.d_id = 0xffffff;
9312 			}
9313 		} else {
9314 			ubp->ub_frame.d_id = ha->d_id.b24;
9315 		}
9316 		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9317 		ubp->ub_frame.rsvd = 0;
9318 		ubp->ub_frame.s_id = tq->d_id.b24;
9319 		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9320 		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9321 		ubp->ub_frame.df_ctl = 0;
9322 		ubp->ub_frame.seq_id = tq->ub_seq_id;
9323 		ubp->ub_frame.rx_id = 0xffff;
9324 		ubp->ub_frame.ox_id = 0xffff;
9325 		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9326 		    sp->ub_size : tq->ub_sequence_length;
9327 		ubp->ub_frame.ro = tq->ub_frame_ro;
9328 
9329 		tq->ub_sequence_length = (uint16_t)
9330 		    (tq->ub_sequence_length - ubp->ub_bufsize);
9331 		tq->ub_frame_ro += ubp->ub_bufsize;
9332 		tq->ub_seq_cnt++;
9333 
9334 		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9335 			if (tq->ub_seq_cnt == 1) {
9336 				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9337 				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9338 			} else {
9339 				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9340 				    F_CTL_END_SEQ;
9341 			}
9342 			tq->ub_total_seg_cnt = 0;
9343 		} else if (tq->ub_seq_cnt == 1) {
9344 			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9345 			    F_CTL_FIRST_SEQ;
9346 			ubp->ub_frame.df_ctl = 0x20;
9347 		}
9348 
9349 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9350 		    ha->instance, ubp->ub_frame.d_id);
9351 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9352 		    ha->instance, ubp->ub_frame.s_id);
9353 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9354 		    ha->instance, ubp->ub_frame.seq_cnt);
9355 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9356 		    ha->instance, ubp->ub_frame.seq_id);
9357 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9358 		    ha->instance, ubp->ub_frame.ro);
9359 		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9360 		    ha->instance, ubp->ub_frame.f_ctl);
9361 		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9362 		    ha->instance, ubp->ub_bufsize);
9363 		QL_DUMP_3(ubp->ub_buffer, 8,
9364 		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9365 
9366 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9367 		ql_add_link_b(done_q, &sp->cmd);
9368 		rval = QL_SUCCESS;
9369 	} else {
9370 		if (sp->handle != index) {
9371 			EL(ha, "Bad index=%xh, expect=%xh\n", index,
9372 			    sp->handle);
9373 		}
9374 		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9375 			EL(ha, "buffer was already in driver, index=%xh\n",
9376 			    index);
9377 		}
9378 		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
9379 			EL(ha, "buffer was not an IP buffer, index=%xh\n",
9380 			    index);
9381 		}
9382 		if (sp->flags & SRB_UB_ACQUIRED) {
9383 			EL(ha, "buffer was being used by driver, index=%xh\n",
9384 			    index);
9385 		}
9386 	}
9387 	QL_UB_UNLOCK(ha);
9388 
9389 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9390 
9391 	return (rval);
9392 }
9393 
9394 /*
9395  * ql_timer
9396  *	One second timer function.
9397  *
9398  * Input:
9399  *	ql_hba.first = first link in adapter list.
9400  *
9401  * Context:
9402  *	Interrupt context, no mailbox commands allowed.
9403  */
9404 static void
9405 ql_timer(void *arg)
9406 {
9407 	ql_link_t		*link;
9408 	uint32_t		set_flags;
9409 	uint32_t		reset_flags;
9410 	ql_adapter_state_t	*ha = NULL, *vha;
9411 
9412 	QL_PRINT_6(CE_CONT, "started\n");
9413 
9414 	/* Acquire global state lock. */
9415 	GLOBAL_STATE_LOCK();
9416 	if (ql_timer_timeout_id == NULL) {
9417 		/* Release global state lock. */
9418 		GLOBAL_STATE_UNLOCK();
9419 		return;
9420 	}
9421 
9422 	for (link = ql_hba.first; link != NULL; link = link->next) {
9423 		ha = link->base_address;
9424 
9425 		/* Skip adapter if suspended of stalled. */
9426 		ADAPTER_STATE_LOCK(ha);
9427 		if (ha->flags & ADAPTER_SUSPENDED ||
9428 		    ha->task_daemon_flags & DRIVER_STALL) {
9429 			ADAPTER_STATE_UNLOCK(ha);
9430 			continue;
9431 		}
9432 		ha->flags |= ADAPTER_TIMER_BUSY;
9433 		ADAPTER_STATE_UNLOCK(ha);
9434 
9435 		QL_PM_LOCK(ha);
9436 		if (ha->power_level != PM_LEVEL_D0) {
9437 			QL_PM_UNLOCK(ha);
9438 
9439 			ADAPTER_STATE_LOCK(ha);
9440 			ha->flags &= ~ADAPTER_TIMER_BUSY;
9441 			ADAPTER_STATE_UNLOCK(ha);
9442 			continue;
9443 		}
9444 		ha->busy++;
9445 		QL_PM_UNLOCK(ha);
9446 
9447 		set_flags = 0;
9448 		reset_flags = 0;
9449 
9450 		/* Port retry timer handler. */
9451 		if (LOOP_READY(ha)) {
9452 			ADAPTER_STATE_LOCK(ha);
9453 			if (ha->port_retry_timer != 0) {
9454 				ha->port_retry_timer--;
9455 				if (ha->port_retry_timer == 0) {
9456 					set_flags |= PORT_RETRY_NEEDED;
9457 				}
9458 			}
9459 			ADAPTER_STATE_UNLOCK(ha);
9460 		}
9461 
9462 		/* Loop down timer handler. */
9463 		if (LOOP_RECONFIGURE(ha) == 0) {
9464 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9465 				ha->loop_down_timer--;
9466 				/*
9467 				 * give the firmware loop down dump flag
9468 				 * a chance to work.
9469 				 */
9470 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9471 					if (CFG_IST(ha,
9472 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9473 						(void) ql_binary_fw_dump(ha,
9474 						    TRUE);
9475 					}
9476 					EL(ha, "loop_down_reset, "
9477 					    "isp_abort_needed\n");
9478 					set_flags |= ISP_ABORT_NEEDED;
9479 				}
9480 			}
9481 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9482 				/* Command abort time handler. */
9483 				if (ha->loop_down_timer ==
9484 				    ha->loop_down_abort_time) {
9485 					ADAPTER_STATE_LOCK(ha);
9486 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9487 					ADAPTER_STATE_UNLOCK(ha);
9488 					set_flags |= ABORT_QUEUES_NEEDED;
9489 					EL(ha, "loop_down_abort_time, "
9490 					    "abort_queues_needed\n");
9491 				}
9492 
9493 				/* Watchdog timer handler. */
9494 				if (ha->watchdog_timer == 0) {
9495 					ha->watchdog_timer = WATCHDOG_TIME;
9496 				} else if (LOOP_READY(ha)) {
9497 					ha->watchdog_timer--;
9498 					if (ha->watchdog_timer == 0) {
9499 						for (vha = ha; vha != NULL;
9500 						    vha = vha->vp_next) {
9501 							ql_watchdog(vha,
9502 							    &set_flags,
9503 							    &reset_flags);
9504 						}
9505 						ha->watchdog_timer =
9506 						    WATCHDOG_TIME;
9507 					}
9508 				}
9509 			}
9510 		}
9511 
9512 		/* Idle timer handler. */
9513 		if (!DRIVER_SUSPENDED(ha)) {
9514 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9515 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9516 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9517 #endif
9518 				ha->idle_timer = 0;
9519 			}
9520 			if (ha->send_plogi_timer != NULL) {
9521 				ha->send_plogi_timer--;
9522 				if (ha->send_plogi_timer == NULL) {
9523 					set_flags |= SEND_PLOGI;
9524 				}
9525 			}
9526 		}
9527 		ADAPTER_STATE_LOCK(ha);
9528 		if (ha->restart_mpi_timer != 0) {
9529 			ha->restart_mpi_timer--;
9530 			if (ha->restart_mpi_timer == 0 &&
9531 			    ha->idc_restart_mpi != 0) {
9532 				ha->idc_restart_mpi = 0;
9533 				reset_flags |= TASK_DAEMON_STALLED_FLG;
9534 			}
9535 		}
9536 		if (ha->flash_acc_timer != 0) {
9537 			ha->flash_acc_timer--;
9538 			if (ha->flash_acc_timer == 0 &&
9539 			    ha->idc_flash_acc != 0) {
9540 				ha->idc_flash_acc = 1;
9541 				ha->idc_mb[1] = 0;
9542 				ha->idc_mb[2] = IDC_OPC_DRV_START;
9543 				set_flags |= IDC_ACK_NEEDED;
9544 			}
9545 		}
9546 		ADAPTER_STATE_UNLOCK(ha);
9547 
9548 		if (set_flags != 0 || reset_flags != 0) {
9549 			ql_awaken_task_daemon(ha, NULL, set_flags,
9550 			    reset_flags);
9551 		}
9552 
9553 		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9554 			ql_blink_led(ha);
9555 		}
9556 
9557 		/* Update the IO stats */
9558 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9559 			ha->xioctl->IOInputMByteCnt +=
9560 			    (ha->xioctl->IOInputByteCnt / 0x100000);
9561 			ha->xioctl->IOInputByteCnt %= 0x100000;
9562 		}
9563 
9564 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9565 			ha->xioctl->IOOutputMByteCnt +=
9566 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9567 			ha->xioctl->IOOutputByteCnt %= 0x100000;
9568 		}
9569 
9570 		ADAPTER_STATE_LOCK(ha);
9571 		ha->flags &= ~ADAPTER_TIMER_BUSY;
9572 		ADAPTER_STATE_UNLOCK(ha);
9573 
9574 		QL_PM_LOCK(ha);
9575 		ha->busy--;
9576 		QL_PM_UNLOCK(ha);
9577 	}
9578 
9579 	/* Restart timer, if not being stopped. */
9580 	if (ql_timer_timeout_id != NULL) {
9581 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9582 	}
9583 
9584 	/* Release global state lock. */
9585 	GLOBAL_STATE_UNLOCK();
9586 
9587 	QL_PRINT_6(CE_CONT, "done\n");
9588 }
9589 
9590 /*
9591  * ql_timeout_insert
9592  *	Function used to insert a command block onto the
9593  *	watchdog timer queue.
9594  *
9595  *	Note: Must insure that pkt_time is not zero
9596  *			before calling ql_timeout_insert.
9597  *
9598  * Input:
9599  *	ha:	adapter state pointer.
9600  *	tq:	target queue pointer.
9601  *	sp:	SRB pointer.
9602  *	DEVICE_QUEUE_LOCK must be already obtained.
9603  *
9604  * Context:
9605  *	Kernel context.
9606  */
9607 /* ARGSUSED */
9608 static void
9609 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9610 {
9611 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9612 
9613 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9614 		sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9615 		/*
9616 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9617 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9618 		 * will expire in the next watchdog call, which could be in
9619 		 * 1 microsecond.
9620 		 *
9621 		 */
9622 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9623 		    WATCHDOG_TIME;
9624 		/*
9625 		 * Added an additional 10 to account for the
9626 		 * firmware timer drift which can occur with
9627 		 * very long timeout values.
9628 		 */
9629 		sp->wdg_q_time += 10;
9630 
9631 		/*
9632 		 * Add 6 more to insure watchdog does not timeout at the same
9633 		 * time as ISP RISC code timeout.
9634 		 */
9635 		sp->wdg_q_time += 6;
9636 
9637 		/* Save initial time for resetting watchdog time. */
9638 		sp->init_wdg_q_time = sp->wdg_q_time;
9639 
9640 		/* Insert command onto watchdog queue. */
9641 		ql_add_link_b(&tq->wdg, &sp->wdg);
9642 
9643 		sp->flags |= SRB_WATCHDOG_ENABLED;
9644 	} else {
9645 		sp->isp_timeout = 0;
9646 		sp->wdg_q_time = 0;
9647 		sp->init_wdg_q_time = 0;
9648 	}
9649 
9650 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9651 }
9652 
9653 /*
9654  * ql_watchdog
9655  *	Timeout handler that runs in interrupt context. The
9656  *	ql_adapter_state_t * argument is the parameter set up when the
9657  *	timeout was initialized (state structure pointer).
9658  *	Function used to update timeout values and if timeout
9659  *	has occurred command will be aborted.
9660  *
9661  * Input:
9662  *	ha:		adapter state pointer.
9663  *	set_flags:	task daemon flags to set.
9664  *	reset_flags:	task daemon flags to reset.
9665  *
9666  * Context:
9667  *	Interrupt context, no mailbox commands allowed.
9668  */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/*
			 * Try to acquire device queue lock.  Running in
			 * interrupt context we must not block; on failure
			 * next_device is NULLed, which terminates this hash
			 * chain's walk until the next watchdog tick.
			 */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			/* Capture successor while the lock protects it. */
			next_device = link->next;

			/*
			 * Without link-down reporting, a target whose port
			 * retries are exhausted is not timed here.
			 */
			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off the queue. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					/*
					 * If the command already reached the
					 * ISP, the handler will request an
					 * ISP abort; abandon the entire scan
					 * by zeroing the iterators and
					 * forcing the outer index past its
					 * bound.  tq is NULLed because
					 * ql_cmd_timeout returns with the
					 * device queue lock released in
					 * that path's bookkeeping below.
					 */
					if (sp->flags & SRB_ISP_STARTED) {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9768 
9769 /*
9770  * ql_cmd_timeout
9771  *	Command timeout handler.
9772  *
9773  * Input:
9774  *	ha:		adapter state pointer.
9775  *	tq:		target queue pointer.
9776  *	sp:		SRB pointer.
9777  *	set_flags:	task daemon flags to set.
9778  *	reset_flags:	task daemon flags to reset.
9779  *
9780  * Context:
9781  *	Interrupt context, no mailbox commands allowed.
9782  */
/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Two cases: the command timed out while still queued inside the
	 * driver (not yet handed to the ISP), or after the ISP started it.
	 * Both paths temporarily drop the caller's DEVICE_QUEUE_LOCK and
	 * reacquire it before returning, so the caller's lock state is
	 * unchanged on exit.
	 */
	if (!(sp->flags & SRB_ISP_STARTED)) {

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		/* Restore the lock the caller expects to still hold. */
		DEVICE_QUEUE_LOCK(tq);
	} else {
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Error counter is shared; guard with the interrupt lock. */
		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally capture a firmware dump for diagnosis. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		/* Ask the task daemon to reset the ISP. */
		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9848 
9849 /*
9850  * ql_rst_aen
9851  *	Processes asynchronous reset.
9852  *
9853  * Input:
9854  *	ha = adapter state pointer.
9855  *
9856  * Context:
9857  *	Kernel context.
9858  */
static void
ql_rst_aen(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Issue marker command.  A sync-all marker resynchronizes the
	 * firmware's I/O state after an asynchronous reset event; the
	 * return value is intentionally ignored (best effort).
	 */
	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9869 
9870 /*
9871  * ql_cmd_wait
9872  *	Stall driver until all outstanding commands are returned.
9873  *
9874  * Input:
9875  *	ha = adapter state pointer.
9876  *
9877  * Context:
9878  *	Kernel context.
9879  */
9880 void
9881 ql_cmd_wait(ql_adapter_state_t *ha)
9882 {
9883 	uint16_t		index;
9884 	ql_link_t		*link;
9885 	ql_tgt_t		*tq;
9886 	ql_adapter_state_t	*vha;
9887 
9888 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9889 
9890 	/* Wait for all outstanding commands to be returned. */
9891 	(void) ql_wait_outstanding(ha);
9892 
9893 	/*
9894 	 * clear out internally queued commands
9895 	 */
9896 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9897 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9898 			for (link = vha->dev[index].first; link != NULL;
9899 			    link = link->next) {
9900 				tq = link->base_address;
9901 				if (tq &&
9902 				    (!(tq->prli_svc_param_word_3 &
9903 				    PRLI_W3_RETRY))) {
9904 					(void) ql_abort_device(vha, tq, 0);
9905 				}
9906 			}
9907 		}
9908 	}
9909 
9910 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9911 }
9912 
9913 /*
9914  * ql_wait_outstanding
9915  *	Wait for all outstanding commands to complete.
9916  *
9917  * Input:
9918  *	ha = adapter state pointer.
9919  *
9920  * Returns:
9921  *	index - the index for ql_srb into outstanding_cmds.
9922  *
9923  * Context:
9924  *	Kernel context.
9925  */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Poll the outstanding command array until it drains or the
	 * retry budget (count) is exhausted.  Slot 0 is never used.
	 */
	count = 3000;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * Kick any driver-queued IOCBs first and restart the
		 * scan, since issuing them repopulates the array.
		 */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/*
		 * A live command that has not itself timed out: delay
		 * and rescan from the start (index reset to 0 so the
		 * loop increment restarts at 1).  Commands flagged
		 * SRB_COMMAND_TIMEOUT are skipped - they are being
		 * handled by the abort path.
		 * NOTE(review): ql_delay(ha, 10000) looks like a ~10ms
		 * wait (usec units presumed - confirm against ql_delay).
		 */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				index = 0;
			} else {
				/* Budget exhausted; give up on this slot. */
				EL(ha, "failed, sp=%ph\n", (void *)sp);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Index of the slot that failed to drain, or the array bound. */
	return (index);
}
9956 
9957 /*
9958  * ql_restart_queues
9959  *	Restart device queues.
9960  *
9961  * Input:
9962  *	ha = adapter state pointer.
9963  *	DEVICE_QUEUE_LOCK must be released.
9964  *
9965  * Context:
9966  *	Interrupt or Kernel context, no mailbox commands allowed.
9967  */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Clear the suspend flag and restart I/O on every device queue. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					/*
					 * NOTE(review): the re-lock below
					 * implies ql_next() returns with
					 * DEVICE_QUEUE_LOCK released -
					 * confirm against ql_next() before
					 * changing this loop.
					 */
					if (lq->cmd.first != NULL) {
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10008 
10009 /*
10010  * ql_iidma
10011  *	Setup iiDMA parameters to firmware
10012  *
10013  * Input:
10014  *	ha = adapter state pointer.
10015  *	DEVICE_QUEUE_LOCK must be released.
10016  *
10017  * Context:
10018  *	Interrupt or Kernel context, no mailbox commands allowed.
10019  */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA is only supported on 24xx/25xx/81xx class controllers. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* Only touch targets flagged as needing an update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip non-N_Port handles and undefined rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/*
			 * Get the iiDMA persistent data.  The property name
			 * is "iidma-rate-" followed by the 8-byte port name
			 * in hex; the fixed format cannot exceed buf[256].
			 */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					/* No property: leave rate undefined. */
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/* 8G only valid on 25xx. */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/*
			 * Set the firmware's iiDMA rate.  81xx parts manage
			 * the rate themselves and are excluded here.
			 */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10115 
10116 /*
10117  * ql_abort_queues
10118  *	Abort all commands on device queues.
10119  *
10120  * Input:
10121  *	ha = adapter state pointer.
10122  *
10123  * Context:
10124  *	Interrupt or Kernel context, no mailbox commands allowed.
10125  */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * Flush driver-queued IOCBs first (without the interrupt
		 * lock held) and restart the scan, since issuing them can
		 * repopulate the outstanding array.
		 */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retrys */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* ql_done may sleep/schedule; drop the lock. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/*
	 * Now flush the commands still sitting on device queues, for the
	 * physical port and every virtual port.
	 */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10197 
10198 /*
10199  * ql_abort_device_queues
10200  *	Abort all commands on device queues.
10201  *
10202  * Input:
10203  *	ha = adapter state pointer.
10204  *
10205  * Context:
10206  *	Interrupt or Kernel context, no mailbox commands allowed.
10207  */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Leave commands already being aborted alone. */
			if (sp->flags & SRB_ABORT) {
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* ql_done must run without the queue lock held. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The list may have changed while unlocked, so
			 * restart the walk from the head of this LUN's
			 * command queue.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10256 
10257 /*
10258  * ql_loop_resync
10259  *	Resync with fibre channel devices.
10260  *
10261  * Input:
10262  *	ha = adapter state pointer.
10263  *	DEVICE_QUEUE_LOCK must be released.
10264  *
10265  * Returns:
10266  *	ql local function return status code.
10267  *
10268  * Context:
10269  *	Kernel context.
10270  */
10271 static int
10272 ql_loop_resync(ql_adapter_state_t *ha)
10273 {
10274 	int rval;
10275 
10276 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10277 
10278 	if (ha->flags & IP_INITIALIZED) {
10279 		(void) ql_shutdown_ip(ha);
10280 	}
10281 
10282 	rval = ql_fw_ready(ha, 10);
10283 
10284 	TASK_DAEMON_LOCK(ha);
10285 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10286 	TASK_DAEMON_UNLOCK(ha);
10287 
10288 	/* Set loop online, if it really is. */
10289 	if (rval == QL_SUCCESS) {
10290 		ql_loop_online(ha);
10291 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10292 	} else {
10293 		EL(ha, "failed, rval = %xh\n", rval);
10294 	}
10295 
10296 	return (rval);
10297 }
10298 
10299 /*
10300  * ql_loop_online
10301  *	Set loop online status if it really is online.
10302  *
10303  * Input:
10304  *	ha = adapter state pointer.
10305  *	DEVICE_QUEUE_LOCK must be released.
10306  *
10307  * Context:
10308  *	Kernel context.
10309  */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		/* Only ports with no pending resync or link-down work. */
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/*
			 * Restart IP if it was shutdown.  IP lives on the
			 * physical port only (vp_index 0).
			 */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * If the port state is not already LOOP/ONLINE,
			 * rebuild it: keep the speed bits and set the
			 * state bit that matches the topology.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	/* Let the task daemon report any state changes. */
	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10351 
10352 /*
10353  * ql_fca_handle_to_state
10354  *	Verifies handle to be correct.
10355  *
10356  * Input:
10357  *	fca_handle = pointer to state structure.
10358  *
10359  * Returns:
10360  *	NULL = failure
10361  *
10362  * Context:
10363  *	Kernel context.
10364  */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef	QL_DEBUG_ROUTINES
	/*
	 * Debug builds validate the handle by searching the global HBA
	 * list (and each HBA's virtual-port chain) for a matching
	 * adapter state pointer, logging if it is not found.
	 */
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(CE_CONT, "failed\n");
	}

#endif /* QL_DEBUG_ROUTINES */

	/*
	 * Production builds trust the transport's handle and simply
	 * cast it back to the adapter state it was created from.
	 */
	return ((ql_adapter_state_t *)fca_handle);
}
10397 
10398 /*
10399  * ql_d_id_to_queue
10400  *	Locate device queue that matches destination ID.
10401  *
10402  * Input:
10403  *	ha = adapter state pointer.
10404  *	d_id = destination ID
10405  *
10406  * Returns:
10407  *	NULL = failure
10408  *
10409  * Context:
10410  *	Interrupt or Kernel context, no mailbox commands allowed.
10411  */
10412 ql_tgt_t *
10413 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10414 {
10415 	uint16_t	index;
10416 	ql_tgt_t	*tq;
10417 	ql_link_t	*link;
10418 
10419 	/* Get head queue index. */
10420 	index = ql_alpa_to_index[d_id.b.al_pa];
10421 
10422 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10423 		tq = link->base_address;
10424 		if (tq->d_id.b24 == d_id.b24 &&
10425 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10426 			return (tq);
10427 		}
10428 	}
10429 
10430 	return (NULL);
10431 }
10432 
10433 /*
10434  * ql_loop_id_to_queue
10435  *	Locate device queue that matches loop ID.
10436  *
10437  * Input:
10438  *	ha:		adapter state pointer.
10439  *	loop_id:	destination ID
10440  *
10441  * Returns:
10442  *	NULL = failure
10443  *
10444  * Context:
10445  *	Interrupt or Kernel context, no mailbox commands allowed.
10446  */
10447 ql_tgt_t *
10448 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10449 {
10450 	uint16_t	index;
10451 	ql_tgt_t	*tq;
10452 	ql_link_t	*link;
10453 
10454 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10455 		for (link = ha->dev[index].first; link != NULL;
10456 		    link = link->next) {
10457 			tq = link->base_address;
10458 			if (tq->loop_id == loop_id) {
10459 				return (tq);
10460 			}
10461 		}
10462 	}
10463 
10464 	return (NULL);
10465 }
10466 
10467 /*
10468  * ql_kstat_update
10469  *	Updates kernel statistics.
10470  *
10471  * Input:
10472  *	ksp - driver kernel statistics structure pointer.
10473  *	rw - function to perform
10474  *
10475  * Returns:
10476  *	0 or EACCES
10477  *
10478  * Context:
10479  *	Kernel context.
10480  */
10481 /* ARGSUSED */
10482 static int
10483 ql_kstat_update(kstat_t *ksp, int rw)
10484 {
10485 	int			rval;
10486 
10487 	QL_PRINT_3(CE_CONT, "started\n");
10488 
10489 	if (rw == KSTAT_WRITE) {
10490 		rval = EACCES;
10491 	} else {
10492 		rval = 0;
10493 	}
10494 
10495 	if (rval != 0) {
10496 		/*EMPTY*/
10497 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10498 	} else {
10499 		/*EMPTY*/
10500 		QL_PRINT_3(CE_CONT, "done\n");
10501 	}
10502 	return (rval);
10503 }
10504 
10505 /*
10506  * ql_load_flash
10507  *	Loads flash.
10508  *
10509  * Input:
10510  *	ha:	adapter state pointer.
10511  *	dp:	data pointer.
10512  *	size:	data length.
10513  *
10514  * Returns:
10515  *	ql local function return status code.
10516  *
10517  * Context:
10518  *	Kernel context.
10519  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* 24xx/25xx/81xx parts use a different flash interface. */
	if (CFG_IST(ha, CFG_CTRL_242581)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Determine the maximum accepted image size and the byte offset
	 * at which to program it.  Defaults: 128KB image at offset 0.
	 */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/*
			 * Partial SBUS update: 256KB section; the FPGA
			 * image lives in the upper half of the chip.
			 */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	if (size > size_to_compare) {
		/* Image larger than the selected flash region. */
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	/* Serialize hardware access across adapters. */
	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always restore the flash to read-only mode. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10591 
10592 /*
10593  * ql_program_flash_address
10594  *	Program flash address.
10595  *
10596  * Input:
10597  *	ha = adapter state pointer.
10598  *	addr = flash byte address.
10599  *	data = data to be written to flash.
10600  *
10601  * Returns:
10602  *	ql local function return status code.
10603  *
10604  * Context:
10605  *	Kernel context.
10606  */
10607 static int
10608 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10609 {
10610 	int rval;
10611 
10612 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10613 
10614 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10615 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10616 		ql_write_flash_byte(ha, addr, data);
10617 	} else {
10618 		/* Write Program Command Sequence */
10619 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10620 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10621 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10622 		ql_write_flash_byte(ha, addr, data);
10623 	}
10624 
10625 	/* Wait for write to complete. */
10626 	rval = ql_poll_flash(ha, addr, data);
10627 
10628 	if (rval != QL_SUCCESS) {
10629 		EL(ha, "failed=%xh\n", rval);
10630 	} else {
10631 		/*EMPTY*/
10632 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10633 	}
10634 	return (rval);
10635 }
10636 
10637 /*
10638  * ql_erase_flash
10639  *	Erases entire flash.
10640  *
10641  * Input:
10642  *	ha = adapter state pointer.
10643  *
10644  * Returns:
10645  *	ql local function return status code.
10646  *
10647  * Context:
10648  *	Kernel context.
10649  */
10650 int
10651 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10652 {
10653 	int		rval;
10654 	uint32_t	erase_delay = 2000000;
10655 	uint32_t	sStartAddr;
10656 	uint32_t	ssize;
10657 	uint32_t	cnt;
10658 	uint8_t		*bfp;
10659 	uint8_t		*tmp;
10660 
10661 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10662 
10663 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10664 
10665 		if (ql_flash_sbus_fpga == 1) {
10666 			ssize = QL_SBUS_FCODE_SIZE;
10667 			sStartAddr = QL_FCODE_OFFSET;
10668 		} else {
10669 			ssize = QL_FPGA_SIZE;
10670 			sStartAddr = QL_FPGA_OFFSET;
10671 		}
10672 
10673 		erase_delay = 20000000;
10674 
10675 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10676 
10677 		/* Save the section of flash we're not updating to buffer */
10678 		tmp = bfp;
10679 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10680 			/* Allow other system activity. */
10681 			if (cnt % 0x1000 == 0) {
10682 				ql_delay(ha, 10000);
10683 			}
10684 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10685 		}
10686 	}
10687 
10688 	/* Chip Erase Command Sequence */
10689 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10690 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10691 	ql_write_flash_byte(ha, 0x5555, 0x80);
10692 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10693 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10694 	ql_write_flash_byte(ha, 0x5555, 0x10);
10695 
10696 	ql_delay(ha, erase_delay);
10697 
10698 	/* Wait for erase to complete. */
10699 	rval = ql_poll_flash(ha, 0, 0x80);
10700 
10701 	if (rval != QL_SUCCESS) {
10702 		EL(ha, "failed=%xh\n", rval);
10703 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10704 			kmem_free(bfp, ssize);
10705 		}
10706 		return (rval);
10707 	}
10708 
10709 	/* restore the section we saved in the buffer */
10710 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10711 		/* Restore the section we saved off */
10712 		tmp = bfp;
10713 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10714 			/* Allow other system activity. */
10715 			if (cnt % 0x1000 == 0) {
10716 				ql_delay(ha, 10000);
10717 			}
10718 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10719 			if (rval != QL_SUCCESS) {
10720 				break;
10721 			}
10722 		}
10723 
10724 		kmem_free(bfp, ssize);
10725 	}
10726 
10727 	if (rval != QL_SUCCESS) {
10728 		EL(ha, "failed=%xh\n", rval);
10729 	} else {
10730 		/*EMPTY*/
10731 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10732 	}
10733 	return (rval);
10734 }
10735 
10736 /*
10737  * ql_poll_flash
10738  *	Polls flash for completion.
10739  *
10740  * Input:
10741  *	ha = adapter state pointer.
10742  *	addr = flash byte address.
10743  *	data = data to be polled.
10744  *
10745  * Returns:
10746  *	ql local function return status code.
10747  *
10748  * Context:
10749  *	Kernel context.
10750  */
10751 int
10752 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10753 {
10754 	uint8_t		flash_data;
10755 	uint32_t	cnt;
10756 	int		rval = QL_FUNCTION_FAILED;
10757 
10758 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10759 
10760 	poll_data = (uint8_t)(poll_data & BIT_7);
10761 
10762 	/* Wait for 30 seconds for command to finish. */
10763 	for (cnt = 30000000; cnt; cnt--) {
10764 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10765 
10766 		if ((flash_data & BIT_7) == poll_data) {
10767 			rval = QL_SUCCESS;
10768 			break;
10769 		}
10770 		if (flash_data & BIT_5 && cnt > 2) {
10771 			cnt = 2;
10772 		}
10773 		drv_usecwait(1);
10774 	}
10775 
10776 	if (rval != QL_SUCCESS) {
10777 		EL(ha, "failed=%xh\n", rval);
10778 	} else {
10779 		/*EMPTY*/
10780 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10781 	}
10782 	return (rval);
10783 }
10784 
10785 /*
10786  * ql_flash_enable
10787  *	Setup flash for reading/writing.
10788  *
10789  * Input:
10790  *	ha = adapter state pointer.
10791  *
10792  * Context:
10793  *	Kernel context.
10794  */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS: set the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* PCI: set the ISP flash-enable bit in ctrl_status. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read to complete/flush the reset sequence. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10828 
10829 /*
10830  * ql_flash_disable
10831  *	Disable flash and allow RISC to run.
10832  *
10833  * Input:
10834  *	ha = adapter state pointer.
10835  *
10836  * Context:
10837  *	Kernel context.
10838  */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* PCI: clear the ISP flash-enable bit in ctrl_status. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10866 
10867 /*
10868  * ql_write_flash_byte
10869  *	Write byte to flash.
10870  *
10871  * Input:
10872  *	ha = adapter state pointer.
10873  *	addr = flash byte address.
10874  *	data = data to be written.
10875  *
10876  * Context:
10877  *	Kernel context.
10878  */
10879 void
10880 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10881 {
10882 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10883 		ddi_put16(ha->sbus_fpga_dev_handle,
10884 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10885 		    LSW(addr));
10886 		ddi_put16(ha->sbus_fpga_dev_handle,
10887 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10888 		    MSW(addr));
10889 		ddi_put16(ha->sbus_fpga_dev_handle,
10890 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
10891 		    (uint16_t)data);
10892 	} else {
10893 		uint16_t bank_select;
10894 
10895 		/* Setup bit 16 of flash address. */
10896 		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
10897 
10898 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10899 			bank_select = (uint16_t)(bank_select & ~0xf0);
10900 			bank_select = (uint16_t)(bank_select |
10901 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10902 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10903 		} else {
10904 			if (addr & BIT_16 && !(bank_select &
10905 			    ISP_FLASH_64K_BANK)) {
10906 				bank_select = (uint16_t)(bank_select |
10907 				    ISP_FLASH_64K_BANK);
10908 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10909 			} else if (!(addr & BIT_16) && bank_select &
10910 			    ISP_FLASH_64K_BANK) {
10911 				bank_select = (uint16_t)(bank_select &
10912 				    ~ISP_FLASH_64K_BANK);
10913 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10914 			}
10915 		}
10916 
10917 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10918 			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
10919 			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
10920 		} else {
10921 			WRT16_IOMAP_REG(ha, flash_address, addr);
10922 			WRT16_IOMAP_REG(ha, flash_data, data);
10923 		}
10924 	}
10925 }
10926 
10927 /*
10928  * ql_read_flash_byte
10929  *	Reads byte from flash, but must read a word from chip.
10930  *
10931  * Input:
10932  *	ha = adapter state pointer.
10933  *	addr = flash byte address.
10934  *
10935  * Returns:
10936  *	byte from flash.
10937  *
10938  * Context:
10939  *	Kernel context.
10940  */
10941 uint8_t
10942 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
10943 {
10944 	uint8_t	data;
10945 
10946 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10947 		ddi_put16(ha->sbus_fpga_dev_handle,
10948 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10949 		    LSW(addr));
10950 		ddi_put16(ha->sbus_fpga_dev_handle,
10951 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10952 		    MSW(addr));
10953 		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
10954 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
10955 	} else {
10956 		uint16_t	bank_select;
10957 
10958 		/* Setup bit 16 of flash address. */
10959 		bank_select = RD16_IO_REG(ha, ctrl_status);
10960 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10961 			bank_select = (uint16_t)(bank_select & ~0xf0);
10962 			bank_select = (uint16_t)(bank_select |
10963 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10964 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10965 		} else {
10966 			if (addr & BIT_16 &&
10967 			    !(bank_select & ISP_FLASH_64K_BANK)) {
10968 				bank_select = (uint16_t)(bank_select |
10969 				    ISP_FLASH_64K_BANK);
10970 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10971 			} else if (!(addr & BIT_16) &&
10972 			    bank_select & ISP_FLASH_64K_BANK) {
10973 				bank_select = (uint16_t)(bank_select &
10974 				    ~ISP_FLASH_64K_BANK);
10975 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10976 			}
10977 		}
10978 
10979 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10980 			WRT16_IO_REG(ha, flash_address, addr);
10981 			data = (uint8_t)RD16_IO_REG(ha, flash_data);
10982 		} else {
10983 			WRT16_IOMAP_REG(ha, flash_address, addr);
10984 			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
10985 		}
10986 	}
10987 
10988 	return (data);
10989 }
10990 
10991 /*
10992  * ql_24xx_flash_id
10993  *	Get flash IDs.
10994  *
10995  * Input:
10996  *	ha:		adapter state pointer.
10997  *
10998  * Returns:
10999  *	ql local function return status code.
11000  *
11001  * Context:
11002  *	Kernel context.
11003  */
11004 int
11005 ql_24xx_flash_id(ql_adapter_state_t *vha)
11006 {
11007 	int			rval;
11008 	uint32_t		fdata = 0;
11009 	ql_adapter_state_t	*ha = vha->pha;
11010 	ql_xioctl_t		*xp = ha->xioctl;
11011 
11012 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11013 
11014 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11015 
11016 	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11017 		fdata = 0;
11018 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11019 		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11020 	}
11021 
11022 	if (rval != QL_SUCCESS) {
11023 		EL(ha, "24xx read_flash failed=%xh\n", rval);
11024 	} else if (fdata != 0) {
11025 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11026 		xp->fdesc.flash_id = MSB(LSW(fdata));
11027 		xp->fdesc.flash_len = LSB(MSW(fdata));
11028 	} else {
11029 		xp->fdesc.flash_manuf = ATMEL_FLASH;
11030 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11031 		xp->fdesc.flash_len = 0;
11032 	}
11033 
11034 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11035 
11036 	return (rval);
11037 }
11038 
11039 /*
11040  * ql_24xx_load_flash
11041  *	Loads flash.
11042  *
11043  * Input:
11044  *	ha = adapter state pointer.
11045  *	dp = data pointer.
11046  *	size = data length in bytes.
11047  *	faddr = 32bit word flash byte address.
11048  *
11049  * Returns:
11050  *	ql local function return status code.
11051  *
11052  * Context:
11053  *	Kernel context.
11054  */
11055 int
11056 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11057     uint32_t faddr)
11058 {
11059 	int			rval;
11060 	uint32_t		cnt, rest_addr, fdata, wc;
11061 	dma_mem_t		dmabuf = {0};
11062 	ql_adapter_state_t	*ha = vha->pha;
11063 	ql_xioctl_t		*xp = ha->xioctl;
11064 
11065 	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11066 	    ha->instance, faddr, size);
11067 
11068 	/* start address must be 32 bit word aligned */
11069 	if ((faddr & 0x3) != 0) {
11070 		EL(ha, "incorrect buffer size alignment\n");
11071 		return (QL_FUNCTION_PARAMETER_ERROR);
11072 	}
11073 
11074 	/* Allocate DMA buffer */
11075 	if (CFG_IST(ha, CFG_CTRL_2581)) {
11076 		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11077 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11078 		    QL_SUCCESS) {
11079 			EL(ha, "dma alloc failed, rval=%xh\n", rval);
11080 			return (rval);
11081 		}
11082 	}
11083 
11084 	GLOBAL_HW_LOCK();
11085 
11086 	/* Enable flash write */
11087 	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11088 		GLOBAL_HW_UNLOCK();
11089 		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11090 		ql_free_phys(ha, &dmabuf);
11091 		return (rval);
11092 	}
11093 
11094 	/* setup mask of address range within a sector */
11095 	rest_addr = (xp->fdesc.block_size - 1) >> 2;
11096 
11097 	faddr = faddr >> 2;	/* flash gets 32 bit words */
11098 
11099 	/*
11100 	 * Write data to flash.
11101 	 */
11102 	cnt = 0;
11103 	size = (size + 3) >> 2;	/* Round up & convert to dwords */
11104 
11105 	while (cnt < size) {
11106 		/* Beginning of a sector? */
11107 		if ((faddr & rest_addr) == 0) {
11108 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
11109 				fdata = ha->flash_data_addr | faddr;
11110 				rval = ql_flash_access(ha,
11111 				    FAC_ERASE_SECTOR, fdata, fdata +
11112 				    rest_addr, 0);
11113 				if (rval != QL_SUCCESS) {
11114 					EL(ha, "erase sector status="
11115 					    "%xh, start=%xh, end=%xh"
11116 					    "\n", rval, fdata,
11117 					    fdata + rest_addr);
11118 					break;
11119 				}
11120 			} else {
11121 				fdata = (faddr & ~rest_addr) << 2;
11122 				fdata = (fdata & 0xff00) |
11123 				    (fdata << 16 & 0xff0000) |
11124 				    (fdata >> 16 & 0xff);
11125 
11126 				if (rest_addr == 0x1fff) {
11127 					/* 32kb sector block erase */
11128 					rval = ql_24xx_write_flash(ha,
11129 					    FLASH_CONF_ADDR | 0x0352,
11130 					    fdata);
11131 				} else {
11132 					/* 64kb sector block erase */
11133 					rval = ql_24xx_write_flash(ha,
11134 					    FLASH_CONF_ADDR | 0x03d8,
11135 					    fdata);
11136 				}
11137 				if (rval != QL_SUCCESS) {
11138 					EL(ha, "Unable to flash sector"
11139 					    ": address=%xh\n", faddr);
11140 					break;
11141 				}
11142 			}
11143 		}
11144 
11145 		/* Write data */
11146 		if (CFG_IST(ha, CFG_CTRL_2581) &&
11147 		    ((faddr & 0x3f) == 0)) {
11148 			/*
11149 			 * Limit write up to sector boundary.
11150 			 */
11151 			wc = ((~faddr & (rest_addr>>1)) + 1);
11152 
11153 			if (size - cnt < wc) {
11154 				wc = size - cnt;
11155 			}
11156 
11157 			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11158 			    (uint8_t *)dmabuf.bp, wc<<2,
11159 			    DDI_DEV_AUTOINCR);
11160 
11161 			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11162 			    faddr, dmabuf.cookie.dmac_laddress, wc);
11163 			if (rval != QL_SUCCESS) {
11164 				EL(ha, "unable to dma to flash "
11165 				    "address=%xh\n", faddr << 2);
11166 				break;
11167 			}
11168 
11169 			cnt += wc;
11170 			faddr += wc;
11171 			dp += wc << 2;
11172 		} else {
11173 			fdata = *dp++;
11174 			fdata |= *dp++ << 8;
11175 			fdata |= *dp++ << 16;
11176 			fdata |= *dp++ << 24;
11177 			rval = ql_24xx_write_flash(ha,
11178 			    ha->flash_data_addr | faddr, fdata);
11179 			if (rval != QL_SUCCESS) {
11180 				EL(ha, "Unable to program flash "
11181 				    "address=%xh data=%xh\n", faddr,
11182 				    *dp);
11183 				break;
11184 			}
11185 			cnt++;
11186 			faddr++;
11187 
11188 			/* Allow other system activity. */
11189 			if (cnt % 0x1000 == 0) {
11190 				ql_delay(ha, 10000);
11191 			}
11192 		}
11193 	}
11194 
11195 	ql_24xx_protect_flash(ha);
11196 
11197 	ql_free_phys(ha, &dmabuf);
11198 
11199 	GLOBAL_HW_UNLOCK();
11200 
11201 	if (rval != QL_SUCCESS) {
11202 		EL(ha, "failed=%xh\n", rval);
11203 	} else {
11204 		/*EMPTY*/
11205 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11206 	}
11207 	return (rval);
11208 }
11209 
11210 /*
11211  * ql_24xx_read_flash
11212  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11213  *
11214  * Input:
11215  *	ha:	adapter state pointer.
11216  *	faddr:	NVRAM/FLASH address.
11217  *	bp:	data pointer.
11218  *
11219  * Returns:
11220  *	ql local function return status code.
11221  *
11222  * Context:
11223  *	Kernel context.
11224  */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Writing the address with FLASH_DATA_FLAG clear starts a read. */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/* Data register is read unconditionally; *bp is set even on error. */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11258 
11259 /*
11260  * ql_24xx_write_flash
11261  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11262  *
11263  * Input:
11264  *	ha:	adapter state pointer.
11265  *	addr:	NVRAM/FLASH address.
11266  *	value:	data.
11267  *
11268  * Returns:
11269  *	ql local function return status code.
11270  *
11271  * Context:
11272  *	Kernel context.
11273  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Load data first, then the address with FLASH_DATA_FLAG set. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/*
				 * Config-space writes (e.g. erase commands)
				 * also poll the part's status until BIT_0
				 * (write-in-progress) clears.
				 */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11315 /*
11316  * ql_24xx_unprotect_flash
11317  *	Enable writes
11318  *
11319  * Input:
11320  *	ha:	adapter state pointer.
11321  *
11322  * Returns:
11323  *	ql local function return status code.
11324  *
11325  * Context:
11326  *	Kernel context.
11327  */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with firmware up: let firmware enable writes. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 * Individual write failures are deliberately ignored here;
	 * this path always reports QL_SUCCESS.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Unprotect each of the first 16 sectors... */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		/* ...plus these specific upper sector addresses. */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
11381 
11382 /*
11383  * ql_24xx_protect_flash
11384  *	Disable writes
11385  *
11386  * Input:
11387  *	ha:	adapter state pointer.
11388  *
11389  * Context:
11390  *	Kernel context.
11391  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with firmware up: let firmware re-protect flash. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write (needed to issue protect commands). */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Protect each of the first 16 sectors... */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		/* ...plus these specific upper sector addresses. */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		/* NOTE(review): 0x80 vs 0x9c status value — origin unclear. */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11453 
11454 /*
11455  * ql_dump_firmware
11456  *	Save RISC code state information.
11457  *
11458  * Input:
11459  *	ha = adapter state pointer.
11460  *
11461  * Returns:
11462  *	QL local function return status code.
11463  *
11464  * Context:
11465  *	Kernel context.
11466  */
static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer = drv_usectohz(30000000);
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/*
	 * Nothing to do if a dump is already in progress, or a valid
	 * dump is being held that has not yet been uploaded.
	 */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Stall the driver so no new commands are issued. */
	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);

	/*
	 * Wait for all outstanding commands to complete
	 */
	(void) ql_wait_outstanding(ha);

	/* Dump firmware. */
	rval = ql_binary_fw_dump(ha, TRUE);

	/* Do abort to force restart. */
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a prior dump is still held — not an error. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
11532 
11533 /*
11534  * ql_binary_fw_dump
11535  *	Dumps binary data from firmware.
11536  *
11537  * Input:
11538  *	ha = adapter state pointer.
11539  *	lock_needed = mailbox lock needed.
11540  *
11541  * Returns:
11542  *	ql local function return status code.
11543  *
11544  * Context:
11545  *	Interrupt or Kernel context, no mailbox commands allowed.
11546  */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	clock_t			timer;
	mbx_cmd_t		mc;
	mbx_cmd_t		*mcp = &mc;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/* Refuse if a dump is in progress or an un-uploaded dump is held. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Claim the dump state before dropping the lock. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {

		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);
		/*
		 * NOTE(review): dereferences the previous ha->mcp to size
		 * the wait; assumes ha->mcp is non-NULL here — confirm.
		 */
		timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);
		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer, TR_CLOCK_TICK) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);
		mcp->timeout = 120;
		ha->mcp = mcp;

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/* Size the dump buffer for the adapter generation. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	/* KM_NOSLEEP: may run from contexts where blocking is unsafe. */
	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
	    NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
	} else {
		/* Dispatch to the chip-specific binary dump routine. */
		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else {
			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	if (rval != QL_SUCCESS) {
		/* On failure, discard the buffer and clear all dump state. */
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/* Mark the captured dump valid and awaiting upload. */
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
11680 
11681 /*
11682  * ql_ascii_fw_dump
11683  *	Converts firmware binary dump to ascii.
11684  *
11685  * Input:
11686  *	ha = adapter state pointer.
11687  *	bptr = buffer pointer.
11688  *
11689  * Returns:
11690  *	Amount of data buffer used.
11691  *
11692  * Context:
11693  *	Kernel context.
11694  */
size_t
ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp;
	int			mbox_cnt;
	/* Always operate on the physical adapter; vha may be a virtual port. */
	ql_adapter_state_t	*ha = vha->pha;
	ql_fw_dump_t		*fw = ha->ql_dump_ptr;

	/* Newer ISP families have their own dump layouts and converters. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		return (ql_24xx_ascii_fw_dump(ha, bufp));
	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
		return (ql_2581_ascii_fw_dump(ha, bufp));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Chip identification banner. */
	if (CFG_IST(ha, CFG_CTRL_2300)) {
		(void) sprintf(bufp, "\nISP 2300IP ");
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		(void) sprintf(bufp, "\nISP 6322FLX ");
	} else {
		(void) sprintf(bufp, "\nISP 2200IP ");
	}

	bp = bufp + strlen(bufp);
	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/*
	 * The register sections below all follow one pattern: the binary
	 * dump holds 16-bit words (hence sizeof / 2), eight values go on
	 * each output line, and every "%04x  " conversion emits exactly 6
	 * bytes, so bp is advanced by a constant 6 rather than strlen().
	 */
	(void) strcat(bufp, "\nPBIU Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
		bp = bp + 6;
	}

	/* ReqQ-RspQ-Risc2Host status registers exist on 2300/6322 only. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
		    "registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
			bp = bp + 6;
		}
	}

	/*
	 * bp sits on the NUL written by the last sprintf() above, so
	 * strcat(bp, ...) appends in place here and in later sections.
	 */
	(void) strcat(bp, "\n\nMailbox Registers:");
	bp = bufp + strlen(bufp);
	/* The 2300/6322 dump carries 16 mailbox registers, others 8. */
	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
	for (cnt = 0; cnt < mbox_cnt; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
		bp = bp + 6;
	}

	/* Auto request/response DMA registers: 2300/6322 only. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\nDMA Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP2 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP3 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP4 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP5 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP6 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP7 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
		/* Chips other than 2300/6322 only dump the first 16 words. */
		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
		    CFG_CTRL_6322)) == 0))) {
			break;
		}
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
		bp = bp + 6;
	}

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		/*
		 * 2300/6322 dumps carry separate code (word address base
		 * 0x0800), stack (base 0x10000) and data (base 0x10800)
		 * RAM images; the "\n%05x: " address prefix is a fixed 8
		 * bytes.
		 */
		(void) strcat(bp, "\n\nCode RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nStack RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nData RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
			bp = bp + 6;
		}
	} else {
		/*
		 * 2200: one 0xf000-word SRAM image starting at word
		 * address 0x1000; the "\n%04x: " prefix is 7 bytes.
		 */
		(void) strcat(bp, "\n\nRISC SRAM:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
				bp = bp + 7;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
	bp += strlen(bp);

	/* Request/response queue contents follow the END marker. */
	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Bytes used, excluding the terminating NUL. */
	return (strlen(bufp));
}
11982 
11983 /*
11984  * ql_24xx_ascii_fw_dump
11985  *	Converts ISP24xx firmware binary dump to ascii.
11986  *
11987  * Input:
11988  *	ha = adapter state pointer.
 *	bufp = ascii output buffer pointer.
11990  *
11991  * Returns:
11992  *	Amount of data buffer used.
11993  *
11994  * Context:
11995  *	Kernel context.
11996  */
static size_t
ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp = bufp;
	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version, ha->fw_attributes);
	bp += strlen(bp);

	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);

	/*
	 * bp was not advanced past the HCCR text above; strcat() appends
	 * to the same string and the strlen() below accounts for both.
	 *
	 * The register sections that follow share one pattern: the dump
	 * holds 32-bit words (hence sizeof / 4), eight values go on each
	 * output line, and every "%08x " conversion emits exactly 9
	 * bytes, so bp is advanced by a constant 9 rather than strlen().
	 */
	(void) strcat(bp, "\nHost Interface Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			/*
			 * Writes "\n" plus a NUL; bp++ steps over the
			 * '\n' and the next sprintf overwrites the NUL.
			 */
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
		bp += 9;
	}

	/* Mailbox registers are 16-bit: "%04x " is 5 bytes, 16 per line. */
	(void) sprintf(bp, "\n\nMailbox Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
		if (cnt % 16 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
		bp += 5;
	}

	(void) sprintf(bp, "\n\nXSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nCommand DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRISC GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
		bp += 9;
	}

	/*
	 * bufp + strlen(bufp) is the same position as bp here, since bp
	 * always sits at the end of the string being built.
	 */
	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nLMC Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFPM Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFB Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
		bp += 9;
	}

	/* Code RAM words shown from address 0x20000; "\n%08x: " is 11 bytes. */
	(void) sprintf(bp, "\n\nCode RAM");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
			bp += 11;
		}

		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
		bp += 9;
	}

	/* External memory image, sized per adapter, from address 0x100000. */
	(void) sprintf(bp, "\n\nExternal Memory");
	bp += strlen(bp);
	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
			bp += 11;
		}
		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
	bp += strlen(bp);

	/* Request/response queue contents follow the END marker. */
	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
	    (ha->fwexttracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;

		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				/* %08x shows the low 32 bits of w64 only. */
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
			bp += 9;
		}
	}

	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
	    (ha->fwfcetracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;

		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
			bp += 9;
		}
	}

	(void) sprintf(bp, "\n\n");
	bp += strlen(bp);

	/* Bytes used, computed by pointer difference rather than strlen. */
	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);

	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);

	return (cnt);
}
12396 
12397 /*
12398  * ql_2581_ascii_fw_dump
12399  *	Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12400  *
12401  * Input:
12402  *	ha = adapter state pointer.
 *	bufp = ascii output buffer pointer.
12404  *
12405  * Returns:
12406  *	Amount of data buffer used.
12407  *
12408  * Context:
12409  *	Kernel context.
12410  */
12411 static size_t
12412 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12413 {
12414 	uint32_t		cnt;
12415 	uint32_t		cnt1;
12416 	caddr_t			bp = bufp;
12417 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12418 
12419 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12420 
12421 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12422 	    ha->fw_major_version, ha->fw_minor_version,
12423 	    ha->fw_subminor_version, ha->fw_attributes);
12424 	bp += strlen(bp);
12425 
12426 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12427 	bp += strlen(bp);
12428 
12429 	(void) sprintf(bp, "\nHostRisc Registers");
12430 	bp += strlen(bp);
12431 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12432 		if (cnt % 8 == 0) {
12433 			(void) sprintf(bp++, "\n");
12434 		}
12435 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12436 		bp += 9;
12437 	}
12438 
12439 	(void) sprintf(bp, "\n\nPCIe Registers");
12440 	bp += strlen(bp);
12441 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12442 		if (cnt % 8 == 0) {
12443 			(void) sprintf(bp++, "\n");
12444 		}
12445 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12446 		bp += 9;
12447 	}
12448 
12449 	(void) strcat(bp, "\n\nHost Interface Registers");
12450 	bp += strlen(bp);
12451 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12452 		if (cnt % 8 == 0) {
12453 			(void) sprintf(bp++, "\n");
12454 		}
12455 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12456 		bp += 9;
12457 	}
12458 
12459 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12460 	bp += strlen(bp);
12461 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12462 		if (cnt % 8 == 0) {
12463 			(void) sprintf(bp++, "\n");
12464 		}
12465 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12466 		bp += 9;
12467 	}
12468 
12469 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12470 	    fw->risc_io);
12471 	bp += strlen(bp);
12472 
12473 	(void) sprintf(bp, "\n\nMailbox Registers");
12474 	bp += strlen(bp);
12475 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12476 		if (cnt % 16 == 0) {
12477 			(void) sprintf(bp++, "\n");
12478 		}
12479 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12480 		bp += 5;
12481 	}
12482 
12483 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12484 	bp += strlen(bp);
12485 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12486 		if (cnt % 8 == 0) {
12487 			(void) sprintf(bp++, "\n");
12488 		}
12489 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12490 		bp += 9;
12491 	}
12492 
12493 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12494 	bp += strlen(bp);
12495 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12496 		if (cnt % 8 == 0) {
12497 			(void) sprintf(bp++, "\n");
12498 		}
12499 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12500 		bp += 9;
12501 	}
12502 
12503 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12504 	bp += strlen(bp);
12505 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12506 		if (cnt % 8 == 0) {
12507 			(void) sprintf(bp++, "\n");
12508 		}
12509 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12510 		bp += 9;
12511 	}
12512 
12513 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12514 	bp += strlen(bp);
12515 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12516 		if (cnt % 8 == 0) {
12517 			(void) sprintf(bp++, "\n");
12518 		}
12519 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12520 		bp += 9;
12521 	}
12522 
12523 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12524 	bp += strlen(bp);
12525 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12526 		if (cnt % 8 == 0) {
12527 			(void) sprintf(bp++, "\n");
12528 		}
12529 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12530 		bp += 9;
12531 	}
12532 
12533 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12534 	bp += strlen(bp);
12535 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12536 		if (cnt % 8 == 0) {
12537 			(void) sprintf(bp++, "\n");
12538 		}
12539 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12540 		bp += 9;
12541 	}
12542 
12543 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12544 	bp += strlen(bp);
12545 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12546 		if (cnt % 8 == 0) {
12547 			(void) sprintf(bp++, "\n");
12548 		}
12549 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12550 		bp += 9;
12551 	}
12552 
12553 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12554 	bp += strlen(bp);
12555 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12556 		if (cnt % 8 == 0) {
12557 			(void) sprintf(bp++, "\n");
12558 		}
12559 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12560 		bp += 9;
12561 	}
12562 
12563 	(void) sprintf(bp, "\n\nASEQ-0 Registers");
12564 	bp += strlen(bp);
12565 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12566 		if (cnt % 8 == 0) {
12567 			(void) sprintf(bp++, "\n");
12568 		}
12569 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12570 		bp += 9;
12571 	}
12572 
12573 	(void) sprintf(bp, "\n\nASEQ-1 Registers");
12574 	bp += strlen(bp);
12575 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12576 		if (cnt % 8 == 0) {
12577 			(void) sprintf(bp++, "\n");
12578 		}
12579 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12580 		bp += 9;
12581 	}
12582 
12583 	(void) sprintf(bp, "\n\nASEQ-2 Registers");
12584 	bp += strlen(bp);
12585 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12586 		if (cnt % 8 == 0) {
12587 			(void) sprintf(bp++, "\n");
12588 		}
12589 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12590 		bp += 9;
12591 	}
12592 
12593 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12594 	bp += strlen(bp);
12595 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12596 		if (cnt % 8 == 0) {
12597 			(void) sprintf(bp++, "\n");
12598 		}
12599 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12600 		bp += 9;
12601 	}
12602 
12603 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12604 	bp += strlen(bp);
12605 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12606 		if (cnt % 8 == 0) {
12607 			(void) sprintf(bp++, "\n");
12608 		}
12609 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12610 		bp += 9;
12611 	}
12612 
12613 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12614 	bp += strlen(bp);
12615 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12616 		if (cnt % 8 == 0) {
12617 			(void) sprintf(bp++, "\n");
12618 		}
12619 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12620 		bp += 9;
12621 	}
12622 
12623 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12624 	bp += strlen(bp);
12625 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12626 		if (cnt % 8 == 0) {
12627 			(void) sprintf(bp++, "\n");
12628 		}
12629 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12630 		bp += 9;
12631 	}
12632 
12633 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12634 	bp += strlen(bp);
12635 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12636 		if (cnt % 8 == 0) {
12637 			(void) sprintf(bp++, "\n");
12638 		}
12639 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12640 		bp += 9;
12641 	}
12642 
12643 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12644 	bp += strlen(bp);
12645 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12646 		if (cnt % 8 == 0) {
12647 			(void) sprintf(bp++, "\n");
12648 		}
12649 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12650 		bp += 9;
12651 	}
12652 
12653 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12654 	bp += strlen(bp);
12655 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12656 		if (cnt % 8 == 0) {
12657 			(void) sprintf(bp++, "\n");
12658 		}
12659 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12660 		bp += 9;
12661 	}
12662 
12663 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12664 	bp += strlen(bp);
12665 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12666 		if (cnt % 8 == 0) {
12667 			(void) sprintf(bp++, "\n");
12668 		}
12669 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12670 		bp += 9;
12671 	}
12672 
12673 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12674 	bp += strlen(bp);
12675 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12676 		if (cnt % 8 == 0) {
12677 			(void) sprintf(bp++, "\n");
12678 		}
12679 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12680 		bp += 9;
12681 	}
12682 
12683 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12684 	bp += strlen(bp);
12685 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12686 		if (cnt % 8 == 0) {
12687 			(void) sprintf(bp++, "\n");
12688 		}
12689 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12690 		bp += 9;
12691 	}
12692 
12693 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12694 	bp += strlen(bp);
12695 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12696 		if (cnt % 8 == 0) {
12697 			(void) sprintf(bp++, "\n");
12698 		}
12699 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12700 		bp += 9;
12701 	}
12702 
12703 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12704 	bp += strlen(bp);
12705 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12706 		if (cnt % 8 == 0) {
12707 			(void) sprintf(bp++, "\n");
12708 		}
12709 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12710 		bp += 9;
12711 	}
12712 
12713 	(void) sprintf(bp, "\n\nRISC GP Registers");
12714 	bp += strlen(bp);
12715 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12716 		if (cnt % 8 == 0) {
12717 			(void) sprintf(bp++, "\n");
12718 		}
12719 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12720 		bp += 9;
12721 	}
12722 
12723 	(void) sprintf(bp, "\n\nLMC Registers");
12724 	bp += strlen(bp);
12725 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12726 		if (cnt % 8 == 0) {
12727 			(void) sprintf(bp++, "\n");
12728 		}
12729 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12730 		bp += 9;
12731 	}
12732 
12733 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12734 	bp += strlen(bp);
12735 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12736 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
12737 	    (uint32_t)(sizeof (fw->fpm_hdw_reg));
12738 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12739 		if (cnt % 8 == 0) {
12740 			(void) sprintf(bp++, "\n");
12741 		}
12742 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12743 		bp += 9;
12744 	}
12745 
12746 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12747 	bp += strlen(bp);
12748 	cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12749 	    (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
12750 	    (uint32_t)(sizeof (fw->fb_hdw_reg));
12751 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12752 		if (cnt % 8 == 0) {
12753 			(void) sprintf(bp++, "\n");
12754 		}
12755 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12756 		bp += 9;
12757 	}
12758 
12759 	(void) sprintf(bp, "\n\nCode RAM");
12760 	bp += strlen(bp);
12761 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12762 		if (cnt % 8 == 0) {
12763 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12764 			bp += 11;
12765 		}
12766 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12767 		bp += 9;
12768 	}
12769 
12770 	(void) sprintf(bp, "\n\nExternal Memory");
12771 	bp += strlen(bp);
12772 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12773 		if (cnt % 8 == 0) {
12774 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12775 			bp += 11;
12776 		}
12777 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12778 		bp += 9;
12779 	}
12780 
12781 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12782 	bp += strlen(bp);
12783 
12784 	(void) sprintf(bp, "\n\nRequest Queue");
12785 	bp += strlen(bp);
12786 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12787 		if (cnt % 8 == 0) {
12788 			(void) sprintf(bp, "\n%08x: ", cnt);
12789 			bp += strlen(bp);
12790 		}
12791 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12792 		bp += strlen(bp);
12793 	}
12794 
12795 	(void) sprintf(bp, "\n\nResponse Queue");
12796 	bp += strlen(bp);
12797 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12798 		if (cnt % 8 == 0) {
12799 			(void) sprintf(bp, "\n%08x: ", cnt);
12800 			bp += strlen(bp);
12801 		}
12802 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12803 		bp += strlen(bp);
12804 	}
12805 
12806 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12807 	    (ha->fwexttracebuf.bp != NULL)) {
12808 		uint32_t cnt_b = 0;
12809 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12810 
12811 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12812 		bp += strlen(bp);
12813 		/* show data address as a byte address, data as long words */
12814 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12815 			cnt_b = cnt * 4;
12816 			if (cnt_b % 32 == 0) {
12817 				(void) sprintf(bp, "\n%08x: ",
12818 				    (int)(w64 + cnt_b));
12819 				bp += 11;
12820 			}
12821 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12822 			bp += 9;
12823 		}
12824 	}
12825 
12826 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12827 	    (ha->fwfcetracebuf.bp != NULL)) {
12828 		uint32_t cnt_b = 0;
12829 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12830 
12831 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12832 		bp += strlen(bp);
12833 		/* show data address as a byte address, data as long words */
12834 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12835 			cnt_b = cnt * 4;
12836 			if (cnt_b % 32 == 0) {
12837 				(void) sprintf(bp, "\n%08x: ",
12838 				    (int)(w64 + cnt_b));
12839 				bp += 11;
12840 			}
12841 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12842 			bp += 9;
12843 		}
12844 	}
12845 
12846 	(void) sprintf(bp, "\n\n");
12847 	bp += strlen(bp);
12848 
12849 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12850 
12851 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12852 
12853 	return (cnt);
12854 }
12855 
12856 /*
12857  * ql_2200_binary_fw_dump
12858  *
12859  * Input:
12860  *	ha:	adapter state pointer.
12861  *	fw:	firmware dump context pointer.
12862  *
12863  * Returns:
12864  *	ql local function return status code.
12865  *
12866  * Context:
12867  *	Interrupt or Kernel context, no mailbox commands allowed.
12868  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 x 1ms for the RISC to report paused. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * With the RISC paused, snapshot the register banks.
		 * ctrl_status/pcr writes below select which internal
		 * register bank appears in the I/O window before each
		 * ql_read_regs() (counts are in 16-bit words).
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* pcr selects each RISC general-purpose register bank. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* Poll up to 30000 x 1ms for mailbox 0 to leave MBS_BUSY. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM.  Mailbox commands are not allowed in this
		 * context, so MBC_READ_RAM_WORD is driven by hand: write the
		 * address to mailbox 1, ring the host interrupt, and poll
		 * istatus/semaphore for completion of each word.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Up to 6000000 x 5us (30s) for each word. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						/* mb0 = command status. */
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox[0]);
						/* mb2 = RAM word read. */
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* Mailbox 0 holds the completion status. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13066 
13067 /*
13068  * ql_2300_binary_fw_dump
13069  *
13070  * Input:
13071  *	ha:	adapter state pointer.
13072  *	fw:	firmware dump context pointer.
13073  *
13074  * Returns:
13075  *	ql local function return status code.
13076  *
13077  * Context:
13078  *	Interrupt or Kernel context, no mailbox commands allowed.
13079  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 x 1ms for the RISC to report paused. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * With the RISC paused, snapshot the register banks.
		 * ctrl_status/pcr writes select which internal bank
		 * appears in the I/O window before each ql_read_regs()
		 * (counts are in 16-bit words).
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/*
		 * pcr selects each RISC general-purpose register bank;
		 * note the 2300 banks step by 0x200 (vs 0x100 on 2200).
		 */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* Poll up to 30000 x 1ms for mailbox 0 to leave MBS_BUSY. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13231 
13232 /*
13233  * ql_24xx_binary_fw_dump
13234  *
13235  * Input:
13236  *	ha:	adapter state pointer.
13237  *	fw:	firmware dump context pointer.
13238  *
13239  * Returns:
13240  *	ql local function return status code.
13241  *
13242  * Context:
13243  *	Interrupt or Kernel context, no mailbox commands allowed.
13244  */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture the host command/control register first. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll up to 30000 x 100us (3s) for the pause to take. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		/* Read back to flush the posted write. */
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers.  Each is read by writing a select value
		 * (0xB0n00000, n = shadow register index) to window offset
		 * 0xF0 and then reading the data at offset 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Transfer sequence registers.  io_base_addr selects a
		 * 16-dword register bank that appears at window offset
		 * 0xC0; successive banks are read back-to-back into the
		 * same destination buffer via the returned cursor (bp).
		 */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy queue entries, byte-swapping to little-endian. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */
	ql_reset_chip(ha);

	/* Memory.  RAM reads require the chip reset above to complete. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13668 
13669 /*
13670  * ql_25xx_binary_fw_dump
13671  *
13672  * Input:
13673  *	ha:	adapter state pointer.
13674  *	fw:	firmware dump context pointer.
13675  *
13676  * Returns:
13677  *	ql local function return status code.
13678  *
13679  * Context:
13680  *	Interrupt or Kernel context, no mailbox commands allowed.
13681  */
13682 static int
13683 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13684 {
13685 	uint32_t	*reg32;
13686 	void		*bp;
13687 	clock_t		timer;
13688 	int		rval = QL_SUCCESS;
13689 
13690 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13691 
13692 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
13693 
13694 	/* Pause RISC. */
13695 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13696 		/* Disable ISP interrupts. */
13697 		WRT16_IO_REG(ha, ictrl, 0);
13698 
13699 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13700 		for (timer = 30000;
13701 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13702 		    rval == QL_SUCCESS; timer--) {
13703 			if (timer) {
13704 				drv_usecwait(100);
13705 				if (timer % 10000 == 0) {
13706 					EL(ha, "risc pause %d\n", timer);
13707 				}
13708 			} else {
13709 				EL(ha, "risc pause timeout\n");
13710 				rval = QL_FUNCTION_TIMEOUT;
13711 			}
13712 		}
13713 	}
13714 
13715 	if (rval == QL_SUCCESS) {
13716 
13717 		/* Host Interface registers */
13718 
13719 		/* HostRisc registers. */
13720 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
13721 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13722 		    16, 32);
13723 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
13724 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13725 
13726 		/* PCIe registers. */
13727 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
13728 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
13729 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
13730 		    3, 32);
13731 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
13732 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
13733 
13734 		/* Host interface registers. */
13735 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13736 		    sizeof (fw->host_reg) / 4, 32);
13737 
13738 		/* Disable ISP interrupts. */
13739 
13740 		WRT32_IO_REG(ha, ictrl, 0);
13741 		RD32_IO_REG(ha, ictrl);
13742 		ADAPTER_STATE_LOCK(ha);
13743 		ha->flags &= ~INTERRUPTS_ENABLED;
13744 		ADAPTER_STATE_UNLOCK(ha);
13745 
13746 		/* Shadow registers. */
13747 
13748 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13749 		RD32_IO_REG(ha, io_base_addr);
13750 
13751 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13752 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13753 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13754 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13755 
13756 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13757 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13758 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13759 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13760 
13761 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13762 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13763 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13764 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13765 
13766 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13767 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13768 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13769 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13770 
13771 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13772 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13773 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13774 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13775 
13776 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13777 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13778 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13779 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13780 
13781 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13782 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13783 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13784 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13785 
13786 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13787 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
13788 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13789 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
13790 
13791 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13792 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
13793 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13794 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
13795 
13796 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13797 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
13798 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13799 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
13800 
13801 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13802 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
13803 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13804 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
13805 
13806 		/* RISC I/O register. */
13807 
13808 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
13809 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
13810 		    1, 32);
13811 
13812 		/* Mailbox registers. */
13813 
13814 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13815 		    sizeof (fw->mailbox_reg) / 2, 16);
13816 
13817 		/* Transfer sequence registers. */
13818 
13819 		/* XSEQ GP */
13820 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13821 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13822 		    16, 32);
13823 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13824 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13825 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13826 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13827 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13828 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13829 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13830 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13831 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13832 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13833 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13834 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13835 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13836 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13837 
13838 		/* XSEQ-0 */
13839 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
13840 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13841 		    16, 32);
13842 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
13843 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13844 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13845 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13846 
13847 		/* XSEQ-1 */
13848 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13849 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13850 		    16, 32);
13851 
13852 		/* Receive sequence registers. */
13853 
13854 		/* RSEQ GP */
13855 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13856 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13857 		    16, 32);
13858 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13859 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13860 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13861 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13862 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13863 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13864 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13865 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13866 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13867 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13868 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13869 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13870 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13871 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13872 
13873 		/* RSEQ-0 */
13874 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
13875 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13876 		    16, 32);
13877 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13878 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13879 
13880 		/* RSEQ-1 */
13881 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13882 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13883 		    sizeof (fw->rseq_1_reg) / 4, 32);
13884 
13885 		/* RSEQ-2 */
13886 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13887 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13888 		    sizeof (fw->rseq_2_reg) / 4, 32);
13889 
13890 		/* Auxiliary sequencer registers. */
13891 
13892 		/* ASEQ GP */
13893 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
13894 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
13895 		    16, 32);
13896 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
13897 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13898 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
13899 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13900 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
13901 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13902 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
13903 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13904 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
13905 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13906 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
13907 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13908 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
13909 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13910 
13911 		/* ASEQ-0 */
13912 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
13913 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
13914 		    16, 32);
13915 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
13916 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13917 
13918 		/* ASEQ-1 */
13919 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
13920 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
13921 		    16, 32);
13922 
13923 		/* ASEQ-2 */
13924 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
13925 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
13926 		    16, 32);
13927 
13928 		/* Command DMA registers. */
13929 
13930 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13931 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13932 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13933 
13934 		/* Queues. */
13935 
13936 		/* RequestQ0 */
13937 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13938 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13939 		    8, 32);
13940 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13941 
13942 		/* ResponseQ0 */
13943 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13944 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13945 		    8, 32);
13946 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13947 
13948 		/* RequestQ1 */
13949 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13950 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13951 		    8, 32);
13952 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13953 
13954 		/* Transmit DMA registers. */
13955 
13956 		/* XMT0 */
13957 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13958 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13959 		    16, 32);
13960 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13961 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13962 
13963 		/* XMT1 */
13964 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13965 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13966 		    16, 32);
13967 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13968 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13969 
13970 		/* XMT2 */
13971 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13972 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13973 		    16, 32);
13974 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13975 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13976 
13977 		/* XMT3 */
13978 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13979 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13980 		    16, 32);
13981 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13982 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13983 
13984 		/* XMT4 */
13985 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13986 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13987 		    16, 32);
13988 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13989 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13990 
13991 		/* XMT Common */
13992 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13993 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13994 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13995 
13996 		/* Receive DMA registers. */
13997 
13998 		/* RCVThread0 */
13999 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14000 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14001 		    ha->iobase + 0xC0, 16, 32);
14002 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14003 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14004 
14005 		/* RCVThread1 */
14006 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14007 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14008 		    ha->iobase + 0xC0, 16, 32);
14009 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14010 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14011 
14012 		/* RISC registers. */
14013 
14014 		/* RISC GP */
14015 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14016 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14017 		    16, 32);
14018 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14019 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14020 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14021 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14022 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14023 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14024 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14025 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14026 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14027 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14028 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14029 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14030 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14031 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14032 
14033 		/* Local memory controller (LMC) registers. */
14034 
14035 		/* LMC */
14036 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14037 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14038 		    16, 32);
14039 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14040 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14041 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14042 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14043 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14044 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14045 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14046 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14047 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14048 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14049 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14050 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14051 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14052 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14053 
14054 		/* Fibre Protocol Module registers. */
14055 
14056 		/* FPM hardware */
14057 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14058 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14059 		    16, 32);
14060 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14061 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14062 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14063 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14064 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14065 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14066 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14067 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14068 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14069 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14070 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14071 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14072 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14073 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14074 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14075 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14076 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14077 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14078 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14079 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14080 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14081 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14082 
14083 		/* Frame Buffer registers. */
14084 
14085 		/* FB hardware */
14086 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
14087 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14088 		    16, 32);
14089 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
14090 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14091 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14092 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14093 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14094 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14095 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14096 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14097 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14098 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14099 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14100 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14101 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14102 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14103 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14104 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14105 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14106 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14107 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14108 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14109 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14110 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14111 	}
14112 
14113 	/* Get the request queue */
14114 	if (rval == QL_SUCCESS) {
14115 		uint32_t	cnt;
14116 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14117 
14118 		/* Sync DMA buffer. */
14119 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14120 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14121 		    DDI_DMA_SYNC_FORKERNEL);
14122 
14123 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14124 			fw->req_q[cnt] = *w32++;
14125 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14126 		}
14127 	}
14128 
14129 	/* Get the respons queue */
14130 	if (rval == QL_SUCCESS) {
14131 		uint32_t	cnt;
14132 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14133 
14134 		/* Sync DMA buffer. */
14135 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14136 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14137 		    DDI_DMA_SYNC_FORKERNEL);
14138 
14139 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14140 			fw->rsp_q[cnt] = *w32++;
14141 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14142 		}
14143 	}
14144 
14145 	/* Reset RISC. */
14146 
14147 	ql_reset_chip(ha);
14148 
14149 	/* Memory. */
14150 
14151 	if (rval == QL_SUCCESS) {
14152 		/* Code RAM. */
14153 		rval = ql_read_risc_ram(ha, 0x20000,
14154 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14155 	}
14156 	if (rval == QL_SUCCESS) {
14157 		/* External Memory. */
14158 		rval = ql_read_risc_ram(ha, 0x100000,
14159 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14160 	}
14161 
14162 	/* Get the FC event trace buffer */
14163 	if (rval == QL_SUCCESS) {
14164 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14165 		    (ha->fwfcetracebuf.bp != NULL)) {
14166 			uint32_t	cnt;
14167 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14168 
14169 			/* Sync DMA buffer. */
14170 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14171 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14172 
14173 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14174 				fw->fce_trace_buf[cnt] = *w32++;
14175 			}
14176 		}
14177 	}
14178 
14179 	/* Get the extended trace buffer */
14180 	if (rval == QL_SUCCESS) {
14181 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14182 		    (ha->fwexttracebuf.bp != NULL)) {
14183 			uint32_t	cnt;
14184 			uint32_t	*w32 = ha->fwexttracebuf.bp;
14185 
14186 			/* Sync DMA buffer. */
14187 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14188 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14189 
14190 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14191 				fw->ext_trace_buf[cnt] = *w32++;
14192 			}
14193 		}
14194 	}
14195 
14196 	if (rval != QL_SUCCESS) {
14197 		EL(ha, "failed=%xh\n", rval);
14198 	} else {
14199 		/*EMPTY*/
14200 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14201 	}
14202 
14203 	return (rval);
14204 }
14205 
14206 /*
14207  * ql_81xx_binary_fw_dump
14208  *
14209  * Input:
14210  *	ha:	adapter state pointer.
14211  *	fw:	firmware dump context pointer.
14212  *
14213  * Returns:
14214  *	ql local function return status code.
14215  *
14216  * Context:
14217  *	Interrupt or Kernel context, no mailbox commands allowed.
14218  */
14219 static int
14220 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14221 {
14222 	uint32_t	*reg32;
14223 	void		*bp;
14224 	clock_t		timer;
14225 	int		rval = QL_SUCCESS;
14226 
14227 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14228 
14229 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
14230 
14231 	/* Pause RISC. */
14232 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
14233 		/* Disable ISP interrupts. */
14234 		WRT16_IO_REG(ha, ictrl, 0);
14235 
14236 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14237 		for (timer = 30000;
14238 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
14239 		    rval == QL_SUCCESS; timer--) {
14240 			if (timer) {
14241 				drv_usecwait(100);
14242 				if (timer % 10000 == 0) {
14243 					EL(ha, "risc pause %d\n", timer);
14244 				}
14245 			} else {
14246 				EL(ha, "risc pause timeout\n");
14247 				rval = QL_FUNCTION_TIMEOUT;
14248 			}
14249 		}
14250 	}
14251 
14252 	if (rval == QL_SUCCESS) {
14253 
14254 		/* Host Interface registers */
14255 
14256 		/* HostRisc registers. */
14257 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
14258 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14259 		    16, 32);
14260 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
14261 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14262 
14263 		/* PCIe registers. */
14264 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14265 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14266 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14267 		    3, 32);
14268 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14269 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14270 
14271 		/* Host interface registers. */
14272 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14273 		    sizeof (fw->host_reg) / 4, 32);
14274 
14275 		/* Disable ISP interrupts. */
14276 
14277 		WRT32_IO_REG(ha, ictrl, 0);
14278 		RD32_IO_REG(ha, ictrl);
14279 		ADAPTER_STATE_LOCK(ha);
14280 		ha->flags &= ~INTERRUPTS_ENABLED;
14281 		ADAPTER_STATE_UNLOCK(ha);
14282 
14283 		/* Shadow registers. */
14284 
14285 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14286 		RD32_IO_REG(ha, io_base_addr);
14287 
14288 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14289 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
14290 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14291 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14292 
14293 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14294 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
14295 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14296 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14297 
14298 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14299 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
14300 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14301 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14302 
14303 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14304 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
14305 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14306 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14307 
14308 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14309 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
14310 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14311 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14312 
14313 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14314 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
14315 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14316 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14317 
14318 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14319 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
14320 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14321 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14322 
14323 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14324 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
14325 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14326 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14327 
14328 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14329 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
14330 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14331 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14332 
14333 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14334 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
14335 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14336 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14337 
14338 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14339 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14340 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14341 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14342 
14343 		/* RISC I/O register. */
14344 
14345 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
14346 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14347 		    1, 32);
14348 
14349 		/* Mailbox registers. */
14350 
14351 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14352 		    sizeof (fw->mailbox_reg) / 2, 16);
14353 
14354 		/* Transfer sequence registers. */
14355 
14356 		/* XSEQ GP */
14357 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14358 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14359 		    16, 32);
14360 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14361 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14362 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14363 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14364 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14365 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14366 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14367 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14368 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14369 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14370 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14371 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14372 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14373 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14374 
14375 		/* XSEQ-0 */
14376 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14377 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14378 		    16, 32);
14379 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14380 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14381 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14382 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14383 
14384 		/* XSEQ-1 */
14385 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14386 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14387 		    16, 32);
14388 
14389 		/* Receive sequence registers. */
14390 
14391 		/* RSEQ GP */
14392 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14393 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14394 		    16, 32);
14395 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14396 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14397 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14398 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14399 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14400 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14401 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14402 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14403 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14404 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14405 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14406 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14407 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14408 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14409 
14410 		/* RSEQ-0 */
14411 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14412 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14413 		    16, 32);
14414 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14415 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14416 
14417 		/* RSEQ-1 */
14418 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14419 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14420 		    sizeof (fw->rseq_1_reg) / 4, 32);
14421 
14422 		/* RSEQ-2 */
14423 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14424 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14425 		    sizeof (fw->rseq_2_reg) / 4, 32);
14426 
14427 		/* Auxiliary sequencer registers. */
14428 
14429 		/* ASEQ GP */
14430 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
14431 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14432 		    16, 32);
14433 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
14434 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14435 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
14436 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14437 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
14438 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14439 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
14440 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14441 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
14442 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14443 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
14444 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14445 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
14446 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14447 
14448 		/* ASEQ-0 */
14449 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14450 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14451 		    16, 32);
14452 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14453 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14454 
14455 		/* ASEQ-1 */
14456 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14457 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14458 		    16, 32);
14459 
14460 		/* ASEQ-2 */
14461 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14462 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14463 		    16, 32);
14464 
14465 		/* Command DMA registers. */
14466 
14467 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
14468 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14469 		    sizeof (fw->cmd_dma_reg) / 4, 32);
14470 
14471 		/* Queues. */
14472 
14473 		/* RequestQ0 */
14474 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
14475 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14476 		    8, 32);
14477 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14478 
14479 		/* ResponseQ0 */
14480 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
14481 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14482 		    8, 32);
14483 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14484 
14485 		/* RequestQ1 */
14486 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
14487 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14488 		    8, 32);
14489 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14490 
14491 		/* Transmit DMA registers. */
14492 
14493 		/* XMT0 */
14494 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
14495 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14496 		    16, 32);
14497 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
14498 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14499 
14500 		/* XMT1 */
14501 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
14502 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14503 		    16, 32);
14504 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
14505 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14506 
14507 		/* XMT2 */
14508 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
14509 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14510 		    16, 32);
14511 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
14512 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14513 
14514 		/* XMT3 */
14515 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
14516 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14517 		    16, 32);
14518 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
14519 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14520 
14521 		/* XMT4 */
14522 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
14523 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14524 		    16, 32);
14525 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
14526 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14527 
14528 		/* XMT Common */
14529 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14530 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14531 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14532 
14533 		/* Receive DMA registers. */
14534 
14535 		/* RCVThread0 */
14536 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14537 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14538 		    ha->iobase + 0xC0, 16, 32);
14539 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14540 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14541 
14542 		/* RCVThread1 */
14543 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14544 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14545 		    ha->iobase + 0xC0, 16, 32);
14546 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14547 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14548 
14549 		/* RISC registers. */
14550 
14551 		/* RISC GP */
14552 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14553 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14554 		    16, 32);
14555 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14556 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14557 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14558 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14559 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14560 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14561 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14562 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14563 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14564 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14565 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14566 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14567 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14568 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14569 
14570 		/* Local memory controller (LMC) registers. */
14571 
14572 		/* LMC */
14573 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14574 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14575 		    16, 32);
14576 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14577 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14578 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14579 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14580 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14581 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14582 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14583 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14584 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14585 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14586 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14587 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14588 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14589 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14590 
14591 		/* Fibre Protocol Module registers. */
14592 
14593 		/* FPM hardware */
14594 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14595 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14596 		    16, 32);
14597 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14598 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14599 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14600 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14601 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14602 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14603 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14604 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14605 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14606 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14607 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14608 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14609 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14610 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14611 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14612 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14613 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14614 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14615 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14616 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14617 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14618 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14619 		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14620 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14621 		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14622 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14623 
14624 		/* Frame Buffer registers. */
14625 
14626 		/* FB hardware */
14627 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
14628 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14629 		    16, 32);
14630 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
14631 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14632 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14633 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14634 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14635 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14636 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14637 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14638 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14639 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14640 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14641 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14642 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14643 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14644 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14645 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14646 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14647 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14648 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14649 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14650 		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14651 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14652 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14653 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14654 	}
14655 
14656 	/* Get the request queue */
14657 	if (rval == QL_SUCCESS) {
14658 		uint32_t	cnt;
14659 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14660 
14661 		/* Sync DMA buffer. */
14662 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14663 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14664 		    DDI_DMA_SYNC_FORKERNEL);
14665 
14666 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14667 			fw->req_q[cnt] = *w32++;
14668 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14669 		}
14670 	}
14671 
	/* Get the response queue */
14673 	if (rval == QL_SUCCESS) {
14674 		uint32_t	cnt;
14675 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14676 
14677 		/* Sync DMA buffer. */
14678 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14679 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14680 		    DDI_DMA_SYNC_FORKERNEL);
14681 
14682 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14683 			fw->rsp_q[cnt] = *w32++;
14684 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14685 		}
14686 	}
14687 
14688 	/* Reset RISC. */
14689 
14690 	ql_reset_chip(ha);
14691 
14692 	/* Memory. */
14693 
14694 	if (rval == QL_SUCCESS) {
14695 		/* Code RAM. */
14696 		rval = ql_read_risc_ram(ha, 0x20000,
14697 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14698 	}
14699 	if (rval == QL_SUCCESS) {
14700 		/* External Memory. */
14701 		rval = ql_read_risc_ram(ha, 0x100000,
14702 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14703 	}
14704 
14705 	/* Get the FC event trace buffer */
14706 	if (rval == QL_SUCCESS) {
14707 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14708 		    (ha->fwfcetracebuf.bp != NULL)) {
14709 			uint32_t	cnt;
14710 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14711 
14712 			/* Sync DMA buffer. */
14713 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14714 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14715 
14716 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14717 				fw->fce_trace_buf[cnt] = *w32++;
14718 			}
14719 		}
14720 	}
14721 
14722 	/* Get the extended trace buffer */
14723 	if (rval == QL_SUCCESS) {
14724 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14725 		    (ha->fwexttracebuf.bp != NULL)) {
14726 			uint32_t	cnt;
14727 			uint32_t	*w32 = ha->fwexttracebuf.bp;
14728 
14729 			/* Sync DMA buffer. */
14730 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14731 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14732 
14733 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14734 				fw->ext_trace_buf[cnt] = *w32++;
14735 			}
14736 		}
14737 	}
14738 
14739 	if (rval != QL_SUCCESS) {
14740 		EL(ha, "failed=%xh\n", rval);
14741 	} else {
14742 		/*EMPTY*/
14743 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14744 	}
14745 
14746 	return (rval);
14747 }
14748 
14749 /*
14750  * ql_read_risc_ram
14751  *	Reads RISC RAM one word at a time.
14752  *	Risc interrupts must be disabled when this routine is called.
14753  *
14754  * Input:
14755  *	ha:	adapter state pointer.
14756  *	risc_address:	RISC code start address.
14757  *	len:		Number of words.
14758  *	buf:		buffer pointer.
14759  *
14760  * Returns:
14761  *	ql local function return status code.
14762  *
14763  * Context:
14764  *	Interrupt or Kernel context, no mailbox commands allowed.
14765  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;	/* 2xxx: 16-bit words */
	uint32_t	*buf32 = (uint32_t *)buf;	/* 24xx/25xx/81xx: 32-bit */
	int		rval = QL_SUCCESS;

	/*
	 * Read one RISC RAM word per iteration by driving a
	 * MBC_READ_RAM_EXTENDED mailbox command directly through the
	 * mailbox registers.  Interrupts are disabled, so completion is
	 * detected by polling instead of the normal mailbox command path.
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		/* Notify firmware that a mailbox command is pending. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		/* Poll for completion: 6000000 * 5us = up to 30 seconds. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				/* 1 / 0x10: mailbox command completed OK. */
				if ((stat == 1) || (stat == 0x10)) {
					if (CFG_IST(ha, CFG_CTRL_242581)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command error; mb0 holds status. */
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				/* Unexpected interrupt; dismiss and re-poll. */
				if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Dismiss the completion interrupt before the next word. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		/* timer exhausted without completion or error status. */
		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
14831 
14832 /*
14833  * ql_read_regs
14834  *	Reads adapter registers to buffer.
14835  *
14836  * Input:
14837  *	ha:	adapter state pointer.
14838  *	buf:	buffer pointer.
14839  *	reg:	start address.
14840  *	count:	number of registers.
 *	wds:	register size in bits (8, 16 or 32).
 *
 * Returns:
 *	pointer to the buffer location just past the last register copied.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
14846 static void *
14847 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14848     uint8_t wds)
14849 {
14850 	uint32_t	*bp32, *reg32;
14851 	uint16_t	*bp16, *reg16;
14852 	uint8_t		*bp8, *reg8;
14853 
14854 	switch (wds) {
14855 	case 32:
14856 		bp32 = buf;
14857 		reg32 = reg;
14858 		while (count--) {
14859 			*bp32++ = RD_REG_DWORD(ha, reg32++);
14860 		}
14861 		return (bp32);
14862 	case 16:
14863 		bp16 = buf;
14864 		reg16 = reg;
14865 		while (count--) {
14866 			*bp16++ = RD_REG_WORD(ha, reg16++);
14867 		}
14868 		return (bp16);
14869 	case 8:
14870 		bp8 = buf;
14871 		reg8 = reg;
14872 		while (count--) {
14873 			*bp8++ = RD_REG_BYTE(ha, reg8++);
14874 		}
14875 		return (bp8);
14876 	default:
14877 		EL(ha, "Unknown word size=%d\n", wds);
14878 		return (buf);
14879 	}
14880 }
14881 
/*
 * ql_save_config_regs
 *	Saves a snapshot of selected PCI configuration registers in the
 *	"ql-config-space" devinfo property so ql_restore_config_regs()
 *	can later write them back.
 *
 * Input:
 *	dip:	dev_info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
ql_save_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	int			ret;
	ql_config_space_t	chs;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* A saved copy already exists; keep it rather than overwrite. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
	    1) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_SUCCESS);
	}

	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_HEADER);
	/* Bridge control register exists only in type 1 headers. */
	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_BCNTRL);
	}

	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_CACHE_LINESZ);

	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_LATENCY_TIMER);

	/* Secondary latency timer exists only in type 1 headers. */
	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_LATENCY_TIMER);
	}

	/* Base address registers (BAR0-BAR5). */
	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);

	/* Store the snapshot as a byte-array property on the node. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
	    (uchar_t *)&chs, sizeof (ql_config_space_t));

	if (ret != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
14946 
/*
 * ql_restore_config_regs
 *	Writes the PCI configuration registers previously saved by
 *	ql_save_config_regs() back to the adapter, then removes the
 *	"ql-config-space" property.
 *
 * Input:
 *	dip:	dev_info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to restore if the snapshot property is absent. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	/* Bridge control register exists only in type 1 headers. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	/* Secondary latency timer exists only in type 1 headers. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	/* Base address registers (BAR0-BAR5). */
	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* The snapshot is one-shot; drop it once restored. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15009 
15010 uint8_t
15011 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15012 {
15013 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15014 		return (ddi_get8(ha->sbus_config_handle,
15015 		    (uint8_t *)(ha->sbus_config_base + off)));
15016 	}
15017 
15018 #ifdef KERNEL_32
15019 	return (pci_config_getb(ha->pci_handle, off));
15020 #else
15021 	return (pci_config_get8(ha->pci_handle, off));
15022 #endif
15023 }
15024 
15025 uint16_t
15026 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15027 {
15028 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15029 		return (ddi_get16(ha->sbus_config_handle,
15030 		    (uint16_t *)(ha->sbus_config_base + off)));
15031 	}
15032 
15033 #ifdef KERNEL_32
15034 	return (pci_config_getw(ha->pci_handle, off));
15035 #else
15036 	return (pci_config_get16(ha->pci_handle, off));
15037 #endif
15038 }
15039 
15040 uint32_t
15041 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15042 {
15043 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15044 		return (ddi_get32(ha->sbus_config_handle,
15045 		    (uint32_t *)(ha->sbus_config_base + off)));
15046 	}
15047 
15048 #ifdef KERNEL_32
15049 	return (pci_config_getl(ha->pci_handle, off));
15050 #else
15051 	return (pci_config_get32(ha->pci_handle, off));
15052 #endif
15053 }
15054 
15055 void
15056 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15057 {
15058 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15059 		ddi_put8(ha->sbus_config_handle,
15060 		    (uint8_t *)(ha->sbus_config_base + off), val);
15061 	} else {
15062 #ifdef KERNEL_32
15063 		pci_config_putb(ha->pci_handle, off, val);
15064 #else
15065 		pci_config_put8(ha->pci_handle, off, val);
15066 #endif
15067 	}
15068 }
15069 
15070 void
15071 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15072 {
15073 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15074 		ddi_put16(ha->sbus_config_handle,
15075 		    (uint16_t *)(ha->sbus_config_base + off), val);
15076 	} else {
15077 #ifdef KERNEL_32
15078 		pci_config_putw(ha->pci_handle, off, val);
15079 #else
15080 		pci_config_put16(ha->pci_handle, off, val);
15081 #endif
15082 	}
15083 }
15084 
15085 void
15086 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15087 {
15088 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
15089 		ddi_put32(ha->sbus_config_handle,
15090 		    (uint32_t *)(ha->sbus_config_base + off), val);
15091 	} else {
15092 #ifdef KERNEL_32
15093 		pci_config_putl(ha->pci_handle, off, val);
15094 #else
15095 		pci_config_put32(ha->pci_handle, off, val);
15096 #endif
15097 	}
15098 }
15099 
15100 /*
15101  * ql_halt
15102  *	Waits for commands that are running to finish and
15103  *	if they do not, commands are aborted.
15104  *	Finally the adapter is reset.
15105  *
15106  * Input:
15107  *	ha:	adapter state pointer.
15108  *	pwr:	power state.
15109  *
15110  * Context:
15111  *	Kernel context.
15112  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* (3000 iterations * 10000us delay per pass.) */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			/* cnt == 0 means the 30 second wait above expired. */
			if (cnt == 0) {
				/* cnt is reused as an outstanding-cmd index. */
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * NOTE(review): kicking pending
					 * commands resets cnt to 1, which
					 * restarts the scan from slot 2 on
					 * the next increment — confirm this
					 * revisit-all behavior is intended.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Abort only cmds for this target. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Only a full power-off (D3) takes the adapter offline. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15197 
15198 /*
15199  * ql_get_dma_mem
15200  *	Function used to allocate dma memory.
15201  *
15202  * Input:
15203  *	ha:			adapter state pointer.
15204  *	mem:			pointer to dma memory object.
15205  *	size:			size of the request in bytes
15206  *
15207  * Returns:
 *	ql local function return status code.
15209  *
15210  * Context:
15211  *	Kernel context.
15212  */
15213 int
15214 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15215     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15216 {
15217 	int	rval;
15218 
15219 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15220 
15221 	mem->size = size;
15222 	mem->type = allocation_type;
15223 	mem->cookie_count = 1;
15224 
15225 	switch (alignment) {
15226 	case QL_DMA_DATA_ALIGN:
15227 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15228 		break;
15229 	case QL_DMA_RING_ALIGN:
15230 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15231 		break;
15232 	default:
15233 		EL(ha, "failed, unknown alignment type %x\n", alignment);
15234 		break;
15235 	}
15236 
15237 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15238 		ql_free_phys(ha, mem);
15239 		EL(ha, "failed, alloc_phys=%xh\n", rval);
15240 	}
15241 
15242 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15243 
15244 	return (rval);
15245 }
15246 
15247 /*
15248  * ql_alloc_phys
15249  *	Function used to allocate memory and zero it.
15250  *	Memory is below 4 GB.
15251  *
15252  * Input:
15253  *	ha:			adapter state pointer.
15254  *	mem:			pointer to dma memory object.
15255  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15256  *	mem->cookie_count	number of segments allowed.
15257  *	mem->type		memory allocation type.
15258  *	mem->size		memory size.
15259  *	mem->alignment		memory alignment.
15260  *
15261  * Returns:
 *	ql local function return status code.
15263  *
15264  * Context:
15265  *	Kernel context.
15266  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	/* Local copy so per-type endian flags can be adjusted below. */
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel memory; no access handle is created. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Default acc_attr endianness covers LITTLE_ENDIAN_DMA. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			/*
			 * NOTE(review): scalar fields compared against NULL
			 * (lint-tolerated in this codebase).  Also,
			 * mem->cookie is not populated until the
			 * ql_bind_dma_buffer() call below — confirm this
			 * check does not inspect a stale/zeroed cookie.
			 */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		/* ql_free_phys() releases memory and the handle. */
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15362 
15363 /*
15364  * ql_free_phys
15365  *	Function used to free physical memory.
15366  *
15367  * Input:
15368  *	ha:	adapter state pointer.
15369  *	mem:	pointer to dma memory object.
15370  *
15371  * Context:
15372  *	Kernel context.
15373  */
15374 void
15375 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15376 {
15377 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15378 
15379 	if (mem != NULL && mem->dma_handle != NULL) {
15380 		ql_unbind_dma_buffer(ha, mem);
15381 		switch (mem->type) {
15382 		case KERNEL_MEM:
15383 			if (mem->bp != NULL) {
15384 				kmem_free(mem->bp, mem->size);
15385 			}
15386 			break;
15387 		case LITTLE_ENDIAN_DMA:
15388 		case BIG_ENDIAN_DMA:
15389 		case NO_SWAP_DMA:
15390 			if (mem->acc_handle != NULL) {
15391 				ddi_dma_mem_free(&mem->acc_handle);
15392 				mem->acc_handle = NULL;
15393 			}
15394 			break;
15395 		default:
15396 			break;
15397 		}
15398 		mem->bp = NULL;
15399 		ddi_dma_free_handle(&mem->dma_handle);
15400 		mem->dma_handle = NULL;
15401 	}
15402 
15403 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15404 }
15405 
15406 /*
15407  * ql_alloc_dma_resouce.
15408  *	Allocates DMA resource for buffer.
15409  *
15410  * Input:
15411  *	ha:			adapter state pointer.
15412  *	mem:			pointer to dma memory object.
15413  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
15414  *	mem->cookie_count	number of segments allowed.
15415  *	mem->type		memory allocation type.
15416  *	mem->size		memory size.
15417  *	mem->bp			pointer to memory or struct buf
15418  *
15419  * Returns:
 *	ql local function return status code.
15421  *
15422  * Context:
15423  *	Kernel context.
15424  */
15425 int
15426 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15427 {
15428 	ddi_dma_attr_t	dma_attr;
15429 
15430 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15431 
15432 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15433 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15434 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15435 
15436 	/*
15437 	 * Allocate DMA handle for command.
15438 	 */
15439 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15440 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15441 	    DDI_SUCCESS) {
15442 		EL(ha, "failed, ddi_dma_alloc_handle\n");
15443 		mem->dma_handle = NULL;
15444 		return (QL_MEMORY_ALLOC_FAILED);
15445 	}
15446 
15447 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15448 
15449 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15450 		EL(ha, "failed, bind_dma_buffer\n");
15451 		ddi_dma_free_handle(&mem->dma_handle);
15452 		mem->dma_handle = NULL;
15453 		return (QL_MEMORY_ALLOC_FAILED);
15454 	}
15455 
15456 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15457 
15458 	return (QL_SUCCESS);
15459 }
15460 
15461 /*
15462  * ql_free_dma_resource
15463  *	Frees DMA resources.
15464  *
15465  * Input:
15466  *	ha:		adapter state pointer.
15467  *	mem:		pointer to dma memory object.
15468  *	mem->dma_handle	DMA memory handle.
15469  *
15470  * Context:
15471  *	Kernel context.
15472  */
void
ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * ql_free_phys() unbinds the buffer, frees any backing store it
	 * allocated, and releases the DMA handle; it is a no-op when
	 * mem->dma_handle is NULL, so this is safe on a partially set up
	 * object.
	 */
	ql_free_phys(ha, mem);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15482 
15483 /*
15484  * ql_bind_dma_buffer
15485  *	Binds DMA buffer.
15486  *
15487  * Input:
15488  *	ha:			adapter state pointer.
15489  *	mem:			pointer to dma memory object.
15490  *	sleep:			KM_SLEEP or KM_NOSLEEP.
15491  *	mem->dma_handle		DMA memory handle.
15492  *	mem->cookie_count	number of segments allowed.
15493  *	mem->type		memory allocation type.
15494  *	mem->size		memory size.
15495  *	mem->bp			pointer to memory or struct buf
15496  *
15497  * Returns:
15498  *	mem->cookies		pointer to list of cookies.
15499  *	mem->cookie_count	number of cookies.
15500  *	status			success = DDI_DMA_MAPPED
15501  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15502  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15503  *				DDI_DMA_TOOBIG
15504  *
15505  * Context:
15506  *	Kernel context.
15507  */
15508 static int
15509 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15510 {
15511 	int			rval;
15512 	ddi_dma_cookie_t	*cookiep;
15513 	uint32_t		cnt = mem->cookie_count;
15514 
15515 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15516 
15517 	if (mem->type == STRUCT_BUF_MEMORY) {
15518 		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15519 		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15520 		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15521 	} else {
15522 		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15523 		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
15524 		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15525 		    &mem->cookie_count);
15526 	}
15527 
15528 	if (rval == DDI_DMA_MAPPED) {
15529 		if (mem->cookie_count > cnt) {
15530 			(void) ddi_dma_unbind_handle(mem->dma_handle);
15531 			EL(ha, "failed, cookie_count %d > %d\n",
15532 			    mem->cookie_count, cnt);
15533 			rval = DDI_DMA_TOOBIG;
15534 		} else {
15535 			if (mem->cookie_count > 1) {
15536 				if (mem->cookies = kmem_zalloc(
15537 				    sizeof (ddi_dma_cookie_t) *
15538 				    mem->cookie_count, sleep)) {
15539 					*mem->cookies = mem->cookie;
15540 					cookiep = mem->cookies;
15541 					for (cnt = 1; cnt < mem->cookie_count;
15542 					    cnt++) {
15543 						ddi_dma_nextcookie(
15544 						    mem->dma_handle,
15545 						    ++cookiep);
15546 					}
15547 				} else {
15548 					(void) ddi_dma_unbind_handle(
15549 					    mem->dma_handle);
15550 					EL(ha, "failed, kmem_zalloc\n");
15551 					rval = DDI_DMA_NORESOURCES;
15552 				}
15553 			} else {
15554 				/*
15555 				 * It has been reported that dmac_size at times
15556 				 * may be incorrect on sparc machines so for
15557 				 * sparc machines that only have one segment
15558 				 * use the buffer size instead.
15559 				 */
15560 				mem->cookies = &mem->cookie;
15561 				mem->cookies->dmac_size = mem->size;
15562 			}
15563 		}
15564 	}
15565 
15566 	if (rval != DDI_DMA_MAPPED) {
15567 		EL(ha, "failed=%xh\n", rval);
15568 	} else {
15569 		/*EMPTY*/
15570 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15571 	}
15572 
15573 	return (rval);
15574 }
15575 
15576 /*
15577  * ql_unbind_dma_buffer
15578  *	Unbinds DMA buffer.
15579  *
15580  * Input:
15581  *	ha:			adapter state pointer.
15582  *	mem:			pointer to dma memory object.
15583  *	mem->dma_handle		DMA memory handle.
15584  *	mem->cookies		pointer to cookie list.
15585  *	mem->cookie_count	number of cookies.
15586  *
15587  * Context:
15588  *	Kernel context.
15589  */
15590 /* ARGSUSED */
15591 static void
15592 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15593 {
15594 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15595 
15596 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15597 	if (mem->cookie_count > 1) {
15598 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15599 		    mem->cookie_count);
15600 		mem->cookies = NULL;
15601 	}
15602 	mem->cookie_count = 0;
15603 
15604 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15605 }
15606 
/*
 * ql_suspend_adapter
 *	Quiesces an adapter instance: claims mailbox ownership so no
 *	mailbox user is stranded, drains outstanding commands, halts the
 *	adapter and disables ISP interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* cv_reltimedwait() limit used below: 32 seconds in ticks. */
	clock_t timer = 32 * drv_usectohz(1000000);

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 32 seconds from now (timer computed above). */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Result is not needed here; we only need the wait itself. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	/* Skip the register write when the chip is already at D3. */
	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15676 
15677 /*
15678  * ql_add_link_b
15679  *	Add link to the end of the chain.
15680  *
15681  * Input:
15682  *	head = Head of link list.
15683  *	link = link to be added.
15684  *	LOCK must be already obtained.
15685  *
15686  * Context:
15687  *	Interrupt or Kernel context, no mailbox commands allowed.
15688  */
15689 void
15690 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15691 {
15692 	/* at the end there isn't a next */
15693 	link->next = NULL;
15694 
15695 	if ((link->prev = head->last) == NULL) {
15696 		head->first = link;
15697 	} else {
15698 		head->last->next = link;
15699 	}
15700 
15701 	head->last = link;
15702 	link->head = head;	/* the queue we're on */
15703 }
15704 
15705 /*
15706  * ql_add_link_t
15707  *	Add link to the beginning of the chain.
15708  *
15709  * Input:
15710  *	head = Head of link list.
15711  *	link = link to be added.
15712  *	LOCK must be already obtained.
15713  *
15714  * Context:
15715  *	Interrupt or Kernel context, no mailbox commands allowed.
15716  */
15717 void
15718 ql_add_link_t(ql_head_t *head, ql_link_t *link)
15719 {
15720 	link->prev = NULL;
15721 
15722 	if ((link->next = head->first) == NULL)	{
15723 		head->last = link;
15724 	} else {
15725 		head->first->prev = link;
15726 	}
15727 
15728 	head->first = link;
15729 	link->head = head;	/* the queue we're on */
15730 }
15731 
15732 /*
15733  * ql_remove_link
15734  *	Remove a link from the chain.
15735  *
15736  * Input:
15737  *	head = Head of link list.
15738  *	link = link to be removed.
15739  *	LOCK must be already obtained.
15740  *
15741  * Context:
15742  *	Interrupt or Kernel context, no mailbox commands allowed.
15743  */
15744 void
15745 ql_remove_link(ql_head_t *head, ql_link_t *link)
15746 {
15747 	if (link->prev != NULL) {
15748 		if ((link->prev->next = link->next) == NULL) {
15749 			head->last = link->prev;
15750 		} else {
15751 			link->next->prev = link->prev;
15752 		}
15753 	} else if ((head->first = link->next) == NULL) {
15754 		head->last = NULL;
15755 	} else {
15756 		head->first->prev = NULL;
15757 	}
15758 
15759 	/* not on a queue any more */
15760 	link->prev = link->next = NULL;
15761 	link->head = NULL;
15762 }
15763 
15764 /*
15765  * ql_chg_endian
15766  *	Change endianess of byte array.
15767  *
15768  * Input:
15769  *	buf = array pointer.
15770  *	size = size of array in bytes.
15771  *
15772  * Context:
15773  *	Interrupt or Kernel context, no mailbox commands allowed.
15774  */
15775 void
15776 ql_chg_endian(uint8_t buf[], size_t size)
15777 {
15778 	uint8_t byte;
15779 	size_t  cnt1;
15780 	size_t  cnt;
15781 
15782 	cnt1 = size - 1;
15783 	for (cnt = 0; cnt < size / 2; cnt++) {
15784 		byte = buf[cnt1];
15785 		buf[cnt1] = buf[cnt];
15786 		buf[cnt] = byte;
15787 		cnt1--;
15788 	}
15789 }
15790 
15791 /*
15792  * ql_bstr_to_dec
15793  *	Convert decimal byte string to number.
15794  *
15795  * Input:
15796  *	s:	byte string pointer.
15797  *	ans:	interger pointer for number.
15798  *	size:	number of ascii bytes.
15799  *
15800  * Returns:
15801  *	success = number of ascii bytes processed.
15802  *
15803  * Context:
15804  *	Kernel/Interrupt context.
15805  */
15806 static int
15807 ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
15808 {
15809 	int			mul, num, cnt, pos;
15810 	char			*str;
15811 
15812 	/* Calculate size of number. */
15813 	if (size == 0) {
15814 		for (str = s; *str >= '0' && *str <= '9'; str++) {
15815 			size++;
15816 		}
15817 	}
15818 
15819 	*ans = 0;
15820 	for (cnt = 0; *s != '\0' && size; size--, cnt++) {
15821 		if (*s >= '0' && *s <= '9') {
15822 			num = *s++ - '0';
15823 		} else {
15824 			break;
15825 		}
15826 
15827 		for (mul = 1, pos = 1; pos < size; pos++) {
15828 			mul *= 10;
15829 		}
15830 		*ans += num * mul;
15831 	}
15832 
15833 	return (cnt);
15834 }
15835 
15836 /*
15837  * ql_delay
15838  *	Calls delay routine if threads are not suspended, otherwise, busy waits
15839  *	Minimum = 1 tick = 10ms
15840  *
15841  * Input:
15842  *	dly = delay time in microseconds.
15843  *
15844  * Context:
15845  *	Kernel or Interrupt context, no mailbox commands allowed.
15846  */
15847 void
15848 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15849 {
15850 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15851 		drv_usecwait(usecs);
15852 	} else {
15853 		delay(drv_usectohz(usecs));
15854 	}
15855 }
15856 
15857 /*
15858  * ql_stall_drv
15859  *	Stalls one or all driver instances, waits for 30 seconds.
15860  *
15861  * Input:
15862  *	ha:		adapter state pointer or NULL for all.
15863  *	options:	BIT_0 --> leave driver stalled on exit if
15864  *				  failed.
15865  *
15866  * Returns:
15867  *	ql local function return status code.
15868  *
15869  * Context:
15870  *	Kernel context.
15871  */
15872 int
15873 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15874 {
15875 	ql_link_t		*link;
15876 	ql_adapter_state_t	*ha2;
15877 	uint32_t		timer;
15878 
15879 	QL_PRINT_3(CE_CONT, "started\n");
15880 
15881 	/* Wait for 30 seconds for daemons unstall. */
15882 	timer = 3000;
15883 	link = ha == NULL ? ql_hba.first : &ha->hba;
15884 	while (link != NULL && timer) {
15885 		ha2 = link->base_address;
15886 
15887 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15888 
15889 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15890 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15891 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15892 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15893 			link = ha == NULL ? link->next : NULL;
15894 			continue;
15895 		}
15896 
15897 		ql_delay(ha, 10000);
15898 		timer--;
15899 		link = ha == NULL ? ql_hba.first : &ha->hba;
15900 	}
15901 
15902 	if (ha2 != NULL && timer == 0) {
15903 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15904 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15905 		    "unstalled"));
15906 		if (options & BIT_0) {
15907 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15908 		}
15909 		return (QL_FUNCTION_TIMEOUT);
15910 	}
15911 
15912 	QL_PRINT_3(CE_CONT, "done\n");
15913 
15914 	return (QL_SUCCESS);
15915 }
15916 
15917 /*
15918  * ql_restart_driver
15919  *	Restarts one or all driver instances.
15920  *
15921  * Input:
15922  *	ha:	adapter state pointer or NULL for all.
15923  *
15924  * Context:
15925  *	Kernel context.
15926  */
15927 void
15928 ql_restart_driver(ql_adapter_state_t *ha)
15929 {
15930 	ql_link_t		*link;
15931 	ql_adapter_state_t	*ha2;
15932 	uint32_t		timer;
15933 
15934 	QL_PRINT_3(CE_CONT, "started\n");
15935 
15936 	/* Tell all daemons to unstall. */
15937 	link = ha == NULL ? ql_hba.first : &ha->hba;
15938 	while (link != NULL) {
15939 		ha2 = link->base_address;
15940 
15941 		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15942 
15943 		link = ha == NULL ? link->next : NULL;
15944 	}
15945 
15946 	/* Wait for 30 seconds for all daemons unstall. */
15947 	timer = 3000;
15948 	link = ha == NULL ? ql_hba.first : &ha->hba;
15949 	while (link != NULL && timer) {
15950 		ha2 = link->base_address;
15951 
15952 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15953 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15954 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
15955 			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
15956 			    ha2->instance, ha2->vp_index);
15957 			ql_restart_queues(ha2);
15958 			link = ha == NULL ? link->next : NULL;
15959 			continue;
15960 		}
15961 
15962 		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
15963 		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
15964 
15965 		ql_delay(ha, 10000);
15966 		timer--;
15967 		link = ha == NULL ? ql_hba.first : &ha->hba;
15968 	}
15969 
15970 	QL_PRINT_3(CE_CONT, "done\n");
15971 }
15972 
15973 /*
15974  * ql_setup_interrupts
15975  *	Sets up interrupts based on the HBA's and platform's
15976  *	capabilities (e.g., legacy / MSI / FIXED).
15977  *
15978  * Input:
15979  *	ha = adapter state pointer.
15980  *
15981  * Returns:
15982  *	DDI_SUCCESS or DDI_FAILURE.
15983  *
15984  * Context:
15985  *	Kernel context.
15986  */
15987 static int
15988 ql_setup_interrupts(ql_adapter_state_t *ha)
15989 {
15990 	int32_t		rval = DDI_FAILURE;
15991 	int32_t		i;
15992 	int32_t		itypes = 0;
15993 
15994 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15995 
15996 	/*
15997 	 * The Solaris Advanced Interrupt Functions (aif) are only
15998 	 * supported on s10U1 or greater.
15999 	 */
16000 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16001 		EL(ha, "interrupt framework is not supported or is "
16002 		    "disabled, using legacy\n");
16003 		return (ql_legacy_intr(ha));
16004 	} else if (ql_os_release_level == 10) {
16005 		/*
16006 		 * See if the advanced interrupt functions (aif) are
16007 		 * in the kernel
16008 		 */
16009 		void	*fptr = (void *)&ddi_intr_get_supported_types;
16010 
16011 		if (fptr == NULL) {
16012 			EL(ha, "aif is not supported, using legacy "
16013 			    "interrupts (rev)\n");
16014 			return (ql_legacy_intr(ha));
16015 		}
16016 	}
16017 
16018 	/* See what types of interrupts this HBA and platform support */
16019 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16020 	    DDI_SUCCESS) {
16021 		EL(ha, "get supported types failed, rval=%xh, "
16022 		    "assuming FIXED\n", i);
16023 		itypes = DDI_INTR_TYPE_FIXED;
16024 	}
16025 
16026 	EL(ha, "supported types are: %xh\n", itypes);
16027 
16028 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
16029 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16030 		EL(ha, "successful MSI-X setup\n");
16031 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
16032 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16033 		EL(ha, "successful MSI setup\n");
16034 	} else {
16035 		rval = ql_setup_fixed(ha);
16036 	}
16037 
16038 	if (rval != DDI_SUCCESS) {
16039 		EL(ha, "failed, aif, rval=%xh\n", rval);
16040 	} else {
16041 		/*EMPTY*/
16042 		QL_PRINT_3(CE_CONT, "(%d): done\n");
16043 	}
16044 
16045 	return (rval);
16046 }
16047 
16048 /*
16049  * ql_setup_msi
16050  *	Set up aif MSI interrupts
16051  *
16052  * Input:
16053  *	ha = adapter state pointer.
16054  *
16055  * Returns:
16056  *	DDI_SUCCESS or DDI_FAILURE.
16057  *
16058  * Context:
16059  *	Kernel context.
16060  */
16061 static int
16062 ql_setup_msi(ql_adapter_state_t *ha)
16063 {
16064 	int32_t		count = 0;
16065 	int32_t		avail = 0;
16066 	int32_t		actual = 0;
16067 	int32_t		msitype = DDI_INTR_TYPE_MSI;
16068 	int32_t		ret;
16069 	ql_ifunc_t	itrfun[10] = {0};
16070 
16071 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16072 
16073 	if (ql_disable_msi != 0) {
16074 		EL(ha, "MSI is disabled by user\n");
16075 		return (DDI_FAILURE);
16076 	}
16077 
16078 	/* MSI support is only suported on 24xx HBA's. */
16079 	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
16080 		EL(ha, "HBA does not support MSI\n");
16081 		return (DDI_FAILURE);
16082 	}
16083 
16084 	/* Get number of MSI interrupts the system supports */
16085 	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16086 	    DDI_SUCCESS) || count == 0) {
16087 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16088 		return (DDI_FAILURE);
16089 	}
16090 
16091 	/* Get number of available MSI interrupts */
16092 	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16093 	    DDI_SUCCESS) || avail == 0) {
16094 		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16095 		return (DDI_FAILURE);
16096 	}
16097 
16098 	/* MSI requires only 1.  */
16099 	count = 1;
16100 	itrfun[0].ifunc = &ql_isr_aif;
16101 
16102 	/* Allocate space for interrupt handles */
16103 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16104 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16105 
16106 	ha->iflags |= IFLG_INTR_MSI;
16107 
16108 	/* Allocate the interrupts */
16109 	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
16110 	    &actual, 0)) != DDI_SUCCESS || actual < count) {
16111 		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16112 		    "actual=%xh\n", ret, count, actual);
16113 		ql_release_intr(ha);
16114 		return (DDI_FAILURE);
16115 	}
16116 
16117 	ha->intr_cnt = actual;
16118 
16119 	/* Get interrupt priority */
16120 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16121 	    DDI_SUCCESS) {
16122 		EL(ha, "failed, get_pri ret=%xh\n", ret);
16123 		ql_release_intr(ha);
16124 		return (ret);
16125 	}
16126 
16127 	/* Add the interrupt handler */
16128 	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
16129 	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
16130 		EL(ha, "failed, intr_add ret=%xh\n", ret);
16131 		ql_release_intr(ha);
16132 		return (ret);
16133 	}
16134 
16135 	/* Setup mutexes */
16136 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16137 		EL(ha, "failed, mutex init ret=%xh\n", ret);
16138 		ql_release_intr(ha);
16139 		return (ret);
16140 	}
16141 
16142 	/* Get the capabilities */
16143 	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16144 
16145 	/* Enable interrupts */
16146 	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16147 		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16148 		    DDI_SUCCESS) {
16149 			EL(ha, "failed, block enable, ret=%xh\n", ret);
16150 			ql_destroy_mutex(ha);
16151 			ql_release_intr(ha);
16152 			return (ret);
16153 		}
16154 	} else {
16155 		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
16156 			EL(ha, "failed, intr enable, ret=%xh\n", ret);
16157 			ql_destroy_mutex(ha);
16158 			ql_release_intr(ha);
16159 			return (ret);
16160 		}
16161 	}
16162 
16163 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16164 
16165 	return (DDI_SUCCESS);
16166 }
16167 
16168 /*
16169  * ql_setup_msix
16170  *	Set up aif MSI-X interrupts
16171  *
16172  * Input:
16173  *	ha = adapter state pointer.
16174  *
16175  * Returns:
16176  *	DDI_SUCCESS or DDI_FAILURE.
16177  *
16178  * Context:
16179  *	Kernel context.
16180  */
16181 static int
16182 ql_setup_msix(ql_adapter_state_t *ha)
16183 {
16184 	uint16_t	hwvect;
16185 	int32_t		count = 0;
16186 	int32_t		avail = 0;
16187 	int32_t		actual = 0;
16188 	int32_t		msitype = DDI_INTR_TYPE_MSIX;
16189 	int32_t		ret;
16190 	uint32_t	i;
16191 	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};
16192 
16193 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16194 
16195 	if (ql_disable_msix != 0) {
16196 		EL(ha, "MSI-X is disabled by user\n");
16197 		return (DDI_FAILURE);
16198 	}
16199 
16200 	/*
16201 	 * MSI-X support is only available on 24xx HBA's that have
16202 	 * rev A2 parts (revid = 3) or greater.
16203 	 */
16204 	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
16205 	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001))) {
16206 		EL(ha, "HBA does not support MSI-X\n");
16207 		return (DDI_FAILURE);
16208 	}
16209 
16210 	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
16211 		EL(ha, "HBA does not support MSI-X (revid)\n");
16212 		return (DDI_FAILURE);
16213 	}
16214 
16215 	/* Per HP, these HP branded HBA's are not supported with MSI-X */
16216 	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
16217 	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
16218 		EL(ha, "HBA does not support MSI-X (subdevid)\n");
16219 		return (DDI_FAILURE);
16220 	}
16221 
16222 	/* Get the number of 24xx/25xx MSI-X h/w vectors */
16223 	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
16224 	    ql_pci_config_get16(ha, 0x7e) :
16225 	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);
16226 
16227 	EL(ha, "pcie config space hwvect = %d\n", hwvect);
16228 
16229 	if (hwvect < QL_MSIX_MAXAIF) {
16230 		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
16231 		    QL_MSIX_MAXAIF, hwvect);
16232 		return (DDI_FAILURE);
16233 	}
16234 
16235 	/* Get number of MSI-X interrupts the platform h/w supports */
16236 	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
16237 	    DDI_SUCCESS) || count == 0) {
16238 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16239 		return (DDI_FAILURE);
16240 	}
16241 
16242 	/* Get number of available system interrupts */
16243 	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
16244 	    DDI_SUCCESS) || avail == 0) {
16245 		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
16246 		return (DDI_FAILURE);
16247 	}
16248 
16249 	/* Fill out the intr table */
16250 	count = QL_MSIX_MAXAIF;
16251 	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
16252 	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;
16253 
16254 	/* Allocate space for interrupt handles */
16255 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
16256 	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
16257 		ha->hsize = 0;
16258 		EL(ha, "failed, unable to allocate htable space\n");
16259 		return (DDI_FAILURE);
16260 	}
16261 
16262 	ha->iflags |= IFLG_INTR_MSIX;
16263 
16264 	/* Allocate the interrupts */
16265 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
16266 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
16267 	    actual < QL_MSIX_MAXAIF) {
16268 		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
16269 		    "actual=%xh\n", ret, count, actual);
16270 		ql_release_intr(ha);
16271 		return (DDI_FAILURE);
16272 	}
16273 
16274 	ha->intr_cnt = actual;
16275 
16276 	/* Get interrupt priority */
16277 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16278 	    DDI_SUCCESS) {
16279 		EL(ha, "failed, get_pri ret=%xh\n", ret);
16280 		ql_release_intr(ha);
16281 		return (ret);
16282 	}
16283 
16284 	/* Add the interrupt handlers */
16285 	for (i = 0; i < actual; i++) {
16286 		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
16287 		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
16288 			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
16289 			    actual, ret);
16290 			ql_release_intr(ha);
16291 			return (ret);
16292 		}
16293 	}
16294 
16295 	/*
16296 	 * duplicate the rest of the intr's
16297 	 * ddi_intr_dup_handler() isn't working on x86 just yet...
16298 	 */
16299 #ifdef __sparc
16300 	for (i = actual; i < hwvect; i++) {
16301 		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
16302 		    &ha->htable[i])) != DDI_SUCCESS) {
16303 			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
16304 			    i, actual, ret);
16305 			ql_release_intr(ha);
16306 			return (ret);
16307 		}
16308 	}
16309 #endif
16310 
16311 	/* Setup mutexes */
16312 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16313 		EL(ha, "failed, mutex init ret=%xh\n", ret);
16314 		ql_release_intr(ha);
16315 		return (ret);
16316 	}
16317 
16318 	/* Get the capabilities */
16319 	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
16320 
16321 	/* Enable interrupts */
16322 	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
16323 		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
16324 		    DDI_SUCCESS) {
16325 			EL(ha, "failed, block enable, ret=%xh\n", ret);
16326 			ql_destroy_mutex(ha);
16327 			ql_release_intr(ha);
16328 			return (ret);
16329 		}
16330 	} else {
16331 		for (i = 0; i < ha->intr_cnt; i++) {
16332 			if ((ret = ddi_intr_enable(ha->htable[i])) !=
16333 			    DDI_SUCCESS) {
16334 				EL(ha, "failed, intr enable, ret=%xh\n", ret);
16335 				ql_destroy_mutex(ha);
16336 				ql_release_intr(ha);
16337 				return (ret);
16338 			}
16339 		}
16340 	}
16341 
16342 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16343 
16344 	return (DDI_SUCCESS);
16345 }
16346 
16347 /*
16348  * ql_setup_fixed
16349  *	Sets up aif FIXED interrupts
16350  *
16351  * Input:
16352  *	ha = adapter state pointer.
16353  *
16354  * Returns:
16355  *	DDI_SUCCESS or DDI_FAILURE.
16356  *
16357  * Context:
16358  *	Kernel context.
16359  */
16360 static int
16361 ql_setup_fixed(ql_adapter_state_t *ha)
16362 {
16363 	int32_t		count = 0;
16364 	int32_t		actual = 0;
16365 	int32_t		ret;
16366 	uint32_t	i;
16367 
16368 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16369 
16370 	/* Get number of fixed interrupts the system supports */
16371 	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
16372 	    &count)) != DDI_SUCCESS) || count == 0) {
16373 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
16374 		return (DDI_FAILURE);
16375 	}
16376 
16377 	ha->iflags |= IFLG_INTR_FIXED;
16378 
16379 	/* Allocate space for interrupt handles */
16380 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
16381 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
16382 
16383 	/* Allocate the interrupts */
16384 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
16385 	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
16386 	    actual < count) {
16387 		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
16388 		    "actual=%xh\n", ret, count, actual);
16389 		ql_release_intr(ha);
16390 		return (DDI_FAILURE);
16391 	}
16392 
16393 	ha->intr_cnt = actual;
16394 
16395 	/* Get interrupt priority */
16396 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
16397 	    DDI_SUCCESS) {
16398 		EL(ha, "failed, get_pri ret=%xh\n", ret);
16399 		ql_release_intr(ha);
16400 		return (ret);
16401 	}
16402 
16403 	/* Add the interrupt handlers */
16404 	for (i = 0; i < ha->intr_cnt; i++) {
16405 		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
16406 		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
16407 			EL(ha, "failed, intr_add ret=%xh\n", ret);
16408 			ql_release_intr(ha);
16409 			return (ret);
16410 		}
16411 	}
16412 
16413 	/* Setup mutexes */
16414 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
16415 		EL(ha, "failed, mutex init ret=%xh\n", ret);
16416 		ql_release_intr(ha);
16417 		return (ret);
16418 	}
16419 
16420 	/* Enable interrupts */
16421 	for (i = 0; i < ha->intr_cnt; i++) {
16422 		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
16423 			EL(ha, "failed, intr enable, ret=%xh\n", ret);
16424 			ql_destroy_mutex(ha);
16425 			ql_release_intr(ha);
16426 			return (ret);
16427 		}
16428 	}
16429 
16430 	EL(ha, "using FIXED interupts\n");
16431 
16432 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16433 
16434 	return (DDI_SUCCESS);
16435 }
16436 
16437 /*
16438  * ql_disable_intr
16439  *	Disables interrupts
16440  *
16441  * Input:
16442  *	ha = adapter state pointer.
16443  *
16444  * Returns:
16445  *
16446  * Context:
16447  *	Kernel context.
16448  */
16449 static void
16450 ql_disable_intr(ql_adapter_state_t *ha)
16451 {
16452 	uint32_t	i, rval;
16453 
16454 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16455 
16456 	if (!(ha->iflags & IFLG_INTR_AIF)) {
16457 
16458 		/* Disable legacy interrupts */
16459 		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16460 
16461 	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16462 	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16463 
16464 		/* Remove AIF block interrupts (MSI) */
16465 		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16466 		    != DDI_SUCCESS) {
16467 			EL(ha, "failed intr block disable, rval=%x\n", rval);
16468 		}
16469 
16470 	} else {
16471 
16472 		/* Remove AIF non-block interrupts (fixed).  */
16473 		for (i = 0; i < ha->intr_cnt; i++) {
16474 			if ((rval = ddi_intr_disable(ha->htable[i])) !=
16475 			    DDI_SUCCESS) {
16476 				EL(ha, "failed intr disable, intr#=%xh, "
16477 				    "rval=%xh\n", i, rval);
16478 			}
16479 		}
16480 	}
16481 
16482 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16483 }
16484 
16485 /*
16486  * ql_release_intr
16487  *	Releases aif legacy interrupt resources
16488  *
16489  * Input:
16490  *	ha = adapter state pointer.
16491  *
16492  * Returns:
16493  *
16494  * Context:
16495  *	Kernel context.
16496  */
16497 static void
16498 ql_release_intr(ql_adapter_state_t *ha)
16499 {
16500 	int32_t 	i;
16501 
16502 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16503 
16504 	if (!(ha->iflags & IFLG_INTR_AIF)) {
16505 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16506 		return;
16507 	}
16508 
16509 	ha->iflags &= ~(IFLG_INTR_AIF);
16510 	if (ha->htable != NULL && ha->hsize > 0) {
16511 		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
16512 		while (i-- > 0) {
16513 			if (ha->htable[i] == 0) {
16514 				EL(ha, "htable[%x]=0h\n", i);
16515 				continue;
16516 			}
16517 
16518 			(void) ddi_intr_disable(ha->htable[i]);
16519 
16520 			if (i < ha->intr_cnt) {
16521 				(void) ddi_intr_remove_handler(ha->htable[i]);
16522 			}
16523 
16524 			(void) ddi_intr_free(ha->htable[i]);
16525 		}
16526 
16527 		kmem_free(ha->htable, ha->hsize);
16528 		ha->htable = NULL;
16529 	}
16530 
16531 	ha->hsize = 0;
16532 	ha->intr_cnt = 0;
16533 	ha->intr_pri = 0;
16534 	ha->intr_cap = 0;
16535 
16536 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16537 }
16538 
16539 /*
16540  * ql_legacy_intr
16541  *	Sets up legacy interrupts.
16542  *
16543  *	NB: Only to be used if AIF (Advanced Interupt Framework)
16544  *	    if NOT in the kernel.
16545  *
16546  * Input:
16547  *	ha = adapter state pointer.
16548  *
16549  * Returns:
16550  *	DDI_SUCCESS or DDI_FAILURE.
16551  *
16552  * Context:
16553  *	Kernel context.
16554  */
16555 static int
16556 ql_legacy_intr(ql_adapter_state_t *ha)
16557 {
16558 	int	rval = DDI_SUCCESS;
16559 
16560 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16561 
16562 	/* Setup mutexes */
16563 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16564 		EL(ha, "failed, mutex init\n");
16565 		return (DDI_FAILURE);
16566 	}
16567 
16568 	/* Setup standard/legacy interrupt handler */
16569 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16570 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16571 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16572 		    QL_NAME, ha->instance);
16573 		ql_destroy_mutex(ha);
16574 		rval = DDI_FAILURE;
16575 	}
16576 
16577 	if (rval == DDI_SUCCESS) {
16578 		ha->iflags |= IFLG_INTR_LEGACY;
16579 		EL(ha, "using legacy interrupts\n");
16580 	}
16581 
16582 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16583 
16584 	return (rval);
16585 }
16586 
16587 /*
16588  * ql_init_mutex
 *	Initializes mutexes and condition variables.
16590  *
16591  * Input:
16592  *	ha = adapter state pointer.
16593  *
16594  * Returns:
16595  *	DDI_SUCCESS or DDI_FAILURE.
16596  *
16597  * Context:
16598  *	Kernel context.
16599  */
16600 static int
16601 ql_init_mutex(ql_adapter_state_t *ha)
16602 {
16603 	int	ret;
16604 	void	*intr;
16605 
16606 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16607 
16608 	if (ha->iflags & IFLG_INTR_AIF) {
16609 		intr = (void *)(uintptr_t)ha->intr_pri;
16610 	} else {
16611 		/* Get iblock cookies to initialize mutexes */
16612 		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16613 		    &ha->iblock_cookie)) != DDI_SUCCESS) {
16614 			EL(ha, "failed, get_iblock: %xh\n", ret);
16615 			return (DDI_FAILURE);
16616 		}
16617 		intr = (void *)ha->iblock_cookie;
16618 	}
16619 
16620 	/* mutexes to protect the adapter state structure. */
16621 	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16622 
16623 	/* mutex to protect the ISP response ring. */
16624 	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16625 
16626 	/* mutex to protect the mailbox registers. */
16627 	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16628 
16629 	/* power management protection */
16630 	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16631 
16632 	/* Mailbox wait and interrupt conditional variable. */
16633 	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16634 	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16635 
16636 	/* mutex to protect the ISP request ring. */
16637 	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16638 
16639 	/* Unsolicited buffer conditional variable. */
16640 	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16641 
16642 	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16643 	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16644 
16645 	/* Suspended conditional variable. */
16646 	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16647 
16648 	/* mutex to protect task daemon context. */
16649 	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16650 
16651 	/* Task_daemon thread conditional variable. */
16652 	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16653 
16654 	/* mutex to protect diag port manage interface */
16655 	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16656 
16657 	/* mutex to protect per instance f/w dump flags and buffer */
16658 	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16659 
16660 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16661 
16662 	return (DDI_SUCCESS);
16663 }
16664 
16665 /*
16666  * ql_destroy_mutex
 *	Destroys mutexes and condition variables.
16668  *
16669  * Input:
16670  *	ha = adapter state pointer.
16671  *
16672  * Returns:
16673  *
16674  * Context:
16675  *	Kernel context.
16676  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Tear down in the reverse order of the initialization done in
	 * ql_init_mutex().  Callers must ensure none of these locks or
	 * condition variables are still in use.
	 */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16700 
16701 /*
16702  * ql_fwmodule_resolve
16703  *	Loads and resolves external firmware module and symbols
16704  *
16705  * Input:
16706  *	ha:		adapter state pointer.
16707  *
16708  * Returns:
16709  *	ql local function return status code:
 *		QL_SUCCESS - external f/w module and symbols resolved
16711  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16712  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16713  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16714  * Context:
16715  *	Kernel context.
16716  *
16717  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
 * could switch to a tighter scope around actual download (and add an extra
16719  * ddi_modopen for module opens that occur before root is mounted).
16720  *
16721  */
16722 uint32_t
16723 ql_fwmodule_resolve(ql_adapter_state_t *ha)
16724 {
16725 	int8_t			module[128];
16726 	int8_t			fw_version[128];
16727 	uint32_t		rval = QL_SUCCESS;
16728 	caddr_t			code, code02;
16729 	uint8_t			*p_ucfw;
16730 	uint16_t		*p_usaddr, *p_uslen;
16731 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
16732 	uint32_t		*p_uiaddr02, *p_uilen02;
16733 	struct fw_table		*fwt;
16734 	extern struct fw_table	fw_table[];
16735 
16736 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16737 
16738 	if (ha->fw_module != NULL) {
16739 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16740 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16741 		    ha->fw_subminor_version);
16742 		return (rval);
16743 	}
16744 
16745 	/* make sure the fw_class is in the fw_table of supported classes */
16746 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16747 		if (fwt->fw_class == ha->fw_class)
16748 			break;			/* match */
16749 	}
16750 	if (fwt->fw_version == NULL) {
16751 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16752 		    "in driver's fw_table", QL_NAME, ha->instance,
16753 		    ha->fw_class);
16754 		return (QL_FW_NOT_SUPPORTED);
16755 	}
16756 
16757 	/*
16758 	 * open the module related to the fw_class
16759 	 */
16760 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
16761 	    ha->fw_class);
16762 
16763 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
16764 	if (ha->fw_module == NULL) {
16765 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
16766 		    QL_NAME, ha->instance, module);
16767 		return (QL_FWMODLOAD_FAILED);
16768 	}
16769 
16770 	/*
16771 	 * resolve the fw module symbols, data types depend on fw_class
16772 	 */
16773 
16774 	switch (ha->fw_class) {
16775 	case 0x2200:
16776 	case 0x2300:
16777 	case 0x6322:
16778 
16779 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16780 		    NULL)) == NULL) {
16781 			rval = QL_FWSYM_NOT_FOUND;
16782 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16783 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16784 		    "risc_code_addr01", NULL)) == NULL) {
16785 			rval = QL_FWSYM_NOT_FOUND;
16786 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16787 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16788 		    "risc_code_length01", NULL)) == NULL) {
16789 			rval = QL_FWSYM_NOT_FOUND;
16790 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16791 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16792 		    "firmware_version", NULL)) == NULL) {
16793 			rval = QL_FWSYM_NOT_FOUND;
16794 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16795 		}
16796 
16797 		if (rval == QL_SUCCESS) {
16798 			ha->risc_fw[0].code = code;
16799 			ha->risc_fw[0].addr = *p_usaddr;
16800 			ha->risc_fw[0].length = *p_uslen;
16801 
16802 			(void) snprintf(fw_version, sizeof (fw_version),
16803 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16804 		}
16805 		break;
16806 
16807 	case 0x2400:
16808 	case 0x2500:
16809 	case 0x8100:
16810 
16811 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16812 		    NULL)) == NULL) {
16813 			rval = QL_FWSYM_NOT_FOUND;
16814 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16815 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16816 		    "risc_code_addr01", NULL)) == NULL) {
16817 			rval = QL_FWSYM_NOT_FOUND;
16818 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16819 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16820 		    "risc_code_length01", NULL)) == NULL) {
16821 			rval = QL_FWSYM_NOT_FOUND;
16822 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16823 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16824 		    "firmware_version", NULL)) == NULL) {
16825 			rval = QL_FWSYM_NOT_FOUND;
16826 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16827 		}
16828 
16829 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16830 		    NULL)) == NULL) {
16831 			rval = QL_FWSYM_NOT_FOUND;
16832 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16833 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16834 		    "risc_code_addr02", NULL)) == NULL) {
16835 			rval = QL_FWSYM_NOT_FOUND;
16836 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16837 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16838 		    "risc_code_length02", NULL)) == NULL) {
16839 			rval = QL_FWSYM_NOT_FOUND;
16840 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16841 		}
16842 
16843 		if (rval == QL_SUCCESS) {
16844 			ha->risc_fw[0].code = code;
16845 			ha->risc_fw[0].addr = *p_uiaddr;
16846 			ha->risc_fw[0].length = *p_uilen;
16847 			ha->risc_fw[1].code = code02;
16848 			ha->risc_fw[1].addr = *p_uiaddr02;
16849 			ha->risc_fw[1].length = *p_uilen02;
16850 
16851 			(void) snprintf(fw_version, sizeof (fw_version),
16852 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16853 		}
16854 		break;
16855 
16856 	default:
16857 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16858 		rval = QL_FW_NOT_SUPPORTED;
16859 	}
16860 
16861 	if (rval != QL_SUCCESS) {
16862 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16863 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16864 		if (ha->fw_module != NULL) {
16865 			(void) ddi_modclose(ha->fw_module);
16866 			ha->fw_module = NULL;
16867 		}
16868 	} else {
16869 		/*
16870 		 * check for firmware version mismatch between module and
16871 		 * compiled in fw_table version.
16872 		 */
16873 
16874 		if (strcmp(fwt->fw_version, fw_version) != 0) {
16875 
16876 			/*
16877 			 * If f/w / driver version mismatches then
16878 			 * return a successful status -- however warn
16879 			 * the user that this is NOT recommended.
16880 			 */
16881 
16882 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16883 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16884 			    ha->instance, ha->fw_class, fwt->fw_version,
16885 			    fw_version);
16886 
16887 			ha->cfg_flags |= CFG_FW_MISMATCH;
16888 		} else {
16889 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16890 		}
16891 	}
16892 
16893 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16894 
16895 	return (rval);
16896 }
16897 
16898 /*
16899  * ql_port_state
16900  *	Set the state on all adapter ports.
16901  *
16902  * Input:
16903  *	ha:	parent adapter state pointer.
16904  *	state:	port state.
16905  *	flags:	task daemon flags to set.
16906  *
16907  * Context:
16908  *	Interrupt or Kernel context, no mailbox commands allowed.
16909  */
16910 void
16911 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
16912 {
16913 	ql_adapter_state_t	*vha;
16914 
16915 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16916 
16917 	TASK_DAEMON_LOCK(ha);
16918 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
16919 		if (FC_PORT_STATE_MASK(vha->state) != state) {
16920 			vha->state = state != FC_STATE_OFFLINE ?
16921 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
16922 			vha->task_daemon_flags |= flags;
16923 		}
16924 	}
16925 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
16926 	TASK_DAEMON_UNLOCK(ha);
16927 
16928 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16929 }
16930 
16931 /*
16932  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16933  *
16934  * Input:	Pointer to the adapter state structure.
16935  * Returns:	Success or Failure.
16936  * Context:	Kernel context.
16937  */
16938 int
16939 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16940 {
16941 	int	rval = DDI_SUCCESS;
16942 
16943 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16944 
16945 	ha->el_trace_desc =
16946 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16947 
16948 	if (ha->el_trace_desc == NULL) {
16949 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16950 		    QL_NAME, ha->instance);
16951 		rval = DDI_FAILURE;
16952 	} else {
16953 		ha->el_trace_desc->next		= 0;
16954 		ha->el_trace_desc->trace_buffer =
16955 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16956 
16957 		if (ha->el_trace_desc->trace_buffer == NULL) {
16958 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16959 			    QL_NAME, ha->instance);
16960 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16961 			rval = DDI_FAILURE;
16962 		} else {
16963 			ha->el_trace_desc->trace_buffer_size =
16964 			    EL_TRACE_BUF_SIZE;
16965 			mutex_init(&ha->el_trace_desc->mutex, NULL,
16966 			    MUTEX_DRIVER, NULL);
16967 		}
16968 	}
16969 
16970 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16971 
16972 	return (rval);
16973 }
16974 
16975 /*
16976  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
16977  *
16978  * Input:	Pointer to the adapter state structure.
16979  * Returns:	Success or Failure.
16980  * Context:	Kernel context.
16981  */
16982 int
16983 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
16984 {
16985 	int	rval = DDI_SUCCESS;
16986 
16987 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16988 
16989 	if (ha->el_trace_desc == NULL) {
16990 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
16991 		    QL_NAME, ha->instance);
16992 		rval = DDI_FAILURE;
16993 	} else {
16994 		if (ha->el_trace_desc->trace_buffer != NULL) {
16995 			kmem_free(ha->el_trace_desc->trace_buffer,
16996 			    ha->el_trace_desc->trace_buffer_size);
16997 		}
16998 		mutex_destroy(&ha->el_trace_desc->mutex);
16999 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17000 	}
17001 
17002 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17003 
17004 	return (rval);
17005 }
17006 
17007 /*
17008  * els_cmd_text	- Return a pointer to a string describing the command
17009  *
17010  * Input:	els_cmd = the els command opcode.
17011  * Returns:	pointer to a string.
17012  * Context:	Kernel context.
17013  */
17014 char *
17015 els_cmd_text(int els_cmd)
17016 {
17017 	cmd_table_t *entry = &els_cmd_tbl[0];
17018 
17019 	return (cmd_text(entry, els_cmd));
17020 }
17021 
17022 /*
17023  * mbx_cmd_text - Return a pointer to a string describing the command
17024  *
17025  * Input:	mbx_cmd = the mailbox command opcode.
17026  * Returns:	pointer to a string.
17027  * Context:	Kernel context.
17028  */
17029 char *
17030 mbx_cmd_text(int mbx_cmd)
17031 {
17032 	cmd_table_t *entry = &mbox_cmd_tbl[0];
17033 
17034 	return (cmd_text(entry, mbx_cmd));
17035 }
17036 
17037 /*
17038  * cmd_text	Return a pointer to a string describing the command
17039  *
17040  * Input:	entry = the command table
17041  *		cmd = the command.
17042  * Returns:	pointer to a string.
17043  * Context:	Kernel context.
17044  */
17045 char *
17046 cmd_text(cmd_table_t *entry, int cmd)
17047 {
17048 	for (; entry->cmd != 0; entry++) {
17049 		if (entry->cmd == cmd) {
17050 			break;
17051 		}
17052 	}
17053 	return (entry->string);
17054 }
17055 
17056 /*
17057  * ql_els_24xx_mbox_cmd_iocb - els request indication.
17058  *
17059  * Input:	ha = adapter state pointer.
17060  *		srb = scsi request block pointer.
17061  *		arg = els passthru entry iocb pointer.
17062  * Returns:
17063  * Context:	Kernel context.
17064  */
17065 void
17066 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17067 {
17068 	els_descriptor_t	els_desc;
17069 
17070 	/* Extract the ELS information */
17071 	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17072 
17073 	/* Construct the passthru entry */
17074 	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17075 
17076 	/* Ensure correct endianness */
17077 	ql_isp_els_handle_cmd_endian(ha, srb);
17078 }
17079 
17080 /*
17081  * ql_fca_isp_els_request - Extract into an els descriptor the info required
17082  *			    to build an els_passthru iocb from an fc packet.
17083  *
17084  * Input:	ha = adapter state pointer.
17085  *		pkt = fc packet pointer
17086  *		els_desc = els descriptor pointer
17087  * Returns:
17088  * Context:	Kernel context.
17089  */
17090 static void
17091 ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
17092     els_descriptor_t *els_desc)
17093 {
17094 	ls_code_t	els;
17095 
17096 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17097 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17098 
17099 	els_desc->els = els.ls_code;
17100 
17101 	els_desc->els_handle = ha->hba_buf.acc_handle;
17102 	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
17103 	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
17104 	/* if n_port_handle is not < 0x7d use 0 */
17105 	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17106 		els_desc->n_port_handle = ha->n_port->n_port_handle;
17107 	} else {
17108 		els_desc->n_port_handle = 0;
17109 	}
17110 	els_desc->control_flags = 0;
17111 	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
17112 	/*
17113 	 * Transmit DSD. This field defines the Fibre Channel Frame payload
17114 	 * (without the frame header) in system memory.
17115 	 */
17116 	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
17117 	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
17118 	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
17119 
17120 	els_desc->rsp_byte_count = pkt->pkt_rsplen;
17121 	/*
17122 	 * Receive DSD. This field defines the ELS response payload buffer
17123 	 * for the ISP24xx firmware transferring the received ELS
17124 	 * response frame to a location in host memory.
17125 	 */
17126 	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
17127 	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
17128 	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
17129 }
17130 
17131 /*
17132  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17133  * using the els descriptor.
17134  *
17135  * Input:	ha = adapter state pointer.
17136  *		els_desc = els descriptor pointer.
17137  *		els_entry = els passthru entry iocb pointer.
17138  * Returns:
17139  * Context:	Kernel context.
17140  */
17141 static void
17142 ql_isp_els_request_ctor(els_descriptor_t *els_desc,
17143     els_passthru_entry_t *els_entry)
17144 {
17145 	uint32_t	*ptr32;
17146 
17147 	/*
17148 	 * Construct command packet.
17149 	 */
17150 	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
17151 	    (uint8_t)ELS_PASSTHRU_TYPE);
17152 	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
17153 	    els_desc->n_port_handle);
17154 	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
17155 	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
17156 	    (uint32_t)0);
17157 	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
17158 	    els_desc->els);
17159 	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
17160 	    els_desc->d_id.b.al_pa);
17161 	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
17162 	    els_desc->d_id.b.area);
17163 	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
17164 	    els_desc->d_id.b.domain);
17165 	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
17166 	    els_desc->s_id.b.al_pa);
17167 	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
17168 	    els_desc->s_id.b.area);
17169 	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
17170 	    els_desc->s_id.b.domain);
17171 	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
17172 	    els_desc->control_flags);
17173 	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
17174 	    els_desc->rsp_byte_count);
17175 	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
17176 	    els_desc->cmd_byte_count);
17177 	/* Load transmit data segments and count. */
17178 	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
17179 	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
17180 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
17181 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
17182 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
17183 	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
17184 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
17185 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
17186 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
17187 }
17188 
17189 /*
17190  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17191  *				  in host memory.
17192  *
17193  * Input:	ha = adapter state pointer.
17194  *		srb = scsi request block
17195  * Returns:
17196  * Context:	Kernel context.
17197  */
17198 void
17199 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17200 {
17201 	ls_code_t	els;
17202 	fc_packet_t	*pkt;
17203 	uint8_t		*ptr;
17204 
17205 	pkt = srb->pkt;
17206 
17207 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17208 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17209 
17210 	ptr = (uint8_t *)pkt->pkt_cmd;
17211 
17212 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17213 }
17214 
17215 /*
17216  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17217  *				  in host memory.
17218  * Input:	ha = adapter state pointer.
17219  *		srb = scsi request block
17220  * Returns:
17221  * Context:	Kernel context.
17222  */
17223 void
17224 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17225 {
17226 	ls_code_t	els;
17227 	fc_packet_t	*pkt;
17228 	uint8_t		*ptr;
17229 
17230 	pkt = srb->pkt;
17231 
17232 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17233 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17234 
17235 	ptr = (uint8_t *)pkt->pkt_resp;
17236 	BIG_ENDIAN_32(&els);
17237 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17238 }
17239 
17240 /*
17241  * ql_isp_els_handle_endian - els requests/responses must be in big endian
17242  *			      in host memory.
17243  * Input:	ha = adapter state pointer.
17244  *		ptr = els request/response buffer pointer.
17245  *		ls_code = els command code.
17246  * Returns:
17247  * Context:	Kernel context.
17248  */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Walk the payload field-by-field, byte-swapping each multi-byte
	 * field in place with the BIG_ENDIAN_* macros and advancing ptr
	 * past it.  The offsets mirror the PLOGI/PRLI wire layouts;
	 * fields that are plain byte strings (names, class parameters)
	 * are skipped without swapping.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name (8-byte string, no swap) */
		ptr += 8;		/* Node Name (8-byte string, no swap) */
		ptr += 8;		/* Class 1 (skipped, no swap) */
		ptr += 16;		/* Class 2 (skipped, no swap) */
		ptr += 16;		/* Class 3 service parameters follow */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type (byte fields, no swap) */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Unhandled ELS codes are logged and left untouched. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17307 
17308 /*
17309  * ql_n_port_plogi
17310  *	In N port 2 N port topology where an N Port has logged in with the
17311  *	firmware because it has the N_Port login initiative, we send up
17312  *	a plogi by proxy which stimulates the login procedure to continue.
17313  *
17314  * Input:
17315  *	ha = adapter state pointer.
17316  * Returns:
17317  *
17318  * Context:
17319  *	Kernel context.
17320  */
17321 static int
17322 ql_n_port_plogi(ql_adapter_state_t *ha)
17323 {
17324 	int		rval;
17325 	ql_tgt_t	*tq;
17326 	ql_head_t done_q = { NULL, NULL };
17327 
17328 	rval = QL_SUCCESS;
17329 
17330 	if (ha->topology & QL_N_PORT) {
17331 		/* if we're doing this the n_port_handle must be good */
17332 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17333 			tq = ql_loop_id_to_queue(ha,
17334 			    ha->n_port->n_port_handle);
17335 			if (tq != NULL) {
17336 				(void) ql_send_plogi(ha, tq, &done_q);
17337 			} else {
17338 				EL(ha, "n_port_handle = %x, tq = %x\n",
17339 				    ha->n_port->n_port_handle, tq);
17340 			}
17341 		} else {
17342 			EL(ha, "n_port_handle = %x, tq = %x\n",
17343 			    ha->n_port->n_port_handle, tq);
17344 		}
17345 		if (done_q.first != NULL) {
17346 			ql_done(done_q.first);
17347 		}
17348 	}
17349 	return (rval);
17350 }
17351 
17352 /*
17353  * Compare two WWNs. The NAA is omitted for comparison.
17354  *
17355  * Note particularly that the indentation used in this
17356  * function  isn't according to Sun recommendations. It
17357  * is indented to make reading a bit easy.
17358  *
17359  * Return Values:
17360  *   if first == second return  0
17361  *   if first > second  return  1
17362  *   if first < second  return -1
17363  */
17364 int
17365 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17366 {
17367 	la_wwn_t t1, t2;
17368 	int rval;
17369 
17370 	EL(ha, "WWPN=%08x%08x\n",
17371 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17372 	EL(ha, "WWPN=%08x%08x\n",
17373 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17374 	/*
17375 	 * Fibre Channel protocol is big endian, so compare
17376 	 * as big endian values
17377 	 */
17378 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17379 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17380 
17381 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17382 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17383 
17384 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
17385 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
17386 			rval = 0;
17387 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17388 			rval = 1;
17389 		} else {
17390 			rval = -1;
17391 		}
17392 	} else {
17393 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
17394 			rval = 1;
17395 		} else {
17396 			rval = -1;
17397 		}
17398 	}
17399 	return (rval);
17400 }
17401 
17402 /*
17403  * ql_wait_for_td_stop
17404  *	Wait for task daemon to stop running.  Internal command timeout
17405  *	is approximately 30 seconds, so it may help in some corner
17406  *	cases to wait that long
17407  *
17408  * Input:
17409  *	ha = adapter state pointer.
17410  *
17411  * Returns:
17412  *	DDI_SUCCESS or DDI_FAILURE.
17413  *
17414  * Context:
17415  *	Kernel context.
17416  */
17417 
17418 static int
17419 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17420 {
17421 	int	rval = DDI_FAILURE;
17422 	UINT16	wait_cnt;
17423 
17424 	for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17425 		/* The task daemon clears the stop flag on exit. */
17426 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17427 			if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17428 			    ddi_in_panic()) {
17429 				drv_usecwait(10000);
17430 			} else {
17431 				delay(drv_usectohz(10000));
17432 			}
17433 		} else {
17434 			rval = DDI_SUCCESS;
17435 			break;
17436 		}
17437 	}
17438 	return (rval);
17439 }
17440