1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88 static opaque_t ql_get_device(opaque_t, fc_portid_t);
89 
90 /*
91  * FCA Driver Support Function Prototypes.
92  */
93 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95     ql_srb_t *);
96 static void ql_task_daemon(void *);
97 static void ql_task_thread(ql_adapter_state_t *);
98 static void ql_unsol_callback(ql_srb_t *);
99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100     fc_unsol_buf_t *);
101 static void ql_timer(void *);
102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104     uint32_t *, uint32_t *);
105 static void ql_halt(ql_adapter_state_t *, int);
106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_login_port(ql_adapter_state_t *, port_id_t);
122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130     ql_srb_t *);
131 static int ql_kstat_update(kstat_t *, int);
132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135 static void ql_rst_aen(ql_adapter_state_t *);
136 static void ql_restart_queues(ql_adapter_state_t *);
137 static void ql_abort_queues(ql_adapter_state_t *);
138 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
139 static void ql_idle_check(ql_adapter_state_t *);
140 static int ql_loop_resync(ql_adapter_state_t *);
141 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143 static int ql_save_config_regs(dev_info_t *);
144 static int ql_restore_config_regs(dev_info_t *);
145 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
146 static int ql_handle_rscn_update(ql_adapter_state_t *);
147 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
148 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
149 static int ql_dump_firmware(ql_adapter_state_t *);
150 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
154 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
155 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
156     void *);
157 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
158     uint8_t);
159 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
160 static int ql_suspend_adapter(ql_adapter_state_t *);
161 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
162 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
163 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
164 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
165 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
166 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
167 static int ql_setup_interrupts(ql_adapter_state_t *);
168 static int ql_setup_msi(ql_adapter_state_t *);
169 static int ql_setup_msix(ql_adapter_state_t *);
170 static int ql_setup_fixed(ql_adapter_state_t *);
171 static void ql_release_intr(ql_adapter_state_t *);
172 static void ql_disable_intr(ql_adapter_state_t *);
173 static int ql_legacy_intr(ql_adapter_state_t *);
174 static int ql_init_mutex(ql_adapter_state_t *);
175 static void ql_destroy_mutex(ql_adapter_state_t *);
176 static void ql_iidma(ql_adapter_state_t *);
177 
178 static int ql_n_port_plogi(ql_adapter_state_t *);
179 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
180     els_descriptor_t *);
181 static void ql_isp_els_request_ctor(els_descriptor_t *,
182     els_passthru_entry_t *);
183 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
184 static int ql_wait_for_td_stop(ql_adapter_state_t *ha);
185 
186 /*
187  * Global data
188  */
189 static uint8_t	ql_enable_pm = 1;
190 static int	ql_flash_sbus_fpga = 0;
191 uint32_t	ql_os_release_level;
192 uint32_t	ql_disable_aif = 0;
193 uint32_t	ql_disable_msi = 0;
194 uint32_t	ql_disable_msix = 0;
195 
196 /* Timer routine variables. */
197 static timeout_id_t	ql_timer_timeout_id = NULL;
198 static clock_t		ql_timer_ticks;
199 
200 /* Soft state head pointer. */
201 void *ql_state = NULL;
202 
203 /* Head adapter link. */
204 ql_head_t ql_hba = {
205 	NULL,
206 	NULL
207 };
208 
209 /* Global hba index */
210 uint32_t ql_gfru_hba_index = 1;
211 
212 /*
213  * Some IP defines and globals
214  */
215 uint32_t	ql_ip_buffer_count = 128;
216 uint32_t	ql_ip_low_water = 10;
217 uint8_t		ql_ip_fast_post_count = 5;
218 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
219 
/*
 * Device AL_PA to Device Head Queue index array.
 * 256 entries, indexed directly by the 8-bit arbitrated-loop physical
 * address (AL_PA).
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
249 
/*
 * Device loop_id to ALPA array.
 * 126 entries, indexed by loop ID (0-125); the inverse direction of
 * ql_alpa_to_index above.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
266 
/*
 * Per-chip-family register offset tables (reg_off_t).
 * Offsets the chip does not implement are filled with 0xff (or 0x00 for
 * the 2200's missing upper mailboxes), per the inline notes below.
 */

/* 2200 register offsets */
static reg_off_t reg_off_2200 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
	0x00, 0x00, /* intr info lo, hi */
	24, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x00,
	/* 2200 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};

/* 2300 register offsets */
static reg_off_t reg_off_2300 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
	0x18, 0x1A, /* intr info lo, hi */
	32, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x1c,
	/* 2300 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};

/* 2400/2500 register offsets (non-static: shared with other modules) */
reg_off_t reg_off_2400_2500 = {
	0x00, 0x04,		/* flash_address, flash_data */
	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
	/* 2400 does not have semaphore, nvram */
	0x14, 0x18,
	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
	0x44, 0x46,		/* intr info lo, hi */
	32,			/* Number of mailboxes */
	/* Mailbox register offsets */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
	0xff, 0xff, 0xff, 0xff,
	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
	0xff,			/* host to host sema */
	0x2c, 0x30,		/* pri_req_in, pri_req_out */
	0x3c, 0x40,		/* atio_req_in, atio_req_out */
	0x54			/* io_base_addr */
};

/*
 * Mutexes protecting variables shared by all instances of the driver;
 * initialized in _init() and destroyed in _fini().
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;
333 
/* DMA access attribute structure (little-endian, strictly ordered). */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * I/O DMA attributes structures.
 * The two templates below differ only in the high end of the address
 * range (64-bit vs 32-bit capable DMA).
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * Per-traffic-class DMA attribute templates. These are copied from the
 * generic 32/64-bit attributes above in _init(), which then overrides
 * each pair's scatter/gather list length.
 */
static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
387 
/*
 * Static declarations of cb_ops entry point functions...
 * Only open, close and ioctl are implemented; all other entries are
 * nodev/nochpoll stubs.
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};

/*
 * Static declarations of dev_ops entry point functions...
 * Note: the FC transport may adjust this table via fc_fca_init() in
 * _init() before mod_install().
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};

/* ELS command code to text converter */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

char qlc_driver_version[] = QL_VERSION;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
448 
449 /* ************************************************************************ */
450 /*				Loadable Module Routines.		    */
451 /* ************************************************************************ */
452 
453 /*
454  * _init
455  *	Initializes a loadable module. It is called before any other
456  *	routine in a loadable module.
457  *
458  * Returns:
459  *	0 = success
460  *
461  * Context:
462  *	Kernel context.
463  */
int
_init(void)
{
	uint16_t	w16;
	int		rval = 0;

	/*
	 * Get OS major release level: scan past the first '.' in
	 * utsname.release (e.g. "5.11") and decode the digits after it,
	 * leaving the result in the global ql_os_release_level.
	 */
	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
		if (utsname.release[w16] == '.') {
			w16++;
			break;
		}
	}
	if (w16 < sizeof (utsname.release)) {
		(void) ql_bstr_to_dec(&utsname.release[w16],
		    &ql_os_release_level, 0);
	} else {
		/* No '.' found; release level is unknown. */
		ql_os_release_level = 0;
	}
	if (ql_os_release_level < 6) {
		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
		    QL_NAME, ql_os_release_level);
		rval = EINVAL;
	}
	if (ql_os_release_level == 6) {
		/* Solaris 2.6: restrict DMA transfer counts to 24 bits. */
		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
	}

	if (rval == 0) {
		rval = ddi_soft_state_init(&ql_state,
		    sizeof (ql_adapter_state_t), 0);
	}
	if (rval == 0) {
		/* allow the FC Transport to tweak the dev_ops */
		fc_fca_init(&ql_devops);

		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
		rval = mod_install(&modlinkage);
		if (rval != 0) {
			/* Install failed: undo everything set up above. */
			mutex_destroy(&ql_global_hw_mutex);
			mutex_destroy(&ql_global_mutex);
			mutex_destroy(&ql_global_el_mutex);
			ddi_soft_state_fini(&ql_state);
		} else {
			/*
			 * Seed the per-class DMA attribute templates from
			 * the generic 32/64-bit attributes (which must be
			 * in final form by now), then give each class its
			 * own scatter/gather list length.
			 */
			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCSM_CMD_SGLLEN;
			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCSM_RSP_SGLLEN;
			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCIP_CMD_SGLLEN;
			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCIP_RSP_SGLLEN;
			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
			    QL_FCP_CMD_SGLLEN;
			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
			    QL_FCP_RSP_SGLLEN;
		}
	}

	if (rval != 0) {
		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
		    QL_NAME);
	}

	return (rval);
}
554 
555 /*
556  * _fini
557  *	Prepares a module for unloading. It is called when the system
558  *	wants to unload a module. If the module determines that it can
559  *	be unloaded, then _fini() returns the value returned by
560  *	mod_remove(). Upon successful return from _fini() no other
561  *	routine in the module will be called before _init() is called.
562  *
563  * Returns:
564  *	0 = success
565  *
566  * Context:
567  *	Kernel context.
568  */
569 int
570 _fini(void)
571 {
572 	int	rval;
573 
574 	rval = mod_remove(&modlinkage);
575 	if (rval == 0) {
576 		mutex_destroy(&ql_global_hw_mutex);
577 		mutex_destroy(&ql_global_mutex);
578 		mutex_destroy(&ql_global_el_mutex);
579 		ddi_soft_state_fini(&ql_state);
580 	}
581 
582 	return (rval);
583 }
584 
585 /*
586  * _info
587  *	Returns information about loadable module.
588  *
589  * Input:
590  *	modinfo = pointer to module information structure.
591  *
592  * Returns:
593  *	Value returned by mod_info().
594  *
595  * Context:
596  *	Kernel context.
597  */
598 int
599 _info(struct modinfo *modinfop)
600 {
601 	return (mod_info(&modlinkage, modinfop));
602 }
603 
604 /* ************************************************************************ */
605 /*			dev_ops functions				    */
606 /* ************************************************************************ */
607 
608 /*
609  * ql_getinfo
610  *	Returns the pointer associated with arg when cmd is
611  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
612  *	instance number associated with arg when cmd is set
613  *	to DDI_INFO_DEV2INSTANCE.
614  *
615  * Input:
616  *	dip = Do not use.
617  *	cmd = command argument.
618  *	arg = command specific argument.
619  *	resultp = pointer to where request information is stored.
620  *
621  * Returns:
622  *	DDI_SUCCESS or DDI_FAILURE.
623  *
624  * Context:
625  *	Kernel context.
626  */
627 /* ARGSUSED */
628 static int
629 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
630 {
631 	ql_adapter_state_t	*ha;
632 	int			minor;
633 	int			rval = DDI_FAILURE;
634 
635 	minor = (int)(getminor((dev_t)arg));
636 	ha = ddi_get_soft_state(ql_state, minor);
637 	if (ha == NULL) {
638 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
639 		    getminor((dev_t)arg));
640 		*resultp = NULL;
641 		return (rval);
642 	}
643 
644 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
645 
646 	switch (cmd) {
647 	case DDI_INFO_DEVT2DEVINFO:
648 		*resultp = ha->dip;
649 		rval = DDI_SUCCESS;
650 		break;
651 	case DDI_INFO_DEVT2INSTANCE:
652 		*resultp = (void *)(uintptr_t)(ha->instance);
653 		rval = DDI_SUCCESS;
654 		break;
655 	default:
656 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
657 		rval = DDI_FAILURE;
658 		break;
659 	}
660 
661 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
662 
663 	return (rval);
664 }
665 
666 /*
667  * ql_attach
668  *	Configure and attach an instance of the driver
669  *	for a port.
670  *
671  * Input:
672  *	dip = pointer to device information structure.
673  *	cmd = attach type.
674  *
675  * Returns:
676  *	DDI_SUCCESS or DDI_FAILURE.
677  *
678  * Context:
679  *	Kernel context.
680  */
681 static int
682 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
683 {
684 	uint32_t		size;
685 	int			rval;
686 	int			instance;
687 	uint_t			progress = 0;
688 	char			*buf;
689 	ushort_t		caps_ptr, cap;
690 	fc_fca_tran_t		*tran;
691 	ql_adapter_state_t	*ha = NULL;
692 
693 	static char *pmcomps[] = {
694 		NULL,
695 		PM_LEVEL_D3_STR,		/* Device OFF */
696 		PM_LEVEL_D0_STR,		/* Device ON */
697 	};
698 
699 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
700 	    ddi_get_instance(dip), cmd);
701 
702 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
703 
704 	switch (cmd) {
705 	case DDI_ATTACH:
706 		/* first get the instance */
707 		instance = ddi_get_instance(dip);
708 
709 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
710 		    QL_NAME, instance, QL_VERSION);
711 
712 		/* Correct OS version? */
713 		if (ql_os_release_level != 11) {
714 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
715 			    "11", QL_NAME, instance);
716 			goto attach_failed;
717 		}
718 
719 		/* Hardware is installed in a DMA-capable slot? */
720 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
721 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
722 			    instance);
723 			goto attach_failed;
724 		}
725 
726 		/* No support for high-level interrupts */
727 		if (ddi_intr_hilevel(dip, 0) != 0) {
728 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
729 			    " not supported", QL_NAME, instance);
730 			goto attach_failed;
731 		}
732 
733 		/* Allocate our per-device-instance structure */
734 		if (ddi_soft_state_zalloc(ql_state,
735 		    instance) != DDI_SUCCESS) {
736 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
737 			    QL_NAME, instance);
738 			goto attach_failed;
739 		}
740 		progress |= QL_SOFT_STATE_ALLOCED;
741 
742 		ha = ddi_get_soft_state(ql_state, instance);
743 		if (ha == NULL) {
744 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
745 			    QL_NAME, instance);
746 			goto attach_failed;
747 		}
748 		ha->dip = dip;
749 		ha->instance = instance;
750 		ha->hba.base_address = ha;
751 		ha->pha = ha;
752 
753 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
754 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
755 			    QL_NAME, instance);
756 			goto attach_failed;
757 		}
758 
759 		/* Get extended logging and dump flags. */
760 		ql_common_properties(ha);
761 
762 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
763 		    "sbus") == 0) {
764 			EL(ha, "%s SBUS card detected", QL_NAME);
765 			ha->cfg_flags |= CFG_SBUS_CARD;
766 		}
767 
768 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
769 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
770 
771 		ha->outstanding_cmds = kmem_zalloc(
772 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
773 		    KM_SLEEP);
774 
775 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
776 		    QL_UB_LIMIT, KM_SLEEP);
777 
778 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
779 		    KM_SLEEP);
780 
781 		(void) ddi_pathname(dip, buf);
782 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
783 		if (ha->devpath == NULL) {
784 			EL(ha, "devpath mem alloc failed\n");
785 		} else {
786 			(void) strcpy(ha->devpath, buf);
787 			EL(ha, "devpath is: %s\n", ha->devpath);
788 		}
789 
790 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
791 			/*
792 			 * For cards where PCI is mapped to sbus e.g. Ivory.
793 			 *
794 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
795 			 *	: 0x100 - 0x3FF PCI IO space for 2200
796 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
797 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
798 			 */
799 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
800 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
801 			    != DDI_SUCCESS) {
802 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
803 				    " registers", QL_NAME, instance);
804 				goto attach_failed;
805 			}
806 			if (ddi_regs_map_setup(dip, 1,
807 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
808 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
809 			    != DDI_SUCCESS) {
810 				/* We should not fail attach here */
811 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
812 				    QL_NAME, instance);
813 				ha->sbus_fpga_iobase = NULL;
814 			}
815 			progress |= QL_REGS_MAPPED;
816 		} else {
817 			/*
818 			 * Setup the ISP2200 registers address mapping to be
819 			 * accessed by this particular driver.
820 			 * 0x0   Configuration Space
821 			 * 0x1   I/O Space
822 			 * 0x2   32-bit Memory Space address
823 			 * 0x3   64-bit Memory Space address
824 			 */
825 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
826 			    0, 0x100, &ql_dev_acc_attr,
827 			    &ha->dev_handle) != DDI_SUCCESS) {
828 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
829 				    "failed", QL_NAME, instance);
830 				goto attach_failed;
831 			}
832 			progress |= QL_REGS_MAPPED;
833 
834 			/*
835 			 * We need I/O space mappings for 23xx HBAs for
836 			 * loading flash (FCode). The chip has a bug due to
837 			 * which loading flash fails through mem space
838 			 * mappings in PCI-X mode.
839 			 */
840 			if (ddi_regs_map_setup(dip, 1,
841 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
842 			    &ql_dev_acc_attr,
843 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
844 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
845 				    " failed", QL_NAME, instance);
846 				goto attach_failed;
847 			}
848 			progress |= QL_IOMAP_IOBASE_MAPPED;
849 		}
850 
851 		/*
852 		 * We should map config space before adding interrupt
853 		 * So that the chip type (2200 or 2300) can be determined
854 		 * before the interrupt routine gets a chance to execute.
855 		 */
856 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
857 			if (ddi_regs_map_setup(dip, 0,
858 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
859 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
860 			    DDI_SUCCESS) {
861 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
862 				    "config registers", QL_NAME, instance);
863 				goto attach_failed;
864 			}
865 		} else {
866 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
867 			    DDI_SUCCESS) {
868 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
869 				    "config space", QL_NAME, instance);
870 				goto attach_failed;
871 			}
872 		}
873 		progress |= QL_CONFIG_SPACE_SETUP;
874 
875 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
876 		    PCI_CONF_SUBSYSID);
877 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
878 		    PCI_CONF_SUBVENID);
879 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
880 		    PCI_CONF_VENID);
881 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
882 		    PCI_CONF_DEVID);
883 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
884 		    PCI_CONF_REVID);
885 
886 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
887 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
888 		    ha->subven_id, ha->subsys_id);
889 
890 		switch (ha->device_id) {
891 		case 0x2300:
892 		case 0x2312:
893 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
894 		/*
895 		 * per marketing, fibre-lite HBA's are not supported
896 		 * on sparc platforms
897 		 */
898 		case 0x6312:
899 		case 0x6322:
900 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
901 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
902 				ha->flags |= FUNCTION_1;
903 			}
904 			if (ha->device_id == 0x6322) {
905 				ha->cfg_flags |= CFG_CTRL_6322;
906 				ha->fw_class = 0x6322;
907 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
908 			} else {
909 				ha->cfg_flags |= CFG_CTRL_2300;
910 				ha->fw_class = 0x2300;
911 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
912 			}
913 			ha->reg_off = &reg_off_2300;
914 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
915 				goto attach_failed;
916 			}
917 			ha->fcp_cmd = ql_command_iocb;
918 			ha->ip_cmd = ql_ip_iocb;
919 			ha->ms_cmd = ql_ms_iocb;
920 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
921 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
922 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
923 			} else {
924 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
925 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
926 			}
927 			break;
928 
929 		case 0x2200:
930 			ha->cfg_flags |= CFG_CTRL_2200;
931 			ha->reg_off = &reg_off_2200;
932 			ha->fw_class = 0x2200;
933 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
934 				goto attach_failed;
935 			}
936 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
937 			ha->fcp_cmd = ql_command_iocb;
938 			ha->ip_cmd = ql_ip_iocb;
939 			ha->ms_cmd = ql_ms_iocb;
940 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
941 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
942 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
943 			} else {
944 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
945 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
946 			}
947 			break;
948 
949 		case 0x2422:
950 		case 0x2432:
951 		case 0x5422:
952 		case 0x5432:
953 		case 0x8432:
954 #ifdef __sparc
955 			/*
956 			 * Per marketing, the QLA/QLE-2440's (which
957 			 * also use the 2422 & 2432) are only for the
958 			 * x86 platform (SMB market).
959 			 */
960 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
961 			    ha->subsys_id == 0x13e) {
962 				cmn_err(CE_WARN,
963 				    "%s(%d): Unsupported HBA ssid: %x",
964 				    QL_NAME, instance, ha->subsys_id);
965 				goto attach_failed;
966 			}
967 #endif	/* __sparc */
968 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
969 				ha->flags |= FUNCTION_1;
970 			}
971 			ha->cfg_flags |= CFG_CTRL_2422;
972 			if (ha->device_id == 0x8432) {
973 				ha->cfg_flags |= CFG_CTRL_MENLO;
974 			} else {
975 				ha->flags |= VP_ENABLED;
976 			}
977 
978 			ha->reg_off = &reg_off_2400_2500;
979 			ha->fw_class = 0x2400;
980 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
981 				goto attach_failed;
982 			}
983 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
984 			ha->fcp_cmd = ql_command_24xx_iocb;
985 			ha->ip_cmd = ql_ip_24xx_iocb;
986 			ha->ms_cmd = ql_ms_24xx_iocb;
987 			ha->els_cmd = ql_els_24xx_iocb;
988 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
989 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
990 			break;
991 
992 		case 0x2522:
993 		case 0x2532:
994 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
995 				ha->flags |= FUNCTION_1;
996 			}
997 			ha->cfg_flags |= CFG_CTRL_25XX;
998 			ha->flags |= VP_ENABLED;
999 			ha->fw_class = 0x2500;
1000 			ha->reg_off = &reg_off_2400_2500;
1001 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1002 				goto attach_failed;
1003 			}
1004 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1005 			ha->fcp_cmd = ql_command_24xx_iocb;
1006 			ha->ip_cmd = ql_ip_24xx_iocb;
1007 			ha->ms_cmd = ql_ms_24xx_iocb;
1008 			ha->els_cmd = ql_els_24xx_iocb;
1009 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1010 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1011 			break;
1012 
1013 		case 0x8001:
1014 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1015 				ha->flags |= FUNCTION_1;
1016 			}
1017 			ha->cfg_flags |= CFG_CTRL_81XX;
1018 			ha->flags |= VP_ENABLED;
1019 			ha->fw_class = 0x8100;
1020 			ha->reg_off = &reg_off_2400_2500;
1021 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1022 				goto attach_failed;
1023 			}
1024 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1025 			ha->fcp_cmd = ql_command_24xx_iocb;
1026 			ha->ip_cmd = ql_ip_24xx_iocb;
1027 			ha->ms_cmd = ql_ms_24xx_iocb;
1028 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1029 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1030 			break;
1031 
1032 		default:
1033 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1034 			    QL_NAME, instance, ha->device_id);
1035 			goto attach_failed;
1036 		}
1037 
1038 		/* Setup hba buffer. */
1039 
1040 		size = CFG_IST(ha, CFG_CTRL_242581) ?
1041 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1042 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1043 		    RCVBUF_QUEUE_SIZE);
1044 
1045 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1046 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1047 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1048 			    "alloc failed", QL_NAME, instance);
1049 			goto attach_failed;
1050 		}
1051 		progress |= QL_HBA_BUFFER_SETUP;
1052 
1053 		/* Setup buffer pointers. */
1054 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1055 		    REQUEST_Q_BUFFER_OFFSET;
1056 		ha->request_ring_bp = (struct cmd_entry *)
1057 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1058 
1059 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1060 		    RESPONSE_Q_BUFFER_OFFSET;
1061 		ha->response_ring_bp = (struct sts_entry *)
1062 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1063 
1064 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1065 		    RCVBUF_Q_BUFFER_OFFSET;
1066 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1067 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1068 
1069 		/* Allocate resource for QLogic IOCTL */
1070 		(void) ql_alloc_xioctl_resource(ha);
1071 
1072 		/* Setup interrupts */
1073 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1074 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1075 			    "rval=%xh", QL_NAME, instance, rval);
1076 			goto attach_failed;
1077 		}
1078 
1079 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1080 
1081 		/*
1082 		 * Allocate an N Port information structure
1083 		 * for use when in P2P topology.
1084 		 */
1085 		ha->n_port = (ql_n_port_info_t *)
1086 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1087 		if (ha->n_port == NULL) {
1088 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1089 			    QL_NAME, instance);
1090 			goto attach_failed;
1091 		}
1092 
1093 		progress |= QL_N_PORT_INFO_CREATED;
1094 
1095 		/*
1096 		 * Determine support for Power Management
1097 		 */
1098 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1099 
1100 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1101 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1102 			if (cap == PCI_CAP_ID_PM) {
1103 				ha->pm_capable = 1;
1104 				break;
1105 			}
1106 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1107 			    PCI_CAP_NEXT_PTR);
1108 		}
1109 
1110 		if (ha->pm_capable) {
1111 			/*
1112 			 * Enable PM for 2200 based HBAs only.
1113 			 */
1114 			if (ha->device_id != 0x2200) {
1115 				ha->pm_capable = 0;
1116 			}
1117 		}
1118 
1119 		if (ha->pm_capable) {
1120 			ha->pm_capable = ql_enable_pm;
1121 		}
1122 
1123 		if (ha->pm_capable) {
1124 			/*
1125 			 * Initialize power management bookkeeping;
1126 			 * components are created idle.
1127 			 */
1128 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1129 			pmcomps[0] = buf;
1130 
1131 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1132 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1133 			    dip, "pm-components", pmcomps,
1134 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1135 			    DDI_PROP_SUCCESS) {
1136 				cmn_err(CE_WARN, "%s(%d): failed to create"
1137 				    " pm-components property", QL_NAME,
1138 				    instance);
1139 
1140 				/* Initialize adapter. */
1141 				ha->power_level = PM_LEVEL_D0;
1142 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1143 					cmn_err(CE_WARN, "%s(%d): failed to"
1144 					    " initialize adapter", QL_NAME,
1145 					    instance);
1146 					goto attach_failed;
1147 				}
1148 			} else {
1149 				ha->power_level = PM_LEVEL_D3;
1150 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1151 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1152 					cmn_err(CE_WARN, "%s(%d): failed to"
1153 					    " raise power or initialize"
1154 					    " adapter", QL_NAME, instance);
1155 				}
1156 			}
1157 		} else {
1158 			/* Initialize adapter. */
1159 			ha->power_level = PM_LEVEL_D0;
1160 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1161 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1162 				    " adapter", QL_NAME, instance);
1163 			}
1164 		}
1165 
1166 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1167 		    ha->fw_subminor_version == 0) {
1168 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1169 			    QL_NAME, ha->instance);
1170 		} else {
1171 			int	rval;
1172 			char	ver_fmt[256];
1173 
1174 			rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1175 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1176 			    ha->fw_minor_version, ha->fw_subminor_version);
1177 
1178 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1179 				rval = (int)snprintf(ver_fmt + rval,
1180 				    (size_t)sizeof (ver_fmt),
1181 				    ", MPI fw version %d.%d.%d",
1182 				    ha->mpi_fw_major_version,
1183 				    ha->mpi_fw_minor_version,
1184 				    ha->mpi_fw_subminor_version);
1185 
1186 				if (ha->subsys_id == 0x17B ||
1187 				    ha->subsys_id == 0x17D) {
1188 					(void) snprintf(ver_fmt + rval,
1189 					    (size_t)sizeof (ver_fmt),
1190 					    ", PHY fw version %d.%d.%d",
1191 					    ha->phy_fw_major_version,
1192 					    ha->phy_fw_minor_version,
1193 					    ha->phy_fw_subminor_version);
1194 				}
1195 			}
1196 			cmn_err(CE_NOTE, "!%s(%d): %s",
1197 			    QL_NAME, ha->instance, ver_fmt);
1198 		}
1199 
1200 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1201 		    "controller", KSTAT_TYPE_RAW,
1202 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1203 		if (ha->k_stats == NULL) {
1204 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1205 			    QL_NAME, instance);
1206 			goto attach_failed;
1207 		}
1208 		progress |= QL_KSTAT_CREATED;
1209 
1210 		ha->adapter_stats->version = 1;
1211 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1212 		ha->k_stats->ks_private = ha;
1213 		ha->k_stats->ks_update = ql_kstat_update;
1214 		ha->k_stats->ks_ndata = 1;
1215 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1216 		kstat_install(ha->k_stats);
1217 
1218 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1219 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1220 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1221 			    QL_NAME, instance);
1222 			goto attach_failed;
1223 		}
1224 		progress |= QL_MINOR_NODE_CREATED;
1225 
1226 		/* Allocate a transport structure for this instance */
1227 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1228 		if (tran == NULL) {
1229 			cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1230 			    QL_NAME, instance);
1231 			goto attach_failed;
1232 		}
1233 
1234 		progress |= QL_FCA_TRAN_ALLOCED;
1235 
1236 		/* fill in the structure */
1237 		tran->fca_numports = 1;
1238 		tran->fca_version = FCTL_FCA_MODREV_5;
1239 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1240 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1241 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1242 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1243 		}
1244 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1245 		    tran->fca_perm_pwwn.raw_wwn, 8);
1246 
1247 		EL(ha, "FCA version %d\n", tran->fca_version);
1248 
1249 		/* Specify the amount of space needed in each packet */
1250 		tran->fca_pkt_size = sizeof (ql_srb_t);
1251 
1252 		/* command limits are usually dictated by hardware */
1253 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1254 
1255 		/* dmaattr are static, set elsewhere. */
1256 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1257 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1258 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1259 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1260 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1261 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1262 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1263 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1264 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1265 		} else {
1266 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1267 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1268 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1269 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1270 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1271 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1272 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1273 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1274 		}
1275 
1276 		tran->fca_acc_attr = &ql_dev_acc_attr;
1277 		tran->fca_iblock = &(ha->iblock_cookie);
1278 
1279 		/* the remaining values are simply function vectors */
1280 		tran->fca_bind_port = ql_bind_port;
1281 		tran->fca_unbind_port = ql_unbind_port;
1282 		tran->fca_init_pkt = ql_init_pkt;
1283 		tran->fca_un_init_pkt = ql_un_init_pkt;
1284 		tran->fca_els_send = ql_els_send;
1285 		tran->fca_get_cap = ql_get_cap;
1286 		tran->fca_set_cap = ql_set_cap;
1287 		tran->fca_getmap = ql_getmap;
1288 		tran->fca_transport = ql_transport;
1289 		tran->fca_ub_alloc = ql_ub_alloc;
1290 		tran->fca_ub_free = ql_ub_free;
1291 		tran->fca_ub_release = ql_ub_release;
1292 		tran->fca_abort = ql_abort;
1293 		tran->fca_reset = ql_reset;
1294 		tran->fca_port_manage = ql_port_manage;
1295 		tran->fca_get_device = ql_get_device;
1296 
1297 		/* give it to the FC transport */
1298 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1299 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1300 			    instance);
1301 			goto attach_failed;
1302 		}
1303 		progress |= QL_FCA_ATTACH_DONE;
1304 
1305 		/* Stash the structure so it can be freed at detach */
1306 		ha->tran = tran;
1307 
1308 		/* Acquire global state lock. */
1309 		GLOBAL_STATE_LOCK();
1310 
1311 		/* Add adapter structure to link list. */
1312 		ql_add_link_b(&ql_hba, &ha->hba);
1313 
1314 		/* Start one second driver timer. */
1315 		if (ql_timer_timeout_id == NULL) {
1316 			ql_timer_ticks = drv_usectohz(1000000);
1317 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1318 			    ql_timer_ticks);
1319 		}
1320 
1321 		/* Release global state lock. */
1322 		GLOBAL_STATE_UNLOCK();
1323 
1324 		/* Determine and populate HBA fru info */
1325 		ql_setup_fruinfo(ha);
1326 
1327 		/* Setup task_daemon thread. */
1328 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1329 		    0, &p0, TS_RUN, minclsyspri);
1330 
1331 		progress |= QL_TASK_DAEMON_STARTED;
1332 
1333 		ddi_report_dev(dip);
1334 
1335 		/* Disable link reset in panic path */
1336 		ha->lip_on_panic = 1;
1337 
1338 		rval = DDI_SUCCESS;
1339 		break;
1340 
1341 attach_failed:
1342 		if (progress & QL_FCA_ATTACH_DONE) {
1343 			(void) fc_fca_detach(dip);
1344 			progress &= ~QL_FCA_ATTACH_DONE;
1345 		}
1346 
1347 		if (progress & QL_FCA_TRAN_ALLOCED) {
1348 			kmem_free(tran, sizeof (fc_fca_tran_t));
1349 			progress &= ~QL_FCA_TRAN_ALLOCED;
1350 		}
1351 
1352 		if (progress & QL_MINOR_NODE_CREATED) {
1353 			ddi_remove_minor_node(dip, "devctl");
1354 			progress &= ~QL_MINOR_NODE_CREATED;
1355 		}
1356 
1357 		if (progress & QL_KSTAT_CREATED) {
1358 			kstat_delete(ha->k_stats);
1359 			progress &= ~QL_KSTAT_CREATED;
1360 		}
1361 
1362 		if (progress & QL_N_PORT_INFO_CREATED) {
1363 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1364 			progress &= ~QL_N_PORT_INFO_CREATED;
1365 		}
1366 
1367 		if (progress & QL_TASK_DAEMON_STARTED) {
1368 			TASK_DAEMON_LOCK(ha);
1369 
1370 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1371 
1372 			cv_signal(&ha->cv_task_daemon);
1373 
1374 			/* Release task daemon lock. */
1375 			TASK_DAEMON_UNLOCK(ha);
1376 
			/* Wait for task daemon to stop running. */
1378 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1379 				ql_delay(ha, 10000);
1380 			}
1381 			progress &= ~QL_TASK_DAEMON_STARTED;
1382 		}
1383 
1384 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1385 			ddi_regs_map_free(&ha->iomap_dev_handle);
1386 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1387 		}
1388 
1389 		if (progress & QL_CONFIG_SPACE_SETUP) {
1390 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1391 				ddi_regs_map_free(&ha->sbus_config_handle);
1392 			} else {
1393 				pci_config_teardown(&ha->pci_handle);
1394 			}
1395 			progress &= ~QL_CONFIG_SPACE_SETUP;
1396 		}
1397 
1398 		if (progress & QL_INTR_ADDED) {
1399 			ql_disable_intr(ha);
1400 			ql_release_intr(ha);
1401 			progress &= ~QL_INTR_ADDED;
1402 		}
1403 
1404 		if (progress & QL_MUTEX_CV_INITED) {
1405 			ql_destroy_mutex(ha);
1406 			progress &= ~QL_MUTEX_CV_INITED;
1407 		}
1408 
1409 		if (progress & QL_HBA_BUFFER_SETUP) {
1410 			ql_free_phys(ha, &ha->hba_buf);
1411 			progress &= ~QL_HBA_BUFFER_SETUP;
1412 		}
1413 
1414 		if (progress & QL_REGS_MAPPED) {
1415 			ddi_regs_map_free(&ha->dev_handle);
1416 			if (ha->sbus_fpga_iobase != NULL) {
1417 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1418 			}
1419 			progress &= ~QL_REGS_MAPPED;
1420 		}
1421 
1422 		if (progress & QL_SOFT_STATE_ALLOCED) {
1423 
1424 			ql_fcache_rel(ha->fcache);
1425 
1426 			kmem_free(ha->adapter_stats,
1427 			    sizeof (*ha->adapter_stats));
1428 
1429 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1430 			    QL_UB_LIMIT);
1431 
1432 			kmem_free(ha->outstanding_cmds,
1433 			    sizeof (*ha->outstanding_cmds) *
1434 			    MAX_OUTSTANDING_COMMANDS);
1435 
1436 			if (ha->devpath != NULL) {
1437 				kmem_free(ha->devpath,
1438 				    strlen(ha->devpath) + 1);
1439 			}
1440 
1441 			kmem_free(ha->dev, sizeof (*ha->dev) *
1442 			    DEVICE_HEAD_LIST_SIZE);
1443 
1444 			if (ha->xioctl != NULL) {
1445 				ql_free_xioctl_resource(ha);
1446 			}
1447 
1448 			if (ha->fw_module != NULL) {
1449 				(void) ddi_modclose(ha->fw_module);
1450 			}
1451 
1452 			ddi_soft_state_free(ql_state, instance);
1453 			progress &= ~QL_SOFT_STATE_ALLOCED;
1454 		}
1455 
1456 		ddi_prop_remove_all(dip);
1457 		rval = DDI_FAILURE;
1458 		break;
1459 
1460 	case DDI_RESUME:
1461 		rval = DDI_FAILURE;
1462 
1463 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1464 		if (ha == NULL) {
1465 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1466 			    QL_NAME, instance);
1467 			break;
1468 		}
1469 
1470 		ha->power_level = PM_LEVEL_D3;
1471 		if (ha->pm_capable) {
1472 			/*
1473 			 * Get ql_power to do power on initialization
1474 			 */
1475 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1476 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1477 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1478 				    " power", QL_NAME, instance);
1479 			}
1480 		}
1481 
1482 		/*
1483 		 * There is a bug in DR that prevents PM framework
1484 		 * from calling ql_power.
1485 		 */
1486 		if (ha->power_level == PM_LEVEL_D3) {
1487 			ha->power_level = PM_LEVEL_D0;
1488 
1489 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1490 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1491 				    " adapter", QL_NAME, instance);
1492 			}
1493 
1494 			/* Wake up task_daemon. */
1495 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1496 			    0);
1497 		}
1498 
1499 		/* Acquire global state lock. */
1500 		GLOBAL_STATE_LOCK();
1501 
1502 		/* Restart driver timer. */
1503 		if (ql_timer_timeout_id == NULL) {
1504 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1505 			    ql_timer_ticks);
1506 		}
1507 
1508 		/* Release global state lock. */
1509 		GLOBAL_STATE_UNLOCK();
1510 
1511 		/* Wake up command start routine. */
1512 		ADAPTER_STATE_LOCK(ha);
1513 		ha->flags &= ~ADAPTER_SUSPENDED;
1514 		ADAPTER_STATE_UNLOCK(ha);
1515 
1516 		/*
1517 		 * Transport doesn't make FC discovery in polled
1518 		 * mode; So we need the daemon thread's services
1519 		 * right here.
1520 		 */
1521 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1522 
1523 		rval = DDI_SUCCESS;
1524 
1525 		/* Restart IP if it was running. */
1526 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1527 			(void) ql_initialize_ip(ha);
1528 			ql_isp_rcvbuf(ha);
1529 		}
1530 		break;
1531 
1532 	default:
1533 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1534 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1535 		rval = DDI_FAILURE;
1536 		break;
1537 	}
1538 
1539 	kmem_free(buf, MAXPATHLEN);
1540 
1541 	if (rval != DDI_SUCCESS) {
1542 		/*EMPTY*/
1543 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1544 		    ddi_get_instance(dip), rval);
1545 	} else {
1546 		/*EMPTY*/
1547 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1548 	}
1549 
1550 	return (rval);
1551 }
1552 
1553 /*
1554  * ql_detach
1555  *	Used to remove all the states associated with a given
1556  *	instances of a device node prior to the removal of that
1557  *	instance from the system.
1558  *
1559  * Input:
1560  *	dip = pointer to device information structure.
1561  *	cmd = type of detach.
1562  *
1563  * Returns:
1564  *	DDI_SUCCESS or DDI_FAILURE.
1565  *
1566  * Context:
1567  *	Kernel context.
1568  */
static int
ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	ql_adapter_state_t	*ha, *vha;
	ql_tgt_t		*tq;
	int			delay_cnt;
	uint16_t		index;
	ql_link_t		*link;
	char			*buf;
	timeout_id_t		timer_id = NULL;
	int			rval = DDI_SUCCESS;

	/* Soft state was allocated at attach time, keyed by instance. */
	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);

	/* Scratch buffer; freed unconditionally before returning. */
	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * Mark the adapter suspended so no new commands start,
		 * and force outstanding commands down the loop-down
		 * timeout path.
		 */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
		ADAPTER_STATE_UNLOCK(ha);

		TASK_DAEMON_LOCK(ha);

		/* Ask the task daemon thread to exit and wait for it. */
		if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
			cv_signal(&ha->cv_task_daemon);

			/* Drop the lock so the daemon can run and exit. */
			TASK_DAEMON_UNLOCK(ha);

			(void) ql_wait_for_td_stop(ha);

			TASK_DAEMON_LOCK(ha);
			if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
				/*
				 * The daemon never acknowledged the stop
				 * request; clear the flag and log it, then
				 * carry on with the teardown anyway.
				 */
				ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
				EL(ha, "failed, could not stop task daemon\n");
			}
		}
		TASK_DAEMON_UNLOCK(ha);

		GLOBAL_STATE_LOCK();

		/* Disable driver timer if no adapters. */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		/* Unlink this adapter from the global HBA list. */
		ql_remove_link(&ql_hba, &ha->hba);

		GLOBAL_STATE_UNLOCK();

		/* untimeout(9F) may block; call it without the lock held. */
		if (timer_id) {
			(void) untimeout(timer_id);
		}

		if (ha->pm_capable) {
			if (pm_lower_power(dip, QL_POWER_COMPONENT,
			    PM_LEVEL_D3) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s(%d): failed to lower the"
				    " power", QL_NAME, ha->instance);
			}
		}

		/*
		 * If pm_lower_power shutdown the adapter, there
		 * isn't much else to do
		 */
		if (ha->power_level != PM_LEVEL_D3) {
			ql_halt(ha, PM_LEVEL_D3);
		}

		/* Remove virtual ports. */
		while ((vha = ha->vp_next) != NULL) {
			ql_vport_destroy(vha);
		}

		/* Free target queues (one hash chain per list slot). */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			link = ha->dev[index].first;
			while (link != NULL) {
				tq = link->base_address;
				/* Advance before the node is freed. */
				link = link->next;
				ql_dev_free(ha, tq);
			}
		}

		/*
		 * Free unsolicited buffers.
		 * If we are here then there are no ULPs still
		 * alive that wish to talk to ql so free up
		 * any SRB_IP_UB_UNUSED buffers that are
		 * lingering around
		 */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			fc_unsol_buf_t *ubp = ha->ub_array[index];

			if (ubp != NULL) {
				ql_srb_t *sp = ubp->ub_fca_private;

				sp->flags |= SRB_UB_FREE_REQUESTED;

				/*
				 * Poll (dropping the lock each pass) until
				 * the buffer is back in the FCA and not in
				 * a callback or acquired by anyone.
				 */
				while (!(sp->flags & SRB_UB_IN_FCA) ||
				    (sp->flags & (SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED))) {
					QL_UB_UNLOCK(ha);
					delay(drv_usectohz(100000));
					QL_UB_LOCK(ha);
				}
				ha->ub_array[index] = NULL;

				/* Free outside the lock. */
				QL_UB_UNLOCK(ha);
				ql_free_unsolicited_buffer(ha, ubp);
				QL_UB_LOCK(ha);
			}
		}
		QL_UB_UNLOCK(ha);

		/* Free any saved RISC code. */
		if (ha->risc_code != NULL) {
			kmem_free(ha->risc_code, ha->risc_code_size);
			ha->risc_code = NULL;
			ha->risc_code_size = 0;
		}

		/* Release the firmware module opened at attach. */
		if (ha->fw_module != NULL) {
			(void) ddi_modclose(ha->fw_module);
			ha->fw_module = NULL;
		}

		/*
		 * Free resources — roughly the reverse of the attach
		 * sequence: transport, minor node, kstats, register
		 * mappings, interrupts, mutexes, DMA memory, and
		 * finally the soft state itself.
		 */
		ddi_prop_remove_all(dip);
		(void) fc_fca_detach(dip);
		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
		ddi_remove_minor_node(dip, "devctl");
		if (ha->k_stats != NULL) {
			kstat_delete(ha->k_stats);
		}

		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ddi_regs_map_free(&ha->sbus_config_handle);
		} else {
			/* PCI cards also mapped I/O space at attach. */
			ddi_regs_map_free(&ha->iomap_dev_handle);
			pci_config_teardown(&ha->pci_handle);
		}

		ql_disable_intr(ha);
		ql_release_intr(ha);

		ql_free_xioctl_resource(ha);

		ql_destroy_mutex(ha);

		/* DMA-backed queue and trace buffers. */
		ql_free_phys(ha, &ha->hba_buf);
		ql_free_phys(ha, &ha->fwexttracebuf);
		ql_free_phys(ha, &ha->fwfcetracebuf);

		ddi_regs_map_free(&ha->dev_handle);
		if (ha->sbus_fpga_iobase != NULL) {
			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
		}

		ql_fcache_rel(ha->fcache);
		if (ha->vcache != NULL) {
			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
		}

		if (ha->pi_attrs != NULL) {
			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
		}

		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));

		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);

		kmem_free(ha->outstanding_cmds,
		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);

		if (ha->n_port != NULL) {
			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
		}

		if (ha->devpath != NULL) {
			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
		}

		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);

		EL(ha, "detached\n");

		/* All references to ha are invalid after this point. */
		ddi_soft_state_free(ql_state, (int)ha->instance);

		break;

	case DDI_SUSPEND:
		ADAPTER_STATE_LOCK(ha);

		/*
		 * Mark the adapter suspended, then give the timer up to
		 * ~10 seconds to drain before deciding whether we can
		 * actually suspend.
		 */
		delay_cnt = 0;
		ha->flags |= ADAPTER_SUSPENDED;
		while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(1000000));
			ADAPTER_STATE_LOCK(ha);
		}
		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
			/* Still busy; back out the suspend and fail. */
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			rval = DDI_FAILURE;
			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
			    " busy %xh flags %xh", QL_NAME, ha->instance,
			    ha->busy, ha->flags);
			break;
		}

		ADAPTER_STATE_UNLOCK(ha);

		/* Quiesce IP traffic before suspending the chip. */
		if (ha->flags & IP_INITIALIZED) {
			(void) ql_shutdown_ip(ha);
		}

		if ((rval = ql_suspend_adapter(ha)) != QL_SUCCESS) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
			    QL_NAME, ha->instance, rval);

			/* Restart IP if it was running. */
			if (ha->flags & IP_ENABLED &&
			    !(ha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(ha);
				ql_isp_rcvbuf(ha);
			}
			rval = DDI_FAILURE;
			break;
		}

		/* Acquire global state lock. */
		GLOBAL_STATE_LOCK();

		/* Disable driver timer if last adapter. */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_STATE_UNLOCK();

		/* As above: untimeout(9F) runs without the global lock. */
		if (timer_id) {
			(void) untimeout(timer_id);
		}

		break;

	default:
		/* Unknown detach command. */
		rval = DDI_FAILURE;
		break;
	}

	kmem_free(buf, MAXPATHLEN);

	if (rval != DDI_SUCCESS) {
		if (ha != NULL) {
			EL(ha, "failed, rval = %xh\n", rval);
		} else {
			/*EMPTY*/
			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
			    ddi_get_instance(dip), rval);
		}
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
	}

	return (rval);
}
1853 
1854 
1855 /*
1856  * ql_power
1857  *	Power a device attached to the system.
1858  *
1859  * Input:
1860  *	dip = pointer to device information structure.
1861  *	component = device.
1862  *	level = power level.
1863  *
1864  * Returns:
1865  *	DDI_SUCCESS or DDI_FAILURE.
1866  *
1867  * Context:
1868  *	Kernel context.
1869  */
1870 /* ARGSUSED */
1871 static int
1872 ql_power(dev_info_t *dip, int component, int level)
1873 {
1874 	int			rval = DDI_FAILURE;
1875 	off_t			csr;
1876 	uint8_t			saved_pm_val;
1877 	ql_adapter_state_t	*ha;
1878 	char			*buf;
1879 	char			*path;
1880 
1881 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1882 	if (ha == NULL || ha->pm_capable == 0) {
1883 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1884 		    ddi_get_instance(dip));
1885 		return (rval);
1886 	}
1887 
1888 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1889 
1890 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1891 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1892 
1893 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1894 	    level != PM_LEVEL_D3)) {
1895 		EL(ha, "invalid, component=%xh or level=%xh\n",
1896 		    component, level);
1897 		return (rval);
1898 	}
1899 
1900 	GLOBAL_HW_LOCK();
1901 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1902 	GLOBAL_HW_UNLOCK();
1903 
1904 	(void) snprintf(buf, sizeof (buf),
1905 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1906 	    ddi_pathname(dip, path));
1907 
1908 	switch (level) {
1909 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1910 
1911 		QL_PM_LOCK(ha);
1912 		if (ha->power_level == PM_LEVEL_D0) {
1913 			QL_PM_UNLOCK(ha);
1914 			rval = DDI_SUCCESS;
1915 			break;
1916 		}
1917 
1918 		/*
1919 		 * Enable interrupts now
1920 		 */
1921 		saved_pm_val = ha->power_level;
1922 		ha->power_level = PM_LEVEL_D0;
1923 		QL_PM_UNLOCK(ha);
1924 
1925 		GLOBAL_HW_LOCK();
1926 
1927 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1928 
1929 		/*
1930 		 * Delay after reset, for chip to recover.
1931 		 * Otherwise causes system PANIC
1932 		 */
1933 		drv_usecwait(200000);
1934 
1935 		GLOBAL_HW_UNLOCK();
1936 
1937 		if (ha->config_saved) {
1938 			ha->config_saved = 0;
1939 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1940 				QL_PM_LOCK(ha);
1941 				ha->power_level = saved_pm_val;
1942 				QL_PM_UNLOCK(ha);
1943 				cmn_err(CE_WARN, "%s failed to restore "
1944 				    "config regs", buf);
1945 				break;
1946 			}
1947 		}
1948 
1949 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1950 			cmn_err(CE_WARN, "%s adapter initialization failed",
1951 			    buf);
1952 		}
1953 
1954 		/* Wake up task_daemon. */
1955 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1956 		    TASK_DAEMON_SLEEPING_FLG, 0);
1957 
1958 		/* Restart IP if it was running. */
1959 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1960 			(void) ql_initialize_ip(ha);
1961 			ql_isp_rcvbuf(ha);
1962 		}
1963 
1964 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1965 		    ha->instance, QL_NAME);
1966 
1967 		rval = DDI_SUCCESS;
1968 		break;
1969 
1970 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1971 
1972 		QL_PM_LOCK(ha);
1973 
1974 		if (ha->busy || ((ha->task_daemon_flags &
1975 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1976 			QL_PM_UNLOCK(ha);
1977 			break;
1978 		}
1979 
1980 		if (ha->power_level == PM_LEVEL_D3) {
1981 			rval = DDI_SUCCESS;
1982 			QL_PM_UNLOCK(ha);
1983 			break;
1984 		}
1985 		QL_PM_UNLOCK(ha);
1986 
1987 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1988 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
1989 			    " config regs", QL_NAME, ha->instance, buf);
1990 			break;
1991 		}
1992 		ha->config_saved = 1;
1993 
1994 		/*
1995 		 * Don't enable interrupts. Running mailbox commands with
1996 		 * interrupts enabled could cause hangs since pm_run_scan()
1997 		 * runs out of a callout thread and on single cpu systems
1998 		 * cv_timedwait(), called from ql_mailbox_command(), would
1999 		 * not get to run.
2000 		 */
2001 		TASK_DAEMON_LOCK(ha);
2002 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2003 		TASK_DAEMON_UNLOCK(ha);
2004 
2005 		ql_halt(ha, PM_LEVEL_D3);
2006 
2007 		/*
2008 		 * Setup ql_intr to ignore interrupts from here on.
2009 		 */
2010 		QL_PM_LOCK(ha);
2011 		ha->power_level = PM_LEVEL_D3;
2012 		QL_PM_UNLOCK(ha);
2013 
2014 		/*
2015 		 * Wait for ISR to complete.
2016 		 */
2017 		INTR_LOCK(ha);
2018 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2019 		INTR_UNLOCK(ha);
2020 
2021 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2022 		    ha->instance, QL_NAME);
2023 
2024 		rval = DDI_SUCCESS;
2025 		break;
2026 	}
2027 
2028 	kmem_free(buf, MAXPATHLEN);
2029 	kmem_free(path, MAXPATHLEN);
2030 
2031 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2032 
2033 	return (rval);
2034 }
2035 
2036 /*
2037  * ql_quiesce
2038  *	quiesce a device attached to the system.
2039  *
2040  * Input:
2041  *	dip = pointer to device information structure.
2042  *
2043  * Returns:
2044  *	DDI_SUCCESS
2045  *
2046  * Context:
2047  *	Kernel context.
2048  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * 24xx/25xx/81xx: ask the firmware to stop via a mailbox
		 * command, poll for completion, then hard-reset the chip.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/* Poll up to ~3 seconds (30000 iterations * 100us). */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, intr_info_lo);
			/*
			 * NOTE(review): BIT_15 appears to flag a pending
			 * RISC interrupt and status codes below 0x12 a
			 * mailbox completion -- confirm against the
			 * ISP24xx register documentation.
			 */
			if (stat & BIT_15) {
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Legacy ISP2xxx quiesce: register writes only, no FW. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
2104 
2105 /* ************************************************************************ */
2106 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2107 /* ************************************************************************ */
2108 
2109 /*
2110  * ql_bind_port
2111  *	Handling port binding. The FC Transport attempts to bind an FCA port
2112  *	when it is ready to start transactions on the port. The FC Transport
2113  *	will call the fca_bind_port() function specified in the fca_transport
2114  *	structure it receives. The FCA must fill in the port_info structure
2115  *	passed in the call and also stash the information for future calls.
2116  *
2117  * Input:
2118  *	dip = pointer to FCA information structure.
2119  *	port_info = pointer to port information structure.
2120  *	bind_info = pointer to bind information structure.
2121  *
2122  * Returns:
2123  *	NULL = failure
2124  *
2125  * Context:
2126  *	Kernel context.
2127  */
2128 static opaque_t
2129 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2130     fc_fca_bind_info_t *bind_info)
2131 {
2132 	ql_adapter_state_t	*ha, *vha;
2133 	opaque_t		fca_handle = NULL;
2134 	port_id_t		d_id;
2135 	int			port_npiv = bind_info->port_npiv;
2136 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2137 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2138 
2139 	/* get state info based on the dip */
2140 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2141 	if (ha == NULL) {
2142 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2143 		    ddi_get_instance(dip));
2144 		return (NULL);
2145 	}
2146 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2147 
2148 	/* Verify port number is supported. */
2149 	if (port_npiv != 0) {
2150 		if (!(ha->flags & VP_ENABLED)) {
2151 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2152 			    ha->instance);
2153 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2154 			return (NULL);
2155 		}
2156 		if (!(ha->flags & POINT_TO_POINT)) {
2157 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2158 			    ha->instance);
2159 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2160 			return (NULL);
2161 		}
2162 		if (!(ha->flags & FDISC_ENABLED)) {
2163 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2164 			    "FDISC\n", ha->instance);
2165 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2166 			return (NULL);
2167 		}
2168 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2169 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2170 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2171 			    "FC_OUTOFBOUNDS\n", ha->instance);
2172 			port_info->pi_error = FC_OUTOFBOUNDS;
2173 			return (NULL);
2174 		}
2175 	} else if (bind_info->port_num != 0) {
2176 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2177 		    "supported\n", ha->instance, bind_info->port_num);
2178 		port_info->pi_error = FC_OUTOFBOUNDS;
2179 		return (NULL);
2180 	}
2181 
2182 	/* Locate port context. */
2183 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2184 		if (vha->vp_index == bind_info->port_num) {
2185 			break;
2186 		}
2187 	}
2188 
2189 	/* If virtual port does not exist. */
2190 	if (vha == NULL) {
2191 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2192 	}
2193 
2194 	/* make sure this port isn't already bound */
2195 	if (vha->flags & FCA_BOUND) {
2196 		port_info->pi_error = FC_ALREADY;
2197 	} else {
2198 		if (vha->vp_index != 0) {
2199 			bcopy(port_nwwn,
2200 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2201 			bcopy(port_pwwn,
2202 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2203 		}
2204 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2205 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2206 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2207 				    "virtual port=%d\n", ha->instance,
2208 				    vha->vp_index);
2209 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2210 				return (NULL);
2211 			}
2212 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2213 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2214 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2215 			    QL_NAME, ha->instance, vha->vp_index,
2216 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2217 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2218 			    port_pwwn[6], port_pwwn[7],
2219 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2220 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2221 			    port_nwwn[6], port_nwwn[7]);
2222 		}
2223 
2224 		/* stash the bind_info supplied by the FC Transport */
2225 		vha->bind_info.port_handle = bind_info->port_handle;
2226 		vha->bind_info.port_statec_cb =
2227 		    bind_info->port_statec_cb;
2228 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2229 
2230 		/* Set port's source ID. */
2231 		port_info->pi_s_id.port_id = vha->d_id.b24;
2232 
2233 		/* copy out the default login parameters */
2234 		bcopy((void *)&vha->loginparams,
2235 		    (void *)&port_info->pi_login_params,
2236 		    sizeof (la_els_logi_t));
2237 
2238 		/* Set port's hard address if enabled. */
2239 		port_info->pi_hard_addr.hard_addr = 0;
2240 		if (bind_info->port_num == 0) {
2241 			d_id.b24 = ha->d_id.b24;
2242 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2243 				if (ha->init_ctrl_blk.cb24.
2244 				    firmware_options_1[0] & BIT_0) {
2245 					d_id.b.al_pa = ql_index_to_alpa[ha->
2246 					    init_ctrl_blk.cb24.
2247 					    hard_address[0]];
2248 					port_info->pi_hard_addr.hard_addr =
2249 					    d_id.b24;
2250 				}
2251 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2252 			    BIT_0) {
2253 				d_id.b.al_pa = ql_index_to_alpa[ha->
2254 				    init_ctrl_blk.cb.hard_address[0]];
2255 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2256 			}
2257 
2258 			/* Set the node id data */
2259 			if (ql_get_rnid_params(ha,
2260 			    sizeof (port_info->pi_rnid_params.params),
2261 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2262 			    QL_SUCCESS) {
2263 				port_info->pi_rnid_params.status = FC_SUCCESS;
2264 			} else {
2265 				port_info->pi_rnid_params.status = FC_FAILURE;
2266 			}
2267 
2268 			/* Populate T11 FC-HBA details */
2269 			ql_populate_hba_fru_details(ha, port_info);
2270 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2271 			    KM_SLEEP);
2272 			if (ha->pi_attrs != NULL) {
2273 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2274 				    sizeof (fca_port_attrs_t));
2275 			}
2276 		} else {
2277 			port_info->pi_rnid_params.status = FC_FAILURE;
2278 			if (ha->pi_attrs != NULL) {
2279 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2280 				    sizeof (fca_port_attrs_t));
2281 			}
2282 		}
2283 
2284 		/* Generate handle for this FCA. */
2285 		fca_handle = (opaque_t)vha;
2286 
2287 		ADAPTER_STATE_LOCK(ha);
2288 		vha->flags |= FCA_BOUND;
2289 		ADAPTER_STATE_UNLOCK(ha);
2290 		/* Set port's current state. */
2291 		port_info->pi_port_state = vha->state;
2292 	}
2293 
2294 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2295 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2296 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2297 
2298 	return (fca_handle);
2299 }
2300 
2301 /*
2302  * ql_unbind_port
2303  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2304  *
2305  * Input:
2306  *	fca_handle = handle setup by ql_bind_port().
2307  *
2308  * Context:
2309  *	Kernel context.
2310  */
2311 static void
2312 ql_unbind_port(opaque_t fca_handle)
2313 {
2314 	ql_adapter_state_t	*ha;
2315 	ql_tgt_t		*tq;
2316 	uint32_t		flgs;
2317 
2318 	ha = ql_fca_handle_to_state(fca_handle);
2319 	if (ha == NULL) {
2320 		/*EMPTY*/
2321 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2322 		    (void *)fca_handle);
2323 	} else {
2324 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2325 		    ha->vp_index);
2326 
2327 		if (!(ha->flags & FCA_BOUND)) {
2328 			/*EMPTY*/
2329 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2330 			    ha->instance, ha->vp_index);
2331 		} else {
2332 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2333 				if ((tq = ql_loop_id_to_queue(ha,
2334 				    FL_PORT_24XX_HDL)) != NULL) {
2335 					(void) ql_logout_fabric_port(ha, tq);
2336 				}
2337 				(void) ql_vport_control(ha, (uint8_t)
2338 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2339 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2340 				flgs = FCA_BOUND | VP_ENABLED;
2341 			} else {
2342 				flgs = FCA_BOUND;
2343 			}
2344 			ADAPTER_STATE_LOCK(ha);
2345 			ha->flags &= ~flgs;
2346 			ADAPTER_STATE_UNLOCK(ha);
2347 		}
2348 
2349 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2350 		    ha->vp_index);
2351 	}
2352 }
2353 
2354 /*
2355  * ql_init_pkt
2356  *	Initialize FCA portion of packet.
2357  *
2358  * Input:
2359  *	fca_handle = handle setup by ql_bind_port().
2360  *	pkt = pointer to fc_packet.
2361  *
2362  * Returns:
2363  *	FC_SUCCESS - the packet has successfully been initialized.
2364  *	FC_UNBOUND - the fca_handle specified is not bound.
2365  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2366  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2367  *
2368  * Context:
2369  *	Kernel context.
2370  */
2371 /* ARGSUSED */
2372 static int
2373 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2374 {
2375 	ql_adapter_state_t	*ha;
2376 	ql_srb_t		*sp;
2377 
2378 	ha = ql_fca_handle_to_state(fca_handle);
2379 	if (ha == NULL) {
2380 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2381 		    (void *)fca_handle);
2382 		return (FC_UNBOUND);
2383 	}
2384 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2385 
2386 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2387 	sp->flags = 0;
2388 
2389 	/* init cmd links */
2390 	sp->cmd.base_address = sp;
2391 	sp->cmd.prev = NULL;
2392 	sp->cmd.next = NULL;
2393 	sp->cmd.head = NULL;
2394 
2395 	/* init watchdog links */
2396 	sp->wdg.base_address = sp;
2397 	sp->wdg.prev = NULL;
2398 	sp->wdg.next = NULL;
2399 	sp->wdg.head = NULL;
2400 	sp->pkt = pkt;
2401 	sp->ha = ha;
2402 	sp->magic_number = QL_FCA_BRAND;
2403 
2404 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2405 
2406 	return (FC_SUCCESS);
2407 }
2408 
2409 /*
2410  * ql_un_init_pkt
2411  *	Release all local resources bound to packet.
2412  *
2413  * Input:
2414  *	fca_handle = handle setup by ql_bind_port().
2415  *	pkt = pointer to fc_packet.
2416  *
2417  * Returns:
2418  *	FC_SUCCESS - the packet has successfully been invalidated.
2419  *	FC_UNBOUND - the fca_handle specified is not bound.
2420  *	FC_BADPACKET - the packet has not been initialized or has
2421  *			already been freed by this FCA.
2422  *
2423  * Context:
2424  *	Kernel context.
2425  */
2426 static int
2427 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2428 {
2429 	ql_adapter_state_t *ha;
2430 	int rval;
2431 	ql_srb_t *sp;
2432 
2433 	ha = ql_fca_handle_to_state(fca_handle);
2434 	if (ha == NULL) {
2435 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2436 		    (void *)fca_handle);
2437 		return (FC_UNBOUND);
2438 	}
2439 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2440 
2441 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2442 
2443 	if (sp->magic_number != QL_FCA_BRAND) {
2444 		EL(ha, "failed, FC_BADPACKET\n");
2445 		rval = FC_BADPACKET;
2446 	} else {
2447 		sp->magic_number = NULL;
2448 
2449 		rval = FC_SUCCESS;
2450 	}
2451 
2452 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2453 
2454 	return (rval);
2455 }
2456 
2457 /*
2458  * ql_els_send
2459  *	Issue a extended link service request.
2460  *
2461  * Input:
2462  *	fca_handle = handle setup by ql_bind_port().
2463  *	pkt = pointer to fc_packet.
2464  *
2465  * Returns:
2466  *	FC_SUCCESS - the command was successful.
2467  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2468  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2469  *	FC_TRANSPORT_ERROR - a transport error occurred.
2470  *	FC_UNBOUND - the fca_handle specified is not bound.
2471  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2472  *
2473  * Context:
2474  *	Kernel context.
2475  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer;
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* The response frame travels the reverse of the command path. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear stale packet-type flags, then mark this SRB as an ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	/* All ELS requests use the adapter's preallocated ELS IOCB. */
	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/* NOTE(review): ACC is also logged as "LA_ELS_RJT" here. */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2657 
2658 /*
2659  * ql_get_cap
2660  *	Export FCA hardware and software capabilities.
2661  *
2662  * Input:
2663  *	fca_handle = handle setup by ql_bind_port().
2664  *	cap = pointer to the capabilities string.
2665  *	ptr = buffer pointer for return capability.
2666  *
2667  * Returns:
2668  *	FC_CAP_ERROR - no such capability
2669  *	FC_CAP_FOUND - the capability was returned and cannot be set
2670  *	FC_CAP_SETTABLE - the capability was returned and can be set
2671  *	FC_UNBOUND - the fca_handle specified is not bound.
2672  *
2673  * Context:
2674  *	Kernel context.
2675  */
static int
ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
{
	ql_adapter_state_t	*ha;
	int			rval;
	uint32_t		*rptr = (uint32_t *)ptr;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		/* Copy out the 8-byte world wide node name. */
		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
		    ptr, 8);
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		bcopy((void *)&ha->loginparams, ptr,
		    sizeof (la_els_logi_t));
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		/* Maximum number of unsolicited buffers supported. */
		*rptr = (uint32_t)QL_UB_LIMIT;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {

		/* Non-NULL only when a Psycho bridge is found (SPARC). */
		dev_info_t	*psydip = NULL;
#ifdef __sparc
		/*
		 * Disable streaming for certain 2 chip adapters
		 * below Psycho to handle Psycho byte hole issue.
		 */
		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
			for (psydip = ddi_get_parent(ha->dip); psydip;
			    psydip = ddi_get_parent(psydip)) {
				if (strcmp(ddi_driver_name(psydip),
				    "pcipsy") == 0) {
					break;
				}
			}
		}
#endif	/* __sparc */

		if (psydip) {
			*rptr = (uint32_t)FC_NO_STREAMING;
			EL(ha, "No Streaming\n");
		} else {
			*rptr = (uint32_t)FC_ALLOW_STREAMING;
			EL(ha, "Allow Streaming\n");
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		/*
		 * Frame payload size comes from the init control block,
		 * stored as a byte pair at chip-family-specific offsets.
		 */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		*rptr = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		*rptr = FC_NO_DVMA_SPACE;
		rval = FC_CAP_FOUND;
	} else {
		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
		rval = FC_CAP_ERROR;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2756 
2757 /*
2758  * ql_set_cap
2759  *	Allow the FC Transport to set FCA capabilities if possible.
2760  *
2761  * Input:
2762  *	fca_handle = handle setup by ql_bind_port().
2763  *	cap = pointer to the capabilities string.
2764  *	ptr = buffer pointer for capability.
2765  *
2766  * Returns:
2767  *	FC_CAP_ERROR - no such capability
2768  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2769  *	FC_CAP_SETTABLE - the capability was successfully set.
2770  *	FC_UNBOUND - the fca_handle specified is not bound.
2771  *
2772  * Context:
2773  *	Kernel context.
2774  */
2775 /* ARGSUSED */
2776 static int
2777 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2778 {
2779 	ql_adapter_state_t	*ha;
2780 	int			rval;
2781 
2782 	ha = ql_fca_handle_to_state(fca_handle);
2783 	if (ha == NULL) {
2784 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2785 		    (void *)fca_handle);
2786 		return (FC_UNBOUND);
2787 	}
2788 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2789 
2790 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2791 		rval = FC_CAP_FOUND;
2792 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2793 		rval = FC_CAP_FOUND;
2794 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2795 		rval = FC_CAP_FOUND;
2796 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2797 		rval = FC_CAP_FOUND;
2798 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2799 		rval = FC_CAP_FOUND;
2800 	} else {
2801 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2802 		rval = FC_CAP_ERROR;
2803 	}
2804 
2805 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2806 
2807 	return (rval);
2808 }
2809 
2810 /*
2811  * ql_getmap
2812  *	Request of Arbitrated Loop (AL-PA) map.
2813  *
2814  * Input:
2815  *	fca_handle = handle setup by ql_bind_port().
2816  *	mapbuf= buffer pointer for map.
2817  *
2818  * Returns:
2819  *	FC_OLDPORT - the specified port is not operating in loop mode.
2820  *	FC_OFFLINE - the specified port is not online.
2821  *	FC_NOMAP - there is no loop map available for this port.
2822  *	FC_UNBOUND - the fca_handle specified is not bound.
2823  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2824  *
2825  * Context:
2826  *	Kernel context.
2827  */
2828 static int
2829 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
2830 {
2831 	ql_adapter_state_t	*ha;
2832 	clock_t			timer;
2833 	int			rval = FC_SUCCESS;
2834 
2835 	ha = ql_fca_handle_to_state(fca_handle);
2836 	if (ha == NULL) {
2837 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2838 		    (void *)fca_handle);
2839 		return (FC_UNBOUND);
2840 	}
2841 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2842 
2843 	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
2844 	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
2845 
2846 	/* Wait for suspension to end. */
2847 	TASK_DAEMON_LOCK(ha);
2848 	while (ha->task_daemon_flags & QL_SUSPENDED) {
2849 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2850 
2851 		/* 30 seconds from now */
2852 		timer = ddi_get_lbolt();
2853 		timer += drv_usectohz(30000000);
2854 
2855 		if (cv_timedwait(&ha->pha->cv_dr_suspended,
2856 		    &ha->pha->task_daemon_mutex, timer) == -1) {
2857 			/*
2858 			 * The timeout time 'timer' was
2859 			 * reached without the condition
2860 			 * being signaled.
2861 			 */
2862 
2863 			/* Release task daemon lock. */
2864 			TASK_DAEMON_UNLOCK(ha);
2865 
2866 			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
2867 			return (FC_TRAN_BUSY);
2868 		}
2869 	}
2870 	/* Release task daemon lock. */
2871 	TASK_DAEMON_UNLOCK(ha);
2872 
2873 	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
2874 	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
2875 		/*
2876 		 * Now, since transport drivers cosider this as an
2877 		 * offline condition, let's wait for few seconds
2878 		 * for any loop transitions before we reset the.
2879 		 * chip and restart all over again.
2880 		 */
2881 		ql_delay(ha, 2000000);
2882 		EL(ha, "failed, FC_NOMAP\n");
2883 		rval = FC_NOMAP;
2884 	} else {
2885 		/*EMPTY*/
2886 		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
2887 		    "data %xh %xh %xh %xh\n", ha->instance,
2888 		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
2889 		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
2890 		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
2891 	}
2892 
2893 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2894 #if 0
2895 	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
2896 #endif
2897 	return (rval);
2898 }
2899 
2900 /*
2901  * ql_transport
2902  *	Issue an I/O request. Handles all regular requests.
2903  *
2904  * Input:
2905  *	fca_handle = handle setup by ql_bind_port().
2906  *	pkt = pointer to fc_packet.
2907  *
2908  * Returns:
2909  *	FC_SUCCESS - the packet was accepted for transport.
2910  *	FC_TRANSPORT_ERROR - a transport error occurred.
2911  *	FC_BADPACKET - the packet to be transported had not been
2912  *			initialized by this FCA.
2913  *	FC_UNBOUND - the fca_handle specified is not bound.
2914  *
2915  * Context:
2916  *	Kernel context.
2917  */
2918 static int
2919 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
2920 {
2921 	ql_adapter_state_t	*ha;
2922 	int			rval = FC_TRANSPORT_ERROR;
2923 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2924 
2925 	/* Verify proper command. */
2926 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2927 	if (ha == NULL) {
2928 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2929 		    rval, fca_handle);
2930 		return (rval);
2931 	}
2932 	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
2933 #if 0
2934 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2935 	    sizeof (fc_frame_hdr_t) / 4);
2936 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2937 	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
2938 #endif
2939 
2940 	/* Reset SRB flags. */
2941 	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
2942 	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
2943 	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
2944 	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
2945 	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
2946 	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
2947 	    SRB_MS_PKT | SRB_ELS_PKT);
2948 
2949 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2950 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
2951 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2952 	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
2953 	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
2954 
2955 	switch (pkt->pkt_cmd_fhdr.r_ctl) {
2956 	case R_CTL_COMMAND:
2957 		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2958 			sp->flags |= SRB_FCP_CMD_PKT;
2959 			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
2960 		}
2961 		break;
2962 
2963 	default:
2964 		/* Setup response header and buffer. */
2965 		if (pkt->pkt_rsplen) {
2966 			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2967 		}
2968 
2969 		switch (pkt->pkt_cmd_fhdr.r_ctl) {
2970 		case R_CTL_UNSOL_DATA:
2971 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
2972 				sp->flags |= SRB_IP_PKT;
2973 				rval = ql_fcp_ip_cmd(ha, pkt, sp);
2974 			}
2975 			break;
2976 
2977 		case R_CTL_UNSOL_CONTROL:
2978 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
2979 				sp->flags |= SRB_GENERIC_SERVICES_PKT;
2980 				rval = ql_fc_services(ha, pkt);
2981 			}
2982 			break;
2983 
2984 		case R_CTL_SOLICITED_DATA:
2985 		case R_CTL_STATUS:
2986 		default:
2987 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
2988 			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2989 			rval = FC_TRANSPORT_ERROR;
2990 			EL(ha, "unknown, r_ctl=%xh\n",
2991 			    pkt->pkt_cmd_fhdr.r_ctl);
2992 			break;
2993 		}
2994 	}
2995 
2996 	if (rval != FC_SUCCESS) {
2997 		EL(ha, "failed, rval = %xh\n", rval);
2998 	} else {
2999 		/*EMPTY*/
3000 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3001 	}
3002 
3003 	return (rval);
3004 }
3005 
3006 /*
3007  * ql_ub_alloc
3008  *	Allocate buffers for unsolicited exchanges.
3009  *
3010  * Input:
3011  *	fca_handle = handle setup by ql_bind_port().
3012  *	tokens = token array for each buffer.
3013  *	size = size of each buffer.
3014  *	count = pointer to number of buffers.
3015  *	type = the FC-4 type the buffers are reserved for.
3016  *		1 = Extended Link Services, 5 = LLC/SNAP
3017  *
3018  * Returns:
3019  *	FC_FAILURE - buffers could not be allocated.
3020  *	FC_TOOMANY - the FCA could not allocate the requested
3021  *			number of buffers.
3022  *	FC_SUCCESS - unsolicited buffers were allocated.
3023  *	FC_UNBOUND - the fca_handle specified is not bound.
3024  *
3025  * Context:
3026  *	Kernel context.
3027  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse the request unless the adapter is at full power (D0). */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check the count; the per-adapter pool is capped at QL_UB_LIMIT. */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/* Allocate all memory needed. */
		/*
		 * NOTE(review): KM_SLEEP allocations are expected to block
		 * rather than return NULL, so the NULL checks below appear
		 * defensive only — confirm against kmem_zalloc(9F).
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				/* IP (LLC/SNAP) buffers must be DMA-able. */
				if (type == FC_TYPE_IS8802_SNAP) {
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Other types use plain kernel heap. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/* Find next available slot. */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token; the slot index IS the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* IP buffer. */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/*
				 * Program MTU, receive buffer size and buffer
				 * count into the IP init control block (24xx
				 * vs. legacy layout), then initialize IP.
				 */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					/* Grow buffer count, never shrink. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					/* Grow buffer count, never shrink. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Hand the new receive buffers to the ISP. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3249 
3250 /*
3251  * ql_ub_free
3252  *	Free unsolicited buffers.
3253  *
3254  * Input:
3255  *	fca_handle = handle setup by ql_bind_port().
3256  *	count = number of buffers.
3257  *	tokens = token array for each buffer.
3258  *
3259  * Returns:
3260  *	FC_SUCCESS - the requested buffers have been freed.
3261  *	FC_UNBOUND - the fca_handle specified is not bound.
3262  *	FC_UB_BADTOKEN - an invalid token was encountered.
3263  *			 No buffers have been released.
3264  *
3265  * Context:
3266  *	Kernel context.
3267  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.
	 * Note: a bad token stops the loop; buffers freed on earlier
	 * iterations remain freed.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Wait for the buffer to come back to the FCA and for any
		 * callback/acquired state to clear.  Both locks are dropped
		 * around the delay so the current owner can make progress.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		/* Detach from the array before releasing the memory. */
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3349 
3350 /*
3351  * ql_ub_release
3352  *	Release unsolicited buffers from FC Transport
3353  *	to FCA for future use.
3354  *
3355  * Input:
3356  *	fca_handle = handle setup by ql_bind_port().
3357  *	count = number of buffers.
3358  *	tokens = token array for each buffer.
3359  *
3360  * Returns:
3361  *	FC_SUCCESS - the requested buffers have been released.
3362  *	FC_UNBOUND - the fca_handle specified is not bound.
3363  *	FC_UB_BADTOKEN - an invalid token was encountered.
3364  *		No buffers have been released.
3365  *
3366  * Context:
3367  *	Kernel context.
3368  */
3369 static int
3370 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3371 {
3372 	ql_adapter_state_t	*ha;
3373 	ql_srb_t		*sp;
3374 	uint32_t		index;
3375 	uint64_t		ub_array_index;
3376 	int			rval = FC_SUCCESS;
3377 	int			ub_ip_updated = FALSE;
3378 
3379 	/* Check handle. */
3380 	ha = ql_fca_handle_to_state(fca_handle);
3381 	if (ha == NULL) {
3382 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3383 		    (void *)fca_handle);
3384 		return (FC_UNBOUND);
3385 	}
3386 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3387 
3388 	/* Acquire adapter state lock. */
3389 	ADAPTER_STATE_LOCK(ha);
3390 	QL_UB_LOCK(ha);
3391 
3392 	/* Check all returned tokens. */
3393 	for (index = 0; index < count; index++) {
3394 		/* Check the token range. */
3395 		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3396 			EL(ha, "failed, FC_UB_BADTOKEN\n");
3397 			rval = FC_UB_BADTOKEN;
3398 			break;
3399 		}
3400 
3401 		/* Check the unsolicited buffer array. */
3402 		if (ha->ub_array[ub_array_index] == NULL) {
3403 			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3404 			rval = FC_UB_BADTOKEN;
3405 			break;
3406 		}
3407 
3408 		/* Check the state of the unsolicited buffer. */
3409 		sp = ha->ub_array[ub_array_index]->ub_fca_private;
3410 		if (sp->flags & SRB_UB_IN_FCA) {
3411 			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3412 			rval = FC_UB_BADTOKEN;
3413 			break;
3414 		}
3415 	}
3416 
3417 	/* If all tokens checkout, release the buffers. */
3418 	if (rval == FC_SUCCESS) {
3419 		/* Check all returned tokens. */
3420 		for (index = 0; index < count; index++) {
3421 			fc_unsol_buf_t	*ubp;
3422 
3423 			ub_array_index = tokens[index];
3424 			ubp = ha->ub_array[ub_array_index];
3425 			sp = ubp->ub_fca_private;
3426 
3427 			ubp->ub_resp_flags = 0;
3428 			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3429 			sp->flags |= SRB_UB_IN_FCA;
3430 
3431 			/* IP buffer. */
3432 			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3433 				ub_ip_updated = TRUE;
3434 			}
3435 		}
3436 	}
3437 
3438 	QL_UB_UNLOCK(ha);
3439 	/* Release adapter state lock. */
3440 	ADAPTER_STATE_UNLOCK(ha);
3441 
3442 	/*
3443 	 * XXX: We should call ql_isp_rcvbuf() to return a
3444 	 * buffer to ISP only if the number of buffers fall below
3445 	 * the low water mark.
3446 	 */
3447 	if (ub_ip_updated) {
3448 		ql_isp_rcvbuf(ha);
3449 	}
3450 
3451 	if (rval != FC_SUCCESS) {
3452 		EL(ha, "failed, rval = %xh\n", rval);
3453 	} else {
3454 		/*EMPTY*/
3455 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3456 	}
3457 	return (rval);
3458 }
3459 
3460 /*
3461  * ql_abort
3462  *	Abort a packet.
3463  *
3464  * Input:
3465  *	fca_handle = handle setup by ql_bind_port().
3466  *	pkt = pointer to fc_packet.
3467  *	flags = KM_SLEEP flag.
3468  *
3469  * Returns:
3470  *	FC_SUCCESS - the packet has successfully aborted.
3471  *	FC_ABORTED - the packet has successfully aborted.
3472  *	FC_ABORTING - the packet is being aborted.
3473  *	FC_ABORT_FAILED - the packet could not be aborted.
3474  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3475  *		to abort the packet.
3476  *	FC_BADEXCHANGE - no packet found.
3477  *	FC_UNBOUND - the fca_handle specified is not bound.
3478  *
3479  * Context:
3480  *	Kernel context.
3481  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* Unknown target or the loop is down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/*
	 * If command not already started: it is still on the pending or
	 * device queue, so it can be pulled off and completed as aborted
	 * without involving the firmware.
	 */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue, complete it as aborted here. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Command already finished by the ISP; too late to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command has been handed to the ISP but not completed:
		 * invalidate its request ring entry if it is still there,
		 * then ask the firmware to abort it.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3627 
3628 /*
3629  * ql_reset
3630  *	Reset link or hardware.
3631  *
3632  * Input:
3633  *	fca_handle = handle setup by ql_bind_port().
3634  *	cmd = reset type command.
3635  *
3636  * Returns:
3637  *	FC_SUCCESS - reset has successfully finished.
3638  *	FC_UNBOUND - the fca_handle specified is not bound.
3639  *	FC_FAILURE - reset failed.
3640  *
3641  * Context:
3642  *	Kernel context.
3643  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only meaningful on the physical port. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only if it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/*
		 * For RESET_CORE, first dump the firmware core on the
		 * physical port, or reset the loop on a virtual port.
		 */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/*
		 * Free up all unsolicited buffers: ask the transport to
		 * release them via the state-change callback.
		 */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the port speed bits of the state. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Let the task daemon propagate the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3756 
3757 /*
3758  * ql_port_manage
3759  *	Perform port management or diagnostics.
3760  *
3761  * Input:
3762  *	fca_handle = handle setup by ql_bind_port().
3763  *	cmd = pointer to command structure.
3764  *
3765  * Returns:
3766  *	FC_SUCCESS - the request completed successfully.
3767  *	FC_FAILURE - the request did not complete successfully.
3768  *	FC_UNBOUND - the fca_handle specified is not bound.
3769  *
3770  * Context:
3771  *	Kernel context.
3772  */
3773 static int
3774 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3775 {
3776 	clock_t			timer;
3777 	uint16_t		index;
3778 	uint32_t		*bp;
3779 	port_id_t		d_id;
3780 	ql_link_t		*link;
3781 	ql_adapter_state_t	*ha, *pha;
3782 	ql_tgt_t		*tq;
3783 	dma_mem_t		buffer_xmt, buffer_rcv;
3784 	size_t			length;
3785 	uint32_t		cnt;
3786 	char			buf[80];
3787 	lbp_t			*lb;
3788 	ql_mbx_data_t		mr;
3789 	app_mbx_cmd_t		*mcp;
3790 	int			i0;
3791 	uint8_t			*bptr;
3792 	int			rval2, rval = FC_SUCCESS;
3793 	uint32_t		opcode;
3794 
3795 	ha = ql_fca_handle_to_state(fca_handle);
3796 	if (ha == NULL) {
3797 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3798 		    (void *)fca_handle);
3799 		return (FC_UNBOUND);
3800 	}
3801 	pha = ha->pha;
3802 
3803 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3804 	    cmd->pm_cmd_code);
3805 
3806 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3807 
3808 	/*
3809 	 * Wait for all outstanding commands to complete
3810 	 */
3811 	index = (uint16_t)ql_wait_outstanding(ha);
3812 
3813 	if (index != MAX_OUTSTANDING_COMMANDS) {
3814 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3815 		ql_restart_queues(ha);
3816 		EL(ha, "failed, FC_TRAN_BUSY\n");
3817 		return (FC_TRAN_BUSY);
3818 	}
3819 
3820 	switch (cmd->pm_cmd_code) {
3821 	case FC_PORT_BYPASS:
3822 		d_id.b24 = *cmd->pm_cmd_buf;
3823 		tq = ql_d_id_to_queue(ha, d_id);
3824 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3825 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3826 			rval = FC_FAILURE;
3827 		}
3828 		break;
3829 	case FC_PORT_UNBYPASS:
3830 		d_id.b24 = *cmd->pm_cmd_buf;
3831 		tq = ql_d_id_to_queue(ha, d_id);
3832 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3833 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3834 			rval = FC_FAILURE;
3835 		}
3836 		break;
3837 	case FC_PORT_GET_FW_REV:
3838 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3839 		    pha->fw_minor_version, pha->fw_subminor_version);
3840 		length = strlen(buf) + 1;
3841 		if (cmd->pm_data_len < length) {
3842 			cmd->pm_data_len = length;
3843 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3844 			rval = FC_FAILURE;
3845 		} else {
3846 			(void) strcpy(cmd->pm_data_buf, buf);
3847 		}
3848 		break;
3849 
3850 	case FC_PORT_GET_FCODE_REV: {
3851 		caddr_t		fcode_ver_buf = NULL;
3852 
3853 		i0 = 0;
3854 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3855 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3856 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3857 		    (caddr_t)&fcode_ver_buf, &i0);
3858 		length = (uint_t)i0;
3859 
3860 		if (rval2 != DDI_PROP_SUCCESS) {
3861 			EL(ha, "failed, getting version = %xh\n", rval2);
3862 			length = 20;
3863 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3864 			if (fcode_ver_buf != NULL) {
3865 				(void) sprintf(fcode_ver_buf,
3866 				    "NO FCODE FOUND");
3867 			}
3868 		}
3869 
3870 		if (cmd->pm_data_len < length) {
3871 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3872 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3873 			cmd->pm_data_len = length;
3874 			rval = FC_FAILURE;
3875 		} else if (fcode_ver_buf != NULL) {
3876 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3877 			    length);
3878 		}
3879 
3880 		if (fcode_ver_buf != NULL) {
3881 			kmem_free(fcode_ver_buf, length);
3882 		}
3883 		break;
3884 	}
3885 
3886 	case FC_PORT_GET_DUMP:
3887 		QL_DUMP_LOCK(pha);
3888 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3889 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3890 			    "length=%lxh\n", cmd->pm_data_len);
3891 			cmd->pm_data_len = pha->risc_dump_size;
3892 			rval = FC_FAILURE;
3893 		} else if (pha->ql_dump_state & QL_DUMPING) {
3894 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3895 			rval = FC_TRAN_BUSY;
3896 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3897 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3898 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3899 		} else {
3900 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3901 			rval = FC_FAILURE;
3902 		}
3903 		QL_DUMP_UNLOCK(pha);
3904 		break;
3905 	case FC_PORT_FORCE_DUMP:
3906 		PORTMANAGE_LOCK(ha);
3907 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3908 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3909 			rval = FC_FAILURE;
3910 		}
3911 		PORTMANAGE_UNLOCK(ha);
3912 		break;
3913 	case FC_PORT_DOWNLOAD_FW:
3914 		PORTMANAGE_LOCK(ha);
3915 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3916 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3917 			    (uint32_t)cmd->pm_data_len,
3918 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
3919 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3920 				rval = FC_FAILURE;
3921 			}
3922 			ql_reset_chip(ha);
3923 			(void) ql_abort_isp(ha);
3924 		} else {
3925 			/* Save copy of the firmware. */
3926 			if (pha->risc_code != NULL) {
3927 				kmem_free(pha->risc_code, pha->risc_code_size);
3928 				pha->risc_code = NULL;
3929 				pha->risc_code_size = 0;
3930 			}
3931 
3932 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3933 			    KM_SLEEP);
3934 			if (pha->risc_code != NULL) {
3935 				pha->risc_code_size =
3936 				    (uint32_t)cmd->pm_data_len;
3937 				bcopy(cmd->pm_data_buf, pha->risc_code,
3938 				    cmd->pm_data_len);
3939 
3940 				/* Do abort to force reload. */
3941 				ql_reset_chip(ha);
3942 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3943 					kmem_free(pha->risc_code,
3944 					    pha->risc_code_size);
3945 					pha->risc_code = NULL;
3946 					pha->risc_code_size = 0;
3947 					ql_reset_chip(ha);
3948 					(void) ql_abort_isp(ha);
3949 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3950 					    " FC_FAILURE\n");
3951 					rval = FC_FAILURE;
3952 				}
3953 			}
3954 		}
3955 		PORTMANAGE_UNLOCK(ha);
3956 		break;
3957 	case FC_PORT_GET_DUMP_SIZE:
3958 		bp = (uint32_t *)cmd->pm_data_buf;
3959 		*bp = pha->risc_dump_size;
3960 		break;
3961 	case FC_PORT_DIAG:
3962 		/*
3963 		 * Prevents concurrent diags
3964 		 */
3965 		PORTMANAGE_LOCK(ha);
3966 
3967 		/* Wait for suspension to end. */
3968 		for (timer = 0; timer < 3000 &&
3969 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3970 			ql_delay(ha, 10000);
3971 		}
3972 
3973 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3974 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3975 			rval = FC_TRAN_BUSY;
3976 			PORTMANAGE_UNLOCK(ha);
3977 			break;
3978 		}
3979 
3980 		switch (cmd->pm_cmd_flags) {
3981 		case QL_DIAG_EXEFMW:
3982 			if (ql_start_firmware(ha) != QL_SUCCESS) {
3983 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
3984 				rval = FC_FAILURE;
3985 			}
3986 			break;
3987 		case QL_DIAG_CHKCMDQUE:
3988 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
3989 			    i0++) {
3990 				cnt += (pha->outstanding_cmds[i0] != NULL);
3991 			}
3992 			if (cnt != 0) {
3993 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
3994 				    "FC_FAILURE\n");
3995 				rval = FC_FAILURE;
3996 			}
3997 			break;
3998 		case QL_DIAG_FMWCHKSUM:
3999 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4000 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4001 				    "FC_FAILURE\n");
4002 				rval = FC_FAILURE;
4003 			}
4004 			break;
4005 		case QL_DIAG_SLFTST:
4006 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4007 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4008 				rval = FC_FAILURE;
4009 			}
4010 			ql_reset_chip(ha);
4011 			(void) ql_abort_isp(ha);
4012 			break;
4013 		case QL_DIAG_REVLVL:
4014 			if (cmd->pm_stat_len <
4015 			    sizeof (ql_adapter_revlvl_t)) {
4016 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4017 				    "slen=%lxh, rlvllen=%lxh\n",
4018 				    cmd->pm_stat_len,
4019 				    sizeof (ql_adapter_revlvl_t));
4020 				rval = FC_NOMEM;
4021 			} else {
4022 				bcopy((void *)&(pha->adapter_stats->revlvl),
4023 				    cmd->pm_stat_buf,
4024 				    (size_t)cmd->pm_stat_len);
4025 				cmd->pm_stat_len =
4026 				    sizeof (ql_adapter_revlvl_t);
4027 			}
4028 			break;
4029 		case QL_DIAG_LPBMBX:
4030 
4031 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4032 				EL(ha, "failed, QL_DIAG_LPBMBX "
4033 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4034 				    "reqd=%lxh\n", cmd->pm_data_len,
4035 				    sizeof (struct app_mbx_cmd));
4036 				rval = FC_INVALID_REQUEST;
4037 				break;
4038 			}
4039 			/*
4040 			 * Don't do the wrap test on a 2200 when the
4041 			 * firmware is running.
4042 			 */
4043 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4044 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4045 				mr.mb[1] = mcp->mb[1];
4046 				mr.mb[2] = mcp->mb[2];
4047 				mr.mb[3] = mcp->mb[3];
4048 				mr.mb[4] = mcp->mb[4];
4049 				mr.mb[5] = mcp->mb[5];
4050 				mr.mb[6] = mcp->mb[6];
4051 				mr.mb[7] = mcp->mb[7];
4052 
4053 				bcopy(&mr.mb[0], &mr.mb[10],
4054 				    sizeof (uint16_t) * 8);
4055 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4056 					EL(ha, "failed, QL_DIAG_LPBMBX "
4057 					    "FC_FAILURE\n");
4058 					rval = FC_FAILURE;
4059 					break;
4060 				}
4061 				if (mr.mb[i0] != mr.mb[i0 + 10]) {
4062 					EL(ha, "failed, QL_DIAG_LPBMBX "
4063 					    "FC_FAILURE-2\n");
4064 
4065 					(void) ql_flash_errlog(ha,
4066 					    FLASH_ERRLOG_ISP_ERR, 0,
4067 					    RD16_IO_REG(ha, hccr),
4068 					    RD16_IO_REG(ha, istatus));
4069 
4070 					rval = FC_FAILURE;
4071 					break;
4072 				}
4073 			}
4074 			(void) ql_abort_isp(ha);
4075 			break;
4076 		case QL_DIAG_LPBDTA:
4077 			/*
4078 			 * For loopback data, we receive the
4079 			 * data back in pm_stat_buf. This provides
4080 			 * the user an opportunity to compare the
4081 			 * transmitted and received data.
4082 			 *
4083 			 * NB: lb->options are:
4084 			 *	0 --> Ten bit loopback
4085 			 *	1 --> One bit loopback
4086 			 *	2 --> External loopback
4087 			 */
4088 			if (cmd->pm_data_len > 65536) {
4089 				rval = FC_TOOMANY;
4090 				EL(ha, "failed, QL_DIAG_LPBDTA "
4091 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4092 				break;
4093 			}
4094 			if (ql_get_dma_mem(ha, &buffer_xmt,
4095 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4096 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4097 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4098 				rval = FC_NOMEM;
4099 				break;
4100 			}
4101 			if (ql_get_dma_mem(ha, &buffer_rcv,
4102 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4103 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4104 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4105 				rval = FC_NOMEM;
4106 				break;
4107 			}
4108 			ddi_rep_put8(buffer_xmt.acc_handle,
4109 			    (uint8_t *)cmd->pm_data_buf,
4110 			    (uint8_t *)buffer_xmt.bp,
4111 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4112 
4113 			/* 22xx's adapter must be in loop mode for test. */
4114 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4115 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4116 				if (ha->flags & POINT_TO_POINT ||
4117 				    (ha->task_daemon_flags & LOOP_DOWN &&
4118 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4119 					cnt = *bptr;
4120 					*bptr = (uint8_t)
4121 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4122 					(void) ql_abort_isp(ha);
4123 					*bptr = (uint8_t)cnt;
4124 				}
4125 			}
4126 
4127 			/* Shutdown IP. */
4128 			if (pha->flags & IP_INITIALIZED) {
4129 				(void) ql_shutdown_ip(pha);
4130 			}
4131 
4132 			lb = (lbp_t *)cmd->pm_cmd_buf;
4133 			lb->transfer_count =
4134 			    (uint32_t)cmd->pm_data_len;
4135 			lb->transfer_segment_count = 0;
4136 			lb->receive_segment_count = 0;
4137 			lb->transfer_data_address =
4138 			    buffer_xmt.cookie.dmac_address;
4139 			lb->receive_data_address =
4140 			    buffer_rcv.cookie.dmac_address;
4141 
4142 			if ((lb->options & 7) == 2 &&
4143 			    pha->task_daemon_flags &
4144 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4145 				/* Loop must be up for external */
4146 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4147 				rval = FC_TRAN_BUSY;
4148 			} else if (ql_loop_back(ha, 0, lb,
4149 			    buffer_xmt.cookie.dmac_notused,
4150 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4151 				bzero((void *)cmd->pm_stat_buf,
4152 				    cmd->pm_stat_len);
4153 				ddi_rep_get8(buffer_rcv.acc_handle,
4154 				    (uint8_t *)cmd->pm_stat_buf,
4155 				    (uint8_t *)buffer_rcv.bp,
4156 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4157 			} else {
4158 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4159 				rval = FC_FAILURE;
4160 			}
4161 
4162 			ql_free_phys(ha, &buffer_xmt);
4163 			ql_free_phys(ha, &buffer_rcv);
4164 
4165 			/* Needed to recover the f/w */
4166 			(void) ql_abort_isp(ha);
4167 
4168 			/* Restart IP if it was shutdown. */
4169 			if (pha->flags & IP_ENABLED &&
4170 			    !(pha->flags & IP_INITIALIZED)) {
4171 				(void) ql_initialize_ip(pha);
4172 				ql_isp_rcvbuf(pha);
4173 			}
4174 
4175 			break;
4176 		case QL_DIAG_ECHO: {
4177 			/*
4178 			 * issue an echo command with a user supplied
4179 			 * data pattern and destination address
4180 			 */
4181 			echo_t		echo;		/* temp echo struct */
4182 
4183 			/* Setup echo cmd & adjust for platform */
4184 			opcode = QL_ECHO_CMD;
4185 			BIG_ENDIAN_32(&opcode);
4186 
4187 			/*
4188 			 * due to limitations in the ql
			 * firmware the echo data field is
4190 			 * limited to 220
4191 			 */
4192 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4193 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4194 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4195 				    "cmdl1=%lxh, statl2=%lxh\n",
4196 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4197 				rval = FC_TOOMANY;
4198 				break;
4199 			}
4200 
4201 			/*
4202 			 * the input data buffer has the user
4203 			 * supplied data pattern.  The "echoed"
4204 			 * data will be DMAed into the output
4205 			 * data buffer.  Therefore the length
4206 			 * of the output buffer must be equal
4207 			 * to or greater then the input buffer
4208 			 * length
4209 			 */
4210 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4211 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4212 				    " cmdl1=%lxh, statl2=%lxh\n",
4213 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4214 				rval = FC_TOOMANY;
4215 				break;
4216 			}
4217 			/* add four bytes for the opcode */
4218 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4219 
4220 			/*
4221 			 * are we 32 or 64 bit addressed???
4222 			 * We need to get the appropriate
4223 			 * DMA and set the command options;
4224 			 * 64 bit (bit 6) or 32 bit
4225 			 * (no bit 6) addressing.
4226 			 * while we are at it lets ask for
4227 			 * real echo (bit 15)
4228 			 */
4229 			echo.options = BIT_15;
4230 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4231 			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
4232 				echo.options = (uint16_t)
4233 				    (echo.options | BIT_6);
4234 			}
4235 
4236 			/*
4237 			 * Set up the DMA mappings for the
4238 			 * output and input data buffers.
4239 			 * First the output buffer
4240 			 */
4241 			if (ql_get_dma_mem(ha, &buffer_xmt,
4242 			    (uint32_t)(cmd->pm_data_len + 4),
4243 			    LITTLE_ENDIAN_DMA,
4244 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4245 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4246 				rval = FC_NOMEM;
4247 				break;
4248 			}
4249 			echo.transfer_data_address = buffer_xmt.cookie;
4250 
4251 			/* Next the input buffer */
4252 			if (ql_get_dma_mem(ha, &buffer_rcv,
4253 			    (uint32_t)(cmd->pm_data_len + 4),
4254 			    LITTLE_ENDIAN_DMA,
4255 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4256 				/*
4257 				 * since we could not allocate
4258 				 * DMA space for the input
4259 				 * buffer we need to clean up
4260 				 * by freeing the DMA space
4261 				 * we allocated for the output
4262 				 * buffer
4263 				 */
4264 				ql_free_phys(ha, &buffer_xmt);
4265 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4266 				rval = FC_NOMEM;
4267 				break;
4268 			}
4269 			echo.receive_data_address = buffer_rcv.cookie;
4270 
4271 			/*
4272 			 * copy the 4 byte ECHO op code to the
4273 			 * allocated DMA space
4274 			 */
4275 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4276 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4277 
4278 			/*
4279 			 * copy the user supplied data to the
4280 			 * allocated DMA space
4281 			 */
4282 			ddi_rep_put8(buffer_xmt.acc_handle,
4283 			    (uint8_t *)cmd->pm_cmd_buf,
4284 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4285 			    DDI_DEV_AUTOINCR);
4286 
4287 			/* Shutdown IP. */
4288 			if (pha->flags & IP_INITIALIZED) {
4289 				(void) ql_shutdown_ip(pha);
4290 			}
4291 
4292 			/* send the echo */
4293 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4294 				ddi_rep_put8(buffer_rcv.acc_handle,
4295 				    (uint8_t *)buffer_rcv.bp + 4,
4296 				    (uint8_t *)cmd->pm_stat_buf,
4297 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4298 			} else {
4299 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4300 				rval = FC_FAILURE;
4301 			}
4302 
4303 			/* Restart IP if it was shutdown. */
4304 			if (pha->flags & IP_ENABLED &&
4305 			    !(pha->flags & IP_INITIALIZED)) {
4306 				(void) ql_initialize_ip(pha);
4307 				ql_isp_rcvbuf(pha);
4308 			}
4309 			/* free up our DMA buffers */
4310 			ql_free_phys(ha, &buffer_xmt);
4311 			ql_free_phys(ha, &buffer_rcv);
4312 			break;
4313 		}
4314 		default:
4315 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4316 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4317 			rval = FC_INVALID_REQUEST;
4318 			break;
4319 		}
4320 		PORTMANAGE_UNLOCK(ha);
4321 		break;
4322 	case FC_PORT_LINK_STATE:
4323 		/* Check for name equal to null. */
4324 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4325 		    index++) {
4326 			if (cmd->pm_cmd_buf[index] != 0) {
4327 				break;
4328 			}
4329 		}
4330 
4331 		/* If name not null. */
4332 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4333 			/* Locate device queue. */
4334 			tq = NULL;
4335 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4336 			    tq == NULL; index++) {
4337 				for (link = ha->dev[index].first; link != NULL;
4338 				    link = link->next) {
4339 					tq = link->base_address;
4340 
4341 					if (bcmp((void *)&tq->port_name[0],
4342 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4343 						break;
4344 					} else {
4345 						tq = NULL;
4346 					}
4347 				}
4348 			}
4349 
4350 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4351 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4352 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4353 			} else {
4354 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4355 				    FC_STATE_OFFLINE;
4356 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4357 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4358 			}
4359 		} else {
4360 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4361 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4362 		}
4363 		break;
4364 	case FC_PORT_INITIALIZE:
4365 		if (cmd->pm_cmd_len >= 8) {
4366 			tq = NULL;
4367 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4368 			    tq == NULL; index++) {
4369 				for (link = ha->dev[index].first; link != NULL;
4370 				    link = link->next) {
4371 					tq = link->base_address;
4372 
4373 					if (bcmp((void *)&tq->port_name[0],
4374 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4375 						if (!VALID_DEVICE_ID(ha,
4376 						    tq->loop_id)) {
4377 							tq = NULL;
4378 						}
4379 						break;
4380 					} else {
4381 						tq = NULL;
4382 					}
4383 				}
4384 			}
4385 
4386 			if (tq == NULL || ql_target_reset(ha, tq,
4387 			    ha->loop_reset_delay) != QL_SUCCESS) {
4388 				EL(ha, "failed, FC_PORT_INITIALIZE "
4389 				    "FC_FAILURE\n");
4390 				rval = FC_FAILURE;
4391 			}
4392 		} else {
4393 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4394 			    "clen=%lxh\n", cmd->pm_cmd_len);
4395 
4396 			rval = FC_FAILURE;
4397 		}
4398 		break;
4399 	case FC_PORT_RLS:
4400 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4401 			EL(ha, "failed, buffer size passed: %lxh, "
4402 			    "req: %lxh\n", cmd->pm_data_len,
4403 			    (sizeof (fc_rls_acc_t)));
4404 			rval = FC_FAILURE;
4405 		} else if (LOOP_NOT_READY(pha)) {
4406 			EL(ha, "loop NOT ready\n");
4407 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4408 		} else if (ql_get_link_status(ha, ha->loop_id,
4409 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4410 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4411 			rval = FC_FAILURE;
4412 #ifdef _BIG_ENDIAN
4413 		} else {
4414 			fc_rls_acc_t		*rls;
4415 
4416 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4417 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4418 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4419 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4420 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4421 #endif /* _BIG_ENDIAN */
4422 		}
4423 		break;
4424 	case FC_PORT_GET_NODE_ID:
4425 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4426 		    cmd->pm_data_buf) != QL_SUCCESS) {
4427 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4428 			rval = FC_FAILURE;
4429 		}
4430 		break;
4431 	case FC_PORT_SET_NODE_ID:
4432 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4433 		    cmd->pm_data_buf) != QL_SUCCESS) {
4434 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4435 			rval = FC_FAILURE;
4436 		}
4437 		break;
4438 	case FC_PORT_DOWNLOAD_FCODE:
4439 		PORTMANAGE_LOCK(ha);
4440 		if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
4441 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4442 			    (uint32_t)cmd->pm_data_len);
4443 		} else {
4444 			if (cmd->pm_data_buf[0] == 4 &&
4445 			    cmd->pm_data_buf[8] == 0 &&
4446 			    cmd->pm_data_buf[9] == 0x10 &&
4447 			    cmd->pm_data_buf[10] == 0 &&
4448 			    cmd->pm_data_buf[11] == 0) {
4449 				rval = ql_24xx_load_flash(ha,
4450 				    (uint8_t *)cmd->pm_data_buf,
4451 				    (uint32_t)cmd->pm_data_len,
4452 				    ha->flash_fw_addr << 2);
4453 			} else {
4454 				rval = ql_24xx_load_flash(ha,
4455 				    (uint8_t *)cmd->pm_data_buf,
4456 				    (uint32_t)cmd->pm_data_len, 0);
4457 			}
4458 		}
4459 
4460 		if (rval != QL_SUCCESS) {
4461 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4462 			rval = FC_FAILURE;
4463 		} else {
4464 			rval = FC_SUCCESS;
4465 		}
4466 		ql_reset_chip(ha);
4467 		(void) ql_abort_isp(ha);
4468 		PORTMANAGE_UNLOCK(ha);
4469 		break;
4470 	default:
4471 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4472 		rval = FC_BADCMD;
4473 		break;
4474 	}
4475 
4476 	/* Wait for suspension to end. */
4477 	ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4478 	timer = 0;
4479 
4480 	while (timer++ < 3000 &&
4481 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4482 		ql_delay(ha, 10000);
4483 	}
4484 
4485 	ql_restart_queues(ha);
4486 
4487 	if (rval != FC_SUCCESS) {
4488 		EL(ha, "failed, rval = %xh\n", rval);
4489 	} else {
4490 		/*EMPTY*/
4491 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4492 	}
4493 
4494 	return (rval);
4495 }
4496 
4497 static opaque_t
4498 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4499 {
4500 	port_id_t		id;
4501 	ql_adapter_state_t	*ha;
4502 	ql_tgt_t		*tq;
4503 
4504 	id.r.rsvd_1 = 0;
4505 	id.b24 = d_id.port_id;
4506 
4507 	ha = ql_fca_handle_to_state(fca_handle);
4508 	if (ha == NULL) {
4509 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4510 		    (void *)fca_handle);
4511 		return (NULL);
4512 	}
4513 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4514 
4515 	tq = ql_d_id_to_queue(ha, id);
4516 
4517 	if (tq == NULL) {
4518 		EL(ha, "failed, tq=NULL\n");
4519 	} else {
4520 		/*EMPTY*/
4521 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4522 	}
4523 	return (tq);
4524 }
4525 
4526 /* ************************************************************************ */
4527 /*			FCA Driver Local Support Functions.		    */
4528 /* ************************************************************************ */
4529 
4530 /*
4531  * ql_cmd_setup
4532  *	Verifies proper command.
4533  *
4534  * Input:
4535  *	fca_handle = handle setup by ql_bind_port().
4536  *	pkt = pointer to fc_packet.
4537  *	rval = pointer for return value.
4538  *
4539  * Returns:
4540  *	Adapter state pointer, NULL = failure.
4541  *
4542  * Context:
4543  *	Kernel context.
4544  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* No residuals until the transport reports otherwise. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * In panic or crash-dump context, bypass all the online/loop/DMA
	 * gating below and let the command proceed unconditionally.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Adapter must be online to accept a command. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		/*
		 * FCP command: resolve the target queue from the frame's
		 * destination ID when the cached pkt_fca_device entry is
		 * missing or holds a stale loop ID, and re-cache it.
		 */
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			/*
			 * Push back while the target is mid-RSCN or needs
			 * re-authentication; caller should retry later.
			 */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 * Each of the three buffers (cmd, resp, data) is validated the same
	 * way: fault-clear the DMA handle, then check the DMA handle and the
	 * access handle; *rval carries the first failure forward.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * The bitwise '&' of the three boolean subexpressions is deliberate:
	 * it collapses them into a single branch instead of three.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* SRB must carry this FCA's brand; reject foreign packets. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4673 
4674 /*
4675  * ql_els_plogi
 *	Issue an extended link service port login request.
4677  *
4678  * Input:
4679  *	ha = adapter state pointer.
4680  *	pkt = pointer to fc_packet.
4681  *
4682  * Returns:
4683  *	FC_SUCCESS - the packet was accepted for transport.
4684  *	FC_TRANSPORT_ERROR - a transport error occurred.
4685  *
4686  * Context:
4687  *	Kernel context.
4688  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;	/* ACC/RJT payload built locally */
	class_svc_param_t	*class3_param;
	int			ret;	/* QL_* status from firmware calls */
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* Reject immediately if the task daemon says we are offline. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology the port sends a PLOGI after determining
		 * it has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/* ql_p2p_plogi queued the IOCB itself; nothing more to do here. */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* Loop ID collision: retry the login once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database (ADISC) for the new login. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		/*
		 * Common service parameter values below are fixed FC-PH
		 * fields; rx_bufsize comes from the init control block's
		 * configured max frame length (24xx vs. 2xxx layout).
		 */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		CFG_IST(ha, CFG_CTRL_242581) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Report the remote port's WWPN/WWNN in the ACC. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Class 3 service parameters come from the port database. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Target is busy; convert the ACC into a RJT. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				/* Schedule iIDMA negotiation for 24xx+. */
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware status onto fctl packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		/* Clear in-progress flags; re-arm authentication on busy. */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the packet's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4863 
4864 /*
4865  * ql_p2p_plogi
4866  *	Start an extended link service port login request using
4867  *	an ELS Passthru iocb.
4868  *
4869  * Input:
4870  *	ha = adapter state pointer.
4871  *	pkt = pointer to fc_packet.
4872  *
4873  * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
4875  *
4876  * Context:
4877  *	Kernel context.
4878  */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;	/* scratch entry for port-database queries */
	ql_tgt_t	*tq = &tmp;
	int		rval;

	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			/* A pending PLOGI entry is the one to adopt. */
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
				    /*
				     * Saved handle points at an unusable
				     * entry; step it forward through a local
				     * to avoid a lint error.
				     * NOTE(review): the net step is +2 —
				     * confirm this is intentional.
				     */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* A logged-in target entry: adopt its handle. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the command buffer to the device, then queue the IOCB. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);

	return (QL_CONSUMED);
}
4955 
4956 
4957 /*
4958  * ql_els_flogi
 *	Issue an extended link service fabric login request.
4960  *
4961  * Input:
4962  *	ha = adapter state pointer.
4963  *	pkt = pointer to fc_packet.
4964  *
4965  * Returns:
4966  *	FC_SUCCESS - the packet was accepted for transport.
4967  *	FC_TRANSPORT_ERROR - a transport error occurred.
4968  *
4969  * Context:
4970  *	Kernel context.
4971  */
4972 static int
4973 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4974 {
4975 	ql_tgt_t		*tq = NULL;
4976 	port_id_t		d_id;
4977 	la_els_logi_t		acc;
4978 	class_svc_param_t	*class3_param;
4979 	int			rval = FC_SUCCESS;
4980 	int			accept = 0;
4981 
4982 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4983 	    pkt->pkt_cmd_fhdr.d_id);
4984 
4985 	bzero(&acc, sizeof (acc));
4986 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4987 
4988 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
4989 		/*
4990 		 * d_id of zero in a FLOGI accept response in a point to point
4991 		 * topology triggers evaluation of N Port login initiative.
4992 		 */
4993 		pkt->pkt_resp_fhdr.d_id = 0;
4994 		/*
4995 		 * An N_Port already logged in with the firmware
4996 		 * will have the only database entry.
4997 		 */
4998 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
4999 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5000 		}
5001 
5002 		if (tq != NULL) {
5003 			/*
5004 			 * If the target port has initiative send
5005 			 * up a PLOGI about the new device.
5006 			 */
5007 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5008 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5009 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5010 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5011 				ha->send_plogi_timer = 3;
5012 			} else {
5013 				ha->send_plogi_timer = 0;
5014 			}
5015 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5016 		} else {
5017 			/*
5018 			 * An N_Port not logged in with the firmware will not
5019 			 * have a database entry.  We accept anyway and rely
5020 			 * on a PLOGI from the upper layers to set the d_id
5021 			 * and s_id.
5022 			 */
5023 			accept = 1;
5024 		}
5025 	} else {
5026 		tq = ql_d_id_to_queue(ha, d_id);
5027 	}
5028 	if ((tq != NULL) || (accept != NULL)) {
5029 		/* Build ACC. */
5030 		pkt->pkt_state = FC_PKT_SUCCESS;
5031 		class3_param = (class_svc_param_t *)&acc.class_3;
5032 
5033 		acc.ls_code.ls_code = LA_ELS_ACC;
5034 		acc.common_service.fcph_version = 0x2006;
5035 		if (ha->topology & QL_N_PORT) {
5036 			/* clear F_Port indicator */
5037 			acc.common_service.cmn_features = 0x0800;
5038 		} else {
5039 			acc.common_service.cmn_features = 0x1b00;
5040 		}
5041 		CFG_IST(ha, CFG_CTRL_242581) ?
5042 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5043 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5044 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5045 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5046 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5047 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5048 		acc.common_service.conc_sequences = 0xff;
5049 		acc.common_service.relative_offset = 0x03;
5050 		acc.common_service.e_d_tov = 0x7d0;
5051 		if (accept) {
5052 			/* Use the saved N_Port WWNN and WWPN */
5053 			if (ha->n_port != NULL) {
5054 				bcopy((void *)&ha->n_port->port_name[0],
5055 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5056 				bcopy((void *)&ha->n_port->node_name[0],
5057 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5058 				/* mark service options invalid */
5059 				class3_param->class_valid_svc_opt = 0x0800;
5060 			} else {
5061 				EL(ha, "ha->n_port is NULL\n");
5062 				/* Build RJT. */
5063 				acc.ls_code.ls_code = LA_ELS_RJT;
5064 
5065 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5066 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5067 			}
5068 		} else {
5069 			bcopy((void *)&tq->port_name[0],
5070 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5071 			bcopy((void *)&tq->node_name[0],
5072 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5073 
5074 			class3_param = (class_svc_param_t *)&acc.class_3;
5075 			class3_param->class_valid_svc_opt = 0x8800;
5076 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5077 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5078 			class3_param->conc_sequences =
5079 			    tq->class3_conc_sequences;
5080 			class3_param->open_sequences_per_exch =
5081 			    tq->class3_open_sequences_per_exch;
5082 		}
5083 	} else {
5084 		/* Build RJT. */
5085 		acc.ls_code.ls_code = LA_ELS_RJT;
5086 
5087 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5088 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5089 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5090 	}
5091 
5092 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5093 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5094 
5095 	if (rval != FC_SUCCESS) {
5096 		EL(ha, "failed, rval = %xh\n", rval);
5097 	} else {
5098 		/*EMPTY*/
5099 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5100 	}
5101 	return (rval);
5102 }
5103 
5104 /*
5105  * ql_els_logo
 *	Issue an extended link service logout request.
5107  *
5108  * Input:
5109  *	ha = adapter state pointer.
5110  *	pkt = pointer to fc_packet.
5111  *
5112  * Returns:
5113  *	FC_SUCCESS - the packet was accepted for transport.
5114  *	FC_TRANSPORT_ERROR - a transport error occurred.
5115  *
5116  * Context:
5117  *	Kernel context.
5118  */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Nothing to log out for the broadcast address. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh login before any new I/O to this port. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		do {
			/* Drop the lock while aborting and delaying. */
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Log the port out of the firmware, then build the ELS response. */
	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the response payload out through the packet's DMA access handle. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5184 
5185 /*
5186  * ql_els_prli
 *	Issue an extended link service process login request.
5188  *
5189  * Input:
5190  *	ha = adapter state pointer.
5191  *	pkt = pointer to fc_packet.
5192  *
5193  * Returns:
5194  *	FC_SUCCESS - the packet was accepted for transport.
5195  *	FC_TRANSPORT_ERROR - a transport error occurred.
5196  *
5197  * Context:
5198  *	Kernel context.
5199  */
static int
ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq;
	port_id_t		d_id;
	la_els_prli_t		acc;
	prli_svc_param_t	*param;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		/* Refresh the firmware port database entry for this target. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		if ((ha->topology & QL_N_PORT) &&
		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
			/*
			 * Point-to-point topology with PLOGI already complete:
			 * send the PRLI on the wire as an IOCB.  The packet
			 * completes asynchronously, so tell the caller it has
			 * been consumed.
			 */
			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
			rval = QL_CONSUMED;
		} else {
			/* Build ACC. */
			bzero(&acc, sizeof (acc));
			acc.ls_code = LA_ELS_ACC;
			acc.page_length = 0x10;
			acc.payload_length = tq->prli_payload_length;

			/* Service parameter page; 0x08 is the FC-4 type field. */
			param = (prli_svc_param_t *)&acc.service_params[0];
			param->type = 0x08;
			param->rsvd = 0x00;
			param->process_assoc_flags = tq->prli_svc_param_word_0;
			param->process_flags = tq->prli_svc_param_word_3;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		la_els_rjt_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* QL_CONSUMED is not an error; the IOCB path completes later. */
	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5264 
5265 /*
5266  * ql_els_prlo
 *	Issue an extended link service process logout request.
5268  *
5269  * Input:
5270  *	ha = adapter state pointer.
5271  *	pkt = pointer to fc_packet.
5272  *
5273  * Returns:
5274  *	FC_SUCCESS - the packet was accepted for transport.
5275  *	FC_TRANSPORT_ERROR - a transport error occurred.
5276  *
5277  * Context:
5278  *	Kernel context.
5279  */
5280 /* ARGSUSED */
5281 static int
5282 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5283 {
5284 	la_els_prli_t	acc;
5285 	int		rval = FC_SUCCESS;
5286 
5287 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5288 	    pkt->pkt_cmd_fhdr.d_id);
5289 
5290 	/* Build ACC. */
5291 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5292 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5293 
5294 	acc.ls_code = LA_ELS_ACC;
5295 	acc.service_params[2] = 1;
5296 
5297 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5298 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5299 
5300 	pkt->pkt_state = FC_PKT_SUCCESS;
5301 
5302 	if (rval != FC_SUCCESS) {
5303 		EL(ha, "failed, rval = %xh\n", rval);
5304 	} else {
5305 		/*EMPTY*/
5306 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5307 	}
5308 	return (rval);
5309 }
5310 
5311 /*
5312  * ql_els_adisc
 *	Issue an extended link service address discovery request.
5314  *
5315  * Input:
5316  *	ha = adapter state pointer.
5317  *	pkt = pointer to fc_packet.
5318  *
5319  * Returns:
5320  *	FC_SUCCESS - the packet was accepted for transport.
5321  *	FC_TRANSPORT_ERROR - a transport error occurred.
5322  *
5323  * Context:
5324  *	Kernel context.
5325  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Look up the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * No valid loop ID yet: fetch the firmware's device ID list and
	 * try to recover the loop ID for this destination ID.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the entry count returned by F/W. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Restart any queued commands on each LUN. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * NOTE(review): ql_next() appears to
					 * release the device queue lock, which
					 * is why it is re-acquired here --
					 * confirm against ql_next().
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		/* ADISC accept carries the port/node WWNs and N_Port ID. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5443 
5444 /*
5445  * ql_els_linit
 *	Issue an extended link service loop initialize request.
5447  *
5448  * Input:
5449  *	ha = adapter state pointer.
5450  *	pkt = pointer to fc_packet.
5451  *
5452  * Returns:
5453  *	FC_SUCCESS - the packet was accepted for transport.
5454  *	FC_TRANSPORT_ERROR - a transport error occurred.
5455  *
5456  * Context:
5457  *	Kernel context.
5458  */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/* Response buffer DMA address in little-endian byte order. */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination address and LINIT payload fields. */
		lfa.subcommand_length[0] = 4;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* LFA subcommand code for this ELS (LINIT). */
		lfa.subcommand[1] = 0x70;
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Build RJT - LINIT requires a fabric connection. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5538 
5539 /*
5540  * ql_els_lpc
 *	Issue an extended link service loop control request.
5542  *
5543  * Input:
5544  *	ha = adapter state pointer.
5545  *	pkt = pointer to fc_packet.
5546  *
5547  * Returns:
5548  *	FC_SUCCESS - the packet was accepted for transport.
5549  *	FC_TRANSPORT_ERROR - a transport error occurred.
5550  *
5551  * Context:
5552  *	Kernel context.
5553  */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			/*
			 * NOTE(review): this 32-bit path uses dmac_address
			 * with a zero high word, while ql_els_linit() uses
			 * LSD/MSD of dmac_laddress -- confirm both forms
			 * are equivalent here.
			 */
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination address and LPC payload fields. */
		lfa.subcommand_length[0] = 20;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* LFA subcommand code for this ELS (LPC). */
		lfa.subcommand[1] = 0x71;
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Build RJT - LPC requires a fabric connection. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5631 
5632 /*
5633  * ql_els_lsts
 *	Issue an extended link service loop status request.
5635  *
5636  * Input:
5637  *	ha = adapter state pointer.
5638  *	pkt = pointer to fc_packet.
5639  *
5640  * Returns:
5641  *	FC_SUCCESS - the packet was accepted for transport.
5642  *	FC_TRANSPORT_ERROR - a transport error occurred.
5643  *
5644  * Context:
5645  *	Kernel context.
5646  */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			/*
			 * NOTE(review): this 32-bit path uses dmac_address
			 * with a zero high word, while ql_els_linit() uses
			 * LSD/MSD of dmac_laddress -- confirm both forms
			 * are equivalent here.
			 */
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination address; no payload beyond the subcommand. */
		lfa.subcommand_length[0] = 2;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* LFA subcommand code for this ELS (LSTS). */
		lfa.subcommand[1] = 0x72;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Build RJT - LSTS requires a fabric connection. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5722 
5723 /*
5724  * ql_els_scr
 *	Issue an extended link service state change registration request.
5726  *
5727  * Input:
5728  *	ha = adapter state pointer.
5729  *	pkt = pointer to fc_packet.
5730  *
5731  * Returns:
5732  *	FC_SUCCESS - the packet was accepted for transport.
5733  *	FC_TRANSPORT_ERROR - a transport error occurred.
5734  *
5735  * Context:
5736  *	Kernel context.
5737  */
5738 static int
5739 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5740 {
5741 	fc_scr_resp_t	acc;
5742 	int		rval = FC_SUCCESS;
5743 
5744 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5745 
5746 	bzero(&acc, sizeof (acc));
5747 	if (ha->topology & QL_SNS_CONNECTION) {
5748 		fc_scr_req_t els;
5749 
5750 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5751 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5752 
5753 		if (ql_send_change_request(ha, els.scr_func) ==
5754 		    QL_SUCCESS) {
5755 			/* Build ACC. */
5756 			acc.scr_acc = LA_ELS_ACC;
5757 
5758 			pkt->pkt_state = FC_PKT_SUCCESS;
5759 		} else {
5760 			/* Build RJT. */
5761 			acc.scr_acc = LA_ELS_RJT;
5762 
5763 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5764 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5765 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5766 		}
5767 	} else {
5768 		/* Build RJT. */
5769 		acc.scr_acc = LA_ELS_RJT;
5770 
5771 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5772 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5773 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5774 	}
5775 
5776 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5777 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5778 
5779 	if (rval != FC_SUCCESS) {
5780 		EL(ha, "failed, rval = %xh\n", rval);
5781 	} else {
5782 		/*EMPTY*/
5783 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5784 	}
5785 	return (rval);
5786 }
5787 
5788 /*
5789  * ql_els_rscn
 *	Issue an extended link service register state
5791  *	change notification request.
5792  *
5793  * Input:
5794  *	ha = adapter state pointer.
5795  *	pkt = pointer to fc_packet.
5796  *
5797  * Returns:
5798  *	FC_SUCCESS - the packet was accepted for transport.
5799  *	FC_TRANSPORT_ERROR - a transport error occurred.
5800  *
5801  * Context:
5802  *	Kernel context.
5803  */
5804 static int
5805 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5806 {
5807 	ql_rscn_resp_t	acc;
5808 	int		rval = FC_SUCCESS;
5809 
5810 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5811 
5812 	bzero(&acc, sizeof (acc));
5813 	if (ha->topology & QL_SNS_CONNECTION) {
5814 		/* Build ACC. */
5815 		acc.scr_acc = LA_ELS_ACC;
5816 
5817 		pkt->pkt_state = FC_PKT_SUCCESS;
5818 	} else {
5819 		/* Build RJT. */
5820 		acc.scr_acc = LA_ELS_RJT;
5821 
5822 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5823 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5824 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5825 	}
5826 
5827 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5828 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5829 
5830 	if (rval != FC_SUCCESS) {
5831 		EL(ha, "failed, rval = %xh\n", rval);
5832 	} else {
5833 		/*EMPTY*/
5834 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5835 	}
5836 	return (rval);
5837 }
5838 
5839 /*
5840  * ql_els_farp_req
5841  *	Issue FC Address Resolution Protocol (FARP)
5842  *	extended link service request.
5843  *
5844  *	Note: not supported.
5845  *
5846  * Input:
5847  *	ha = adapter state pointer.
5848  *	pkt = pointer to fc_packet.
5849  *
5850  * Returns:
5851  *	FC_SUCCESS - the packet was accepted for transport.
5852  *	FC_TRANSPORT_ERROR - a transport error occurred.
5853  *
5854  * Context:
5855  *	Kernel context.
5856  */
5857 static int
5858 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5859 {
5860 	ql_acc_rjt_t	acc;
5861 	int		rval = FC_SUCCESS;
5862 
5863 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5864 
5865 	bzero(&acc, sizeof (acc));
5866 
5867 	/* Build ACC. */
5868 	acc.ls_code.ls_code = LA_ELS_ACC;
5869 
5870 	pkt->pkt_state = FC_PKT_SUCCESS;
5871 
5872 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5873 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5874 
5875 	if (rval != FC_SUCCESS) {
5876 		EL(ha, "failed, rval = %xh\n", rval);
5877 	} else {
5878 		/*EMPTY*/
5879 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5880 	}
5881 	return (rval);
5882 }
5883 
5884 /*
5885  * ql_els_farp_reply
5886  *	Issue FC Address Resolution Protocol (FARP)
5887  *	extended link service reply.
5888  *
5889  *	Note: not supported.
5890  *
5891  * Input:
5892  *	ha = adapter state pointer.
5893  *	pkt = pointer to fc_packet.
5894  *
5895  * Returns:
5896  *	FC_SUCCESS - the packet was accepted for transport.
5897  *	FC_TRANSPORT_ERROR - a transport error occurred.
5898  *
5899  * Context:
5900  *	Kernel context.
5901  */
5902 /* ARGSUSED */
5903 static int
5904 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5905 {
5906 	ql_acc_rjt_t	acc;
5907 	int		rval = FC_SUCCESS;
5908 
5909 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5910 
5911 	bzero(&acc, sizeof (acc));
5912 
5913 	/* Build ACC. */
5914 	acc.ls_code.ls_code = LA_ELS_ACC;
5915 
5916 	pkt->pkt_state = FC_PKT_SUCCESS;
5917 
5918 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5919 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5920 
5921 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5922 
5923 	return (rval);
5924 }
5925 
/*
 * ql_els_rnid
 *	Process a request node identification data (RNID) extended
 *	link service request.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the accept response was built.
 *	FC_FAILURE - unknown target or the firmware RNID request failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Look up the target queue hashed by AL_PA. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	/*
	 * NOTE(review): the command payload is dereferenced directly here
	 * rather than copied in with ddi_rep_get8() as the other ELS
	 * handlers do -- confirm this is safe for this DMA binding.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		/* Build RJT and fail the request. */
		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/* assumes acc.hdr can hold req_len bytes -- TODO confirm sizes */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
5989 
/*
 * ql_els_rls
 *	Process a read link error status block (RLS) extended
 *	link service request.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the accept response was built.
 *	FC_FAILURE - unknown target or the link status mailbox
 *		command failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Look up the target queue hashed by AL_PA. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		/* Build RJT and fail the request. */
		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Byte order fixup for the counters returned by the firmware. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build ACC carrying the link error counters. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6059 
/*
 * ql_busy_plogi
 *	Prepares for PLOGI processing: waits for the target queue's
 *	outstanding command count to drain, then (for synchronous
 *	requests) scans the task daemon callback queue for entries
 *	still destined for the same d_id.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - commands or callbacks still pending; transport
 *		should retry.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/* Unsolicited buffers carry the d_id in the frame s_id. */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6143 
6144 /*
6145  * ql_login_port
6146  *	Logs in a device if not already logged in.
6147  *
6148  * Input:
6149  *	ha = adapter state pointer.
6150  *	d_id = 24 bit port ID.
6151  *	DEVICE_QUEUE_LOCK must be released.
6152  *
6153  * Returns:
6154  *	QL local function return status code.
6155  *
6156  * Context:
6157  *	Kernel context.
6158  */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			/* Existing queue found; remember its loop ID. */
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/*
	 * A non-fabric device marked PORT_LOST_ID keeps its old loop ID;
	 * strip the lost-marker bit so the same ID is reused for login.
	 */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		/* Name server uses a fixed, chip-family specific handle. */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		rval = ql_login_fabric_port(ha, tq, loop_id);
		if (rval == QL_SUCCESS) {
			tq->loop_id = loop_id;
			tq->flags |= TQF_FABRIC_DEVICE;
			(void) ql_get_port_database(ha, tq, PDF_NONE);
			/* Name server reachable implies a switch topology. */
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			/* Re-login; PORT_ID_USED means already logged in. */
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_PORT_ID_USED) {
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			/* Local loop device; skip PLOGI for initiators. */
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		/*
		 * Scan at most the full handle range; the index counter only
		 * bounds the number of attempts, loop_id itself wraps below.
		 */
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				/* Wrap the free-ID cursor back into range. */
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			/* vha != NULL means some port already owns this ID. */
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock across the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				/* index = 1 terminates the scan loop. */
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				/* ID collided in firmware; try the next. */
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			/* Reacquire for the next iteration / loop exit. */
			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		/* Unknown device on a non-fabric topology; cannot login. */
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6350 
6351 /*
6352  * ql_login_fabric_port
6353  *	Issue login fabric port mailbox command.
6354  *
6355  * Input:
6356  *	ha:		adapter state pointer.
6357  *	tq:		target queue pointer.
6358  *	loop_id:	FC Loop ID.
6359  *
6360  * Returns:
6361  *	ql local function return status code.
6362  *
6363  * Context:
6364  *	Kernel context.
6365  */
6366 static int
6367 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6368 {
6369 	int		rval;
6370 	int		index;
6371 	int		retry = 0;
6372 	port_id_t	d_id;
6373 	ql_tgt_t	*newq;
6374 	ql_mbx_data_t	mr;
6375 
6376 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6377 	    tq->d_id.b24);
6378 
6379 	/*
6380 	 * QL_PARAMETER_ERROR also means the firmware is
6381 	 * not able to allocate PCB entry due to resource
6382 	 * issues, or collision.
6383 	 */
6384 	do {
6385 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6386 		if ((rval == QL_PARAMETER_ERROR) ||
6387 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6388 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6389 			retry++;
6390 			drv_usecwait(10 * MILLISEC);
6391 		} else {
6392 			break;
6393 		}
6394 	} while (retry < 5);
6395 
6396 	switch (rval) {
6397 	case QL_SUCCESS:
6398 		tq->loop_id = loop_id;
6399 		break;
6400 
6401 	case QL_PORT_ID_USED:
6402 		/*
6403 		 * This Loop ID should NOT be in use in drivers
6404 		 */
6405 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6406 
6407 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6408 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6409 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6410 			    newq->loop_id, newq->d_id.b24);
6411 			ql_send_logo(ha, newq, NULL);
6412 		}
6413 
6414 		tq->loop_id = mr.mb[1];
6415 		break;
6416 
6417 	case QL_LOOP_ID_USED:
6418 		d_id.b.al_pa = LSB(mr.mb[2]);
6419 		d_id.b.area = MSB(mr.mb[2]);
6420 		d_id.b.domain = LSB(mr.mb[1]);
6421 
6422 		newq = ql_d_id_to_queue(ha, d_id);
6423 		if (newq && (newq->loop_id != loop_id)) {
6424 			/*
6425 			 * This should NEVER ever happen; but this
6426 			 * code is needed to bail out when the worst
6427 			 * case happens - or as used to happen before
6428 			 */
6429 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6430 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6431 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6432 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6433 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6434 			    newq->d_id.b24, loop_id);
6435 
6436 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6437 				ADAPTER_STATE_LOCK(ha);
6438 
6439 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6440 				ql_add_link_b(&ha->dev[index], &newq->device);
6441 
6442 				newq->d_id.b24 = d_id.b24;
6443 
6444 				index = ql_alpa_to_index[d_id.b.al_pa];
6445 				ql_add_link_b(&ha->dev[index], &newq->device);
6446 
6447 				ADAPTER_STATE_UNLOCK(ha);
6448 			}
6449 
6450 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6451 
6452 		}
6453 
6454 		/*
6455 		 * Invalidate the loop ID for the
6456 		 * us to obtain a new one.
6457 		 */
6458 		tq->loop_id = PORT_NO_LOOP_ID;
6459 		break;
6460 
6461 	case QL_ALL_IDS_IN_USE:
6462 		rval = QL_FUNCTION_FAILED;
6463 		EL(ha, "no loop id's available\n");
6464 		break;
6465 
6466 	default:
6467 		if (rval == QL_COMMAND_ERROR) {
6468 			switch (mr.mb[1]) {
6469 			case 2:
6470 			case 3:
6471 				rval = QL_MEMORY_ALLOC_FAILED;
6472 				break;
6473 
6474 			case 4:
6475 				rval = QL_FUNCTION_TIMEOUT;
6476 				break;
6477 			case 7:
6478 				rval = QL_FABRIC_NOT_INITIALIZED;
6479 				break;
6480 			default:
6481 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6482 				break;
6483 			}
6484 		} else {
6485 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6486 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6487 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6488 		}
6489 		break;
6490 	}
6491 
6492 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6493 	    rval != QL_LOOP_ID_USED) {
6494 		EL(ha, "failed=%xh\n", rval);
6495 	} else {
6496 		/*EMPTY*/
6497 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6498 	}
6499 	return (rval);
6500 }
6501 
6502 /*
6503  * ql_logout_port
6504  *	Logs out a device if possible.
6505  *
6506  * Input:
6507  *	ha:	adapter state pointer.
6508  *	d_id:	24 bit port ID.
6509  *
6510  * Returns:
6511  *	QL local function return status code.
6512  *
6513  * Context:
6514  *	Kernel context.
6515  */
6516 static int
6517 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6518 {
6519 	ql_link_t	*link;
6520 	ql_tgt_t	*tq;
6521 	uint16_t	index;
6522 
6523 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6524 
6525 	/* Get head queue index. */
6526 	index = ql_alpa_to_index[d_id.b.al_pa];
6527 
6528 	/* Get device queue. */
6529 	tq = NULL;
6530 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6531 		tq = link->base_address;
6532 		if (tq->d_id.b24 == d_id.b24) {
6533 			break;
6534 		} else {
6535 			tq = NULL;
6536 		}
6537 	}
6538 
6539 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6540 		(void) ql_logout_fabric_port(ha, tq);
6541 		tq->loop_id = PORT_NO_LOOP_ID;
6542 	}
6543 
6544 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6545 
6546 	return (QL_SUCCESS);
6547 }
6548 
6549 /*
6550  * ql_dev_init
6551  *	Initialize/allocate device queue.
6552  *
6553  * Input:
6554  *	ha:		adapter state pointer.
6555  *	d_id:		device destination ID
6556  *	loop_id:	device loop ID
6557  *	ADAPTER_STATE_LOCK must be already obtained.
6558  *
6559  * Returns:
6560  *	NULL = failure
6561  *
6562  * Context:
6563  *	Kernel context.
6564  */
6565 ql_tgt_t *
6566 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6567 {
6568 	ql_link_t	*link;
6569 	uint16_t	index;
6570 	ql_tgt_t	*tq;
6571 
6572 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6573 	    ha->instance, d_id.b24, loop_id);
6574 
6575 	index = ql_alpa_to_index[d_id.b.al_pa];
6576 
6577 	/* If device queue exists, set proper loop ID. */
6578 	tq = NULL;
6579 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6580 		tq = link->base_address;
6581 		if (tq->d_id.b24 == d_id.b24) {
6582 			tq->loop_id = loop_id;
6583 
6584 			/* Reset port down retry count. */
6585 			tq->port_down_retry_count = ha->port_down_retry_count;
6586 			tq->qfull_retry_count = ha->qfull_retry_count;
6587 
6588 			break;
6589 		} else {
6590 			tq = NULL;
6591 		}
6592 	}
6593 
6594 	/* If device does not have queue. */
6595 	if (tq == NULL) {
6596 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6597 		if (tq != NULL) {
6598 			/*
6599 			 * mutex to protect the device queue,
6600 			 * does not block interrupts.
6601 			 */
6602 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6603 			    (ha->iflags & IFLG_INTR_AIF) ?
6604 			    (void *)(uintptr_t)ha->intr_pri :
6605 			    (void *)(uintptr_t)ha->iblock_cookie);
6606 
6607 			tq->d_id.b24 = d_id.b24;
6608 			tq->loop_id = loop_id;
6609 			tq->device.base_address = tq;
6610 			tq->iidma_rate = IIDMA_RATE_INIT;
6611 
6612 			/* Reset port down retry count. */
6613 			tq->port_down_retry_count = ha->port_down_retry_count;
6614 			tq->qfull_retry_count = ha->qfull_retry_count;
6615 
6616 			/* Add device to device queue. */
6617 			ql_add_link_b(&ha->dev[index], &tq->device);
6618 		}
6619 	}
6620 
6621 	if (tq == NULL) {
6622 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6623 	} else {
6624 		/*EMPTY*/
6625 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6626 	}
6627 	return (tq);
6628 }
6629 
6630 /*
6631  * ql_dev_free
6632  *	Remove queue from device list and frees resources used by queue.
6633  *
6634  * Input:
6635  *	ha:	adapter state pointer.
6636  *	tq:	target queue pointer.
6637  *	ADAPTER_STATE_LOCK must be already obtained.
6638  *
6639  * Context:
6640  *	Kernel context.
6641  */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bail out if any LUN of this target still has queued commands. */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only tear down when no commands are outstanding on the target. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue; the next link is
				 * fetched before the current element's
				 * memory is released.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6685 
6686 /*
6687  * ql_lun_queue
6688  *	Allocate LUN queue if does not exists.
6689  *
6690  * Input:
6691  *	ha:	adapter state pointer.
6692  *	tq:	target queue.
6693  *	lun:	LUN number.
6694  *
6695  * Returns:
6696  *	NULL = failure
6697  *
6698  * Context:
6699  *	Kernel context.
6700  */
6701 static ql_lun_t *
6702 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6703 {
6704 	ql_lun_t	*lq;
6705 	ql_link_t	*link;
6706 
6707 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6708 
6709 	/* Fast path. */
6710 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6711 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6712 		return (tq->last_lun_queue);
6713 	}
6714 
6715 	if (lun >= MAX_LUNS) {
6716 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6717 		return (NULL);
6718 	}
6719 	/* If device queue exists, set proper loop ID. */
6720 	lq = NULL;
6721 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6722 		lq = link->base_address;
6723 		if (lq->lun_no == lun) {
6724 			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6725 			tq->last_lun_queue = lq;
6726 			return (lq);
6727 		}
6728 	}
6729 
6730 	/* If queue does exist. */
6731 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6732 
6733 	/* Initialize LUN queue. */
6734 	if (lq != NULL) {
6735 		lq->link.base_address = lq;
6736 
6737 		lq->lun_no = lun;
6738 		lq->target_queue = tq;
6739 
6740 		DEVICE_QUEUE_LOCK(tq);
6741 		ql_add_link_b(&tq->lun_queues, &lq->link);
6742 		DEVICE_QUEUE_UNLOCK(tq);
6743 		tq->last_lun_queue = lq;
6744 	}
6745 
6746 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6747 
6748 	return (lq);
6749 }
6750 
6751 /*
6752  * ql_fcp_scsi_cmd
6753  *	Process fibre channel (FCP) SCSI protocol commands.
6754  *
6755  * Input:
6756  *	ha = adapter state pointer.
6757  *	pkt = pointer to fc_packet.
6758  *	sp = srb pointer.
6759  *
6760  * Returns:
6761  *	FC_SUCCESS - the packet was accepted for transport.
6762  *	FC_TRANSPORT_ERROR - a transport error occurred.
6763  *
6764  * Context:
6765  *	Kernel context.
6766  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Resolve target queue: cached FCA device first, then D_ID lookup. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* Extract the LUN from the FCP entity address bytes. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Normal I/O: account it in the xioctl statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					/*
					 * Cookies beyond the command IOCB's
					 * capacity spill into continuation
					 * IOCBs; +2 covers the command IOCB
					 * plus a partially filled last
					 * continuation.
					 */
					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No target/LUN queue; reject and complete the packet. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6859 
6860 /*
6861  * ql_task_mgmt
6862  *	Task management function processor.
6863  *
6864  * Input:
6865  *	ha:	adapter state pointer.
6866  *	tq:	target queue pointer.
6867  *	pkt:	pointer to fc_packet.
6868  *	sp:	SRB pointer.
6869  *
6870  * Context:
6871  *	Kernel context.
6872  */
6873 static void
6874 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6875     ql_srb_t *sp)
6876 {
6877 	fcp_rsp_t		*fcpr;
6878 	struct fcp_rsp_info	*rsp;
6879 	uint16_t		lun;
6880 
6881 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6882 
6883 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6884 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6885 
6886 	bzero(fcpr, pkt->pkt_rsplen);
6887 
6888 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6889 	fcpr->fcp_response_len = 8;
6890 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6891 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6892 
6893 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6894 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6895 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6896 		}
6897 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6898 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6899 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6900 		}
6901 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6902 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6903 		    QL_SUCCESS) {
6904 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6905 		}
6906 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6907 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6908 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6909 		}
6910 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6911 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6912 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6913 		}
6914 	} else {
6915 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6916 	}
6917 
6918 	pkt->pkt_state = FC_PKT_SUCCESS;
6919 
6920 	/* Do command callback. */
6921 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6922 		ql_awaken_task_daemon(ha, sp, 0, 0);
6923 	}
6924 
6925 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6926 }
6927 
6928 /*
6929  * ql_fcp_ip_cmd
6930  *	Process fibre channel (FCP) Internet (IP) protocols commands.
6931  *
6932  * Input:
6933  *	ha:	adapter state pointer.
6934  *	pkt:	pointer to fc_packet.
6935  *	sp:	SRB pointer.
6936  *
6937  * Returns:
6938  *	FC_SUCCESS - the packet was accepted for transport.
6939  *	FC_TRANSPORT_ERROR - a transport error occurred.
6940  *
6941  * Context:
6942  *	Kernel context.
6943  */
6944 static int
6945 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6946 {
6947 	port_id_t	d_id;
6948 	ql_tgt_t	*tq;
6949 
6950 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6951 
6952 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6953 	if (tq == NULL) {
6954 		d_id.r.rsvd_1 = 0;
6955 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6956 		tq = ql_d_id_to_queue(ha, d_id);
6957 	}
6958 
6959 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6960 		/*
6961 		 * IP data is bound to pkt_cmd_dma
6962 		 */
6963 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6964 		    0, 0, DDI_DMA_SYNC_FORDEV);
6965 
6966 		/* Setup IOCB count. */
6967 		sp->iocb = ha->ip_cmd;
6968 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6969 			uint32_t	cnt;
6970 
6971 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6972 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6973 			if (cnt % ha->cmd_cont_segs) {
6974 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6975 			} else {
6976 				sp->req_cnt++;
6977 			}
6978 		} else {
6979 			sp->req_cnt = 1;
6980 		}
6981 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6982 
6983 		return (ql_start_cmd(ha, tq, pkt, sp));
6984 	} else {
6985 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6986 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6987 
6988 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6989 			ql_awaken_task_daemon(ha, sp, 0, 0);
6990 	}
6991 
6992 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6993 
6994 	return (FC_SUCCESS);
6995 }
6996 
6997 /*
6998  * ql_fc_services
6999  *	Process fibre channel services (name server).
7000  *
7001  * Input:
7002  *	ha:	adapter state pointer.
7003  *	pkt:	pointer to fc_packet.
7004  *
7005  * Returns:
7006  *	FC_SUCCESS - the packet was accepted for transport.
7007  *	FC_TRANSPORT_ERROR - a transport error occurred.
7008  *
7009  * Context:
7010  *	Kernel context.
7011  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	/* ct_aiusize is in 4-byte words and excludes the CT header. */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/* No queue for the service's D_ID; reject below. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		/* Response S/G entries beyond the MS IOCB use continuations. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		/* Place the reject payload in the response DMA buffer. */
		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7115 
7116 /*
7117  * ql_cthdr_endian
7118  *	Change endianess of ct passthrough header and payload.
7119  *
7120  * Input:
7121  *	acc_handle:	DMA buffer access handle.
7122  *	ct_hdr:		Pointer to header.
7123  *	restore:	Restore first flag.
7124  *
7125  * Context:
7126  *	Interrupt or Kernel context, no mailbox commands allowed.
7127  */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Read the CT header out of the DMA buffer into a local copy. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * restore == B_TRUE: the header is currently big endian; swap it
	 * back to host order first so the field tests below are valid.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/*
	 * Name server payloads carry binary fields that also need
	 * byte swapping; the layout depends on the NS command code.
	 */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			/* Single 32-bit port ID at payload start. */
			BIG_ENDIAN_32(bp);
			break;
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			/* Port ID followed by a second 32-bit field. */
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		case NS_GNN_IP:
		case NS_GIPA_IP:
			/* 16-byte IP address at payload start. */
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIP_NN:
			/* 8-byte node name, then 16-byte IP address. */
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIPA_NN:
			/* 8-byte node name, then 64-bit IPA. */
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/*
	 * restore == B_FALSE: convert the host-order header to big
	 * endian for the wire.
	 */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (possibly swapped) header back to the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7199 
7200 /*
7201  * ql_start_cmd
7202  *	Finishes starting fibre channel protocol (FCP) command.
7203  *
7204  * Input:
7205  *	ha:	adapter state pointer.
7206  *	tq:	target queue pointer.
7207  *	pkt:	pointer to fc_packet.
7208  *	sp:	SRB pointer.
7209  *
7210  * Context:
7211  *	Kernel context.
7212  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;	/* seconds; nonzero => poll for done */
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* No outstanding-command slot is associated with this SRB yet. */
	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/* Outlast the watchdog by two ticks. */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/* Only the first panic-context command resets ISP. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Panic path bypasses the device queue entirely. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			/* (ql_next releases the device queue lock). */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/* Timed out: abort, or dequeue by hand on failure. */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7371 
7372 /*
7373  * ql_poll_cmd
7374  *	Polls commands for completion.
7375  *
7376  * Input:
7377  *	ha = adapter state pointer.
7378  *	sp = SRB command pointer.
7379  *	poll_wait = poll wait time in seconds.
7380  *
7381  * Returns:
7382  *	QL local function return status code.
7383  *
7384  * Context:
7385  *	Kernel context.
7386  */
7387 static int
7388 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7389 {
7390 	int			rval = QL_SUCCESS;
7391 	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
7392 	ql_adapter_state_t	*ha = vha->pha;
7393 
7394 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7395 
7396 	while (sp->flags & SRB_POLL) {
7397 
7398 		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7399 		    ha->idle_timer >= 15 || ddi_in_panic()) {
7400 
7401 			/* If waiting for restart, do it now. */
7402 			if (ha->port_retry_timer != 0) {
7403 				ADAPTER_STATE_LOCK(ha);
7404 				ha->port_retry_timer = 0;
7405 				ADAPTER_STATE_UNLOCK(ha);
7406 
7407 				TASK_DAEMON_LOCK(ha);
7408 				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7409 				TASK_DAEMON_UNLOCK(ha);
7410 			}
7411 
7412 			if ((CFG_IST(ha, CFG_CTRL_242581) ?
7413 			    RD32_IO_REG(ha, istatus) :
7414 			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
7415 				(void) ql_isr((caddr_t)ha);
7416 				INTR_LOCK(ha);
7417 				ha->intr_claimed = TRUE;
7418 				INTR_UNLOCK(ha);
7419 			}
7420 
7421 			/*
7422 			 * Call task thread function in case the
7423 			 * daemon is not running.
7424 			 */
7425 			TASK_DAEMON_LOCK(ha);
7426 
7427 			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7428 			    QL_TASK_PENDING(ha)) {
7429 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7430 				ql_task_thread(ha);
7431 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7432 			}
7433 
7434 			TASK_DAEMON_UNLOCK(ha);
7435 		}
7436 
7437 		if (msecs_left < 10) {
7438 			rval = QL_FUNCTION_TIMEOUT;
7439 			break;
7440 		}
7441 
7442 		/*
7443 		 * Polling interval is 10 milli seconds; Increasing
7444 		 * the polling interval to seconds since disk IO
7445 		 * timeout values are ~60 seconds is tempting enough,
7446 		 * but CPR dump time increases, and so will the crash
7447 		 * dump time; Don't toy with the settings without due
7448 		 * consideration for all the scenarios that will be
7449 		 * impacted.
7450 		 */
7451 		ql_delay(ha, 10000);
7452 		msecs_left -= 10;
7453 	}
7454 
7455 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7456 
7457 	return (rval);
7458 }
7459 
7460 /*
7461  * ql_next
7462  *	Retrieve and process next job in the device queue.
7463  *
7464  * Input:
7465  *	ha:	adapter state pointer.
7466  *	lq:	LUN queue pointer.
7467  *	DEVICE_QUEUE_LOCK must be already obtained.
7468  *
7469  * Output:
7470  *	Releases DEVICE_QUEUE_LOCK upon exit.
7471  *
7472  * Context:
7473  *	Interrupt or Kernel context, no mailbox commands allowed.
7474  */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* In panic context commands are started directly, not queued. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN queue head-first until we hit a stop condition. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Command is now outstanding on the target. */
		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7551 
7552 /*
7553  * ql_done
7554  *	Process completed commands.
7555  *
7556  * Input:
7557  *	link:	first command link in chain.
7558  *
7559  * Context:
7560  *	Interrupt or Kernel context, no mailbox commands allowed.
7561  */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Walk the completion chain; each SRB may be finished or retried. */
	for (; link != NULL; link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer completion path. */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				/* Drop the lock to replenish ISP buffers. */
				QL_UB_UNLOCK(ha);
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}

			/* Place request back on top of target command queue */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_242581) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				/* ql_next releases the device queue lock. */
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				if (lq->cmd.first != NULL) {
					/* Releases the device queue lock. */
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					/*
					 * NOTE(review): FC_PKT_TRAN_ERROR
					 * looks like a pkt_state code being
					 * stored into pkt_reason here, unlike
					 * the FC_REASON_* values used by the
					 * other cases — confirm intended.
					 */
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					/* Poller in ql_poll_cmd sees this. */
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
7793 
7794 /*
7795  * ql_awaken_task_daemon
7796  *	Adds command completion callback to callback queue and/or
7797  *	awakens task daemon thread.
7798  *
7799  * Input:
7800  *	ha:		adapter state pointer.
7801  *	sp:		srb pointer.
7802  *	set_flags:	task daemon flags to set.
7803  *	reset_flags:	task daemon flags to reset.
7804  *
7805  * Context:
7806  *	Interrupt or Kernel context, no mailbox commands allowed.
7807  */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	/* Daemon state lives on the physical adapter. */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is already active. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		if (sp != NULL) {
			/* Daemon can't run it; do the callback here. */
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/*
			 * Run task work inline, but never from an interrupt
			 * thread and never re-entrantly.
			 */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue the completion and wake the daemon if it sleeps. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7861 
7862 /*
7863  * ql_task_daemon
 *	Thread that is awakened by the driver when
 *	background work needs to be done.
7866  *
7867  * Input:
7868  *	arg = adapter state pointer.
7869  *
7870  * Context:
7871  *	Kernel context.
7872  */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with CPR (suspend/resume) using the daemon mutex. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	/* Service work, then sleep until awakened, until told to stop. */
	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT also releases task_daemon_mutex. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
7930 
7931 /*
7932  * ql_task_thread
7933  *	Thread run by daemon.
7934  *
7935  * Input:
7936  *	ha = adapter state pointer.
7937  *	TASK_DAEMON_LOCK must be acquired prior to call.
7938  *
7939  * Context:
7940  *	Kernel context.
7941  */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	int			loop_again, rval;
	ql_srb_t		*sp;
	ql_head_t		*head;
	ql_link_t		*link;
	caddr_t			msg;
	ql_adapter_state_t	*vha;

	/*
	 * Each service section below drops TASK_DAEMON_LOCK around its
	 * work and reacquires it; loop_again forces another full pass
	 * whenever any work was done, since flags may have changed while
	 * the lock was released.
	 */
	do {
		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
		    ha->instance, ha->task_daemon_flags);

		loop_again = FALSE;

		/* Stall all task work while the adapter is powered down. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		/* IDC acknowledge needed. */
		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			switch (ha->idc_mb[2]) {
			case IDC_OPC_DRV_START:
				/* Unwind MPI-restart / flash-access state. */
				if (ha->idc_restart_mpi != 0) {
					ha->idc_restart_mpi--;
					if (ha->idc_restart_mpi == 0) {
						ha->restart_mpi_timer = 0;
						ha->task_daemon_flags &=
						    ~TASK_DAEMON_STALLED_FLG;
					}
				}
				if (ha->idc_flash_acc != 0) {
					ha->idc_flash_acc--;
					if (ha->idc_flash_acc == 0) {
						ha->flash_acc_timer = 0;
						GLOBAL_HW_LOCK();
					}
				}
				break;
			case IDC_OPC_FLASH_ACC:
				ha->flash_acc_timer = 30;
				if (ha->idc_flash_acc == 0) {
					GLOBAL_HW_UNLOCK();
				}
				ha->idc_flash_acc++;
				break;
			case IDC_OPC_RESTART_MPI:
				ha->restart_mpi_timer = 30;
				ha->idc_restart_mpi++;
				ha->task_daemon_flags |=
				    TASK_DAEMON_STALLED_FLG;
				break;
			default:
				EL(ha, "Unknown IDC opcode=%xh\n",
				    ha->idc_mb[2]);
				break;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
				TASK_DAEMON_UNLOCK(ha);
				rval = ql_idc_ack(ha);
				if (rval != QL_SUCCESS) {
					EL(ha, "idc_ack status=%xh\n", rval);
				}
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Nothing further can run while suspended or offline. */
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
		    DRIVER_STALL) ||
		    (ha->flags & ONLINE) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		/* Report port offline ahead of the ISP abort handled below. */
		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_abort_queues(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Not suspended, awaken waiting routines. */
		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = TRUE;
		}

		/* Handle RSCN changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Handle state changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				/* Wait for commands to drain if requested. */
				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
					}
				}

				/* Pick a console message for the transition. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the bound FC transport, unlocked. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb state=%xh\n", ha->instance,
					    vha->vp_index, vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
				}
				loop_again = TRUE;
			}
		}

		/* Propagate a LIP reset to every bound virtual port. */
		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
			EL(ha, "processing LIP reset\n");
			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
			TASK_DAEMON_UNLOCK(ha);
			for (vha = ha; vha != NULL; vha = vha->vp_next) {
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb reset\n", ha->instance,
					    vha->vp_index);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    FC_STATE_TARGET_PORT_RESET);
				}
			}
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
		    FIRMWARE_UP)) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Perform the ISP abort requested above or by the ISR. */
		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
		    COMMAND_WAIT_NEEDED))) {
			/* Reset marker (one port at a time, not reentrant). */
			if (QL_IS_SET(ha->task_daemon_flags,
			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
					ha->task_daemon_flags |= RESET_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						ql_rst_aen(vha);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~RESET_ACTIVE;
					loop_again = TRUE;
				}
			}

			/* Loop resync (not reentrant). */
			if (QL_IS_SET(ha->task_daemon_flags,
			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					(void) ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			/* NOTE(review): B_TRUE/TRUE mixed; both nonzero. */
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;

			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Run one queued completion callback per pass. */
		head = &ha->callback_queue;
		if (head->first != NULL) {
			sp = head->first->base_address;
			link = &sp->cmd;

			/* Dequeue command. */
			ql_remove_link(head, link);

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}

			/* Acquire task daemon lock. */
			TASK_DAEMON_LOCK(ha);

			loop_again = TRUE;
		}

	} while (loop_again);
}
8290 
8291 /*
8292  * ql_idle_check
8293  *	Test for adapter is alive and well.
8294  *
8295  * Input:
8296  *	ha:	adapter state pointer.
8297  *
8298  * Context:
8299  *	Kernel context.
8300  */
8301 static void
8302 ql_idle_check(ql_adapter_state_t *ha)
8303 {
8304 	ddi_devstate_t	state;
8305 	int		rval;
8306 	ql_mbx_data_t	mr;
8307 
8308 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8309 
8310 	/* Firmware Ready Test. */
8311 	rval = ql_get_firmware_state(ha, &mr);
8312 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8313 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8314 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8315 		state = ddi_get_devstate(ha->dip);
8316 		if (state == DDI_DEVSTATE_UP) {
8317 			/*EMPTY*/
8318 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8319 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8320 		}
8321 		TASK_DAEMON_LOCK(ha);
8322 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8323 			EL(ha, "fstate_ready, isp_abort_needed\n");
8324 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8325 		}
8326 		TASK_DAEMON_UNLOCK(ha);
8327 	}
8328 
8329 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8330 }
8331 
8332 /*
8333  * ql_unsol_callback
8334  *	Handle unsolicited buffer callbacks.
8335  *
8336  * Input:
8337  *	ha = adapter state pointer.
8338  *	sp = srb pointer.
8339  *
8340  * Context:
8341  *	Kernel context.
8342  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Recover the unsolicited buffer this srb describes. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/*
	 * If the buffer is being torn down, or the adapter is powering
	 * down, return the buffer to the FCA pool instead of delivering it.
	 */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* Affected-id payload follows the 4-byte RSCN header. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/*
			 * Commands are still draining; re-queue this
			 * callback so it is retried later.
			 */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/*
		 * If the device still has commands outstanding, defer the
		 * LOGO delivery the same way as a deferred RSCN above.
		 */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	/* Buffer is leaving driver ownership; clear in-driver state bits. */
	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* IP buffers are DMA-mapped; sync before the CPU reads them. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the buffer up to the transport's unsolicited callback. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8444 
8445 /*
8446  * ql_send_logo
8447  *
8448  * Input:
 *	vha:	adapter state pointer.
8450  *	tq:	target queue pointer.
8451  *	done_q:	done queue pointer.
8452  *
8453  * Context:
8454  *	Interrupt or Kernel context, no mailbox commands allowed.
8455  */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Skip unassigned (0) and broadcast (0xffffff) destination ids. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Only emulate a LOGO when the target is quiescent: no RSCN
	 * pending, no PLOGI in progress, no logout already sent, and the
	 * link is up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Associate the srb with one of the target's LUN queues. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Deliver now via done_q, or let the task daemon do it. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8534 
/*
 * ql_process_logo_for_device
 *	Decides whether a received LOGO for a device can be sent up to
 *	the transport now (returns 1) or must be deferred because the
 *	device still has work outstanding (returns 0).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	ql_link_t	*link;
	int		sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);
	if (tq->outcnt) {
		/* Commands outstanding: abort them and defer the LOGO. */
		DEVICE_QUEUE_UNLOCK(tq);
		sendup = 0;
		(void) ql_abort_device(ha, tq, 1);
		ql_delay(ha, 10000);
	} else {
		DEVICE_QUEUE_UNLOCK(tq);
		TASK_DAEMON_LOCK(ha);

		/*
		 * Defer if any non-unsolicited callback queued for this
		 * d_id is still waiting to be completed.
		 */
		for (link = ha->pha->callback_queue.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			/* Unsolicited-buffer srbs have no pkt; skip them. */
			if (sp->flags & SRB_UB_CALLBACK) {
				continue;
			}
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;

			if (tq->d_id.b24 == d_id.b24) {
				sendup = 0;
				break;
			}
		}

		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
8576 
8577 static int
8578 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8579 {
8580 	fc_unsol_buf_t		*ubp;
8581 	ql_srb_t		*sp;
8582 	la_els_logi_t		*payload;
8583 	class_svc_param_t	*class3_param;
8584 
8585 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8586 
8587 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8588 	    LOOP_DOWN)) {
8589 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8590 		return (QL_FUNCTION_FAILED);
8591 	}
8592 
8593 	/* Locate a buffer to use. */
8594 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8595 	if (ubp == NULL) {
8596 		EL(ha, "Failed\n");
8597 		return (QL_FUNCTION_FAILED);
8598 	}
8599 
8600 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8601 	    ha->instance, tq->d_id.b24);
8602 
8603 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8604 
8605 	sp = ubp->ub_fca_private;
8606 
8607 	/* Set header. */
8608 	ubp->ub_frame.d_id = ha->d_id.b24;
8609 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8610 	ubp->ub_frame.s_id = tq->d_id.b24;
8611 	ubp->ub_frame.rsvd = 0;
8612 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8613 	    F_CTL_SEQ_INITIATIVE;
8614 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8615 	ubp->ub_frame.seq_cnt = 0;
8616 	ubp->ub_frame.df_ctl = 0;
8617 	ubp->ub_frame.seq_id = 0;
8618 	ubp->ub_frame.rx_id = 0xffff;
8619 	ubp->ub_frame.ox_id = 0xffff;
8620 
8621 	/* set payload. */
8622 	payload = (la_els_logi_t *)ubp->ub_buffer;
8623 	bzero(payload, sizeof (payload));
8624 
8625 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8626 	payload->common_service.fcph_version = 0x2006;
8627 	payload->common_service.cmn_features = 0x8800;
8628 
8629 	CFG_IST(ha, CFG_CTRL_242581) ?
8630 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8631 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8632 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8633 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8634 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8635 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8636 
8637 	payload->common_service.conc_sequences = 0xff;
8638 	payload->common_service.relative_offset = 0x03;
8639 	payload->common_service.e_d_tov = 0x7d0;
8640 
8641 	bcopy((void *)&tq->port_name[0],
8642 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8643 
8644 	bcopy((void *)&tq->node_name[0],
8645 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8646 
8647 	class3_param = (class_svc_param_t *)&payload->class_3;
8648 	class3_param->class_valid_svc_opt = 0x8000;
8649 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8650 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8651 	class3_param->conc_sequences = tq->class3_conc_sequences;
8652 	class3_param->open_sequences_per_exch =
8653 	    tq->class3_open_sequences_per_exch;
8654 
8655 	QL_UB_LOCK(ha);
8656 	sp->flags |= SRB_UB_CALLBACK;
8657 	QL_UB_UNLOCK(ha);
8658 
8659 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8660 
8661 	if (done_q) {
8662 		ql_add_link_b(done_q, &sp->cmd);
8663 	} else {
8664 		ql_awaken_task_daemon(ha, sp, 0, 0);
8665 	}
8666 
8667 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8668 
8669 	return (QL_SUCCESS);
8670 }
8671 
8672 /*
8673  * Abort outstanding commands in the Firmware, clear internally
8674  * queued commands in the driver, Synchronize the target with
8675  * the Firmware
8676  */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/*
		 * Advance link2 before ql_remove_link() detaches the
		 * current srb from the queue.
		 */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Leave srbs that are already being aborted. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the collected srbs outside the device queue lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/*
	 * Then, if requested and the device is logged in, abort the
	 * commands already handed to the firmware.
	 */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8735 
8736 /*
8737  * ql_rcv_rscn_els
8738  *	Processes received RSCN extended link service.
8739  *
8740  * Input:
8741  *	ha:	adapter state pointer.
8742  *	mb:	array containing input mailbox registers.
8743  *	done_q:	done queue pointer.
8744  *
8745  * Context:
8746  *	Interrupt or Kernel context, no mailbox commands allowed.
8747  */
void
ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	fc_rscn_t		*rn;
	fc_affected_id_t	*af;
	port_id_t		d_id;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Locate a buffer to use. */
	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
	if (ubp != NULL) {
		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = ha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		/* RSCNs originate from the fabric controller well-known id. */
		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload: 4-byte RSCN header plus one affected id. */
		rn = (fc_rscn_t *)ubp->ub_buffer;
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		rn->rscn_code = LA_ELS_RSCN;
		rn->rscn_len = 4;
		rn->rscn_payload_len = 8;
		/* Affected d_id is packed into mailbox registers 1 and 2. */
		d_id.b.al_pa = LSB(mb[2]);
		d_id.b.area = MSB(mb[2]);
		d_id.b.domain =	LSB(mb[1]);
		af->aff_d_id = d_id.b24;
		af->aff_format = MSB(mb[1]);

		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
		    af->aff_d_id);

		/* Mark the affected device queues before delivery. */
		ql_update_rscn(ha, af);

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
		QL_UB_UNLOCK(ha);
		ql_add_link_b(done_q, &sp->cmd);
	}

	if (ubp == NULL) {
		EL(ha, "Failed, get_unsolicited_buffer\n");
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
}
8809 
8810 /*
8811  * ql_update_rscn
8812  *	Update devices from received RSCN.
8813  *
8814  * Input:
8815  *	ha:	adapter state pointer.
8816  *	af:	pointer to RSCN data.
8817  *
8818  * Context:
8819  *	Interrupt or Kernel context, no mailbox commands allowed.
8820  */
static void
ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single-port RSCN: mark just the one matching device queue. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
			DEVICE_QUEUE_LOCK(tq);
			tq->flags |= TQF_RSCN_RCVD;
			DEVICE_QUEUE_UNLOCK(tq);
		}
		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
		    ha->instance);

		return;
	}

	/*
	 * Fabric/area/domain formats: walk every device queue and mark
	 * those that fall inside the affected address range.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				/* Whole fabric: every non-reserved device. */
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			default:
				break;
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8892 
8893 /*
8894  * ql_process_rscn
8895  *
8896  * Input:
8897  *	ha:	adapter state pointer.
8898  *	af:	RSCN payload pointer.
8899  *
8900  * Context:
8901  *	Kernel context.
8902  */
static int
ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	int		sendit;
	int		sendup = 1;
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single-port RSCN: only one device queue to process. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			sendup = ql_process_rscn_for_device(ha, tq);
		}

		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

		return (sendup);
	}

	/*
	 * Fabric/area/domain formats: process every device in range.
	 * sendup stays 1 only if every affected device says it is safe
	 * to deliver the RSCN (once 0, it stays 0).
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {

			tq = link->base_address;
			if (tq == NULL) {
				continue;
			}

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);
					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			default:
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
8984 
8985 /*
8986  * ql_process_rscn_for_device
8987  *
8988  * Input:
8989  *	ha:	adapter state pointer.
8990  *	tq:	target queue pointer.
8991  *
8992  * Context:
8993  *	Kernel context.
8994  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* 1 = OK to deliver the RSCN now; 0 = defer until I/O drains. */
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Mailbox command; cannot hold the device queue lock. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		DEVICE_QUEUE_UNLOCK(tq);

		/* Abort the device's outstanding work before delivery. */
		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Still-draining commands force the RSCN to be deferred. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* Not logged in; nothing to drain. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9052 
/*
 * ql_handle_rscn_update
 *	Rescans the firmware's device id list after an RSCN, initializes
 *	queues for newly discovered devices, and emulates a PLOGI up to
 *	the transport for each valid new device.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/*
	 * NOTE(review): kmem_zalloc with KM_SLEEP should never return
	 * NULL, so this check looks like dead code — kept as-is.
	 */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices; mr.mb[1] is the entry count. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Skip devices we already have a queue for. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Mailbox command; drop the adapter state lock. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9142 
9143 /*
9144  * ql_free_unsolicited_buffer
9145  *	Frees allocated buffer.
9146  *
9147  * Input:
9148  *	ha = adapter state pointer.
9149  *	index = buffer array index.
9150  *	ADAPTER_STATE_LOCK must be already obtained.
9151  *
9152  * Context:
9153  *	Kernel context.
9154  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* ql_shutdown_ip() can block; drop the lock. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/* Buffer intentionally leaked on failure. */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory; others are kmem allocated. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Keep the allocated-buffer count from underflowing. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9194 
9195 /*
9196  * ql_get_unsolicited_buffer
9197  *	Locates a free unsolicited buffer.
9198  *
9199  * Input:
9200  *	ha = adapter state pointer.
9201  *	type = buffer type.
9202  *
9203  * Returns:
9204  *	Unsolicited buffer pointer.
9205  *
9206  * Context:
9207  *	Interrupt or Kernel context, no mailbox commands allowed.
9208  */
9209 fc_unsol_buf_t *
9210 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9211 {
9212 	fc_unsol_buf_t	*ubp;
9213 	ql_srb_t	*sp;
9214 	uint16_t	index;
9215 
9216 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9217 
9218 	/* Locate a buffer to use. */
9219 	ubp = NULL;
9220 
9221 	QL_UB_LOCK(ha);
9222 	for (index = 0; index < QL_UB_LIMIT; index++) {
9223 		ubp = ha->ub_array[index];
9224 		if (ubp != NULL) {
9225 			sp = ubp->ub_fca_private;
9226 			if ((sp->ub_type == type) &&
9227 			    (sp->flags & SRB_UB_IN_FCA) &&
9228 			    (!(sp->flags & (SRB_UB_CALLBACK |
9229 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9230 				sp->flags |= SRB_UB_ACQUIRED;
9231 				ubp->ub_resp_flags = 0;
9232 				break;
9233 			}
9234 			ubp = NULL;
9235 		}
9236 	}
9237 	QL_UB_UNLOCK(ha);
9238 
9239 	if (ubp) {
9240 		ubp->ub_resp_token = NULL;
9241 		ubp->ub_class = FC_TRAN_CLASS3;
9242 	}
9243 
9244 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9245 
9246 	return (ubp);
9247 }
9248 
9249 /*
9250  * ql_ub_frame_hdr
9251  *	Processes received unsolicited buffers from ISP.
9252  *
9253  * Input:
9254  *	ha:	adapter state pointer.
9255  *	tq:	target queue pointer.
9256  *	index:	unsolicited buffer array index.
9257  *	done_q:	done queue pointer.
9258  *
9259  * Returns:
9260  *	ql local function return status code.
9261  *
9262  * Context:
9263  *	Interrupt or Kernel context, no mailbox commands allowed.
9264  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	/* Validate the index and that a buffer exists at that slot. */
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	/* A buffer being freed is reclaimed to the FCA pool instead. */
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only IP buffers still owned by the ISP are eligible. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Build the FC frame header for this IP sequence segment. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp this segment to the remaining sequence length. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance per-target sequence accounting. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Set FIRST/END sequence bits based on segment position. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Queue the buffer for delivery to the transport. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Log exactly which eligibility check failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9388 
9389 /*
9390  * ql_timer
9391  *	One second timer function.
9392  *
9393  * Input:
9394  *	ql_hba.first = first link in adapter list.
9395  *
9396  * Context:
9397  *	Interrupt context, no mailbox commands allowed.
9398  */
9399 static void
9400 ql_timer(void *arg)
9401 {
9402 	ql_link_t		*link;
9403 	uint32_t		set_flags;
9404 	uint32_t		reset_flags;
9405 	ql_adapter_state_t	*ha = NULL, *vha;
9406 
9407 	QL_PRINT_6(CE_CONT, "started\n");
9408 
9409 	/* Acquire global state lock. */
9410 	GLOBAL_STATE_LOCK();
9411 	if (ql_timer_timeout_id == NULL) {
9412 		/* Release global state lock. */
9413 		GLOBAL_STATE_UNLOCK();
9414 		return;
9415 	}
9416 
9417 	for (link = ql_hba.first; link != NULL; link = link->next) {
9418 		ha = link->base_address;
9419 
9420 		/* Skip adapter if suspended of stalled. */
9421 		ADAPTER_STATE_LOCK(ha);
9422 		if (ha->flags & ADAPTER_SUSPENDED ||
9423 		    ha->task_daemon_flags & DRIVER_STALL) {
9424 			ADAPTER_STATE_UNLOCK(ha);
9425 			continue;
9426 		}
9427 		ha->flags |= ADAPTER_TIMER_BUSY;
9428 		ADAPTER_STATE_UNLOCK(ha);
9429 
9430 		QL_PM_LOCK(ha);
9431 		if (ha->power_level != PM_LEVEL_D0) {
9432 			QL_PM_UNLOCK(ha);
9433 
9434 			ADAPTER_STATE_LOCK(ha);
9435 			ha->flags &= ~ADAPTER_TIMER_BUSY;
9436 			ADAPTER_STATE_UNLOCK(ha);
9437 			continue;
9438 		}
9439 		ha->busy++;
9440 		QL_PM_UNLOCK(ha);
9441 
9442 		set_flags = 0;
9443 		reset_flags = 0;
9444 
9445 		/* Port retry timer handler. */
9446 		if (LOOP_READY(ha)) {
9447 			ADAPTER_STATE_LOCK(ha);
9448 			if (ha->port_retry_timer != 0) {
9449 				ha->port_retry_timer--;
9450 				if (ha->port_retry_timer == 0) {
9451 					set_flags |= PORT_RETRY_NEEDED;
9452 				}
9453 			}
9454 			ADAPTER_STATE_UNLOCK(ha);
9455 		}
9456 
9457 		/* Loop down timer handler. */
9458 		if (LOOP_RECONFIGURE(ha) == 0) {
9459 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9460 				ha->loop_down_timer--;
9461 				/*
9462 				 * give the firmware loop down dump flag
9463 				 * a chance to work.
9464 				 */
9465 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9466 					if (CFG_IST(ha,
9467 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9468 						(void) ql_binary_fw_dump(ha,
9469 						    TRUE);
9470 					}
9471 					EL(ha, "loop_down_reset, "
9472 					    "isp_abort_needed\n");
9473 					set_flags |= ISP_ABORT_NEEDED;
9474 				}
9475 			}
9476 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9477 				/* Command abort time handler. */
9478 				if (ha->loop_down_timer ==
9479 				    ha->loop_down_abort_time) {
9480 					ADAPTER_STATE_LOCK(ha);
9481 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9482 					ADAPTER_STATE_UNLOCK(ha);
9483 					set_flags |= ABORT_QUEUES_NEEDED;
9484 					EL(ha, "loop_down_abort_time, "
9485 					    "abort_queues_needed\n");
9486 				}
9487 
9488 				/* Watchdog timer handler. */
9489 				if (ha->watchdog_timer == 0) {
9490 					ha->watchdog_timer = WATCHDOG_TIME;
9491 				} else if (LOOP_READY(ha)) {
9492 					ha->watchdog_timer--;
9493 					if (ha->watchdog_timer == 0) {
9494 						for (vha = ha; vha != NULL;
9495 						    vha = vha->vp_next) {
9496 							ql_watchdog(vha,
9497 							    &set_flags,
9498 							    &reset_flags);
9499 						}
9500 						ha->watchdog_timer =
9501 						    WATCHDOG_TIME;
9502 					}
9503 				}
9504 			}
9505 		}
9506 
9507 		/* Idle timer handler. */
9508 		if (!DRIVER_SUSPENDED(ha)) {
9509 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9510 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9511 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9512 #endif
9513 				ha->idle_timer = 0;
9514 			}
9515 			if (ha->send_plogi_timer != NULL) {
9516 				ha->send_plogi_timer--;
9517 				if (ha->send_plogi_timer == NULL) {
9518 					set_flags |= SEND_PLOGI;
9519 				}
9520 			}
9521 		}
9522 		ADAPTER_STATE_LOCK(ha);
9523 		if (ha->restart_mpi_timer != 0) {
9524 			ha->restart_mpi_timer--;
9525 			if (ha->restart_mpi_timer == 0 &&
9526 			    ha->idc_restart_mpi != 0) {
9527 				ha->idc_restart_mpi = 0;
9528 				reset_flags |= TASK_DAEMON_STALLED_FLG;
9529 			}
9530 		}
9531 		if (ha->flash_acc_timer != 0) {
9532 			ha->flash_acc_timer--;
9533 			if (ha->flash_acc_timer == 0 &&
9534 			    ha->idc_flash_acc != 0) {
9535 				ha->idc_flash_acc = 1;
9536 				ha->idc_mb[1] = 0;
9537 				ha->idc_mb[2] = IDC_OPC_DRV_START;
9538 				set_flags |= IDC_ACK_NEEDED;
9539 			}
9540 		}
9541 		ADAPTER_STATE_UNLOCK(ha);
9542 
9543 		if (set_flags != 0 || reset_flags != 0) {
9544 			ql_awaken_task_daemon(ha, NULL, set_flags,
9545 			    reset_flags);
9546 		}
9547 
9548 		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9549 			ql_blink_led(ha);
9550 		}
9551 
9552 		/* Update the IO stats */
9553 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9554 			ha->xioctl->IOInputMByteCnt +=
9555 			    (ha->xioctl->IOInputByteCnt / 0x100000);
9556 			ha->xioctl->IOInputByteCnt %= 0x100000;
9557 		}
9558 
9559 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9560 			ha->xioctl->IOOutputMByteCnt +=
9561 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9562 			ha->xioctl->IOOutputByteCnt %= 0x100000;
9563 		}
9564 
9565 		ADAPTER_STATE_LOCK(ha);
9566 		ha->flags &= ~ADAPTER_TIMER_BUSY;
9567 		ADAPTER_STATE_UNLOCK(ha);
9568 
9569 		QL_PM_LOCK(ha);
9570 		ha->busy--;
9571 		QL_PM_UNLOCK(ha);
9572 	}
9573 
9574 	/* Restart timer, if not being stopped. */
9575 	if (ql_timer_timeout_id != NULL) {
9576 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9577 	}
9578 
9579 	/* Release global state lock. */
9580 	GLOBAL_STATE_UNLOCK();
9581 
9582 	QL_PRINT_6(CE_CONT, "done\n");
9583 }
9584 
9585 /*
9586  * ql_timeout_insert
9587  *	Function used to insert a command block onto the
9588  *	watchdog timer queue.
9589  *
 *	Note: Must ensure that pkt_timeout is not zero
 *			before calling ql_timeout_insert.
9592  *
9593  * Input:
9594  *	ha:	adapter state pointer.
9595  *	tq:	target queue pointer.
9596  *	sp:	SRB pointer.
9597  *	DEVICE_QUEUE_LOCK must be already obtained.
9598  *
9599  * Context:
9600  *	Kernel context.
9601  */
9602 /* ARGSUSED */
9603 static void
9604 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9605 {
9606 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9607 
9608 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9609 		/* Make sure timeout >= 2 * R_A_TOV */
9610 		sp->isp_timeout = (uint16_t)
9611 		    (sp->pkt->pkt_timeout < ha->r_a_tov ? ha->r_a_tov :
9612 		    sp->pkt->pkt_timeout);
9613 
9614 		/*
9615 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9616 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9617 		 * will expire in the next watchdog call, which could be in
9618 		 * 1 microsecond.
9619 		 *
9620 		 */
9621 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9622 		    WATCHDOG_TIME;
9623 		/*
9624 		 * Added an additional 10 to account for the
9625 		 * firmware timer drift which can occur with
9626 		 * very long timeout values.
9627 		 */
9628 		sp->wdg_q_time += 10;
9629 
9630 		/*
9631 		 * Add 6 more to insure watchdog does not timeout at the same
9632 		 * time as ISP RISC code timeout.
9633 		 */
9634 		sp->wdg_q_time += 6;
9635 
9636 		/* Save initial time for resetting watchdog time. */
9637 		sp->init_wdg_q_time = sp->wdg_q_time;
9638 
9639 		/* Insert command onto watchdog queue. */
9640 		ql_add_link_b(&tq->wdg, &sp->wdg);
9641 
9642 		sp->flags |= SRB_WATCHDOG_ENABLED;
9643 	} else {
9644 		sp->isp_timeout = 0;
9645 		sp->wdg_q_time = 0;
9646 		sp->init_wdg_q_time = 0;
9647 	}
9648 
9649 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9650 }
9651 
9652 /*
9653  * ql_watchdog
9654  *	Timeout handler that runs in interrupt context. The
9655  *	ql_adapter_state_t * argument is the parameter set up when the
9656  *	timeout was initialized (state structure pointer).
9657  *	Function used to update timeout values and if timeout
9658  *	has occurred command will be aborted.
9659  *
9660  * Input:
9661  *	ha:		adapter state pointer.
9662  *	set_flags:	task daemon flags to set.
9663  *	reset_flags:	task daemon flags to reset.
9664  *
9665  * Context:
9666  *	Interrupt context, no mailbox commands allowed.
9667  */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/*
			 * Try to acquire device queue lock.  Running in
			 * interrupt context, so if the lock is busy, skip
			 * the rest of this hash bucket (next_device = NULL
			 * ends the inner loop) rather than blocking.
			 */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			/*
			 * Without link-down reporting, a device whose
			 * port-down retries are exhausted gets no further
			 * watchdog service.
			 */
			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: off the watchdog queue. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						/*
						 * Command was already in the
						 * ISP; ql_cmd_timeout will
						 * request an adapter reset,
						 * so abandon the whole scan:
						 * unlock here, NULL tq so
						 * the unlock below is
						 * skipped, and force index
						 * past the end to exit the
						 * outer loop.
						 */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9767 
9768 /*
9769  * ql_cmd_timeout
9770  *	Command timeout handler.
9771  *
9772  * Input:
9773  *	ha:		adapter state pointer.
9774  *	tq:		target queue pointer.
9775  *	sp:		SRB pointer.
9776  *	set_flags:	task daemon flags to set.
9777  *	reset_flags:	task daemon flags to reset.
9778  *
9779  * Context:
9780  *	Interrupt context, no mailbox commands allowed.
9781  */
/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * The caller holds the DEVICE_QUEUE_LOCK.  Both paths below
	 * drop it around completion processing and reacquire it
	 * before returning.
	 */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Command never reached the ISP; fail it in the driver. */

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);
	} else {
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Count the error against the physical adapter. */
		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally capture a firmware dump for diagnosis. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		/* Command is stuck in the ISP; request an adapter reset. */
		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9847 
9848 /*
9849  * ql_rst_aen
9850  *	Processes asynchronous reset.
9851  *
9852  * Input:
9853  *	ha = adapter state pointer.
9854  *
9855  * Context:
9856  *	Kernel context.
9857  */
static void
ql_rst_aen(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Issue a sync-all (MK_SYNC_ALL) marker command; status ignored. */
	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9868 
9869 /*
9870  * ql_cmd_wait
9871  *	Stall driver until all outstanding commands are returned.
9872  *
9873  * Input:
9874  *	ha = adapter state pointer.
9875  *
9876  * Context:
9877  *	Kernel context.
9878  */
9879 void
9880 ql_cmd_wait(ql_adapter_state_t *ha)
9881 {
9882 	uint16_t		index;
9883 	ql_link_t		*link;
9884 	ql_tgt_t		*tq;
9885 	ql_adapter_state_t	*vha;
9886 
9887 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9888 
9889 	/* Wait for all outstanding commands to be returned. */
9890 	(void) ql_wait_outstanding(ha);
9891 
9892 	/*
9893 	 * clear out internally queued commands
9894 	 */
9895 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9896 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9897 			for (link = vha->dev[index].first; link != NULL;
9898 			    link = link->next) {
9899 				tq = link->base_address;
9900 				if (tq &&
9901 				    (!(tq->prli_svc_param_word_3 &
9902 				    PRLI_W3_RETRY))) {
9903 					(void) ql_abort_device(vha, tq, 0);
9904 				}
9905 			}
9906 		}
9907 	}
9908 
9909 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9910 }
9911 
9912 /*
9913  * ql_wait_outstanding
9914  *	Wait for all outstanding commands to complete.
9915  *
9916  * Input:
9917  *	ha = adapter state pointer.
9918  *
9919  * Returns:
9920  *	index - the index for ql_srb into outstanding_cmds.
9921  *
9922  * Context:
9923  *	Kernel context.
9924  */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Retry budget: up to 3000 polls, each with a ql_delay(10000). */
	count = 3000;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * Keep feeding internally pending IOCBs to the ISP;
		 * setting index back resets the scan toward the start
		 * of the handle array.
		 */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/*
		 * A live command that has not been flagged as timed out:
		 * spend one unit of the budget waiting, then restart the
		 * scan from the beginning (index = 0 becomes 1 after the
		 * loop increment).  When the budget is exhausted, give
		 * up and return the stuck handle's index.
		 */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				index = 0;
			} else {
				EL(ha, "failed, sp=%ph\n", (void *)sp);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (index);
}
9955 
9956 /*
9957  * ql_restart_queues
9958  *	Restart device queues.
9959  *
9960  * Input:
9961  *	ha = adapter state pointer.
9962  *	DEVICE_QUEUE_LOCK must be released.
9963  *
9964  * Context:
9965  *	Interrupt or Kernel context, no mailbox commands allowed.
9966  */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk every device on the physical port and all virtual ports. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				/* Lift any suspension on this target. */
				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					/*
					 * Kick queued commands.
					 * NOTE(review): ql_next appears to
					 * release the device queue lock --
					 * it is reacquired right after;
					 * confirm against ql_next's
					 * contract.
					 */
					if (lq->cmd.first != NULL) {
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10007 
10008 /*
10009  * ql_iidma
10010  *	Setup iiDMA parameters to firmware
10011  *
10012  * Input:
10013  *	ha = adapter state pointer.
10014  *	DEVICE_QUEUE_LOCK must be released.
10015  *
10016  * Context:
10017  *	Interrupt or Kernel context, no mailbox commands allowed.
10018  */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Only adapters covered by CFG_CTRL_242581 support iiDMA. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* Only service devices flagged for an iiDMA update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/*
			 * Skip handles above LAST_N_PORT_HDL and devices
			 * whose rate is already known to be undefined.
			 */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/*
			 * Get the iiDMA persistent data.  The property
			 * name is keyed by the device's 8-byte port name;
			 * buf (256 bytes) comfortably holds it.
			 */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					/* No property; leave undefined. */
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/* 8G only on 25xx parts. */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/* Set the firmware's iiDMA rate (not on 81xx). */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10114 
10115 /*
10116  * ql_abort_queues
10117  *	Abort all commands on device queues.
10118  *
10119  * Input:
10120  *	ha = adapter state pointer.
10121  *
10122  * Context:
10123  *	Interrupt or Kernel context, no mailbox commands allowed.
10124  */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * Flush driver-pending IOCBs first.  INTR_LOCK is dropped
		 * around ql_start_iocb, and index is reset so the scan
		 * resumes near the start of the handle array.
		 */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retrys */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* Drop INTR_LOCK around completion processing. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush the internally queued commands on every port. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10196 
10197 /*
10198  * ql_abort_device_queues
10199  *	Abort all commands on device queues.
10200  *
 * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
 *
10204  * Context:
10205  *	Interrupt or Kernel context, no mailbox commands allowed.
10206  */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/* Walk every LUN queue on this target. */
	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Leave commands already marked for abort alone. */
			if (sp->flags & SRB_ABORT) {
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Drop the queue lock around completion. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The queue may have changed while unlocked, so
			 * restart the scan from the head of this LUN's
			 * command queue.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10255 
10256 /*
10257  * ql_loop_resync
10258  *	Resync with fibre channel devices.
10259  *
10260  * Input:
10261  *	ha = adapter state pointer.
10262  *	DEVICE_QUEUE_LOCK must be released.
10263  *
10264  * Returns:
10265  *	ql local function return status code.
10266  *
10267  * Context:
10268  *	Kernel context.
10269  */
10270 static int
10271 ql_loop_resync(ql_adapter_state_t *ha)
10272 {
10273 	int rval;
10274 
10275 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10276 
10277 	if (ha->flags & IP_INITIALIZED) {
10278 		(void) ql_shutdown_ip(ha);
10279 	}
10280 
10281 	rval = ql_fw_ready(ha, 10);
10282 
10283 	TASK_DAEMON_LOCK(ha);
10284 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10285 	TASK_DAEMON_UNLOCK(ha);
10286 
10287 	/* Set loop online, if it really is. */
10288 	if (rval == QL_SUCCESS) {
10289 		ql_loop_online(ha);
10290 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10291 	} else {
10292 		EL(ha, "failed, rval = %xh\n", rval);
10293 	}
10294 
10295 	return (rval);
10296 }
10297 
10298 /*
10299  * ql_loop_online
10300  *	Set loop online status if it really is online.
10301  *
10302  * Input:
10303  *	ha = adapter state pointer.
10304  *	DEVICE_QUEUE_LOCK must be released.
10305  *
10306  * Context:
10307  *	Kernel context.
10308  */
10309 void
10310 ql_loop_online(ql_adapter_state_t *ha)
10311 {
10312 	ql_adapter_state_t	*vha;
10313 
10314 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10315 
10316 	/* Inform the FC Transport that the hardware is online. */
10317 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10318 		if (!(vha->task_daemon_flags &
10319 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10320 			/* Restart IP if it was shutdown. */
10321 			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10322 			    !(vha->flags & IP_INITIALIZED)) {
10323 				(void) ql_initialize_ip(vha);
10324 				ql_isp_rcvbuf(vha);
10325 			}
10326 
10327 			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10328 			    FC_PORT_STATE_MASK(vha->state) !=
10329 			    FC_STATE_ONLINE) {
10330 				vha->state = FC_PORT_SPEED_MASK(vha->state);
10331 				if (vha->topology & QL_LOOP_CONNECTION) {
10332 					vha->state |= FC_STATE_LOOP;
10333 				} else {
10334 					vha->state |= FC_STATE_ONLINE;
10335 				}
10336 				TASK_DAEMON_LOCK(ha);
10337 				vha->task_daemon_flags |= FC_STATE_CHANGE;
10338 				TASK_DAEMON_UNLOCK(ha);
10339 			}
10340 		}
10341 	}
10342 
10343 	ql_awaken_task_daemon(ha, NULL, 0, 0);
10344 
10345 	/* Restart device queues that may have been stopped. */
10346 	ql_restart_queues(ha);
10347 
10348 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10349 }
10350 
10351 /*
10352  * ql_fca_handle_to_state
10353  *	Verifies handle to be correct.
10354  *
10355  * Input:
10356  *	fca_handle = pointer to state structure.
10357  *
10358  * Returns:
10359  *	NULL = failure
10360  *
10361  * Context:
10362  *	Kernel context.
10363  */
10364 static ql_adapter_state_t *
10365 ql_fca_handle_to_state(opaque_t fca_handle)
10366 {
10367 #ifdef	QL_DEBUG_ROUTINES
10368 	ql_link_t		*link;
10369 	ql_adapter_state_t	*ha = NULL;
10370 	ql_adapter_state_t	*vha = NULL;
10371 
10372 	for (link = ql_hba.first; link != NULL; link = link->next) {
10373 		ha = link->base_address;
10374 		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10375 			if ((opaque_t)vha == fca_handle) {
10376 				ha = vha;
10377 				break;
10378 			}
10379 		}
10380 		if ((opaque_t)ha == fca_handle) {
10381 			break;
10382 		} else {
10383 			ha = NULL;
10384 		}
10385 	}
10386 
10387 	if (ha == NULL) {
10388 		/*EMPTY*/
10389 		QL_PRINT_2(CE_CONT, "failed\n");
10390 	}
10391 
10392 #endif /* QL_DEBUG_ROUTINES */
10393 
10394 	return ((ql_adapter_state_t *)fca_handle);
10395 }
10396 
10397 /*
10398  * ql_d_id_to_queue
10399  *	Locate device queue that matches destination ID.
10400  *
10401  * Input:
10402  *	ha = adapter state pointer.
10403  *	d_id = destination ID
10404  *
10405  * Returns:
10406  *	NULL = failure
10407  *
10408  * Context:
10409  *	Interrupt or Kernel context, no mailbox commands allowed.
10410  */
10411 ql_tgt_t *
10412 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10413 {
10414 	uint16_t	index;
10415 	ql_tgt_t	*tq;
10416 	ql_link_t	*link;
10417 
10418 	/* Get head queue index. */
10419 	index = ql_alpa_to_index[d_id.b.al_pa];
10420 
10421 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10422 		tq = link->base_address;
10423 		if (tq->d_id.b24 == d_id.b24 &&
10424 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10425 			return (tq);
10426 		}
10427 	}
10428 
10429 	return (NULL);
10430 }
10431 
10432 /*
10433  * ql_loop_id_to_queue
10434  *	Locate device queue that matches loop ID.
10435  *
10436  * Input:
10437  *	ha:		adapter state pointer.
10438  *	loop_id:	destination ID
10439  *
10440  * Returns:
10441  *	NULL = failure
10442  *
10443  * Context:
10444  *	Interrupt or Kernel context, no mailbox commands allowed.
10445  */
10446 ql_tgt_t *
10447 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10448 {
10449 	uint16_t	index;
10450 	ql_tgt_t	*tq;
10451 	ql_link_t	*link;
10452 
10453 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10454 		for (link = ha->dev[index].first; link != NULL;
10455 		    link = link->next) {
10456 			tq = link->base_address;
10457 			if (tq->loop_id == loop_id) {
10458 				return (tq);
10459 			}
10460 		}
10461 	}
10462 
10463 	return (NULL);
10464 }
10465 
10466 /*
10467  * ql_kstat_update
10468  *	Updates kernel statistics.
10469  *
10470  * Input:
10471  *	ksp - driver kernel statistics structure pointer.
10472  *	rw - function to perform
10473  *
10474  * Returns:
10475  *	0 or EACCES
10476  *
10477  * Context:
10478  *	Kernel context.
10479  */
10480 /* ARGSUSED */
10481 static int
10482 ql_kstat_update(kstat_t *ksp, int rw)
10483 {
10484 	int			rval;
10485 
10486 	QL_PRINT_3(CE_CONT, "started\n");
10487 
10488 	if (rw == KSTAT_WRITE) {
10489 		rval = EACCES;
10490 	} else {
10491 		rval = 0;
10492 	}
10493 
10494 	if (rval != 0) {
10495 		/*EMPTY*/
10496 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10497 	} else {
10498 		/*EMPTY*/
10499 		QL_PRINT_3(CE_CONT, "done\n");
10500 	}
10501 	return (rval);
10502 }
10503 
10504 /*
10505  * ql_load_flash
10506  *	Loads flash.
10507  *
10508  * Input:
10509  *	ha:	adapter state pointer.
10510  *	dp:	data pointer.
10511  *	size:	data length.
10512  *
10513  * Returns:
10514  *	ql local function return status code.
10515  *
10516  * Context:
10517  *	Kernel context.
10518  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* Adapters covered by CFG_CTRL_242581 use their own flash path. */
	if (CFG_IST(ha, CFG_CTRL_242581)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Default: up to a 128KB image written at offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/*
			 * SBUS partial update: 256KB maximum; FPGA
			 * images (ql_flash_sbus_fpga) are written at
			 * offset 0x40000.
			 */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	/* Reject images larger than the selected region. */
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash, one byte at a time. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10590 
10591 /*
10592  * ql_program_flash_address
10593  *	Program flash address.
10594  *
10595  * Input:
10596  *	ha = adapter state pointer.
10597  *	addr = flash byte address.
10598  *	data = data to be written to flash.
10599  *
10600  * Returns:
10601  *	ql local function return status code.
10602  *
10603  * Context:
10604  *	Kernel context.
10605  */
10606 static int
10607 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10608 {
10609 	int rval;
10610 
10611 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10612 
10613 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10614 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10615 		ql_write_flash_byte(ha, addr, data);
10616 	} else {
10617 		/* Write Program Command Sequence */
10618 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10619 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10620 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10621 		ql_write_flash_byte(ha, addr, data);
10622 	}
10623 
10624 	/* Wait for write to complete. */
10625 	rval = ql_poll_flash(ha, addr, data);
10626 
10627 	if (rval != QL_SUCCESS) {
10628 		EL(ha, "failed=%xh\n", rval);
10629 	} else {
10630 		/*EMPTY*/
10631 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10632 	}
10633 	return (rval);
10634 }
10635 
10636 /*
10637  * ql_erase_flash
10638  *	Erases entire flash.
10639  *
10640  * Input:
10641  *	ha = adapter state pointer.
10642  *
10643  * Returns:
10644  *	ql local function return status code.
10645  *
10646  * Context:
10647  *	Kernel context.
10648  */
10649 int
10650 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10651 {
10652 	int		rval;
10653 	uint32_t	erase_delay = 2000000;
10654 	uint32_t	sStartAddr;
10655 	uint32_t	ssize;
10656 	uint32_t	cnt;
10657 	uint8_t		*bfp;
10658 	uint8_t		*tmp;
10659 
10660 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10661 
10662 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10663 
10664 		if (ql_flash_sbus_fpga == 1) {
10665 			ssize = QL_SBUS_FCODE_SIZE;
10666 			sStartAddr = QL_FCODE_OFFSET;
10667 		} else {
10668 			ssize = QL_FPGA_SIZE;
10669 			sStartAddr = QL_FPGA_OFFSET;
10670 		}
10671 
10672 		erase_delay = 20000000;
10673 
10674 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10675 
10676 		/* Save the section of flash we're not updating to buffer */
10677 		tmp = bfp;
10678 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10679 			/* Allow other system activity. */
10680 			if (cnt % 0x1000 == 0) {
10681 				ql_delay(ha, 10000);
10682 			}
10683 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10684 		}
10685 	}
10686 
10687 	/* Chip Erase Command Sequence */
10688 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10689 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10690 	ql_write_flash_byte(ha, 0x5555, 0x80);
10691 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10692 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10693 	ql_write_flash_byte(ha, 0x5555, 0x10);
10694 
10695 	ql_delay(ha, erase_delay);
10696 
10697 	/* Wait for erase to complete. */
10698 	rval = ql_poll_flash(ha, 0, 0x80);
10699 
10700 	if (rval != QL_SUCCESS) {
10701 		EL(ha, "failed=%xh\n", rval);
10702 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10703 			kmem_free(bfp, ssize);
10704 		}
10705 		return (rval);
10706 	}
10707 
10708 	/* restore the section we saved in the buffer */
10709 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10710 		/* Restore the section we saved off */
10711 		tmp = bfp;
10712 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10713 			/* Allow other system activity. */
10714 			if (cnt % 0x1000 == 0) {
10715 				ql_delay(ha, 10000);
10716 			}
10717 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10718 			if (rval != QL_SUCCESS) {
10719 				break;
10720 			}
10721 		}
10722 
10723 		kmem_free(bfp, ssize);
10724 	}
10725 
10726 	if (rval != QL_SUCCESS) {
10727 		EL(ha, "failed=%xh\n", rval);
10728 	} else {
10729 		/*EMPTY*/
10730 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10731 	}
10732 	return (rval);
10733 }
10734 
10735 /*
10736  * ql_poll_flash
10737  *	Polls flash for completion.
10738  *
10739  * Input:
10740  *	ha = adapter state pointer.
10741  *	addr = flash byte address.
10742  *	data = data to be polled.
10743  *
10744  * Returns:
10745  *	ql local function return status code.
10746  *
10747  * Context:
10748  *	Kernel context.
10749  */
10750 int
10751 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10752 {
10753 	uint8_t		flash_data;
10754 	uint32_t	cnt;
10755 	int		rval = QL_FUNCTION_FAILED;
10756 
10757 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10758 
10759 	poll_data = (uint8_t)(poll_data & BIT_7);
10760 
10761 	/* Wait for 30 seconds for command to finish. */
10762 	for (cnt = 30000000; cnt; cnt--) {
10763 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10764 
10765 		if ((flash_data & BIT_7) == poll_data) {
10766 			rval = QL_SUCCESS;
10767 			break;
10768 		}
10769 		if (flash_data & BIT_5 && cnt > 2) {
10770 			cnt = 2;
10771 		}
10772 		drv_usecwait(1);
10773 	}
10774 
10775 	if (rval != QL_SUCCESS) {
10776 		EL(ha, "failed=%xh\n", rval);
10777 	} else {
10778 		/*EMPTY*/
10779 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10780 	}
10781 	return (rval);
10782 }
10783 
10784 /*
10785  * ql_flash_enable
10786  *	Setup flash for reading/writing.
10787  *
10788  * Input:
10789  *	ha = adapter state pointer.
10790  *
10791  * Context:
10792  *	Kernel context.
10793  */
10794 void
10795 ql_flash_enable(ql_adapter_state_t *ha)
10796 {
10797 	uint16_t	data;
10798 
10799 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10800 
10801 	/* Enable Flash Read/Write. */
10802 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10803 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10804 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10805 		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
10806 		ddi_put16(ha->sbus_fpga_dev_handle,
10807 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10808 		/* Read reset command sequence */
10809 		ql_write_flash_byte(ha, 0xaaa, 0xaa);
10810 		ql_write_flash_byte(ha, 0x555, 0x55);
10811 		ql_write_flash_byte(ha, 0xaaa, 0x20);
10812 		ql_write_flash_byte(ha, 0x555, 0xf0);
10813 	} else {
10814 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
10815 		    ISP_FLASH_ENABLE);
10816 		WRT16_IO_REG(ha, ctrl_status, data);
10817 
10818 		/* Read/Reset Command Sequence */
10819 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10820 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10821 		ql_write_flash_byte(ha, 0x5555, 0xf0);
10822 	}
10823 	(void) ql_read_flash_byte(ha, 0);
10824 
10825 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10826 }
10827 
10828 /*
10829  * ql_flash_disable
10830  *	Disable flash and allow RISC to run.
10831  *
10832  * Input:
10833  *	ha = adapter state pointer.
10834  *
10835  * Context:
10836  *	Kernel context.
10837  */
10838 void
10839 ql_flash_disable(ql_adapter_state_t *ha)
10840 {
10841 	uint16_t	data;
10842 
10843 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10844 
10845 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10846 		/*
10847 		 * Lock the flash back up.
10848 		 */
10849 		ql_write_flash_byte(ha, 0x555, 0x90);
10850 		ql_write_flash_byte(ha, 0x555, 0x0);
10851 
10852 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10853 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10854 		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
10855 		ddi_put16(ha->sbus_fpga_dev_handle,
10856 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10857 	} else {
10858 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
10859 		    ~ISP_FLASH_ENABLE);
10860 		WRT16_IO_REG(ha, ctrl_status, data);
10861 	}
10862 
10863 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10864 }
10865 
10866 /*
10867  * ql_write_flash_byte
10868  *	Write byte to flash.
10869  *
10870  * Input:
10871  *	ha = adapter state pointer.
10872  *	addr = flash byte address.
10873  *	data = data to be written.
10874  *
10875  * Context:
10876  *	Kernel context.
10877  */
10878 void
10879 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10880 {
10881 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10882 		ddi_put16(ha->sbus_fpga_dev_handle,
10883 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10884 		    LSW(addr));
10885 		ddi_put16(ha->sbus_fpga_dev_handle,
10886 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10887 		    MSW(addr));
10888 		ddi_put16(ha->sbus_fpga_dev_handle,
10889 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
10890 		    (uint16_t)data);
10891 	} else {
10892 		uint16_t bank_select;
10893 
10894 		/* Setup bit 16 of flash address. */
10895 		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
10896 
10897 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10898 			bank_select = (uint16_t)(bank_select & ~0xf0);
10899 			bank_select = (uint16_t)(bank_select |
10900 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10901 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10902 		} else {
10903 			if (addr & BIT_16 && !(bank_select &
10904 			    ISP_FLASH_64K_BANK)) {
10905 				bank_select = (uint16_t)(bank_select |
10906 				    ISP_FLASH_64K_BANK);
10907 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10908 			} else if (!(addr & BIT_16) && bank_select &
10909 			    ISP_FLASH_64K_BANK) {
10910 				bank_select = (uint16_t)(bank_select &
10911 				    ~ISP_FLASH_64K_BANK);
10912 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10913 			}
10914 		}
10915 
10916 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10917 			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
10918 			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
10919 		} else {
10920 			WRT16_IOMAP_REG(ha, flash_address, addr);
10921 			WRT16_IOMAP_REG(ha, flash_data, data);
10922 		}
10923 	}
10924 }
10925 
10926 /*
10927  * ql_read_flash_byte
10928  *	Reads byte from flash, but must read a word from chip.
10929  *
10930  * Input:
10931  *	ha = adapter state pointer.
10932  *	addr = flash byte address.
10933  *
10934  * Returns:
10935  *	byte from flash.
10936  *
10937  * Context:
10938  *	Kernel context.
10939  */
10940 uint8_t
10941 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
10942 {
10943 	uint8_t	data;
10944 
10945 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10946 		ddi_put16(ha->sbus_fpga_dev_handle,
10947 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10948 		    LSW(addr));
10949 		ddi_put16(ha->sbus_fpga_dev_handle,
10950 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10951 		    MSW(addr));
10952 		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
10953 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
10954 	} else {
10955 		uint16_t	bank_select;
10956 
10957 		/* Setup bit 16 of flash address. */
10958 		bank_select = RD16_IO_REG(ha, ctrl_status);
10959 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10960 			bank_select = (uint16_t)(bank_select & ~0xf0);
10961 			bank_select = (uint16_t)(bank_select |
10962 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10963 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10964 		} else {
10965 			if (addr & BIT_16 &&
10966 			    !(bank_select & ISP_FLASH_64K_BANK)) {
10967 				bank_select = (uint16_t)(bank_select |
10968 				    ISP_FLASH_64K_BANK);
10969 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10970 			} else if (!(addr & BIT_16) &&
10971 			    bank_select & ISP_FLASH_64K_BANK) {
10972 				bank_select = (uint16_t)(bank_select &
10973 				    ~ISP_FLASH_64K_BANK);
10974 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10975 			}
10976 		}
10977 
10978 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10979 			WRT16_IO_REG(ha, flash_address, addr);
10980 			data = (uint8_t)RD16_IO_REG(ha, flash_data);
10981 		} else {
10982 			WRT16_IOMAP_REG(ha, flash_address, addr);
10983 			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
10984 		}
10985 	}
10986 
10987 	return (data);
10988 }
10989 
10990 /*
10991  * ql_24xx_flash_id
10992  *	Get flash IDs.
10993  *
10994  * Input:
10995  *	ha:		adapter state pointer.
10996  *
10997  * Returns:
10998  *	ql local function return status code.
10999  *
11000  * Context:
11001  *	Kernel context.
11002  */
11003 int
11004 ql_24xx_flash_id(ql_adapter_state_t *vha)
11005 {
11006 	int			rval;
11007 	uint32_t		fdata = 0;
11008 	ql_adapter_state_t	*ha = vha->pha;
11009 	ql_xioctl_t		*xp = ha->xioctl;
11010 
11011 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11012 
11013 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11014 
11015 	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11016 		fdata = 0;
11017 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11018 		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11019 	}
11020 
11021 	if (rval != QL_SUCCESS) {
11022 		EL(ha, "24xx read_flash failed=%xh\n", rval);
11023 	} else if (fdata != 0) {
11024 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11025 		xp->fdesc.flash_id = MSB(LSW(fdata));
11026 		xp->fdesc.flash_len = LSB(MSW(fdata));
11027 	} else {
11028 		xp->fdesc.flash_manuf = ATMEL_FLASH;
11029 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11030 		xp->fdesc.flash_len = 0;
11031 	}
11032 
11033 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11034 
11035 	return (rval);
11036 }
11037 
11038 /*
11039  * ql_24xx_load_flash
11040  *	Loads flash.
11041  *
11042  * Input:
11043  *	ha = adapter state pointer.
11044  *	dp = data pointer.
11045  *	size = data length in bytes.
11046  *	faddr = 32bit word flash byte address.
11047  *
11048  * Returns:
11049  *	ql local function return status code.
11050  *
11051  * Context:
11052  *	Kernel context.
11053  */
11054 int
11055 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11056     uint32_t faddr)
11057 {
11058 	int			rval;
11059 	uint32_t		cnt, rest_addr, fdata, wc;
11060 	dma_mem_t		dmabuf = {0};
11061 	ql_adapter_state_t	*ha = vha->pha;
11062 	ql_xioctl_t		*xp = ha->xioctl;
11063 
11064 	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11065 	    ha->instance, faddr, size);
11066 
11067 	/* start address must be 32 bit word aligned */
11068 	if ((faddr & 0x3) != 0) {
11069 		EL(ha, "incorrect buffer size alignment\n");
11070 		return (QL_FUNCTION_PARAMETER_ERROR);
11071 	}
11072 
11073 	/* Allocate DMA buffer */
11074 	if (CFG_IST(ha, CFG_CTRL_2581)) {
11075 		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11076 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11077 		    QL_SUCCESS) {
11078 			EL(ha, "dma alloc failed, rval=%xh\n", rval);
11079 			return (rval);
11080 		}
11081 	}
11082 
11083 	GLOBAL_HW_LOCK();
11084 
11085 	/* Enable flash write */
11086 	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11087 		GLOBAL_HW_UNLOCK();
11088 		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11089 		ql_free_phys(ha, &dmabuf);
11090 		return (rval);
11091 	}
11092 
11093 	/* setup mask of address range within a sector */
11094 	rest_addr = (xp->fdesc.block_size - 1) >> 2;
11095 
11096 	faddr = faddr >> 2;	/* flash gets 32 bit words */
11097 
11098 	/*
11099 	 * Write data to flash.
11100 	 */
11101 	cnt = 0;
11102 	size = (size + 3) >> 2;	/* Round up & convert to dwords */
11103 
11104 	while (cnt < size) {
11105 		/* Beginning of a sector? */
11106 		if ((faddr & rest_addr) == 0) {
11107 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
11108 				fdata = ha->flash_data_addr | faddr;
11109 				rval = ql_flash_access(ha,
11110 				    FAC_ERASE_SECTOR, fdata, fdata +
11111 				    rest_addr, 0);
11112 				if (rval != QL_SUCCESS) {
11113 					EL(ha, "erase sector status="
11114 					    "%xh, start=%xh, end=%xh"
11115 					    "\n", rval, fdata,
11116 					    fdata + rest_addr);
11117 					break;
11118 				}
11119 			} else {
11120 				fdata = (faddr & ~rest_addr) << 2;
11121 				fdata = (fdata & 0xff00) |
11122 				    (fdata << 16 & 0xff0000) |
11123 				    (fdata >> 16 & 0xff);
11124 
11125 				if (rest_addr == 0x1fff) {
11126 					/* 32kb sector block erase */
11127 					rval = ql_24xx_write_flash(ha,
11128 					    FLASH_CONF_ADDR | 0x0352,
11129 					    fdata);
11130 				} else {
11131 					/* 64kb sector block erase */
11132 					rval = ql_24xx_write_flash(ha,
11133 					    FLASH_CONF_ADDR | 0x03d8,
11134 					    fdata);
11135 				}
11136 				if (rval != QL_SUCCESS) {
11137 					EL(ha, "Unable to flash sector"
11138 					    ": address=%xh\n", faddr);
11139 					break;
11140 				}
11141 			}
11142 		}
11143 
11144 		/* Write data */
11145 		if (CFG_IST(ha, CFG_CTRL_2581) &&
11146 		    ((faddr & 0x3f) == 0)) {
11147 			/*
11148 			 * Limit write up to sector boundary.
11149 			 */
11150 			wc = ((~faddr & (rest_addr>>1)) + 1);
11151 
11152 			if (size - cnt < wc) {
11153 				wc = size - cnt;
11154 			}
11155 
11156 			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11157 			    (uint8_t *)dmabuf.bp, wc<<2,
11158 			    DDI_DEV_AUTOINCR);
11159 
11160 			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11161 			    faddr, dmabuf.cookie.dmac_laddress, wc);
11162 			if (rval != QL_SUCCESS) {
11163 				EL(ha, "unable to dma to flash "
11164 				    "address=%xh\n", faddr << 2);
11165 				break;
11166 			}
11167 
11168 			cnt += wc;
11169 			faddr += wc;
11170 			dp += wc << 2;
11171 		} else {
11172 			fdata = *dp++;
11173 			fdata |= *dp++ << 8;
11174 			fdata |= *dp++ << 16;
11175 			fdata |= *dp++ << 24;
11176 			rval = ql_24xx_write_flash(ha,
11177 			    ha->flash_data_addr | faddr, fdata);
11178 			if (rval != QL_SUCCESS) {
11179 				EL(ha, "Unable to program flash "
11180 				    "address=%xh data=%xh\n", faddr,
11181 				    *dp);
11182 				break;
11183 			}
11184 			cnt++;
11185 			faddr++;
11186 
11187 			/* Allow other system activity. */
11188 			if (cnt % 0x1000 == 0) {
11189 				ql_delay(ha, 10000);
11190 			}
11191 		}
11192 	}
11193 
11194 	ql_24xx_protect_flash(ha);
11195 
11196 	ql_free_phys(ha, &dmabuf);
11197 
11198 	GLOBAL_HW_UNLOCK();
11199 
11200 	if (rval != QL_SUCCESS) {
11201 		EL(ha, "failed=%xh\n", rval);
11202 	} else {
11203 		/*EMPTY*/
11204 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11205 	}
11206 	return (rval);
11207 }
11208 
11209 /*
11210  * ql_24xx_read_flash
11211  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11212  *
11213  * Input:
11214  *	ha:	adapter state pointer.
11215  *	faddr:	NVRAM/FLASH address.
11216  *	bp:	data pointer.
11217  *
11218  * Returns:
11219  *	ql local function return status code.
11220  *
11221  * Context:
11222  *	Kernel context.
11223  */
11224 int
11225 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11226 {
11227 	uint32_t		timer;
11228 	int			rval = QL_SUCCESS;
11229 	ql_adapter_state_t	*ha = vha->pha;
11230 
11231 	/* Clear access error flag */
11232 	WRT32_IO_REG(ha, ctrl_status,
11233 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11234 
11235 	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11236 
11237 	/* Wait for READ cycle to complete. */
11238 	for (timer = 300000; timer; timer--) {
11239 		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11240 			break;
11241 		}
11242 		drv_usecwait(10);
11243 	}
11244 
11245 	if (timer == 0) {
11246 		EL(ha, "failed, timeout\n");
11247 		rval = QL_FUNCTION_TIMEOUT;
11248 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11249 		EL(ha, "failed, access error\n");
11250 		rval = QL_FUNCTION_FAILED;
11251 	}
11252 
11253 	*bp = RD32_IO_REG(ha, flash_data);
11254 
11255 	return (rval);
11256 }
11257 
11258 /*
11259  * ql_24xx_write_flash
11260  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11261  *
11262  * Input:
11263  *	ha:	adapter state pointer.
11264  *	addr:	NVRAM/FLASH address.
11265  *	value:	data.
11266  *
11267  * Returns:
11268  *	ql local function return status code.
11269  *
11270  * Context:
11271  *	Kernel context.
11272  */
11273 int
11274 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11275 {
11276 	uint32_t		timer, fdata;
11277 	int			rval = QL_SUCCESS;
11278 	ql_adapter_state_t	*ha = vha->pha;
11279 
11280 	/* Clear access error flag */
11281 	WRT32_IO_REG(ha, ctrl_status,
11282 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11283 
11284 	WRT32_IO_REG(ha, flash_data, data);
11285 	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
11286 	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11287 
11288 	/* Wait for Write cycle to complete. */
11289 	for (timer = 3000000; timer; timer--) {
11290 		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11291 			/* Check flash write in progress. */
11292 			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11293 				(void) ql_24xx_read_flash(ha,
11294 				    FLASH_CONF_ADDR | 0x005, &fdata);
11295 				if (!(fdata & BIT_0)) {
11296 					break;
11297 				}
11298 			} else {
11299 				break;
11300 			}
11301 		}
11302 		drv_usecwait(10);
11303 	}
11304 	if (timer == 0) {
11305 		EL(ha, "failed, timeout\n");
11306 		rval = QL_FUNCTION_TIMEOUT;
11307 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11308 		EL(ha, "access error\n");
11309 		rval = QL_FUNCTION_FAILED;
11310 	}
11311 
11312 	return (rval);
11313 }
11314 /*
11315  * ql_24xx_unprotect_flash
11316  *	Enable writes
11317  *
11318  * Input:
11319  *	ha:	adapter state pointer.
11320  *
11321  * Returns:
11322  *	ql local function return status code.
11323  *
11324  * Context:
11325  *	Kernel context.
11326  */
11327 int
11328 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11329 {
11330 	int			rval;
11331 	uint32_t		fdata;
11332 	ql_adapter_state_t	*ha = vha->pha;
11333 	ql_xioctl_t		*xp = ha->xioctl;
11334 
11335 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11336 
11337 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
11338 		if (ha->task_daemon_flags & FIRMWARE_UP) {
11339 			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11340 			    0)) != QL_SUCCESS) {
11341 				EL(ha, "status=%xh\n", rval);
11342 			}
11343 			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11344 			    ha->instance);
11345 			return (rval);
11346 		}
11347 	} else {
11348 		/* Enable flash write. */
11349 		WRT32_IO_REG(ha, ctrl_status,
11350 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11351 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11352 	}
11353 
11354 	/*
11355 	 * Remove block write protection (SST and ST) and
11356 	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11357 	 * Unprotect sectors.
11358 	 */
11359 	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11360 	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11361 
11362 	if (xp->fdesc.unprotect_sector_cmd != 0) {
11363 		for (fdata = 0; fdata < 0x10; fdata++) {
11364 			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11365 			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11366 		}
11367 
11368 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11369 		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
11370 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11371 		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
11372 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11373 		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
11374 	}
11375 
11376 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11377 
11378 	return (QL_SUCCESS);
11379 }
11380 
11381 /*
11382  * ql_24xx_protect_flash
11383  *	Disable writes
11384  *
11385  * Input:
11386  *	ha:	adapter state pointer.
11387  *
11388  * Context:
11389  *	Kernel context.
11390  */
11391 void
11392 ql_24xx_protect_flash(ql_adapter_state_t *vha)
11393 {
11394 	int			rval;
11395 	uint32_t		fdata;
11396 	ql_adapter_state_t	*ha = vha->pha;
11397 	ql_xioctl_t		*xp = ha->xioctl;
11398 
11399 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11400 
11401 	if (CFG_IST(ha, CFG_CTRL_81XX)) {
11402 		if (ha->task_daemon_flags & FIRMWARE_UP) {
11403 			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11404 			    0)) != QL_SUCCESS) {
11405 				EL(ha, "status=%xh\n", rval);
11406 			}
11407 			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11408 			    ha->instance);
11409 			return;
11410 		}
11411 	} else {
11412 		/* Enable flash write. */
11413 		WRT32_IO_REG(ha, ctrl_status,
11414 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11415 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11416 	}
11417 
11418 	/*
11419 	 * Protect sectors.
11420 	 * Set block write protection (SST and ST) and
11421 	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11422 	 */
11423 	if (xp->fdesc.protect_sector_cmd != 0) {
11424 		for (fdata = 0; fdata < 0x10; fdata++) {
11425 			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11426 			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
11427 		}
11428 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11429 		    xp->fdesc.protect_sector_cmd, 0x00400f);
11430 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11431 		    xp->fdesc.protect_sector_cmd, 0x00600f);
11432 		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11433 		    xp->fdesc.protect_sector_cmd, 0x00800f);
11434 
11435 		/* TODO: ??? */
11436 		(void) ql_24xx_write_flash(ha,
11437 		    FLASH_CONF_ADDR | 0x101, 0x80);
11438 	} else {
11439 		(void) ql_24xx_write_flash(ha,
11440 		    FLASH_CONF_ADDR | 0x101, 0x9c);
11441 	}
11442 
11443 	/* Disable flash write. */
11444 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
11445 		WRT32_IO_REG(ha, ctrl_status,
11446 		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11447 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11448 	}
11449 
11450 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11451 }
11452 
11453 /*
11454  * ql_dump_firmware
11455  *	Save RISC code state information.
11456  *
11457  * Input:
11458  *	ha = adapter state pointer.
11459  *
11460  * Returns:
11461  *	QL local function return status code.
11462  *
11463  * Context:
11464  *	Kernel context.
11465  */
11466 static int
11467 ql_dump_firmware(ql_adapter_state_t *vha)
11468 {
11469 	int			rval;
11470 	clock_t			timer;
11471 	ql_adapter_state_t	*ha = vha->pha;
11472 
11473 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11474 
11475 	QL_DUMP_LOCK(ha);
11476 
11477 	if (ha->ql_dump_state & QL_DUMPING ||
11478 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11479 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11480 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11481 		QL_DUMP_UNLOCK(ha);
11482 		return (QL_SUCCESS);
11483 	}
11484 
11485 	QL_DUMP_UNLOCK(ha);
11486 
11487 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11488 
11489 	/*
11490 	 * Wait for all outstanding commands to complete
11491 	 */
11492 	(void) ql_wait_outstanding(ha);
11493 
11494 	/* Dump firmware. */
11495 	rval = ql_binary_fw_dump(ha, TRUE);
11496 
11497 	/* Do abort to force restart. */
11498 	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11499 	EL(ha, "restarting, isp_abort_needed\n");
11500 
11501 	/* Acquire task daemon lock. */
11502 	TASK_DAEMON_LOCK(ha);
11503 
11504 	/* Wait for suspension to end. */
11505 	while (ha->task_daemon_flags & QL_SUSPENDED) {
11506 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11507 
11508 		/* 30 seconds from now */
11509 		timer = ddi_get_lbolt();
11510 		timer += drv_usectohz(30000000);
11511 
11512 		if (cv_timedwait(&ha->cv_dr_suspended,
11513 		    &ha->task_daemon_mutex, timer) == -1) {
11514 			/*
11515 			 * The timeout time 'timer' was
11516 			 * reached without the condition
11517 			 * being signaled.
11518 			 */
11519 			break;
11520 		}
11521 	}
11522 
11523 	/* Release task daemon lock. */
11524 	TASK_DAEMON_UNLOCK(ha);
11525 
11526 	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11527 		/*EMPTY*/
11528 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11529 	} else {
11530 		EL(ha, "failed, rval = %xh\n", rval);
11531 	}
11532 	return (rval);
11533 }
11534 
11535 /*
11536  * ql_binary_fw_dump
11537  *	Dumps binary data from firmware.
11538  *
11539  * Input:
11540  *	ha = adapter state pointer.
11541  *	lock_needed = mailbox lock needed.
11542  *
11543  * Returns:
11544  *	ql local function return status code.
11545  *
11546  * Context:
11547  *	Interrupt or Kernel context, no mailbox commands allowed.
11548  */
11549 int
11550 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11551 {
11552 	clock_t			timer;
11553 	mbx_cmd_t		mc;
11554 	mbx_cmd_t		*mcp = &mc;
11555 	int			rval = QL_SUCCESS;
11556 	ql_adapter_state_t	*ha = vha->pha;
11557 
11558 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11559 
11560 	QL_DUMP_LOCK(ha);
11561 
11562 	if (ha->ql_dump_state & QL_DUMPING ||
11563 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11564 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11565 		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11566 		QL_DUMP_UNLOCK(ha);
11567 		return (QL_DATA_EXISTS);
11568 	}
11569 
11570 	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11571 	ha->ql_dump_state |= QL_DUMPING;
11572 
11573 	QL_DUMP_UNLOCK(ha);
11574 
11575 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11576 
11577 		/* Insert Time Stamp */
11578 		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11579 		    FTO_INSERT_TIME_STAMP);
11580 		if (rval != QL_SUCCESS) {
11581 			EL(ha, "f/w extended trace insert"
11582 			    "time stamp failed: %xh\n", rval);
11583 		}
11584 	}
11585 
11586 	if (lock_needed == TRUE) {
11587 		/* Acquire mailbox register lock. */
11588 		MBX_REGISTER_LOCK(ha);
11589 
11590 		/* Check for mailbox available, if not wait for signal. */
11591 		while (ha->mailbox_flags & MBX_BUSY_FLG) {
11592 			ha->mailbox_flags = (uint8_t)
11593 			    (ha->mailbox_flags | MBX_WANT_FLG);
11594 
11595 			/* 30 seconds from now */
11596 			timer = ddi_get_lbolt();
11597 			timer += (ha->mcp->timeout + 2) *
11598 			    drv_usectohz(1000000);
11599 			if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11600 			    timer) == -1) {
11601 				/*
11602 				 * The timeout time 'timer' was
11603 				 * reached without the condition
11604 				 * being signaled.
11605 				 */
11606 
11607 				/* Release mailbox register lock. */
11608 				MBX_REGISTER_UNLOCK(ha);
11609 
11610 				EL(ha, "failed, rval = %xh\n",
11611 				    QL_FUNCTION_TIMEOUT);
11612 				return (QL_FUNCTION_TIMEOUT);
11613 			}
11614 		}
11615 
11616 		/* Set busy flag. */
11617 		ha->mailbox_flags = (uint8_t)
11618 		    (ha->mailbox_flags | MBX_BUSY_FLG);
11619 		mcp->timeout = 120;
11620 		ha->mcp = mcp;
11621 
11622 		/* Release mailbox register lock. */
11623 		MBX_REGISTER_UNLOCK(ha);
11624 	}
11625 
11626 	/* Free previous dump buffer. */
11627 	if (ha->ql_dump_ptr != NULL) {
11628 		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11629 		ha->ql_dump_ptr = NULL;
11630 	}
11631 
11632 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11633 		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11634 		    ha->fw_ext_memory_size);
11635 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11636 		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11637 		    ha->fw_ext_memory_size);
11638 	} else {
11639 		ha->ql_dump_size = sizeof (ql_fw_dump_t);
11640 	}
11641 
11642 	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11643 	    NULL) {
11644 		rval = QL_MEMORY_ALLOC_FAILED;
11645 	} else {
11646 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11647 			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11648 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11649 			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11650 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
11651 			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11652 		} else {
11653 			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11654 		}
11655 	}
11656 
11657 	/* Reset ISP chip. */
11658 	ql_reset_chip(ha);
11659 
11660 	QL_DUMP_LOCK(ha);
11661 
11662 	if (rval != QL_SUCCESS) {
11663 		if (ha->ql_dump_ptr != NULL) {
11664 			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11665 			ha->ql_dump_ptr = NULL;
11666 		}
11667 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11668 		    QL_DUMP_UPLOADED);
11669 		EL(ha, "failed, rval = %xh\n", rval);
11670 	} else {
11671 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11672 		ha->ql_dump_state |= QL_DUMP_VALID;
11673 		EL(ha, "done\n");
11674 	}
11675 
11676 	QL_DUMP_UNLOCK(ha);
11677 
11678 	return (rval);
11679 }
11680 
11681 /*
11682  * ql_ascii_fw_dump
11683  *	Converts firmware binary dump to ascii.
11684  *
11685  * Input:
11686  *	ha = adapter state pointer.
11687  *	bptr = buffer pointer.
11688  *
11689  * Returns:
11690  *	Amount of data buffer used.
11691  *
11692  * Context:
11693  *	Kernel context.
11694  */
size_t
ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp;
	int			mbox_cnt;
	/* Work on the physical adapter; the dump buffer hangs off it. */
	ql_adapter_state_t	*ha = vha->pha;
	ql_fw_dump_t		*fw = ha->ql_dump_ptr;

	/*
	 * ISP24xx/25xx binary dumps have their own layouts; hand off to
	 * the matching formatter.  Everything below handles the
	 * 2200/2300/6322 dump format.
	 */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		return (ql_24xx_ascii_fw_dump(ha, bufp));
	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
		return (ql_25xx_ascii_fw_dump(ha, bufp));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Header: chip name followed by the running firmware version. */
	if (CFG_IST(ha, CFG_CTRL_2300)) {
		(void) sprintf(bufp, "\nISP 2300IP ");
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		(void) sprintf(bufp, "\nISP 6322FLX ");
	} else {
		(void) sprintf(bufp, "\nISP 2200IP ");
	}

	bp = bufp + strlen(bufp);
	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/*
	 * Register dump pattern used below: section title, then 16-bit
	 * values 8 per line.  Each "%04x  " cell is exactly 6 characters,
	 * hence the fixed "bp = bp + 6" advance after every sprintf.
	 * sizeof (array) / 2 yields the 16-bit element count.
	 */
	(void) strcat(bufp, "\nPBIU Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
		bp = bp + 6;
	}

	/* ReqQ/RspQ/Risc2Host status registers exist only on 2300/6322. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
		    "registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
			bp = bp + 6;
		}
	}

	/*
	 * 2300/6322 dump 16 mailbox registers, older chips 8.  Note bp
	 * already sits on the string terminator, so strcat(bp, ...) and
	 * strcat(bufp, ...) append at the same place.
	 */
	(void) strcat(bp, "\n\nMailbox Registers:");
	bp = bufp + strlen(bufp);
	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
	for (cnt = 0; cnt < mbox_cnt; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
		bp = bp + 6;
	}

	/* Auto request/response DMA registers: 2300/6322 only. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\nDMA Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
		bp = bp + 6;
	}

	/* RISC general purpose register banks GP0 through GP7. */
	(void) strcat(bp, "\n\nRISC GP0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP2 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP3 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP4 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP5 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP6 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP7 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
		bp = bp + 6;
	}

	/* Chips other than 2300/6322 expose only the first 16 entries. */
	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
		    CFG_CTRL_6322)) == 0))) {
			break;
		}
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
		bp = bp + 6;
	}

	/*
	 * RAM dumps.  2300/6322 split into code/stack/data regions at word
	 * addresses 0x0800/0x010000/0x010800; the "\n%05x: " address
	 * prefix is 8 characters (hence bp + 8).  Other chips dump 0xf000
	 * words of RISC SRAM from word address 0x1000 with a 7-character
	 * "\n%04x: " prefix.
	 */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nCode RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nStack RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nData RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
			bp = bp + 6;
		}
	} else {
		(void) strcat(bp, "\n\nRISC SRAM:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
				bp = bp + 7;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
	bp += strlen(bp);

	/*
	 * Request/response queue contents follow the END marker; these
	 * 32-bit entries use strlen-based advances instead of fixed
	 * offsets.
	 */
	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Total number of ascii bytes produced in bufp. */
	return (strlen(bufp));
}
11982 
11983 /*
11984  * ql_24xx_ascii_fw_dump
11985  *	Converts ISP24xx firmware binary dump to ascii.
11986  *
11987  * Input:
11988  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
11990  *
11991  * Returns:
11992  *	Amount of data buffer used.
11993  *
11994  * Context:
11995  *	Kernel context.
11996  */
static size_t
ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp = bufp;
	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Header: firmware version and attribute word. */
	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version, ha->fw_attributes);
	bp += strlen(bp);

	/*
	 * bp is not advanced after the HCCR line; the following strcat
	 * appends at the end of that text and the next "bp += strlen(bp)"
	 * accounts for both.
	 */
	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);

	(void) strcat(bp, "\nHost Interface Registers");
	bp += strlen(bp);
	/*
	 * Register dump pattern used throughout: section title, then
	 * 32-bit values 8 per line.  sprintf(bp++, "\n") writes a newline
	 * plus NUL and leaves bp on the NUL so the next sprintf overwrites
	 * it; each "%08x " cell is 9 characters, hence bp += 9.
	 */
	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
		bp += 9;
	}

	/* Mailboxes are 16-bit, 16 per line; "%04x " is 5 characters. */
	(void) sprintf(bp, "\n\nMailbox Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
		if (cnt % 16 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
		bp += 5;
	}

	(void) sprintf(bp, "\n\nXSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nCommand DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRISC GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
		bp += 9;
	}

	/*
	 * bufp + strlen(bufp) == bp at this point (bp sits on the string
	 * terminator after the last "bp += 9"), so this is equivalent to
	 * the sprintf(bp, ...) form used by the other sections.
	 */
	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nLMC Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFPM Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFB Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
		bp += 9;
	}

	/*
	 * RAM sections print a "\n%08x: " address prefix (11 characters);
	 * code RAM word addresses start at 0x20000, external memory at
	 * 0x100000.
	 */
	(void) sprintf(bp, "\n\nCode RAM");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
			bp += 11;
		}

		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nExternal Memory");
	bp += strlen(bp);
	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
			bp += 11;
		}
		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
	bp += strlen(bp);

	/* Request/response queue contents follow the END marker. */
	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	/* Optional firmware extended trace buffer, if enabled and allocated. */
	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
	    (ha->fwexttracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;

		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
			bp += 9;
		}
	}

	/* Optional FC event trace buffer, if enabled and allocated. */
	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
	    (ha->fwfcetracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;

		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
			bp += 9;
		}
	}

	(void) sprintf(bp, "\n\n");
	bp += strlen(bp);

	/* Total bytes written = distance from the start of the buffer. */
	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);

	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);

	return (cnt);
}
12396 
12397 /*
12398  * ql_25xx_ascii_fw_dump
12399  *	Converts ISP25xx firmware binary dump to ascii.
12400  *
12401  * Input:
12402  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12404  *
12405  * Returns:
12406  *	Amount of data buffer used.
12407  *
12408  * Context:
12409  *	Kernel context.
12410  */
12411 static size_t
12412 ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12413 {
12414 	uint32_t		cnt;
12415 	caddr_t			bp = bufp;
12416 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12417 
12418 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12419 
12420 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12421 	    ha->fw_major_version, ha->fw_minor_version,
12422 	    ha->fw_subminor_version, ha->fw_attributes);
12423 	bp += strlen(bp);
12424 
12425 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12426 	bp += strlen(bp);
12427 
12428 	(void) sprintf(bp, "\nHostRisc Registers");
12429 	bp += strlen(bp);
12430 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12431 		if (cnt % 8 == 0) {
12432 			(void) sprintf(bp++, "\n");
12433 		}
12434 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12435 		bp += 9;
12436 	}
12437 
12438 	(void) sprintf(bp, "\n\nPCIe Registers");
12439 	bp += strlen(bp);
12440 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12441 		if (cnt % 8 == 0) {
12442 			(void) sprintf(bp++, "\n");
12443 		}
12444 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12445 		bp += 9;
12446 	}
12447 
12448 	(void) strcat(bp, "\n\nHost Interface Registers");
12449 	bp += strlen(bp);
12450 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12451 		if (cnt % 8 == 0) {
12452 			(void) sprintf(bp++, "\n");
12453 		}
12454 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12455 		bp += 9;
12456 	}
12457 
12458 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12459 	bp += strlen(bp);
12460 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12461 		if (cnt % 8 == 0) {
12462 			(void) sprintf(bp++, "\n");
12463 		}
12464 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12465 		bp += 9;
12466 	}
12467 
12468 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12469 	    fw->risc_io);
12470 	bp += strlen(bp);
12471 
12472 	(void) sprintf(bp, "\n\nMailbox Registers");
12473 	bp += strlen(bp);
12474 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12475 		if (cnt % 16 == 0) {
12476 			(void) sprintf(bp++, "\n");
12477 		}
12478 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12479 		bp += 5;
12480 	}
12481 
12482 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12483 	bp += strlen(bp);
12484 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12485 		if (cnt % 8 == 0) {
12486 			(void) sprintf(bp++, "\n");
12487 		}
12488 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12489 		bp += 9;
12490 	}
12491 
12492 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12493 	bp += strlen(bp);
12494 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12495 		if (cnt % 8 == 0) {
12496 			(void) sprintf(bp++, "\n");
12497 		}
12498 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12499 		bp += 9;
12500 	}
12501 
12502 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12503 	bp += strlen(bp);
12504 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12505 		if (cnt % 8 == 0) {
12506 			(void) sprintf(bp++, "\n");
12507 		}
12508 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12509 		bp += 9;
12510 	}
12511 
12512 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12513 	bp += strlen(bp);
12514 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12515 		if (cnt % 8 == 0) {
12516 			(void) sprintf(bp++, "\n");
12517 		}
12518 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12519 		bp += 9;
12520 	}
12521 
12522 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12523 	bp += strlen(bp);
12524 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12525 		if (cnt % 8 == 0) {
12526 			(void) sprintf(bp++, "\n");
12527 		}
12528 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12529 		bp += 9;
12530 	}
12531 
12532 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12533 	bp += strlen(bp);
12534 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12535 		if (cnt % 8 == 0) {
12536 			(void) sprintf(bp++, "\n");
12537 		}
12538 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12539 		bp += 9;
12540 	}
12541 
12542 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12543 	bp += strlen(bp);
12544 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12545 		if (cnt % 8 == 0) {
12546 			(void) sprintf(bp++, "\n");
12547 		}
12548 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12549 		bp += 9;
12550 	}
12551 
12552 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12553 	bp += strlen(bp);
12554 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12555 		if (cnt % 8 == 0) {
12556 			(void) sprintf(bp++, "\n");
12557 		}
12558 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12559 		bp += 9;
12560 	}
12561 
12562 	(void) sprintf(bp, "\n\nASEQ-0 GP Registers");
12563 	bp += strlen(bp);
12564 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12565 		if (cnt % 8 == 0) {
12566 			(void) sprintf(bp++, "\n");
12567 		}
12568 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12569 		bp += 9;
12570 	}
12571 
12572 	(void) sprintf(bp, "\n\nASEQ-1 GP Registers");
12573 	bp += strlen(bp);
12574 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12575 		if (cnt % 8 == 0) {
12576 			(void) sprintf(bp++, "\n");
12577 		}
12578 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12579 		bp += 9;
12580 	}
12581 
12582 	(void) sprintf(bp, "\n\nASEQ-2 GP Registers");
12583 	bp += strlen(bp);
12584 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12585 		if (cnt % 8 == 0) {
12586 			(void) sprintf(bp++, "\n");
12587 		}
12588 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12589 		bp += 9;
12590 	}
12591 
12592 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12593 	bp += strlen(bp);
12594 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12595 		if (cnt % 8 == 0) {
12596 			(void) sprintf(bp++, "\n");
12597 		}
12598 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12599 		bp += 9;
12600 	}
12601 
12602 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12603 	bp += strlen(bp);
12604 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12605 		if (cnt % 8 == 0) {
12606 			(void) sprintf(bp++, "\n");
12607 		}
12608 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12609 		bp += 9;
12610 	}
12611 
12612 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12613 	bp += strlen(bp);
12614 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12615 		if (cnt % 8 == 0) {
12616 			(void) sprintf(bp++, "\n");
12617 		}
12618 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12619 		bp += 9;
12620 	}
12621 
12622 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12623 	bp += strlen(bp);
12624 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12625 		if (cnt % 8 == 0) {
12626 			(void) sprintf(bp++, "\n");
12627 		}
12628 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12629 		bp += 9;
12630 	}
12631 
12632 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12633 	bp += strlen(bp);
12634 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12635 		if (cnt % 8 == 0) {
12636 			(void) sprintf(bp++, "\n");
12637 		}
12638 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12639 		bp += 9;
12640 	}
12641 
12642 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12643 	bp += strlen(bp);
12644 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12645 		if (cnt % 8 == 0) {
12646 			(void) sprintf(bp++, "\n");
12647 		}
12648 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12649 		bp += 9;
12650 	}
12651 
12652 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12653 	bp += strlen(bp);
12654 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12655 		if (cnt % 8 == 0) {
12656 			(void) sprintf(bp++, "\n");
12657 		}
12658 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12659 		bp += 9;
12660 	}
12661 
12662 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12663 	bp += strlen(bp);
12664 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12665 		if (cnt % 8 == 0) {
12666 			(void) sprintf(bp++, "\n");
12667 		}
12668 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12669 		bp += 9;
12670 	}
12671 
12672 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12673 	bp += strlen(bp);
12674 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12675 		if (cnt % 8 == 0) {
12676 			(void) sprintf(bp++, "\n");
12677 		}
12678 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12679 		bp += 9;
12680 	}
12681 
12682 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12683 	bp += strlen(bp);
12684 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12685 		if (cnt % 8 == 0) {
12686 			(void) sprintf(bp++, "\n");
12687 		}
12688 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12689 		bp += 9;
12690 	}
12691 
12692 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12693 	bp += strlen(bp);
12694 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12695 		if (cnt % 8 == 0) {
12696 			(void) sprintf(bp++, "\n");
12697 		}
12698 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12699 		bp += 9;
12700 	}
12701 
12702 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12703 	bp += strlen(bp);
12704 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12705 		if (cnt % 8 == 0) {
12706 			(void) sprintf(bp++, "\n");
12707 		}
12708 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12709 		bp += 9;
12710 	}
12711 
12712 	(void) sprintf(bp, "\n\nRISC GP Registers");
12713 	bp += strlen(bp);
12714 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12715 		if (cnt % 8 == 0) {
12716 			(void) sprintf(bp++, "\n");
12717 		}
12718 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12719 		bp += 9;
12720 	}
12721 
12722 	(void) sprintf(bp, "\n\nLMC Registers");
12723 	bp += strlen(bp);
12724 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12725 		if (cnt % 8 == 0) {
12726 			(void) sprintf(bp++, "\n");
12727 		}
12728 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12729 		bp += 9;
12730 	}
12731 
12732 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12733 	bp += strlen(bp);
12734 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12735 		if (cnt % 8 == 0) {
12736 			(void) sprintf(bp++, "\n");
12737 		}
12738 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12739 		bp += 9;
12740 	}
12741 
12742 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12743 	bp += strlen(bp);
12744 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12745 		if (cnt % 8 == 0) {
12746 			(void) sprintf(bp++, "\n");
12747 		}
12748 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12749 		bp += 9;
12750 	}
12751 
12752 	(void) sprintf(bp, "\n\nCode RAM");
12753 	bp += strlen(bp);
12754 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12755 		if (cnt % 8 == 0) {
12756 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12757 			bp += 11;
12758 		}
12759 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12760 		bp += 9;
12761 	}
12762 
12763 	(void) sprintf(bp, "\n\nExternal Memory");
12764 	bp += strlen(bp);
12765 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12766 		if (cnt % 8 == 0) {
12767 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12768 			bp += 11;
12769 		}
12770 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12771 		bp += 9;
12772 	}
12773 
12774 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12775 	bp += strlen(bp);
12776 
12777 	(void) sprintf(bp, "\n\nRequest Queue");
12778 	bp += strlen(bp);
12779 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12780 		if (cnt % 8 == 0) {
12781 			(void) sprintf(bp, "\n%08x: ", cnt);
12782 			bp += strlen(bp);
12783 		}
12784 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12785 		bp += strlen(bp);
12786 	}
12787 
12788 	(void) sprintf(bp, "\n\nResponse Queue");
12789 	bp += strlen(bp);
12790 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12791 		if (cnt % 8 == 0) {
12792 			(void) sprintf(bp, "\n%08x: ", cnt);
12793 			bp += strlen(bp);
12794 		}
12795 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12796 		bp += strlen(bp);
12797 	}
12798 
12799 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12800 	    (ha->fwexttracebuf.bp != NULL)) {
12801 		uint32_t cnt_b = 0;
12802 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12803 
12804 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12805 		bp += strlen(bp);
12806 		/* show data address as a byte address, data as long words */
12807 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12808 			cnt_b = cnt * 4;
12809 			if (cnt_b % 32 == 0) {
12810 				(void) sprintf(bp, "\n%08x: ",
12811 				    (int)(w64 + cnt_b));
12812 				bp += 11;
12813 			}
12814 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12815 			bp += 9;
12816 		}
12817 	}
12818 
12819 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12820 	    (ha->fwfcetracebuf.bp != NULL)) {
12821 		uint32_t cnt_b = 0;
12822 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12823 
12824 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12825 		bp += strlen(bp);
12826 		/* show data address as a byte address, data as long words */
12827 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12828 			cnt_b = cnt * 4;
12829 			if (cnt_b % 32 == 0) {
12830 				(void) sprintf(bp, "\n%08x: ",
12831 				    (int)(w64 + cnt_b));
12832 				bp += 11;
12833 			}
12834 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12835 			bp += 9;
12836 		}
12837 	}
12838 
12839 	(void) sprintf(bp, "\n\n");
12840 	bp += strlen(bp);
12841 
12842 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12843 
12844 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12845 
12846 	return (cnt);
12847 }
12848 
12849 /*
12850  * ql_2200_binary_fw_dump
12851  *
12852  * Input:
12853  *	ha:	adapter state pointer.
12854  *	fw:	firmware dump context pointer.
12855  *
12856  * Returns:
12857  *	ql local function return status code.
12858  *
12859  * Context:
12860  *	Interrupt or Kernel context, no mailbox commands allowed.
12861  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause acknowledge; 30000 x 1ms = 30 second limit. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture the register file.  Each ql_read_regs() call
		 * snapshots a bank of 16-bit registers at the given
		 * offset from the mapped I/O base; writes to pcr and
		 * ctrl_status below select which internal register
		 * bank appears in the shared window at iobase + 0x80.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks 0-7, selected via pcr. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* 30000 x 1ms = 30 second limit. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM.  The READ_RAM_WORD mailbox command is
		 * driven by hand here (no mailbox commands are allowed
		 * in this context): write the address to mailbox 1,
		 * ring the host interrupt doorbell, then poll istatus
		 * for the RISC's completion interrupt and collect the
		 * data word from mailbox 2.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* 6000000 x 5us = 30 second limit per word. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
					/* Semaphore BIT_0 = mailbox done. */
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				/* mb[0] holds the mailbox completion status. */
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13059 
13060 /*
13061  * ql_2300_binary_fw_dump
13062  *
13063  * Input:
13064  *	ha:	adapter state pointer.
13065  *	fw:	firmware dump context pointer.
13066  *
13067  * Returns:
13068  *	ql local function return status code.
13069  *
13070  * Context:
13071  *	Interrupt or Kernel context, no mailbox commands allowed.
13072  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause acknowledge; 30000 x 1ms = 30 second limit. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Capture the register file.  Writes to pcr and
		 * ctrl_status below select which internal register
		 * bank appears in the shared window at iobase + 0x80.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks 0-7, selected via pcr. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		/* 30000 x 1ms = 30 second limit. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/* Get RISC SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13224 
13225 /*
13226  * ql_24xx_binary_fw_dump
13227  *
13228  * Input:
13229  *	ha:	adapter state pointer.
13230  *	fw:	firmware dump context pointer.
13231  *
13232  * Returns:
13233  *	ql local function return status code.
13234  *
13235  * Context:
13236  *	Interrupt or Kernel context, no mailbox commands allowed.
13237  */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Save host command/control state before touching the chip. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll for pause acknowledge; 30000 x 100us = 3 seconds. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		/* Read back to flush the posted write. */
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers.  Each is read by writing a selector
		 * (0xB0n00000) to iobase + 0xF0 and reading the value
		 * back from iobase + 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Transfer sequence registers.  From here on, writes to
		 * io_base_addr select which 16-register bank appears in
		 * the shared window at iobase + 0xC0; ql_read_regs()
		 * returns the advanced buffer pointer so successive
		 * banks land contiguously in the dump buffer.
		 */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy words host-endian; fixed up to little endian below. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13661 
13662 /*
13663  * ql_25xx_binary_fw_dump
13664  *
13665  * Input:
13666  *	ha:	adapter state pointer.
13667  *	fw:	firmware dump context pointer.
13668  *
13669  * Returns:
13670  *	ql local function return status code.
13671  *
13672  * Context:
13673  *	Interrupt or Kernel context, no mailbox commands allowed.
13674  */
13675 static int
13676 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13677 {
13678 	uint32_t	*reg32;
13679 	void		*bp;
13680 	clock_t		timer;
13681 	int		rval = QL_SUCCESS;
13682 
13683 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13684 
13685 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
13686 
13687 	/* Pause RISC. */
13688 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13689 		/* Disable ISP interrupts. */
13690 		WRT16_IO_REG(ha, ictrl, 0);
13691 
13692 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13693 		for (timer = 30000;
13694 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13695 		    rval == QL_SUCCESS; timer--) {
13696 			if (timer) {
13697 				drv_usecwait(100);
13698 				if (timer % 10000 == 0) {
13699 					EL(ha, "risc pause %d\n", timer);
13700 				}
13701 			} else {
13702 				EL(ha, "risc pause timeout\n");
13703 				rval = QL_FUNCTION_TIMEOUT;
13704 			}
13705 		}
13706 	}
13707 
13708 	if (rval == QL_SUCCESS) {
13709 
13710 		/* Host Interface registers */
13711 
13712 		/* HostRisc registers. */
13713 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
13714 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13715 		    16, 32);
13716 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
13717 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13718 
13719 		/* PCIe registers. */
13720 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
13721 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
13722 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
13723 		    3, 32);
13724 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
13725 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
13726 
13727 		/* Host interface registers. */
13728 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13729 		    sizeof (fw->host_reg) / 4, 32);
13730 
13731 		/* Disable ISP interrupts. */
13732 
13733 		WRT32_IO_REG(ha, ictrl, 0);
13734 		RD32_IO_REG(ha, ictrl);
13735 		ADAPTER_STATE_LOCK(ha);
13736 		ha->flags &= ~INTERRUPTS_ENABLED;
13737 		ADAPTER_STATE_UNLOCK(ha);
13738 
13739 		/* Shadow registers. */
13740 
13741 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13742 		RD32_IO_REG(ha, io_base_addr);
13743 
13744 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13745 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13746 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13747 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13748 
13749 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13750 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13751 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13752 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13753 
13754 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13755 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13756 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13757 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13758 
13759 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13760 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13761 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13762 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13763 
13764 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13765 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13766 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13767 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13768 
13769 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13770 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13771 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13772 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13773 
13774 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13775 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13776 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13777 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13778 
13779 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13780 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
13781 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13782 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
13783 
13784 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13785 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
13786 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13787 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
13788 
13789 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13790 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
13791 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13792 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
13793 
13794 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13795 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
13796 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13797 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
13798 
13799 		/* RISC I/O register. */
13800 
13801 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
13802 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
13803 		    1, 32);
13804 
13805 		/* Mailbox registers. */
13806 
13807 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13808 		    sizeof (fw->mailbox_reg) / 2, 16);
13809 
13810 		/* Transfer sequence registers. */
13811 
13812 		/* XSEQ GP */
13813 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13814 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13815 		    16, 32);
13816 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13817 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13818 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13819 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13820 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13821 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13822 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13823 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13824 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13825 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13826 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13827 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13828 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13829 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13830 
13831 		/* XSEQ-0 */
13832 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
13833 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13834 		    16, 32);
13835 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
13836 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13837 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13838 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13839 
13840 		/* XSEQ-1 */
13841 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13842 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13843 		    16, 32);
13844 
13845 		/* Receive sequence registers. */
13846 
13847 		/* RSEQ GP */
13848 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13849 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13850 		    16, 32);
13851 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13852 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13853 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13854 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13855 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13856 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13857 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13858 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13859 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13860 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13861 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13862 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13863 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13864 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13865 
13866 		/* RSEQ-0 */
13867 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
13868 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13869 		    16, 32);
13870 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13871 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13872 
13873 		/* RSEQ-1 */
13874 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13875 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13876 		    sizeof (fw->rseq_1_reg) / 4, 32);
13877 
13878 		/* RSEQ-2 */
13879 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13880 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13881 		    sizeof (fw->rseq_2_reg) / 4, 32);
13882 
13883 		/* Auxiliary sequencer registers. */
13884 
13885 		/* ASEQ GP */
13886 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
13887 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
13888 		    16, 32);
13889 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
13890 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13891 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
13892 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13893 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
13894 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13895 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
13896 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13897 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
13898 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13899 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
13900 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13901 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
13902 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13903 
13904 		/* ASEQ-0 */
13905 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
13906 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
13907 		    16, 32);
13908 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
13909 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13910 
13911 		/* ASEQ-1 */
13912 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
13913 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
13914 		    16, 32);
13915 
13916 		/* ASEQ-2 */
13917 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
13918 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
13919 		    16, 32);
13920 
13921 		/* Command DMA registers. */
13922 
13923 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13924 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13925 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13926 
13927 		/* Queues. */
13928 
13929 		/* RequestQ0 */
13930 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13931 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13932 		    8, 32);
13933 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13934 
13935 		/* ResponseQ0 */
13936 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13937 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13938 		    8, 32);
13939 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13940 
13941 		/* RequestQ1 */
13942 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13943 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13944 		    8, 32);
13945 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13946 
13947 		/* Transmit DMA registers. */
13948 
13949 		/* XMT0 */
13950 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13951 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13952 		    16, 32);
13953 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13954 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13955 
13956 		/* XMT1 */
13957 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13958 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13959 		    16, 32);
13960 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13961 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13962 
13963 		/* XMT2 */
13964 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13965 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13966 		    16, 32);
13967 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13968 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13969 
13970 		/* XMT3 */
13971 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13972 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13973 		    16, 32);
13974 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13975 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13976 
13977 		/* XMT4 */
13978 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13979 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13980 		    16, 32);
13981 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13982 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13983 
13984 		/* XMT Common */
13985 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13986 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13987 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13988 
13989 		/* Receive DMA registers. */
13990 
13991 		/* RCVThread0 */
13992 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
13993 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13994 		    ha->iobase + 0xC0, 16, 32);
13995 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
13996 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13997 
13998 		/* RCVThread1 */
13999 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14000 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14001 		    ha->iobase + 0xC0, 16, 32);
14002 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14003 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14004 
14005 		/* RISC registers. */
14006 
14007 		/* RISC GP */
14008 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14009 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14010 		    16, 32);
14011 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14012 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14013 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14014 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14015 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14016 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14017 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14018 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14019 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14020 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14021 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14022 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14023 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14024 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14025 
14026 		/* Local memory controller (LMC) registers. */
14027 
14028 		/* LMC */
14029 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14030 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14031 		    16, 32);
14032 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14033 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14034 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14035 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14036 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14037 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14038 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14039 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14040 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14041 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14042 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14043 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14044 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14045 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14046 
14047 		/* Fibre Protocol Module registers. */
14048 
14049 		/* FPM hardware */
14050 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14051 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14052 		    16, 32);
14053 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14054 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14055 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14056 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14057 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14058 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14059 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14060 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14061 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14062 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14063 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14064 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14065 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14066 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14067 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14068 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14069 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14070 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14071 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14072 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14073 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14074 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14075 
14076 		/* Frame Buffer registers. */
14077 
14078 		/* FB hardware */
14079 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
14080 			WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14081 		} else {
14082 			WRT32_IO_REG(ha, io_base_addr, 0x6000);
14083 		}
14084 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14085 		    16, 32);
14086 
14087 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
14088 			WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14089 		} else {
14090 			WRT32_IO_REG(ha, io_base_addr, 0x6010);
14091 		}
14092 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14093 
14094 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14095 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14096 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14097 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14098 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14099 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14100 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14101 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14102 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14103 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14104 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14105 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14106 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14107 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14108 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14109 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14110 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14111 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14112 
14113 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
14114 			WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14115 		} else {
14116 			WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14117 		}
14118 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14119 	}
14120 
14121 	/* Get the request queue */
14122 	if (rval == QL_SUCCESS) {
14123 		uint32_t	cnt;
14124 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14125 
14126 		/* Sync DMA buffer. */
14127 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14128 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14129 		    DDI_DMA_SYNC_FORKERNEL);
14130 
14131 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14132 			fw->req_q[cnt] = *w32++;
14133 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14134 		}
14135 	}
14136 
14137 	/* Get the respons queue */
14138 	if (rval == QL_SUCCESS) {
14139 		uint32_t	cnt;
14140 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14141 
14142 		/* Sync DMA buffer. */
14143 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14144 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14145 		    DDI_DMA_SYNC_FORKERNEL);
14146 
14147 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14148 			fw->rsp_q[cnt] = *w32++;
14149 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14150 		}
14151 	}
14152 
14153 	/* Reset RISC. */
14154 
14155 	ql_reset_chip(ha);
14156 
14157 	/* Memory. */
14158 
14159 	if (rval == QL_SUCCESS) {
14160 		/* Code RAM. */
14161 		rval = ql_read_risc_ram(ha, 0x20000,
14162 		    sizeof (fw->code_ram) / 4, fw->code_ram);
14163 	}
14164 	if (rval == QL_SUCCESS) {
14165 		/* External Memory. */
14166 		rval = ql_read_risc_ram(ha, 0x100000,
14167 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14168 	}
14169 
14170 	/* Get the FC event trace buffer */
14171 	if (rval == QL_SUCCESS) {
14172 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14173 		    (ha->fwfcetracebuf.bp != NULL)) {
14174 			uint32_t	cnt;
14175 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14176 
14177 			/* Sync DMA buffer. */
14178 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14179 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14180 
14181 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14182 				fw->fce_trace_buf[cnt] = *w32++;
14183 			}
14184 		}
14185 	}
14186 
14187 	/* Get the extended trace buffer */
14188 	if (rval == QL_SUCCESS) {
14189 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14190 		    (ha->fwexttracebuf.bp != NULL)) {
14191 			uint32_t	cnt;
14192 			uint32_t	*w32 = ha->fwexttracebuf.bp;
14193 
14194 			/* Sync DMA buffer. */
14195 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14196 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14197 
14198 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14199 				fw->ext_trace_buf[cnt] = *w32++;
14200 			}
14201 		}
14202 	}
14203 
14204 	if (rval != QL_SUCCESS) {
14205 		EL(ha, "failed=%xh\n", rval);
14206 	} else {
14207 		/*EMPTY*/
14208 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14209 	}
14210 
14211 	return (rval);
14212 }
14213 
14214 /*
14215  * ql_read_risc_ram
14216  *	Reads RISC RAM one word at a time.
14217  *	Risc interrupts must be disabled when this routine is called.
14218  *
14219  * Input:
14220  *	ha:	adapter state pointer.
14221  *	risc_address:	RISC code start address.
14222  *	len:		Number of words.
14223  *	buf:		buffer pointer.
14224  *
14225  * Returns:
14226  *	ql local function return status code.
14227  *
14228  * Context:
14229  *	Interrupt or Kernel context, no mailbox commands allowed.
14230  */
14231 static int
14232 ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
14233     void *buf)
14234 {
14235 	uint32_t	cnt;
14236 	uint16_t	stat;
14237 	clock_t		timer;
14238 	uint16_t	*buf16 = (uint16_t *)buf;
14239 	uint32_t	*buf32 = (uint32_t *)buf;
14240 	int		rval = QL_SUCCESS;
14241 
14242 	for (cnt = 0; cnt < len; cnt++, risc_address++) {
14243 		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
14244 		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
14245 		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
14246 		CFG_IST(ha, CFG_CTRL_242581) ?
14247 		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
14248 		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
14249 		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
14250 			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
14251 				stat = (uint16_t)
14252 				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
14253 				if ((stat == 1) || (stat == 0x10)) {
14254 					if (CFG_IST(ha, CFG_CTRL_242581)) {
14255 						buf32[cnt] = SHORT_TO_LONG(
14256 						    RD16_IO_REG(ha,
14257 						    mailbox[2]),
14258 						    RD16_IO_REG(ha,
14259 						    mailbox[3]));
14260 					} else {
14261 						buf16[cnt] =
14262 						    RD16_IO_REG(ha, mailbox[2]);
14263 					}
14264 
14265 					break;
14266 				} else if ((stat == 2) || (stat == 0x11)) {
14267 					rval = RD16_IO_REG(ha, mailbox[0]);
14268 					break;
14269 				}
14270 				if (CFG_IST(ha, CFG_CTRL_242581)) {
14271 					WRT32_IO_REG(ha, hccr,
14272 					    HC24_CLR_RISC_INT);
14273 					RD32_IO_REG(ha, hccr);
14274 				} else {
14275 					WRT16_IO_REG(ha, hccr,
14276 					    HC_CLR_RISC_INT);
14277 				}
14278 			}
14279 			drv_usecwait(5);
14280 		}
14281 		if (CFG_IST(ha, CFG_CTRL_242581)) {
14282 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
14283 			RD32_IO_REG(ha, hccr);
14284 		} else {
14285 			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
14286 			WRT16_IO_REG(ha, semaphore, 0);
14287 		}
14288 
14289 		if (timer == 0) {
14290 			rval = QL_FUNCTION_TIMEOUT;
14291 		}
14292 	}
14293 
14294 	return (rval);
14295 }
14296 
14297 /*
14298  * ql_read_regs
14299  *	Reads adapter registers to buffer.
14300  *
14301  * Input:
14302  *	ha:	adapter state pointer.
14303  *	buf:	buffer pointer.
14304  *	reg:	start address.
14305  *	count:	number of registers.
14306  *	wds:	register size.
14307  *
14308  * Context:
14309  *	Interrupt or Kernel context, no mailbox commands allowed.
14310  */
14311 static void *
14312 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14313     uint8_t wds)
14314 {
14315 	uint32_t	*bp32, *reg32;
14316 	uint16_t	*bp16, *reg16;
14317 	uint8_t		*bp8, *reg8;
14318 
14319 	switch (wds) {
14320 	case 32:
14321 		bp32 = buf;
14322 		reg32 = reg;
14323 		while (count--) {
14324 			*bp32++ = RD_REG_DWORD(ha, reg32++);
14325 		}
14326 		return (bp32);
14327 	case 16:
14328 		bp16 = buf;
14329 		reg16 = reg;
14330 		while (count--) {
14331 			*bp16++ = RD_REG_WORD(ha, reg16++);
14332 		}
14333 		return (bp16);
14334 	case 8:
14335 		bp8 = buf;
14336 		reg8 = reg;
14337 		while (count--) {
14338 			*bp8++ = RD_REG_BYTE(ha, reg8++);
14339 		}
14340 		return (bp8);
14341 	default:
14342 		EL(ha, "Unknown word size=%d\n", wds);
14343 		return (buf);
14344 	}
14345 }
14346 
/*
 * ql_save_config_regs
 *	Saves a snapshot of the adapter's PCI configuration registers
 *	into the "ql-config-space" devinfo property so they can be
 *	restored later (e.g. across a power cycle) by
 *	ql_restore_config_regs().
 *
 * Input:
 *	dip:	device info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_save_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	int			ret;
	ql_config_space_t	chs;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* If a snapshot already exists, keep it - do not overwrite. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
	    1) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_SUCCESS);
	}

	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_HEADER);
	/* Bridge-only registers exist only on header type 1 devices. */
	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_BCNTRL);
	}

	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_CACHE_LINESZ);

	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_LATENCY_TIMER);

	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_LATENCY_TIMER);
	}

	/* Base address registers (BAR0-BAR5). */
	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);

	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
	    (uchar_t *)&chs, sizeof (ql_config_space_t));

	if (ret != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
14411 
14412 static int
14413 ql_restore_config_regs(dev_info_t *dip)
14414 {
14415 	ql_adapter_state_t	*ha;
14416 	uint_t			elements;
14417 	ql_config_space_t	*chs_p;
14418 	caddr_t			prop = "ql-config-space";
14419 
14420 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14421 	if (ha == NULL) {
14422 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14423 		    ddi_get_instance(dip));
14424 		return (DDI_FAILURE);
14425 	}
14426 
14427 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14428 
14429 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14430 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
14431 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
14432 	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
14433 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14434 		return (DDI_FAILURE);
14435 	}
14436 
14437 	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
14438 
14439 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14440 		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
14441 		    chs_p->chs_bridge_control);
14442 	}
14443 
14444 	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
14445 	    chs_p->chs_cache_line_size);
14446 
14447 	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
14448 	    chs_p->chs_latency_timer);
14449 
14450 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14451 		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
14452 		    chs_p->chs_sec_latency_timer);
14453 	}
14454 
14455 	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
14456 	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
14457 	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
14458 	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
14459 	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
14460 	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
14461 
14462 	ddi_prop_free(chs_p);
14463 
14464 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14465 	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
14466 		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
14467 		    QL_NAME, ddi_get_instance(dip), prop);
14468 	}
14469 
14470 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14471 
14472 	return (DDI_SUCCESS);
14473 }
14474 
14475 uint8_t
14476 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
14477 {
14478 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14479 		return (ddi_get8(ha->sbus_config_handle,
14480 		    (uint8_t *)(ha->sbus_config_base + off)));
14481 	}
14482 
14483 #ifdef KERNEL_32
14484 	return (pci_config_getb(ha->pci_handle, off));
14485 #else
14486 	return (pci_config_get8(ha->pci_handle, off));
14487 #endif
14488 }
14489 
14490 uint16_t
14491 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
14492 {
14493 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14494 		return (ddi_get16(ha->sbus_config_handle,
14495 		    (uint16_t *)(ha->sbus_config_base + off)));
14496 	}
14497 
14498 #ifdef KERNEL_32
14499 	return (pci_config_getw(ha->pci_handle, off));
14500 #else
14501 	return (pci_config_get16(ha->pci_handle, off));
14502 #endif
14503 }
14504 
14505 uint32_t
14506 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
14507 {
14508 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14509 		return (ddi_get32(ha->sbus_config_handle,
14510 		    (uint32_t *)(ha->sbus_config_base + off)));
14511 	}
14512 
14513 #ifdef KERNEL_32
14514 	return (pci_config_getl(ha->pci_handle, off));
14515 #else
14516 	return (pci_config_get32(ha->pci_handle, off));
14517 #endif
14518 }
14519 
14520 void
14521 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
14522 {
14523 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14524 		ddi_put8(ha->sbus_config_handle,
14525 		    (uint8_t *)(ha->sbus_config_base + off), val);
14526 	} else {
14527 #ifdef KERNEL_32
14528 		pci_config_putb(ha->pci_handle, off, val);
14529 #else
14530 		pci_config_put8(ha->pci_handle, off, val);
14531 #endif
14532 	}
14533 }
14534 
14535 void
14536 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
14537 {
14538 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14539 		ddi_put16(ha->sbus_config_handle,
14540 		    (uint16_t *)(ha->sbus_config_base + off), val);
14541 	} else {
14542 #ifdef KERNEL_32
14543 		pci_config_putw(ha->pci_handle, off, val);
14544 #else
14545 		pci_config_put16(ha->pci_handle, off, val);
14546 #endif
14547 	}
14548 }
14549 
14550 void
14551 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
14552 {
14553 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14554 		ddi_put32(ha->sbus_config_handle,
14555 		    (uint32_t *)(ha->sbus_config_base + off), val);
14556 	} else {
14557 #ifdef KERNEL_32
14558 		pci_config_putl(ha->pci_handle, off, val);
14559 #else
14560 		pci_config_put32(ha->pci_handle, off, val);
14561 #endif
14562 	}
14563 }
14564 
14565 /*
14566  * ql_halt
14567  *	Waits for commands that are running to finish and
14568  *	if they do not, commands are aborted.
14569  *	Finally the adapter is reset.
14570  *
14571  * Input:
14572  *	ha:	adapter state pointer.
14573  *	pwr:	power state.
14574  *
14575  * Context:
14576  *	Kernel context.
14577  */
14578 static void
14579 ql_halt(ql_adapter_state_t *ha, int pwr)
14580 {
14581 	uint32_t	cnt;
14582 	ql_tgt_t	*tq;
14583 	ql_srb_t	*sp;
14584 	uint16_t	index;
14585 	ql_link_t	*link;
14586 
14587 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14588 
14589 	/* Wait for all commands running to finish. */
14590 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
14591 		for (link = ha->dev[index].first; link != NULL;
14592 		    link = link->next) {
14593 			tq = link->base_address;
14594 			(void) ql_abort_device(ha, tq, 0);
14595 
14596 			/* Wait for 30 seconds for commands to finish. */
14597 			for (cnt = 3000; cnt != 0; cnt--) {
14598 				/* Acquire device queue lock. */
14599 				DEVICE_QUEUE_LOCK(tq);
14600 				if (tq->outcnt == 0) {
14601 					/* Release device queue lock. */
14602 					DEVICE_QUEUE_UNLOCK(tq);
14603 					break;
14604 				} else {
14605 					/* Release device queue lock. */
14606 					DEVICE_QUEUE_UNLOCK(tq);
14607 					ql_delay(ha, 10000);
14608 				}
14609 			}
14610 
14611 			/* Finish any commands waiting for more status. */
14612 			if (ha->status_srb != NULL) {
14613 				sp = ha->status_srb;
14614 				ha->status_srb = NULL;
14615 				sp->cmd.next = NULL;
14616 				ql_done(&sp->cmd);
14617 			}
14618 
14619 			/* Abort commands that did not finish. */
14620 			if (cnt == 0) {
14621 				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
14622 				    cnt++) {
14623 					if (ha->pending_cmds.first != NULL) {
14624 						ql_start_iocb(ha, NULL);
14625 						cnt = 1;
14626 					}
14627 					sp = ha->outstanding_cmds[cnt];
14628 					if (sp != NULL &&
14629 					    sp->lun_queue->target_queue ==
14630 					    tq) {
14631 						(void) ql_abort((opaque_t)ha,
14632 						    sp->pkt, 0);
14633 					}
14634 				}
14635 			}
14636 		}
14637 	}
14638 
14639 	/* Shutdown IP. */
14640 	if (ha->flags & IP_INITIALIZED) {
14641 		(void) ql_shutdown_ip(ha);
14642 	}
14643 
14644 	/* Stop all timers. */
14645 	ADAPTER_STATE_LOCK(ha);
14646 	ha->port_retry_timer = 0;
14647 	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
14648 	ha->watchdog_timer = 0;
14649 	ADAPTER_STATE_UNLOCK(ha);
14650 
14651 	if (pwr == PM_LEVEL_D3) {
14652 		ADAPTER_STATE_LOCK(ha);
14653 		ha->flags &= ~ONLINE;
14654 		ADAPTER_STATE_UNLOCK(ha);
14655 
14656 		/* Reset ISP chip. */
14657 		ql_reset_chip(ha);
14658 	}
14659 
14660 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14661 }
14662 
14663 /*
14664  * ql_get_dma_mem
14665  *	Function used to allocate dma memory.
14666  *
14667  * Input:
14668  *	ha:			adapter state pointer.
14669  *	mem:			pointer to dma memory object.
14670  *	size:			size of the request in bytes
14671  *
14672  * Returns:
 *	ql local function return status code.
14674  *
14675  * Context:
14676  *	Kernel context.
14677  */
14678 int
14679 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
14680     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
14681 {
14682 	int	rval;
14683 
14684 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14685 
14686 	mem->size = size;
14687 	mem->type = allocation_type;
14688 	mem->cookie_count = 1;
14689 
14690 	switch (alignment) {
14691 	case QL_DMA_DATA_ALIGN:
14692 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
14693 		break;
14694 	case QL_DMA_RING_ALIGN:
14695 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
14696 		break;
14697 	default:
14698 		EL(ha, "failed, unknown alignment type %x\n", alignment);
14699 		break;
14700 	}
14701 
14702 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
14703 		ql_free_phys(ha, mem);
14704 		EL(ha, "failed, alloc_phys=%xh\n", rval);
14705 	}
14706 
14707 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14708 
14709 	return (rval);
14710 }
14711 
14712 /*
14713  * ql_alloc_phys
14714  *	Function used to allocate memory and zero it.
14715  *	Memory is below 4 GB.
14716  *
14717  * Input:
14718  *	ha:			adapter state pointer.
14719  *	mem:			pointer to dma memory object.
14720  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14721  *	mem->cookie_count	number of segments allowed.
14722  *	mem->type		memory allocation type.
14723  *	mem->size		memory size.
14724  *	mem->alignment		memory alignment.
14725  *
14726  * Returns:
 *	ql local function return status code.
14728  *
14729  * Context:
14730  *	Kernel context.
14731  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Select 64- or 32-bit DMA attributes per adapter configuration. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	/* Round the size up to the next multiple of 8. */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel heap memory, zeroed on allocation. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/*
		 * DDI-managed DMA memory; endianness of device access is
		 * selected via the access attributes (default is the
		 * little-endian setting from ql_dev_acc_attr).
		 */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			/*
			 * NOTE(review): both comparisons below test integers
			 * against NULL (i.e. 0); dmac_notused presumably
			 * holds the upper 32 address bits -- confirm against
			 * the cookie layout in sys/ddidmareq.h.
			 */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* A NULL bp covers every allocation-failure path above. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		/* ql_free_phys() releases the memory and the handle. */
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
14827 
14828 /*
14829  * ql_free_phys
14830  *	Function used to free physical memory.
14831  *
14832  * Input:
14833  *	ha:	adapter state pointer.
14834  *	mem:	pointer to dma memory object.
14835  *
14836  * Context:
14837  *	Kernel context.
14838  */
14839 void
14840 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
14841 {
14842 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14843 
14844 	if (mem != NULL && mem->dma_handle != NULL) {
14845 		ql_unbind_dma_buffer(ha, mem);
14846 		switch (mem->type) {
14847 		case KERNEL_MEM:
14848 			if (mem->bp != NULL) {
14849 				kmem_free(mem->bp, mem->size);
14850 			}
14851 			break;
14852 		case LITTLE_ENDIAN_DMA:
14853 		case BIG_ENDIAN_DMA:
14854 		case NO_SWAP_DMA:
14855 			if (mem->acc_handle != NULL) {
14856 				ddi_dma_mem_free(&mem->acc_handle);
14857 				mem->acc_handle = NULL;
14858 			}
14859 			break;
14860 		default:
14861 			break;
14862 		}
14863 		mem->bp = NULL;
14864 		ddi_dma_free_handle(&mem->dma_handle);
14865 		mem->dma_handle = NULL;
14866 	}
14867 
14868 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14869 }
14870 
14871 /*
14872  * ql_alloc_dma_resouce.
14873  *	Allocates DMA resource for buffer.
14874  *
14875  * Input:
14876  *	ha:			adapter state pointer.
14877  *	mem:			pointer to dma memory object.
14878  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14879  *	mem->cookie_count	number of segments allowed.
14880  *	mem->type		memory allocation type.
14881  *	mem->size		memory size.
14882  *	mem->bp			pointer to memory or struct buf
14883  *
14884  * Returns:
 *	ql local function return status code.
14886  *
14887  * Context:
14888  *	Kernel context.
14889  */
14890 int
14891 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14892 {
14893 	ddi_dma_attr_t	dma_attr;
14894 
14895 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14896 
14897 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
14898 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
14899 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
14900 
14901 	/*
14902 	 * Allocate DMA handle for command.
14903 	 */
14904 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
14905 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
14906 	    DDI_SUCCESS) {
14907 		EL(ha, "failed, ddi_dma_alloc_handle\n");
14908 		mem->dma_handle = NULL;
14909 		return (QL_MEMORY_ALLOC_FAILED);
14910 	}
14911 
14912 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
14913 
14914 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
14915 		EL(ha, "failed, bind_dma_buffer\n");
14916 		ddi_dma_free_handle(&mem->dma_handle);
14917 		mem->dma_handle = NULL;
14918 		return (QL_MEMORY_ALLOC_FAILED);
14919 	}
14920 
14921 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14922 
14923 	return (QL_SUCCESS);
14924 }
14925 
14926 /*
14927  * ql_free_dma_resource
14928  *	Frees DMA resources.
14929  *
14930  * Input:
14931  *	ha:		adapter state pointer.
14932  *	mem:		pointer to dma memory object.
14933  *	mem->dma_handle	DMA memory handle.
14934  *
14935  * Context:
14936  *	Kernel context.
14937  */
14938 void
14939 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
14940 {
14941 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14942 
14943 	ql_free_phys(ha, mem);
14944 
14945 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14946 }
14947 
14948 /*
14949  * ql_bind_dma_buffer
14950  *	Binds DMA buffer.
14951  *
14952  * Input:
14953  *	ha:			adapter state pointer.
14954  *	mem:			pointer to dma memory object.
14955  *	sleep:			KM_SLEEP or KM_NOSLEEP.
14956  *	mem->dma_handle		DMA memory handle.
14957  *	mem->cookie_count	number of segments allowed.
14958  *	mem->type		memory allocation type.
14959  *	mem->size		memory size.
14960  *	mem->bp			pointer to memory or struct buf
14961  *
14962  * Returns:
14963  *	mem->cookies		pointer to list of cookies.
14964  *	mem->cookie_count	number of cookies.
14965  *	status			success = DDI_DMA_MAPPED
14966  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
14967  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
14968  *				DDI_DMA_TOOBIG
14969  *
14970  * Context:
14971  *	Kernel context.
14972  */
static int
ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	int			rval;
	ddi_dma_cookie_t	*cookiep;
	uint32_t		cnt = mem->cookie_count;	/* caller's segment limit */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* struct buf objects take the buf-bind path; raw memory the addr path. */
	if (mem->type == STRUCT_BUF_MEMORY) {
		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
	} else {
		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
		    &mem->cookie_count);
	}

	if (rval == DDI_DMA_MAPPED) {
		/* The bind may return more segments than the caller allowed. */
		if (mem->cookie_count > cnt) {
			(void) ddi_dma_unbind_handle(mem->dma_handle);
			EL(ha, "failed, cookie_count %d > %d\n",
			    mem->cookie_count, cnt);
			rval = DDI_DMA_TOOBIG;
		} else {
			if (mem->cookie_count > 1) {
				/*
				 * Multiple segments: copy the first cookie and
				 * walk the rest into a private array (freed by
				 * ql_unbind_dma_buffer).  Note the intentional
				 * assignment inside the if condition.
				 */
				if (mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) {
					*mem->cookies = mem->cookie;
					cookiep = mem->cookies;
					for (cnt = 1; cnt < mem->cookie_count;
					    cnt++) {
						ddi_dma_nextcookie(
						    mem->dma_handle,
						    ++cookiep);
					}
				} else {
					(void) ddi_dma_unbind_handle(
					    mem->dma_handle);
					EL(ha, "failed, kmem_zalloc\n");
					rval = DDI_DMA_NORESOURCES;
				}
			} else {
				/*
				 * It has been reported that dmac_size at times
				 * may be incorrect on sparc machines so for
				 * sparc machines that only have one segment
				 * use the buffer size instead.
				 */
				mem->cookies = &mem->cookie;
				mem->cookies->dmac_size = mem->size;
			}
		}
	}

	if (rval != DDI_DMA_MAPPED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15040 
15041 /*
15042  * ql_unbind_dma_buffer
15043  *	Unbinds DMA buffer.
15044  *
15045  * Input:
15046  *	ha:			adapter state pointer.
15047  *	mem:			pointer to dma memory object.
15048  *	mem->dma_handle		DMA memory handle.
15049  *	mem->cookies		pointer to cookie list.
15050  *	mem->cookie_count	number of cookies.
15051  *
15052  * Context:
15053  *	Kernel context.
15054  */
15055 /* ARGSUSED */
15056 static void
15057 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15058 {
15059 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15060 
15061 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15062 	if (mem->cookie_count > 1) {
15063 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15064 		    mem->cookie_count);
15065 		mem->cookies = NULL;
15066 	}
15067 	mem->cookie_count = 0;
15068 
15069 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15070 }
15071 
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend/power-down: claims mailbox
 *	ownership, drains outstanding commands, halts the adapter and
 *	disables its interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS or QL_FUNCTION_TIMEOUT.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 32 seconds from now (comment said 30; code uses 32). */
		timer = ddi_get_lbolt();
		timer += 32 * drv_usectohz(1000000);
		if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	/* Holding MBX_BUSY_FLG blocks other threads from mailbox use. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Let in-flight commands drain before halting. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15143 
15144 /*
15145  * ql_add_link_b
15146  *	Add link to the end of the chain.
15147  *
15148  * Input:
15149  *	head = Head of link list.
15150  *	link = link to be added.
15151  *	LOCK must be already obtained.
15152  *
15153  * Context:
15154  *	Interrupt or Kernel context, no mailbox commands allowed.
15155  */
15156 void
15157 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15158 {
15159 	/* at the end there isn't a next */
15160 	link->next = NULL;
15161 
15162 	if ((link->prev = head->last) == NULL) {
15163 		head->first = link;
15164 	} else {
15165 		head->last->next = link;
15166 	}
15167 
15168 	head->last = link;
15169 	link->head = head;	/* the queue we're on */
15170 }
15171 
15172 /*
15173  * ql_add_link_t
15174  *	Add link to the beginning of the chain.
15175  *
15176  * Input:
15177  *	head = Head of link list.
15178  *	link = link to be added.
15179  *	LOCK must be already obtained.
15180  *
15181  * Context:
15182  *	Interrupt or Kernel context, no mailbox commands allowed.
15183  */
15184 void
15185 ql_add_link_t(ql_head_t *head, ql_link_t *link)
15186 {
15187 	link->prev = NULL;
15188 
15189 	if ((link->next = head->first) == NULL)	{
15190 		head->last = link;
15191 	} else {
15192 		head->first->prev = link;
15193 	}
15194 
15195 	head->first = link;
15196 	link->head = head;	/* the queue we're on */
15197 }
15198 
15199 /*
15200  * ql_remove_link
15201  *	Remove a link from the chain.
15202  *
15203  * Input:
15204  *	head = Head of link list.
15205  *	link = link to be removed.
15206  *	LOCK must be already obtained.
15207  *
15208  * Context:
15209  *	Interrupt or Kernel context, no mailbox commands allowed.
15210  */
15211 void
15212 ql_remove_link(ql_head_t *head, ql_link_t *link)
15213 {
15214 	if (link->prev != NULL) {
15215 		if ((link->prev->next = link->next) == NULL) {
15216 			head->last = link->prev;
15217 		} else {
15218 			link->next->prev = link->prev;
15219 		}
15220 	} else if ((head->first = link->next) == NULL) {
15221 		head->last = NULL;
15222 	} else {
15223 		head->first->prev = NULL;
15224 	}
15225 
15226 	/* not on a queue any more */
15227 	link->prev = link->next = NULL;
15228 	link->head = NULL;
15229 }
15230 
15231 /*
15232  * ql_chg_endian
15233  *	Change endianess of byte array.
15234  *
15235  * Input:
15236  *	buf = array pointer.
15237  *	size = size of array in bytes.
15238  *
15239  * Context:
15240  *	Interrupt or Kernel context, no mailbox commands allowed.
15241  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo;
	size_t	hi;

	/* Zero- or one-byte arrays are already their own reverse. */
	if (size < 2) {
		return;
	}

	/* Classic two-index in-place reversal. */
	lo = 0;
	hi = size - 1;
	while (lo < hi) {
		uint8_t	tmp = buf[lo];

		buf[lo++] = buf[hi];
		buf[hi--] = tmp;
	}
}
15257 
15258 /*
15259  * ql_bstr_to_dec
15260  *	Convert decimal byte string to number.
15261  *
15262  * Input:
15263  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
15265  *	size:	number of ascii bytes.
15266  *
15267  * Returns:
15268  *	success = number of ascii bytes processed.
15269  *
15270  * Context:
15271  *	Kernel/Interrupt context.
15272  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	consumed = 0;
	char	*p = s;

	/* When no length is given, measure the run of leading digits. */
	if (size == 0) {
		while (*p >= '0' && *p <= '9') {
			size++;
			p++;
		}
	}

	*ans = 0;
	p = s;
	while (size != 0 && *p >= '0' && *p <= '9') {
		int	weight = 1;
		int	pos;

		/*
		 * Positional accumulation: this digit's weight is
		 * 10^(remaining length - 1).  Kept (rather than Horner's
		 * rule) so a short input still uses the caller's length.
		 */
		for (pos = 1; pos < (int)size; pos++) {
			weight *= 10;
		}
		*ans += (*p - '0') * weight;

		p++;
		consumed++;
		size--;
	}

	return (consumed);
}
15302 
15303 /*
15304  * ql_delay
15305  *	Calls delay routine if threads are not suspended, otherwise, busy waits
15306  *	Minimum = 1 tick = 10ms
15307  *
15308  * Input:
15309  *	dly = delay time in microseconds.
15310  *
15311  * Context:
15312  *	Kernel or Interrupt context, no mailbox commands allowed.
15313  */
15314 void
15315 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15316 {
15317 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15318 		drv_usecwait(usecs);
15319 	} else {
15320 		delay(drv_usectohz(usecs));
15321 	}
15322 }
15323 
15324 /*
 * ql_stall_driver
15326  *	Stalls one or all driver instances, waits for 30 seconds.
15327  *
15328  * Input:
15329  *	ha:		adapter state pointer or NULL for all.
15330  *	options:	BIT_0 --> leave driver stalled on exit if
15331  *				  failed.
15332  *
15333  * Returns:
15334  *	ql local function return status code.
15335  *
15336  * Context:
15337  *	Kernel context.
15338  */
15339 int
15340 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15341 {
15342 	ql_link_t		*link;
15343 	ql_adapter_state_t	*ha2;
15344 	uint32_t		timer;
15345 
15346 	QL_PRINT_3(CE_CONT, "started\n");
15347 
15348 	/* Wait for 30 seconds for daemons unstall. */
15349 	timer = 3000;
15350 	link = ha == NULL ? ql_hba.first : &ha->hba;
15351 	while (link != NULL && timer) {
15352 		ha2 = link->base_address;
15353 
15354 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15355 
15356 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15357 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15358 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15359 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15360 			link = ha == NULL ? link->next : NULL;
15361 			continue;
15362 		}
15363 
15364 		ql_delay(ha, 10000);
15365 		timer--;
15366 		link = ha == NULL ? ql_hba.first : &ha->hba;
15367 	}
15368 
15369 	if (ha2 != NULL && timer == 0) {
15370 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15371 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15372 		    "unstalled"));
15373 		if (options & BIT_0) {
15374 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15375 		}
15376 		return (QL_FUNCTION_TIMEOUT);
15377 	}
15378 
15379 	QL_PRINT_3(CE_CONT, "done\n");
15380 
15381 	return (QL_SUCCESS);
15382 }
15383 
15384 /*
15385  * ql_restart_driver
15386  *	Restarts one or all driver instances.
15387  *
15388  * Input:
15389  *	ha:	adapter state pointer or NULL for all.
15390  *
15391  * Context:
15392  *	Kernel context.
15393  */
15394 void
15395 ql_restart_driver(ql_adapter_state_t *ha)
15396 {
15397 	ql_link_t		*link;
15398 	ql_adapter_state_t	*ha2;
15399 	uint32_t		timer;
15400 
15401 	QL_PRINT_3(CE_CONT, "started\n");
15402 
15403 	/* Tell all daemons to unstall. */
15404 	link = ha == NULL ? ql_hba.first : &ha->hba;
15405 	while (link != NULL) {
15406 		ha2 = link->base_address;
15407 
15408 		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15409 
15410 		link = ha == NULL ? link->next : NULL;
15411 	}
15412 
15413 	/* Wait for 30 seconds for all daemons unstall. */
15414 	timer = 3000;
15415 	link = ha == NULL ? ql_hba.first : &ha->hba;
15416 	while (link != NULL && timer) {
15417 		ha2 = link->base_address;
15418 
15419 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15420 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15421 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
15422 			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
15423 			    ha2->instance, ha2->vp_index);
15424 			ql_restart_queues(ha2);
15425 			link = ha == NULL ? link->next : NULL;
15426 			continue;
15427 		}
15428 
15429 		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
15430 		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
15431 
15432 		ql_delay(ha, 10000);
15433 		timer--;
15434 		link = ha == NULL ? ql_hba.first : &ha->hba;
15435 	}
15436 
15437 	QL_PRINT_3(CE_CONT, "done\n");
15438 }
15439 
15440 /*
15441  * ql_setup_interrupts
15442  *	Sets up interrupts based on the HBA's and platform's
15443  *	capabilities (e.g., legacy / MSI / FIXED).
15444  *
15445  * Input:
15446  *	ha = adapter state pointer.
15447  *
15448  * Returns:
15449  *	DDI_SUCCESS or DDI_FAILURE.
15450  *
15451  * Context:
15452  *	Kernel context.
15453  */
15454 static int
15455 ql_setup_interrupts(ql_adapter_state_t *ha)
15456 {
15457 	int32_t		rval = DDI_FAILURE;
15458 	int32_t		i;
15459 	int32_t		itypes = 0;
15460 
15461 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15462 
15463 	/*
15464 	 * The Solaris Advanced Interrupt Functions (aif) are only
15465 	 * supported on s10U1 or greater.
15466 	 */
15467 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
15468 		EL(ha, "interrupt framework is not supported or is "
15469 		    "disabled, using legacy\n");
15470 		return (ql_legacy_intr(ha));
15471 	} else if (ql_os_release_level == 10) {
15472 		/*
15473 		 * See if the advanced interrupt functions (aif) are
15474 		 * in the kernel
15475 		 */
15476 		void	*fptr = (void *)&ddi_intr_get_supported_types;
15477 
15478 		if (fptr == NULL) {
15479 			EL(ha, "aif is not supported, using legacy "
15480 			    "interrupts (rev)\n");
15481 			return (ql_legacy_intr(ha));
15482 		}
15483 	}
15484 
15485 	/* See what types of interrupts this HBA and platform support */
15486 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
15487 	    DDI_SUCCESS) {
15488 		EL(ha, "get supported types failed, rval=%xh, "
15489 		    "assuming FIXED\n", i);
15490 		itypes = DDI_INTR_TYPE_FIXED;
15491 	}
15492 
15493 	EL(ha, "supported types are: %xh\n", itypes);
15494 
15495 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
15496 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
15497 		EL(ha, "successful MSI-X setup\n");
15498 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
15499 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
15500 		EL(ha, "successful MSI setup\n");
15501 	} else {
15502 		rval = ql_setup_fixed(ha);
15503 	}
15504 
15505 	if (rval != DDI_SUCCESS) {
15506 		EL(ha, "failed, aif, rval=%xh\n", rval);
15507 	} else {
15508 		/*EMPTY*/
15509 		QL_PRINT_3(CE_CONT, "(%d): done\n");
15510 	}
15511 
15512 	return (rval);
15513 }
15514 
15515 /*
15516  * ql_setup_msi
15517  *	Set up aif MSI interrupts
15518  *
15519  * Input:
15520  *	ha = adapter state pointer.
15521  *
15522  * Returns:
15523  *	DDI_SUCCESS or DDI_FAILURE.
15524  *
15525  * Context:
15526  *	Kernel context.
15527  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};	/* only [0] is used below */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Honor the global user override. */
	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	/* On any failure below, ql_release_intr() frees htable/hsize. */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	/* Mutexes need the interrupt priority, hence the ordering here. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	/* Block-enable when supported, else enable the single handle. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15634 
15635 /*
15636  * ql_setup_msix
15637  *	Set up aif MSI-X interrupts
15638  *
15639  * Input:
15640  *	ha = adapter state pointer.
15641  *
15642  * Returns:
15643  *	DDI_SUCCESS or DDI_FAILURE.
15644  *
15645  * Context:
15646  *	Kernel context.
15647  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Global tunable: administrator may force MSI-X off. */
	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	/* 2422-class silicon before rev 3 has broken MSI-X. */
	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors directly from
	 * PCI config space (MSI-X table-size field; offset differs
	 * between 2422 and later parts).  Field is encoded as N-1.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table; both AIF vectors use ql_isr_aif. */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/*
	 * Allocate space for interrupt handles.  Sized by the h/w vector
	 * count (not "count") so the sparc dup-handler loop below has a
	 * slot per h/w vector.
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	ha->iflags |= IFLG_INTR_MSIX;

	/*
	 * Allocate the interrupts.  On any failure from here on,
	 * ql_release_intr() unwinds: disables, removes handlers,
	 * frees handles and the htable.
	 */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority (needed by ql_init_mutex below). */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes (must follow get_pri; uses ha->intr_pri). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts: block-enable if supported, else one by one. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15813 
15814 /*
15815  * ql_setup_fixed
15816  *	Sets up aif FIXED interrupts
15817  *
15818  * Input:
15819  *	ha = adapter state pointer.
15820  *
15821  * Returns:
15822  *	DDI_SUCCESS or DDI_FAILURE.
15823  *
15824  * Context:
15825  *	Kernel context.
15826  */
15827 static int
15828 ql_setup_fixed(ql_adapter_state_t *ha)
15829 {
15830 	int32_t		count = 0;
15831 	int32_t		actual = 0;
15832 	int32_t		ret;
15833 	uint32_t	i;
15834 
15835 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15836 
15837 	/* Get number of fixed interrupts the system supports */
15838 	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
15839 	    &count)) != DDI_SUCCESS) || count == 0) {
15840 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
15841 		return (DDI_FAILURE);
15842 	}
15843 
15844 	ha->iflags |= IFLG_INTR_FIXED;
15845 
15846 	/* Allocate space for interrupt handles */
15847 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
15848 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
15849 
15850 	/* Allocate the interrupts */
15851 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
15852 	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
15853 	    actual < count) {
15854 		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
15855 		    "actual=%xh\n", ret, count, actual);
15856 		ql_release_intr(ha);
15857 		return (DDI_FAILURE);
15858 	}
15859 
15860 	ha->intr_cnt = actual;
15861 
15862 	/* Get interrupt priority */
15863 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
15864 	    DDI_SUCCESS) {
15865 		EL(ha, "failed, get_pri ret=%xh\n", ret);
15866 		ql_release_intr(ha);
15867 		return (ret);
15868 	}
15869 
15870 	/* Add the interrupt handlers */
15871 	for (i = 0; i < ha->intr_cnt; i++) {
15872 		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
15873 		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
15874 			EL(ha, "failed, intr_add ret=%xh\n", ret);
15875 			ql_release_intr(ha);
15876 			return (ret);
15877 		}
15878 	}
15879 
15880 	/* Setup mutexes */
15881 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
15882 		EL(ha, "failed, mutex init ret=%xh\n", ret);
15883 		ql_release_intr(ha);
15884 		return (ret);
15885 	}
15886 
15887 	/* Enable interrupts */
15888 	for (i = 0; i < ha->intr_cnt; i++) {
15889 		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
15890 			EL(ha, "failed, intr enable, ret=%xh\n", ret);
15891 			ql_destroy_mutex(ha);
15892 			ql_release_intr(ha);
15893 			return (ret);
15894 		}
15895 	}
15896 
15897 	EL(ha, "using FIXED interupts\n");
15898 
15899 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15900 
15901 	return (DDI_SUCCESS);
15902 }
15903 
15904 /*
15905  * ql_disable_intr
15906  *	Disables interrupts
15907  *
15908  * Input:
15909  *	ha = adapter state pointer.
15910  *
15911  * Returns:
15912  *
15913  * Context:
15914  *	Kernel context.
15915  */
15916 static void
15917 ql_disable_intr(ql_adapter_state_t *ha)
15918 {
15919 	uint32_t	i, rval;
15920 
15921 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15922 
15923 	if (!(ha->iflags & IFLG_INTR_AIF)) {
15924 
15925 		/* Disable legacy interrupts */
15926 		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
15927 
15928 	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
15929 	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
15930 
15931 		/* Remove AIF block interrupts (MSI) */
15932 		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
15933 		    != DDI_SUCCESS) {
15934 			EL(ha, "failed intr block disable, rval=%x\n", rval);
15935 		}
15936 
15937 	} else {
15938 
15939 		/* Remove AIF non-block interrupts (fixed).  */
15940 		for (i = 0; i < ha->intr_cnt; i++) {
15941 			if ((rval = ddi_intr_disable(ha->htable[i])) !=
15942 			    DDI_SUCCESS) {
15943 				EL(ha, "failed intr disable, intr#=%xh, "
15944 				    "rval=%xh\n", i, rval);
15945 			}
15946 		}
15947 	}
15948 
15949 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15950 }
15951 
15952 /*
15953  * ql_release_intr
15954  *	Releases aif legacy interrupt resources
15955  *
15956  * Input:
15957  *	ha = adapter state pointer.
15958  *
15959  * Returns:
15960  *
15961  * Context:
15962  *	Kernel context.
15963  */
15964 static void
15965 ql_release_intr(ql_adapter_state_t *ha)
15966 {
15967 	int32_t 	i;
15968 
15969 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15970 
15971 	if (!(ha->iflags & IFLG_INTR_AIF)) {
15972 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15973 		return;
15974 	}
15975 
15976 	ha->iflags &= ~(IFLG_INTR_AIF);
15977 	if (ha->htable != NULL && ha->hsize > 0) {
15978 		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
15979 		while (i-- > 0) {
15980 			if (ha->htable[i] == 0) {
15981 				EL(ha, "htable[%x]=0h\n", i);
15982 				continue;
15983 			}
15984 
15985 			(void) ddi_intr_disable(ha->htable[i]);
15986 
15987 			if (i < ha->intr_cnt) {
15988 				(void) ddi_intr_remove_handler(ha->htable[i]);
15989 			}
15990 
15991 			(void) ddi_intr_free(ha->htable[i]);
15992 		}
15993 
15994 		kmem_free(ha->htable, ha->hsize);
15995 		ha->htable = NULL;
15996 	}
15997 
15998 	ha->hsize = 0;
15999 	ha->intr_cnt = 0;
16000 	ha->intr_pri = 0;
16001 	ha->intr_cap = 0;
16002 
16003 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16004 }
16005 
16006 /*
16007  * ql_legacy_intr
16008  *	Sets up legacy interrupts.
16009  *
16010  *	NB: Only to be used if AIF (Advanced Interupt Framework)
16011  *	    if NOT in the kernel.
16012  *
16013  * Input:
16014  *	ha = adapter state pointer.
16015  *
16016  * Returns:
16017  *	DDI_SUCCESS or DDI_FAILURE.
16018  *
16019  * Context:
16020  *	Kernel context.
16021  */
16022 static int
16023 ql_legacy_intr(ql_adapter_state_t *ha)
16024 {
16025 	int	rval = DDI_SUCCESS;
16026 
16027 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16028 
16029 	/* Setup mutexes */
16030 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16031 		EL(ha, "failed, mutex init\n");
16032 		return (DDI_FAILURE);
16033 	}
16034 
16035 	/* Setup standard/legacy interrupt handler */
16036 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16037 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16038 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16039 		    QL_NAME, ha->instance);
16040 		ql_destroy_mutex(ha);
16041 		rval = DDI_FAILURE;
16042 	}
16043 
16044 	if (rval == DDI_SUCCESS) {
16045 		ha->iflags |= IFLG_INTR_LEGACY;
16046 		EL(ha, "using legacy interrupts\n");
16047 	}
16048 
16049 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16050 
16051 	return (rval);
16052 }
16053 
16054 /*
16055  * ql_init_mutex
16056  *	Initializes mutex's
16057  *
16058  * Input:
16059  *	ha = adapter state pointer.
16060  *
16061  * Returns:
16062  *	DDI_SUCCESS or DDI_FAILURE.
16063  *
16064  * Context:
16065  *	Kernel context.
16066  */
16067 static int
16068 ql_init_mutex(ql_adapter_state_t *ha)
16069 {
16070 	int	ret;
16071 	void	*intr;
16072 
16073 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16074 
16075 	if (ha->iflags & IFLG_INTR_AIF) {
16076 		intr = (void *)(uintptr_t)ha->intr_pri;
16077 	} else {
16078 		/* Get iblock cookies to initialize mutexes */
16079 		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
16080 		    &ha->iblock_cookie)) != DDI_SUCCESS) {
16081 			EL(ha, "failed, get_iblock: %xh\n", ret);
16082 			return (DDI_FAILURE);
16083 		}
16084 		intr = (void *)ha->iblock_cookie;
16085 	}
16086 
16087 	/* mutexes to protect the adapter state structure. */
16088 	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
16089 
16090 	/* mutex to protect the ISP response ring. */
16091 	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
16092 
16093 	/* mutex to protect the mailbox registers. */
16094 	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
16095 
16096 	/* power management protection */
16097 	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
16098 
16099 	/* Mailbox wait and interrupt conditional variable. */
16100 	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
16101 	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
16102 
16103 	/* mutex to protect the ISP request ring. */
16104 	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
16105 
16106 	/* Unsolicited buffer conditional variable. */
16107 	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
16108 
16109 	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
16110 	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
16111 
16112 	/* Suspended conditional variable. */
16113 	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
16114 
16115 	/* mutex to protect task daemon context. */
16116 	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
16117 
16118 	/* Task_daemon thread conditional variable. */
16119 	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
16120 
16121 	/* mutex to protect diag port manage interface */
16122 	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
16123 
16124 	/* mutex to protect per instance f/w dump flags and buffer */
16125 	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
16126 
16127 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16128 
16129 	return (DDI_SUCCESS);
16130 }
16131 
16132 /*
16133  * ql_destroy_mutex
16134  *	Destroys mutex's
16135  *
16136  * Input:
16137  *	ha = adapter state pointer.
16138  *
16139  * Returns:
16140  *
16141  * Context:
16142  *	Kernel context.
16143  */
/*
 * Tear down every mutex and condition variable created by
 * ql_init_mutex(), in the reverse of their initialization order.
 */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16167 
16168 /*
16169  * ql_fwmodule_resolve
16170  *	Loads and resolves external firmware module and symbols
16171  *
16172  * Input:
16173  *	ha:		adapter state pointer.
16174  *
16175  * Returns:
16176  *	ql local function return status code:
16177  *		QL_SUCCESS - external f/w module module and symbols resolved
16178  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16179  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16180  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16181  * Context:
16182  *	Kernel context.
16183  *
16184  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
16185  * could switch to a tighter scope around acutal download (and add an extra
16186  * ddi_modopen for module opens that occur before root is mounted).
16187  *
16188  */
16189 uint32_t
16190 ql_fwmodule_resolve(ql_adapter_state_t *ha)
16191 {
16192 	int8_t			module[128];
16193 	int8_t			fw_version[128];
16194 	uint32_t		rval = QL_SUCCESS;
16195 	caddr_t			code, code02;
16196 	uint8_t			*p_ucfw;
16197 	uint16_t		*p_usaddr, *p_uslen;
16198 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
16199 	uint32_t		*p_uiaddr02, *p_uilen02;
16200 	struct fw_table		*fwt;
16201 	extern struct fw_table	fw_table[];
16202 
16203 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16204 
16205 	if (ha->fw_module != NULL) {
16206 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16207 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16208 		    ha->fw_subminor_version);
16209 		return (rval);
16210 	}
16211 
16212 	/* make sure the fw_class is in the fw_table of supported classes */
16213 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16214 		if (fwt->fw_class == ha->fw_class)
16215 			break;			/* match */
16216 	}
16217 	if (fwt->fw_version == NULL) {
16218 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16219 		    "in driver's fw_table", QL_NAME, ha->instance,
16220 		    ha->fw_class);
16221 		return (QL_FW_NOT_SUPPORTED);
16222 	}
16223 
16224 	/*
16225 	 * open the module related to the fw_class
16226 	 */
16227 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
16228 	    ha->fw_class);
16229 
16230 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
16231 	if (ha->fw_module == NULL) {
16232 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
16233 		    QL_NAME, ha->instance, module);
16234 		return (QL_FWMODLOAD_FAILED);
16235 	}
16236 
16237 	/*
16238 	 * resolve the fw module symbols, data types depend on fw_class
16239 	 */
16240 
16241 	switch (ha->fw_class) {
16242 	case 0x2200:
16243 	case 0x2300:
16244 	case 0x6322:
16245 
16246 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16247 		    NULL)) == NULL) {
16248 			rval = QL_FWSYM_NOT_FOUND;
16249 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16250 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16251 		    "risc_code_addr01", NULL)) == NULL) {
16252 			rval = QL_FWSYM_NOT_FOUND;
16253 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16254 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16255 		    "risc_code_length01", NULL)) == NULL) {
16256 			rval = QL_FWSYM_NOT_FOUND;
16257 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16258 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16259 		    "firmware_version", NULL)) == NULL) {
16260 			rval = QL_FWSYM_NOT_FOUND;
16261 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16262 		}
16263 
16264 		if (rval == QL_SUCCESS) {
16265 			ha->risc_fw[0].code = code;
16266 			ha->risc_fw[0].addr = *p_usaddr;
16267 			ha->risc_fw[0].length = *p_uslen;
16268 
16269 			(void) snprintf(fw_version, sizeof (fw_version),
16270 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16271 		}
16272 		break;
16273 
16274 	case 0x2400:
16275 	case 0x2500:
16276 	case 0x8100:
16277 
16278 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16279 		    NULL)) == NULL) {
16280 			rval = QL_FWSYM_NOT_FOUND;
16281 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16282 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16283 		    "risc_code_addr01", NULL)) == NULL) {
16284 			rval = QL_FWSYM_NOT_FOUND;
16285 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16286 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16287 		    "risc_code_length01", NULL)) == NULL) {
16288 			rval = QL_FWSYM_NOT_FOUND;
16289 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16290 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16291 		    "firmware_version", NULL)) == NULL) {
16292 			rval = QL_FWSYM_NOT_FOUND;
16293 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16294 		}
16295 
16296 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16297 		    NULL)) == NULL) {
16298 			rval = QL_FWSYM_NOT_FOUND;
16299 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16300 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16301 		    "risc_code_addr02", NULL)) == NULL) {
16302 			rval = QL_FWSYM_NOT_FOUND;
16303 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16304 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16305 		    "risc_code_length02", NULL)) == NULL) {
16306 			rval = QL_FWSYM_NOT_FOUND;
16307 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16308 		}
16309 
16310 		if (rval == QL_SUCCESS) {
16311 			ha->risc_fw[0].code = code;
16312 			ha->risc_fw[0].addr = *p_uiaddr;
16313 			ha->risc_fw[0].length = *p_uilen;
16314 			ha->risc_fw[1].code = code02;
16315 			ha->risc_fw[1].addr = *p_uiaddr02;
16316 			ha->risc_fw[1].length = *p_uilen02;
16317 
16318 			(void) snprintf(fw_version, sizeof (fw_version),
16319 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16320 		}
16321 		break;
16322 
16323 	default:
16324 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16325 		rval = QL_FW_NOT_SUPPORTED;
16326 	}
16327 
16328 	if (rval != QL_SUCCESS) {
16329 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16330 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16331 		if (ha->fw_module != NULL) {
16332 			(void) ddi_modclose(ha->fw_module);
16333 			ha->fw_module = NULL;
16334 		}
16335 	} else {
16336 		/*
16337 		 * check for firmware version mismatch between module and
16338 		 * compiled in fw_table version.
16339 		 */
16340 
16341 		if (strcmp(fwt->fw_version, fw_version) != 0) {
16342 
16343 			/*
16344 			 * If f/w / driver version mismatches then
16345 			 * return a successful status -- however warn
16346 			 * the user that this is NOT recommended.
16347 			 */
16348 
16349 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16350 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16351 			    ha->instance, ha->fw_class, fwt->fw_version,
16352 			    fw_version);
16353 
16354 			ha->cfg_flags |= CFG_FW_MISMATCH;
16355 		} else {
16356 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16357 		}
16358 	}
16359 
16360 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16361 
16362 	return (rval);
16363 }
16364 
16365 /*
16366  * ql_port_state
16367  *	Set the state on all adapter ports.
16368  *
16369  * Input:
16370  *	ha:	parent adapter state pointer.
16371  *	state:	port state.
16372  *	flags:	task daemon flags to set.
16373  *
16374  * Context:
16375  *	Interrupt or Kernel context, no mailbox commands allowed.
16376  */
16377 void
16378 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
16379 {
16380 	ql_adapter_state_t	*vha;
16381 
16382 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16383 
16384 	TASK_DAEMON_LOCK(ha);
16385 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
16386 		if (FC_PORT_STATE_MASK(vha->state) != state) {
16387 			vha->state = state != FC_STATE_OFFLINE ?
16388 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
16389 			vha->task_daemon_flags |= flags;
16390 		}
16391 	}
16392 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
16393 	TASK_DAEMON_UNLOCK(ha);
16394 
16395 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16396 }
16397 
16398 /*
16399  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16400  *
16401  * Input:	Pointer to the adapter state structure.
16402  * Returns:	Success or Failure.
16403  * Context:	Kernel context.
16404  */
16405 int
16406 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16407 {
16408 	int	rval = DDI_SUCCESS;
16409 
16410 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16411 
16412 	ha->el_trace_desc =
16413 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16414 
16415 	if (ha->el_trace_desc == NULL) {
16416 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16417 		    QL_NAME, ha->instance);
16418 		rval = DDI_FAILURE;
16419 	} else {
16420 		ha->el_trace_desc->next		= 0;
16421 		ha->el_trace_desc->trace_buffer =
16422 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16423 
16424 		if (ha->el_trace_desc->trace_buffer == NULL) {
16425 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16426 			    QL_NAME, ha->instance);
16427 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16428 			rval = DDI_FAILURE;
16429 		} else {
16430 			ha->el_trace_desc->trace_buffer_size =
16431 			    EL_TRACE_BUF_SIZE;
16432 			mutex_init(&ha->el_trace_desc->mutex, NULL,
16433 			    MUTEX_DRIVER, NULL);
16434 		}
16435 	}
16436 
16437 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16438 
16439 	return (rval);
16440 }
16441 
16442 /*
16443  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
16444  *
16445  * Input:	Pointer to the adapter state structure.
16446  * Returns:	Success or Failure.
16447  * Context:	Kernel context.
16448  */
16449 int
16450 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
16451 {
16452 	int	rval = DDI_SUCCESS;
16453 
16454 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16455 
16456 	if (ha->el_trace_desc == NULL) {
16457 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
16458 		    QL_NAME, ha->instance);
16459 		rval = DDI_FAILURE;
16460 	} else {
16461 		if (ha->el_trace_desc->trace_buffer != NULL) {
16462 			kmem_free(ha->el_trace_desc->trace_buffer,
16463 			    ha->el_trace_desc->trace_buffer_size);
16464 		}
16465 		mutex_destroy(&ha->el_trace_desc->mutex);
16466 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16467 	}
16468 
16469 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16470 
16471 	return (rval);
16472 }
16473 
16474 /*
16475  * els_cmd_text	- Return a pointer to a string describing the command
16476  *
16477  * Input:	els_cmd = the els command opcode.
16478  * Returns:	pointer to a string.
16479  * Context:	Kernel context.
16480  */
16481 char *
16482 els_cmd_text(int els_cmd)
16483 {
16484 	cmd_table_t *entry = &els_cmd_tbl[0];
16485 
16486 	return (cmd_text(entry, els_cmd));
16487 }
16488 
16489 /*
16490  * mbx_cmd_text - Return a pointer to a string describing the command
16491  *
16492  * Input:	mbx_cmd = the mailbox command opcode.
16493  * Returns:	pointer to a string.
16494  * Context:	Kernel context.
16495  */
16496 char *
16497 mbx_cmd_text(int mbx_cmd)
16498 {
16499 	cmd_table_t *entry = &mbox_cmd_tbl[0];
16500 
16501 	return (cmd_text(entry, mbx_cmd));
16502 }
16503 
16504 /*
16505  * cmd_text	Return a pointer to a string describing the command
16506  *
16507  * Input:	entry = the command table
16508  *		cmd = the command.
16509  * Returns:	pointer to a string.
16510  * Context:	Kernel context.
16511  */
16512 char *
16513 cmd_text(cmd_table_t *entry, int cmd)
16514 {
16515 	for (; entry->cmd != 0; entry++) {
16516 		if (entry->cmd == cmd) {
16517 			break;
16518 		}
16519 	}
16520 	return (entry->string);
16521 }
16522 
16523 /*
16524  * ql_els_24xx_mbox_cmd_iocb - els request indication.
16525  *
16526  * Input:	ha = adapter state pointer.
16527  *		srb = scsi request block pointer.
16528  *		arg = els passthru entry iocb pointer.
16529  * Returns:
16530  * Context:	Kernel context.
16531  */
16532 void
16533 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
16534 {
16535 	els_descriptor_t	els_desc;
16536 
16537 	/* Extract the ELS information */
16538 	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
16539 
16540 	/* Construct the passthru entry */
16541 	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
16542 
16543 	/* Ensure correct endianness */
16544 	ql_isp_els_handle_cmd_endian(ha, srb);
16545 }
16546 
16547 /*
16548  * ql_isp_els_request_map - Extract into an els descriptor the info required
16549  *			    to build an els_passthru iocb from an fc packet.
16550  *
16551  * Input:	ha = adapter state pointer.
16552  *		pkt = fc packet pointer
16553  *		els_desc = els descriptor pointer
16554  * Returns:
16555  * Context:	Kernel context.
16556  */
16557 static void
16558 ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
16559     els_descriptor_t *els_desc)
16560 {
16561 	ls_code_t	els;
16562 
16563 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16564 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16565 
16566 	els_desc->els = els.ls_code;
16567 
16568 	els_desc->els_handle = ha->hba_buf.acc_handle;
16569 	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
16570 	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
16571 	/* if n_port_handle is not < 0x7d use 0 */
16572 	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
16573 		els_desc->n_port_handle = ha->n_port->n_port_handle;
16574 	} else {
16575 		els_desc->n_port_handle = 0;
16576 	}
16577 	els_desc->control_flags = 0;
16578 	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
16579 	/*
16580 	 * Transmit DSD. This field defines the Fibre Channel Frame payload
16581 	 * (without the frame header) in system memory.
16582 	 */
16583 	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
16584 	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
16585 	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
16586 
16587 	els_desc->rsp_byte_count = pkt->pkt_rsplen;
16588 	/*
16589 	 * Receive DSD. This field defines the ELS response payload buffer
16590 	 * for the ISP24xx firmware transferring the received ELS
16591 	 * response frame to a location in host memory.
16592 	 */
16593 	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
16594 	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
16595 	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
16596 }
16597 
16598 /*
16599  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
16600  * using the els descriptor.
16601  *
16602  * Input:	ha = adapter state pointer.
16603  *		els_desc = els descriptor pointer.
16604  *		els_entry = els passthru entry iocb pointer.
16605  * Returns:
16606  * Context:	Kernel context.
16607  */
/*
 * Populate an els_passthru_entry iocb field-by-field from the els
 * descriptor, using ddi_put* through the els access handle.  The DSD
 * stores at the bottom rely on ptr32 auto-increment: the receive DSD
 * words are written immediately after the transmit DSD words, so the
 * statement order there must not change.
 */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination port ID, one byte at a time (24-bit D_ID). */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	/* Source port ID (24-bit S_ID). */
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/* Load transmit data segments and count. */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	/* Receive DSD follows the transmit DSD via the same ptr32. */
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
16655 
16656 /*
16657  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
16658  *				  in host memory.
16659  *
16660  * Input:	ha = adapter state pointer.
16661  *		srb = scsi request block
16662  * Returns:
16663  * Context:	Kernel context.
16664  */
16665 void
16666 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16667 {
16668 	ls_code_t	els;
16669 	fc_packet_t	*pkt;
16670 	uint8_t		*ptr;
16671 
16672 	pkt = srb->pkt;
16673 
16674 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16675 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16676 
16677 	ptr = (uint8_t *)pkt->pkt_cmd;
16678 
16679 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16680 }
16681 
16682 /*
16683  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
16684  *				  in host memory.
16685  * Input:	ha = adapter state pointer.
16686  *		srb = scsi request block
16687  * Returns:
16688  * Context:	Kernel context.
16689  */
16690 void
16691 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16692 {
16693 	ls_code_t	els;
16694 	fc_packet_t	*pkt;
16695 	uint8_t		*ptr;
16696 
16697 	pkt = srb->pkt;
16698 
16699 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16700 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16701 
16702 	ptr = (uint8_t *)pkt->pkt_resp;
16703 	BIG_ENDIAN_32(&els);
16704 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16705 }
16706 
16707 /*
16708  * ql_isp_els_handle_endian - els requests/responses must be in big endian
16709  *			      in host memory.
16710  * Input:	ha = adapter state pointer.
16711  *		ptr = els request/response buffer pointer.
16712  *		ls_code = els command code.
16713  * Returns:
16714  * Context:	Kernel context.
16715  */
16716 void
16717 ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
16718 {
16719 	switch (ls_code) {
16720 	case LA_ELS_PLOGI: {
16721 		BIG_ENDIAN_32(ptr);	/* Command Code */
16722 		ptr += 4;
16723 		BIG_ENDIAN_16(ptr);	/* FC-PH version */
16724 		ptr += 2;
16725 		BIG_ENDIAN_16(ptr);	/* b2b credit */
16726 		ptr += 2;
16727 		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
16728 		ptr += 2;
16729 		BIG_ENDIAN_16(ptr);	/* Rcv data size */
16730 		ptr += 2;
16731 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16732 		ptr += 2;
16733 		BIG_ENDIAN_16(ptr);	/* Rel offset */
16734 		ptr += 2;
16735 		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
16736 		ptr += 4;		/* Port Name */
16737 		ptr += 8;		/* Node Name */
16738 		ptr += 8;		/* Class 1 */
16739 		ptr += 16;		/* Class 2 */
16740 		ptr += 16;		/* Class 3 */
16741 		BIG_ENDIAN_16(ptr);	/* Service options */
16742 		ptr += 2;
16743 		BIG_ENDIAN_16(ptr);	/* Initiator control */
16744 		ptr += 2;
16745 		BIG_ENDIAN_16(ptr);	/* Recipient Control */
16746 		ptr += 2;
16747 		BIG_ENDIAN_16(ptr);	/* Rcv size */
16748 		ptr += 2;
16749 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16750 		ptr += 2;
16751 		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
16752 		ptr += 2;
16753 		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
16754 		break;
16755 	}
16756 	case LA_ELS_PRLI: {
16757 		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
16758 		ptr += 4;		/* Type */
16759 		ptr += 2;
16760 		BIG_ENDIAN_16(ptr);	/* Flags */
16761 		ptr += 2;
16762 		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
16763 		ptr += 4;
16764 		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
16765 		ptr += 4;
16766 		BIG_ENDIAN_32(ptr);	/* Flags */
16767 		break;
16768 	}
16769 	default:
16770 		EL(ha, "can't handle els code %x\n", ls_code);
16771 		break;
16772 	}
16773 }
16774 
16775 /*
16776  * ql_n_port_plogi
16777  *	In N port 2 N port topology where an N Port has logged in with the
16778  *	firmware because it has the N_Port login initiative, we send up
16779  *	a plogi by proxy which stimulates the login procedure to continue.
16780  *
16781  * Input:
16782  *	ha = adapter state pointer.
16783  * Returns:
16784  *
16785  * Context:
16786  *	Kernel context.
16787  */
16788 static int
16789 ql_n_port_plogi(ql_adapter_state_t *ha)
16790 {
16791 	int		rval;
16792 	ql_tgt_t	*tq;
16793 	ql_head_t done_q = { NULL, NULL };
16794 
16795 	rval = QL_SUCCESS;
16796 
16797 	if (ha->topology & QL_N_PORT) {
16798 		/* if we're doing this the n_port_handle must be good */
16799 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
16800 			tq = ql_loop_id_to_queue(ha,
16801 			    ha->n_port->n_port_handle);
16802 			if (tq != NULL) {
16803 				(void) ql_send_plogi(ha, tq, &done_q);
16804 			} else {
16805 				EL(ha, "n_port_handle = %x, tq = %x\n",
16806 				    ha->n_port->n_port_handle, tq);
16807 			}
16808 		} else {
16809 			EL(ha, "n_port_handle = %x, tq = %x\n",
16810 			    ha->n_port->n_port_handle, tq);
16811 		}
16812 		if (done_q.first != NULL) {
16813 			ql_done(done_q.first);
16814 		}
16815 	}
16816 	return (rval);
16817 }
16818 
16819 /*
16820  * Compare two WWNs. The NAA is omitted for comparison.
16821  *
16822  * Note particularly that the indentation used in this
16823  * function  isn't according to Sun recommendations. It
16824  * is indented to make reading a bit easy.
16825  *
16826  * Return Values:
16827  *   if first == second return  0
16828  *   if first > second  return  1
16829  *   if first < second  return -1
16830  */
16831 int
16832 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
16833 {
16834 	la_wwn_t t1, t2;
16835 	int rval;
16836 
16837 	EL(ha, "WWPN=%08x%08x\n",
16838 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
16839 	EL(ha, "WWPN=%08x%08x\n",
16840 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
16841 	/*
16842 	 * Fibre Channel protocol is big endian, so compare
16843 	 * as big endian values
16844 	 */
16845 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
16846 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
16847 
16848 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
16849 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
16850 
16851 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
16852 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
16853 			rval = 0;
16854 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
16855 			rval = 1;
16856 		} else {
16857 			rval = -1;
16858 		}
16859 	} else {
16860 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
16861 			rval = 1;
16862 		} else {
16863 			rval = -1;
16864 		}
16865 	}
16866 	return (rval);
16867 }
16868 
16869 /*
16870  * ql_wait_for_td_stop
16871  *	Wait for task daemon to stop running.  Internal command timeout
16872  *	is approximately 30 seconds, so it may help in some corner
16873  *	cases to wait that long
16874  *
16875  * Input:
16876  *	ha = adapter state pointer.
16877  *
16878  * Returns:
16879  *	DDI_SUCCESS or DDI_FAILURE.
16880  *
16881  * Context:
16882  *	Kernel context.
16883  */
16884 
16885 static int
16886 ql_wait_for_td_stop(ql_adapter_state_t *ha)
16887 {
16888 	int	rval = DDI_FAILURE;
16889 	UINT16	wait_cnt;
16890 
16891 	for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
16892 		/* The task daemon clears the stop flag on exit. */
16893 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
16894 			if (ha->cprinfo.cc_events & CALLB_CPR_START ||
16895 			    ddi_in_panic()) {
16896 				drv_usecwait(10000);
16897 			} else {
16898 				delay(drv_usectohz(10000));
16899 			}
16900 		} else {
16901 			rval = DDI_SUCCESS;
16902 			break;
16903 		}
16904 	}
16905 	return (rval);
16906 }
16907