xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision 5dfd244acc8f144280c5bc8f69ed941185fc3ccc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88 static opaque_t ql_get_device(opaque_t, fc_portid_t);
89 
90 /*
91  * FCA Driver Support Function Prototypes.
92  */
93 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95     ql_srb_t *);
96 static void ql_task_daemon(void *);
97 static void ql_task_thread(ql_adapter_state_t *);
98 static void ql_unsol_callback(ql_srb_t *);
99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100     fc_unsol_buf_t *);
101 static void ql_timer(void *);
102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104     uint32_t *, uint32_t *);
105 static void ql_halt(ql_adapter_state_t *, int);
106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_login_port(ql_adapter_state_t *, port_id_t);
122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130     ql_srb_t *);
131 static int ql_kstat_update(kstat_t *, int);
132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135 static void ql_rst_aen(ql_adapter_state_t *);
136 static void ql_restart_queues(ql_adapter_state_t *);
137 static void ql_abort_queues(ql_adapter_state_t *);
138 static void ql_idle_check(ql_adapter_state_t *);
139 static int ql_loop_resync(ql_adapter_state_t *);
140 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
141 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142 static int ql_save_config_regs(dev_info_t *);
143 static int ql_restore_config_regs(dev_info_t *);
144 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
145 static int ql_handle_rscn_update(ql_adapter_state_t *);
146 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
147 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
148 static int ql_dump_firmware(ql_adapter_state_t *);
149 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
150 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
151 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
153 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
154 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
155     void *);
156 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
157     uint8_t);
158 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
159 static int ql_suspend_adapter(ql_adapter_state_t *);
160 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
161 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
162 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
163 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
164 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
165 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
166 static int ql_setup_interrupts(ql_adapter_state_t *);
167 static int ql_setup_msi(ql_adapter_state_t *);
168 static int ql_setup_msix(ql_adapter_state_t *);
169 static int ql_setup_fixed(ql_adapter_state_t *);
170 static void ql_release_intr(ql_adapter_state_t *);
171 static void ql_disable_intr(ql_adapter_state_t *);
172 static int ql_legacy_intr(ql_adapter_state_t *);
173 static int ql_init_mutex(ql_adapter_state_t *);
174 static void ql_destroy_mutex(ql_adapter_state_t *);
175 static void ql_iidma(ql_adapter_state_t *);
176 
177 static int ql_n_port_plogi(ql_adapter_state_t *);
178 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
179     els_descriptor_t *);
180 static void ql_isp_els_request_ctor(els_descriptor_t *,
181     els_passthru_entry_t *);
182 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
183 /*
184  * Global data
185  */
186 static uint8_t	ql_enable_pm = 1;
187 static int	ql_flash_sbus_fpga = 0;
188 uint32_t	ql_os_release_level;
189 uint32_t	ql_disable_aif = 0;
190 uint32_t	ql_disable_msi = 0;
191 uint32_t	ql_disable_msix = 0;
192 
193 /* Timer routine variables. */
194 static timeout_id_t	ql_timer_timeout_id = NULL;
195 static clock_t		ql_timer_ticks;
196 
197 /* Soft state head pointer. */
198 void *ql_state = NULL;
199 
200 /* Head adapter link. */
201 ql_head_t ql_hba = {
202 	NULL,
203 	NULL
204 };
205 
206 /* Global hba index */
207 uint32_t ql_gfru_hba_index = 1;
208 
209 /*
210  * Some IP defines and globals
211  */
212 uint32_t	ql_ip_buffer_count = 128;
213 uint32_t	ql_ip_low_water = 10;
214 uint8_t		ql_ip_fast_post_count = 5;
215 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
216 
217 /* Device AL_PA to Device Head Queue index array. */
218 uint8_t ql_alpa_to_index[] = {
219 	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
220 	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
221 	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
222 	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
223 	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
224 	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
225 	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
226 	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
227 	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
228 	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
229 	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
230 	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
231 	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
232 	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
233 	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
234 	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
235 	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
236 	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
237 	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
238 	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
239 	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
240 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
241 	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
242 	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
243 	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
244 	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
245 };
246 
247 /* Device loop_id to ALPA array. */
248 static uint8_t ql_index_to_alpa[] = {
249 	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
250 	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
251 	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
252 	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
253 	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
254 	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
255 	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
256 	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
257 	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
258 	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
259 	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
260 	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
261 	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
262 };
263 
264 /* 2200 register offsets */
265 static reg_off_t reg_off_2200 = {
266 	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
267 	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
268 	0x00, 0x00, /* intr info lo, hi */
269 	24, /* Number of mailboxes */
270 	/* Mailbox register offsets */
271 	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
272 	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
273 	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
274 	/* 2200 does not have mailbox 24-31 */
275 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
276 	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
277 	/* host to host sema */
278 	0x00,
279 	/* 2200 does not have pri_req_in, pri_req_out, */
280 	/* atio_req_in, atio_req_out, io_base_addr */
281 	0xff, 0xff, 0xff, 0xff,	0xff
282 };
283 
284 /* 2300 register offsets */
285 static reg_off_t reg_off_2300 = {
286 	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
287 	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
288 	0x18, 0x1A, /* intr info lo, hi */
289 	32, /* Number of mailboxes */
290 	/* Mailbox register offsets */
291 	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
292 	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
293 	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
294 	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
295 	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
296 	/* host to host sema */
297 	0x1c,
298 	/* 2300 does not have pri_req_in, pri_req_out, */
299 	/* atio_req_in, atio_req_out, io_base_addr */
300 	0xff, 0xff, 0xff, 0xff,	0xff
301 };
302 
303 /* 2400/2500 register offsets */
304 reg_off_t reg_off_2400_2500 = {
305 	0x00, 0x04,		/* flash_address, flash_data */
306 	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
307 	/* 2400 does not have semaphore, nvram */
308 	0x14, 0x18,
309 	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
310 	0x44, 0x46,		/* intr info lo, hi */
311 	32,			/* Number of mailboxes */
312 	/* Mailbox register offsets */
313 	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
314 	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
315 	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
316 	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
317 	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
318 	0xff, 0xff, 0xff, 0xff,
319 	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
320 	0xff,			/* host to host sema */
321 	0x2c, 0x30,		/* pri_req_in, pri_req_out */
322 	0x3c, 0x40,		/* atio_req_in, atio_req_out */
323 	0x54			/* io_base_addr */
324 };
325 
326 /* mutex for protecting variables shared by all instances of the driver */
327 kmutex_t ql_global_mutex;
328 kmutex_t ql_global_hw_mutex;
329 kmutex_t ql_global_el_mutex;
330 
331 /* DMA access attribute structure. */
332 static ddi_device_acc_attr_t ql_dev_acc_attr = {
333 	DDI_DEVICE_ATTR_V0,
334 	DDI_STRUCTURE_LE_ACC,
335 	DDI_STRICTORDER_ACC
336 };
337 
338 /* I/O DMA attributes structures. */
339 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
340 	DMA_ATTR_V0,			/* dma_attr_version */
341 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
342 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
343 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
344 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
345 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
346 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
347 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
348 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
349 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
350 	QL_DMA_GRANULARITY,		/* granularity of device */
351 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
352 };
353 
354 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
355 	DMA_ATTR_V0,			/* dma_attr_version */
356 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
357 	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
358 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
359 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
360 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
361 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
362 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
363 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
364 	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
365 	QL_DMA_GRANULARITY,		/* granularity of device */
366 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
367 };
368 
369 /* Load the default dma attributes */
370 static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
371 static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
372 static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
373 static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
374 static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
375 static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
376 static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
377 static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
378 static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
379 static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
380 static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
381 static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
382 static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
383 static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
384 
385 /* Static declarations of cb_ops entry point functions... */
386 static struct cb_ops ql_cb_ops = {
387 	ql_open,			/* b/c open */
388 	ql_close,			/* b/c close */
389 	nodev,				/* b strategy */
390 	nodev,				/* b print */
391 	nodev,				/* b dump */
392 	nodev,				/* c read */
393 	nodev,				/* c write */
394 	ql_ioctl,			/* c ioctl */
395 	nodev,				/* c devmap */
396 	nodev,				/* c mmap */
397 	nodev,				/* c segmap */
398 	nochpoll,			/* c poll */
399 	nodev,				/* cb_prop_op */
400 	NULL,				/* streamtab  */
401 	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
402 	CB_REV,				/* cb_ops revision */
403 	nodev,				/* c aread */
404 	nodev				/* c awrite */
405 };
406 
407 /* Static declarations of dev_ops entry point functions... */
408 static struct dev_ops ql_devops = {
409 	DEVO_REV,			/* devo_rev */
410 	0,				/* refcnt */
411 	ql_getinfo,			/* getinfo */
412 	nulldev,			/* identify */
413 	nulldev,			/* probe */
414 	ql_attach,			/* attach */
415 	ql_detach,			/* detach */
416 	nodev,				/* reset */
417 	&ql_cb_ops,			/* char/block ops */
418 	NULL,				/* bus operations */
419 	ql_power,			/* power management */
420 	ql_quiesce			/* quiesce device */
421 };
422 
423 /* ELS command code to text converter */
424 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
425 /* Mailbox command code to text converter */
426 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
427 
428 char qlc_driver_version[] = QL_VERSION;
429 
430 /*
431  * Loadable Driver Interface Structures.
432  * Declare and initialize the module configuration section...
433  */
434 static struct modldrv modldrv = {
435 	&mod_driverops,				/* type of module: driver */
436 	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
437 	&ql_devops				/* driver dev_ops */
438 };
439 
440 static struct modlinkage modlinkage = {
441 	MODREV_1,
442 	&modldrv,
443 	NULL
444 };
445 
446 /* ************************************************************************ */
447 /*				Loadable Module Routines.		    */
448 /* ************************************************************************ */
449 
450 /*
451  * _init
452  *	Initializes a loadable module. It is called before any other
453  *	routine in a loadable module.
454  *
455  * Returns:
456  *	0 = success
457  *
458  * Context:
459  *	Kernel context.
460  */
461 int
462 _init(void)
463 {
464 	uint16_t	w16;
465 	int		rval = 0;
466 
467 	/* Get OS major release level. */
468 	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
469 		if (utsname.release[w16] == '.') {
470 			w16++;
471 			break;
472 		}
473 	}
474 	if (w16 < sizeof (utsname.release)) {
475 		(void) ql_bstr_to_dec(&utsname.release[w16],
476 		    &ql_os_release_level, 0);
477 	} else {
478 		ql_os_release_level = 0;
479 	}
480 	if (ql_os_release_level < 6) {
481 		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
482 		    QL_NAME, ql_os_release_level);
483 		rval = EINVAL;
484 	}
485 	if (ql_os_release_level == 6) {
486 		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
487 		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
488 	}
489 
490 	if (rval == 0) {
491 		rval = ddi_soft_state_init(&ql_state,
492 		    sizeof (ql_adapter_state_t), 0);
493 	}
494 	if (rval == 0) {
495 		/* allow the FC Transport to tweak the dev_ops */
496 		fc_fca_init(&ql_devops);
497 
498 		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
499 		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
500 		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
501 		rval = mod_install(&modlinkage);
502 		if (rval != 0) {
503 			mutex_destroy(&ql_global_hw_mutex);
504 			mutex_destroy(&ql_global_mutex);
505 			mutex_destroy(&ql_global_el_mutex);
506 			ddi_soft_state_fini(&ql_state);
507 		} else {
508 			/*EMPTY*/
509 			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
510 			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
511 			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
512 			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
513 			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
514 			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
515 			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
516 			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
517 			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
518 			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
519 			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
520 			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
521 			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
522 			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
523 			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
524 			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
525 			    QL_FCSM_CMD_SGLLEN;
526 			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
527 			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
528 			    QL_FCSM_RSP_SGLLEN;
529 			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
530 			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
531 			    QL_FCIP_CMD_SGLLEN;
532 			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
533 			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
534 			    QL_FCIP_RSP_SGLLEN;
535 			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
536 			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
537 			    QL_FCP_CMD_SGLLEN;
538 			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
539 			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
540 			    QL_FCP_RSP_SGLLEN;
541 		}
542 	}
543 
544 	if (rval != 0) {
545 		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
546 		    QL_NAME);
547 	}
548 
549 	return (rval);
550 }
551 
552 /*
553  * _fini
554  *	Prepares a module for unloading. It is called when the system
555  *	wants to unload a module. If the module determines that it can
556  *	be unloaded, then _fini() returns the value returned by
557  *	mod_remove(). Upon successful return from _fini() no other
558  *	routine in the module will be called before _init() is called.
559  *
560  * Returns:
561  *	0 = success
562  *
563  * Context:
564  *	Kernel context.
565  */
566 int
567 _fini(void)
568 {
569 	int	rval;
570 
571 	rval = mod_remove(&modlinkage);
572 	if (rval == 0) {
573 		mutex_destroy(&ql_global_hw_mutex);
574 		mutex_destroy(&ql_global_mutex);
575 		mutex_destroy(&ql_global_el_mutex);
576 		ddi_soft_state_fini(&ql_state);
577 	}
578 
579 	return (rval);
580 }
581 
582 /*
583  * _info
584  *	Returns information about loadable module.
585  *
586  * Input:
587  *	modinfo = pointer to module information structure.
588  *
589  * Returns:
590  *	Value returned by mod_info().
591  *
592  * Context:
593  *	Kernel context.
594  */
595 int
596 _info(struct modinfo *modinfop)
597 {
598 	return (mod_info(&modlinkage, modinfop));
599 }
600 
601 /* ************************************************************************ */
602 /*			dev_ops functions				    */
603 /* ************************************************************************ */
604 
605 /*
606  * ql_getinfo
607  *	Returns the pointer associated with arg when cmd is
608  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
609  *	instance number associated with arg when cmd is set
610  *	to DDI_INFO_DEV2INSTANCE.
611  *
612  * Input:
613  *	dip = Do not use.
614  *	cmd = command argument.
615  *	arg = command specific argument.
616  *	resultp = pointer to where request information is stored.
617  *
618  * Returns:
619  *	DDI_SUCCESS or DDI_FAILURE.
620  *
621  * Context:
622  *	Kernel context.
623  */
624 /* ARGSUSED */
625 static int
626 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
627 {
628 	ql_adapter_state_t	*ha;
629 	int			minor;
630 	int			rval = DDI_FAILURE;
631 
632 	minor = (int)(getminor((dev_t)arg));
633 	ha = ddi_get_soft_state(ql_state, minor);
634 	if (ha == NULL) {
635 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
636 		    getminor((dev_t)arg));
637 		*resultp = NULL;
638 		return (rval);
639 	}
640 
641 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
642 
643 	switch (cmd) {
644 	case DDI_INFO_DEVT2DEVINFO:
645 		*resultp = ha->dip;
646 		rval = DDI_SUCCESS;
647 		break;
648 	case DDI_INFO_DEVT2INSTANCE:
649 		*resultp = (void *)(uintptr_t)(ha->instance);
650 		rval = DDI_SUCCESS;
651 		break;
652 	default:
653 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
654 		rval = DDI_FAILURE;
655 		break;
656 	}
657 
658 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
659 
660 	return (rval);
661 }
662 
663 /*
664  * ql_attach
665  *	Configure and attach an instance of the driver
666  *	for a port.
667  *
668  * Input:
669  *	dip = pointer to device information structure.
670  *	cmd = attach type.
671  *
672  * Returns:
673  *	DDI_SUCCESS or DDI_FAILURE.
674  *
675  * Context:
676  *	Kernel context.
677  */
678 static int
679 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
680 {
681 	uint32_t		size;
682 	int			rval;
683 	int			instance;
684 	uint_t			progress = 0;
685 	char			*buf;
686 	ushort_t		caps_ptr, cap;
687 	fc_fca_tran_t		*tran;
688 	ql_adapter_state_t	*ha = NULL;
689 
690 	static char *pmcomps[] = {
691 		NULL,
692 		PM_LEVEL_D3_STR,		/* Device OFF */
693 		PM_LEVEL_D0_STR,		/* Device ON */
694 	};
695 
696 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
697 	    ddi_get_instance(dip), cmd);
698 
699 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
700 
701 	switch (cmd) {
702 	case DDI_ATTACH:
703 		/* first get the instance */
704 		instance = ddi_get_instance(dip);
705 
706 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
707 		    QL_NAME, instance, QL_VERSION);
708 
709 		/* Correct OS version? */
710 		if (ql_os_release_level != 11) {
711 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
712 			    "11", QL_NAME, instance);
713 			goto attach_failed;
714 		}
715 
716 		/* Hardware is installed in a DMA-capable slot? */
717 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
718 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
719 			    instance);
720 			goto attach_failed;
721 		}
722 
723 		/* No support for high-level interrupts */
724 		if (ddi_intr_hilevel(dip, 0) != 0) {
725 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
726 			    " not supported", QL_NAME, instance);
727 			goto attach_failed;
728 		}
729 
730 		/* Allocate our per-device-instance structure */
731 		if (ddi_soft_state_zalloc(ql_state,
732 		    instance) != DDI_SUCCESS) {
733 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
734 			    QL_NAME, instance);
735 			goto attach_failed;
736 		}
737 		progress |= QL_SOFT_STATE_ALLOCED;
738 
739 		ha = ddi_get_soft_state(ql_state, instance);
740 		if (ha == NULL) {
741 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
742 			    QL_NAME, instance);
743 			goto attach_failed;
744 		}
745 		ha->dip = dip;
746 		ha->instance = instance;
747 		ha->hba.base_address = ha;
748 		ha->pha = ha;
749 
750 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
751 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
752 			    QL_NAME, instance);
753 			goto attach_failed;
754 		}
755 
756 		/* Get extended logging and dump flags. */
757 		ql_common_properties(ha);
758 
759 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
760 		    "sbus") == 0) {
761 			EL(ha, "%s SBUS card detected", QL_NAME);
762 			ha->cfg_flags |= CFG_SBUS_CARD;
763 		}
764 
765 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
766 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
767 
768 		ha->outstanding_cmds = kmem_zalloc(
769 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
770 		    KM_SLEEP);
771 
772 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
773 		    QL_UB_LIMIT, KM_SLEEP);
774 
775 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
776 		    KM_SLEEP);
777 
778 		(void) ddi_pathname(dip, buf);
779 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
780 		if (ha->devpath == NULL) {
781 			EL(ha, "devpath mem alloc failed\n");
782 		} else {
783 			(void) strcpy(ha->devpath, buf);
784 			EL(ha, "devpath is: %s\n", ha->devpath);
785 		}
786 
787 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
788 			/*
789 			 * For cards where PCI is mapped to sbus e.g. Ivory.
790 			 *
791 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
792 			 *	: 0x100 - 0x3FF PCI IO space for 2200
793 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
794 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
795 			 */
796 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
797 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
798 			    != DDI_SUCCESS) {
799 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
800 				    " registers", QL_NAME, instance);
801 				goto attach_failed;
802 			}
803 			if (ddi_regs_map_setup(dip, 1,
804 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
805 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
806 			    != DDI_SUCCESS) {
807 				/* We should not fail attach here */
808 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
809 				    QL_NAME, instance);
810 				ha->sbus_fpga_iobase = NULL;
811 			}
812 			progress |= QL_REGS_MAPPED;
813 		} else {
814 			/*
815 			 * Setup the ISP2200 registers address mapping to be
816 			 * accessed by this particular driver.
817 			 * 0x0   Configuration Space
818 			 * 0x1   I/O Space
819 			 * 0x2   32-bit Memory Space address
820 			 * 0x3   64-bit Memory Space address
821 			 */
822 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
823 			    0, 0x100, &ql_dev_acc_attr,
824 			    &ha->dev_handle) != DDI_SUCCESS) {
825 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
826 				    "failed", QL_NAME, instance);
827 				goto attach_failed;
828 			}
829 			progress |= QL_REGS_MAPPED;
830 
831 			/*
832 			 * We need I/O space mappings for 23xx HBAs for
833 			 * loading flash (FCode). The chip has a bug due to
834 			 * which loading flash fails through mem space
835 			 * mappings in PCI-X mode.
836 			 */
837 			if (ddi_regs_map_setup(dip, 1,
838 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
839 			    &ql_dev_acc_attr,
840 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
841 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
842 				    " failed", QL_NAME, instance);
843 				goto attach_failed;
844 			}
845 			progress |= QL_IOMAP_IOBASE_MAPPED;
846 		}
847 
848 		/*
849 		 * We should map config space before adding interrupt
850 		 * So that the chip type (2200 or 2300) can be determined
851 		 * before the interrupt routine gets a chance to execute.
852 		 */
853 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
854 			if (ddi_regs_map_setup(dip, 0,
855 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
856 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
857 			    DDI_SUCCESS) {
858 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
859 				    "config registers", QL_NAME, instance);
860 				goto attach_failed;
861 			}
862 		} else {
863 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
864 			    DDI_SUCCESS) {
865 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
866 				    "config space", QL_NAME, instance);
867 				goto attach_failed;
868 			}
869 		}
870 		progress |= QL_CONFIG_SPACE_SETUP;
871 
872 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
873 		    PCI_CONF_SUBSYSID);
874 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
875 		    PCI_CONF_SUBVENID);
876 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
877 		    PCI_CONF_VENID);
878 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
879 		    PCI_CONF_DEVID);
880 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
881 		    PCI_CONF_REVID);
882 
883 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
884 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
885 		    ha->subven_id, ha->subsys_id);
886 
887 		switch (ha->device_id) {
888 		case 0x2300:
889 		case 0x2312:
890 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
891 		/*
892 		 * per marketing, fibre-lite HBA's are not supported
893 		 * on sparc platforms
894 		 */
895 		case 0x6312:
896 		case 0x6322:
897 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
898 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
899 				ha->flags |= FUNCTION_1;
900 			}
901 			if (ha->device_id == 0x6322) {
902 				ha->cfg_flags |= CFG_CTRL_6322;
903 				ha->fw_class = 0x6322;
904 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
905 			} else {
906 				ha->cfg_flags |= CFG_CTRL_2300;
907 				ha->fw_class = 0x2300;
908 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
909 			}
910 			ha->reg_off = &reg_off_2300;
911 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
912 				goto attach_failed;
913 			}
914 			ha->fcp_cmd = ql_command_iocb;
915 			ha->ip_cmd = ql_ip_iocb;
916 			ha->ms_cmd = ql_ms_iocb;
917 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
918 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
919 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
920 			} else {
921 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
922 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
923 			}
924 			break;
925 
926 		case 0x2200:
927 			ha->cfg_flags |= CFG_CTRL_2200;
928 			ha->reg_off = &reg_off_2200;
929 			ha->fw_class = 0x2200;
930 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
931 				goto attach_failed;
932 			}
933 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
934 			ha->fcp_cmd = ql_command_iocb;
935 			ha->ip_cmd = ql_ip_iocb;
936 			ha->ms_cmd = ql_ms_iocb;
937 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
938 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
939 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
940 			} else {
941 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
942 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
943 			}
944 			break;
945 
946 		case 0x2422:
947 		case 0x2432:
948 		case 0x5422:
949 		case 0x5432:
950 		case 0x8432:
951 #ifdef __sparc
952 			/*
953 			 * Per marketing, the QLA/QLE-2440's (which
954 			 * also use the 2422 & 2432) are only for the
955 			 * x86 platform (SMB market).
956 			 */
957 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
958 			    ha->subsys_id == 0x13e) {
959 				cmn_err(CE_WARN,
960 				    "%s(%d): Unsupported HBA ssid: %x",
961 				    QL_NAME, instance, ha->subsys_id);
962 				goto attach_failed;
963 			}
964 #endif	/* __sparc */
965 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
966 				ha->flags |= FUNCTION_1;
967 			}
968 			ha->cfg_flags |= CFG_CTRL_2422;
969 			if (ha->device_id == 0x8432) {
970 				ha->cfg_flags |= CFG_CTRL_MENLO;
971 			} else {
972 				ha->flags |= VP_ENABLED;
973 			}
974 
975 			ha->reg_off = &reg_off_2400_2500;
976 			ha->fw_class = 0x2400;
977 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
978 				goto attach_failed;
979 			}
980 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
981 			ha->fcp_cmd = ql_command_24xx_iocb;
982 			ha->ip_cmd = ql_ip_24xx_iocb;
983 			ha->ms_cmd = ql_ms_24xx_iocb;
984 			ha->els_cmd = ql_els_24xx_iocb;
985 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
986 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
987 			break;
988 
989 		case 0x2522:
990 		case 0x2532:
991 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
992 				ha->flags |= FUNCTION_1;
993 			}
994 			ha->cfg_flags |= CFG_CTRL_25XX;
995 			ha->flags |= VP_ENABLED;
996 			ha->fw_class = 0x2500;
997 			ha->reg_off = &reg_off_2400_2500;
998 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
999 				goto attach_failed;
1000 			}
1001 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1002 			ha->fcp_cmd = ql_command_24xx_iocb;
1003 			ha->ip_cmd = ql_ip_24xx_iocb;
1004 			ha->ms_cmd = ql_ms_24xx_iocb;
1005 			ha->els_cmd = ql_els_24xx_iocb;
1006 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1007 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1008 			break;
1009 
1010 		case 0x8001:
1011 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1012 				ha->flags |= FUNCTION_1;
1013 			}
1014 			ha->cfg_flags |= CFG_CTRL_81XX;
1015 			ha->flags |= VP_ENABLED;
1016 			ha->fw_class = 0x8100;
1017 			ha->reg_off = &reg_off_2400_2500;
1018 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1019 				goto attach_failed;
1020 			}
1021 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1022 			ha->fcp_cmd = ql_command_24xx_iocb;
1023 			ha->ip_cmd = ql_ip_24xx_iocb;
1024 			ha->ms_cmd = ql_ms_24xx_iocb;
1025 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1026 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1027 			break;
1028 
1029 		default:
1030 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1031 			    QL_NAME, instance, ha->device_id);
1032 			goto attach_failed;
1033 		}
1034 
1035 		/* Setup hba buffer. */
1036 
1037 		size = CFG_IST(ha, CFG_CTRL_242581) ?
1038 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1039 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1040 		    RCVBUF_QUEUE_SIZE);
1041 
1042 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1043 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1044 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1045 			    "alloc failed", QL_NAME, instance);
1046 			goto attach_failed;
1047 		}
1048 		progress |= QL_HBA_BUFFER_SETUP;
1049 
1050 		/* Setup buffer pointers. */
1051 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1052 		    REQUEST_Q_BUFFER_OFFSET;
1053 		ha->request_ring_bp = (struct cmd_entry *)
1054 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1055 
1056 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1057 		    RESPONSE_Q_BUFFER_OFFSET;
1058 		ha->response_ring_bp = (struct sts_entry *)
1059 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1060 
1061 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1062 		    RCVBUF_Q_BUFFER_OFFSET;
1063 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1064 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1065 
1066 		/* Allocate resource for QLogic IOCTL */
1067 		(void) ql_alloc_xioctl_resource(ha);
1068 
1069 		/* Setup interrupts */
1070 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1071 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1072 			    "rval=%xh", QL_NAME, instance, rval);
1073 			goto attach_failed;
1074 		}
1075 
1076 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1077 
1078 		/*
1079 		 * Allocate an N Port information structure
1080 		 * for use when in P2P topology.
1081 		 */
1082 		ha->n_port = (ql_n_port_info_t *)
1083 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1084 		if (ha->n_port == NULL) {
1085 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1086 			    QL_NAME, instance);
1087 			goto attach_failed;
1088 		}
1089 
1090 		progress |= QL_N_PORT_INFO_CREATED;
1091 
1092 		/*
1093 		 * Determine support for Power Management
1094 		 */
1095 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1096 
1097 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1098 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1099 			if (cap == PCI_CAP_ID_PM) {
1100 				ha->pm_capable = 1;
1101 				break;
1102 			}
1103 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1104 			    PCI_CAP_NEXT_PTR);
1105 		}
1106 
1107 		if (ha->pm_capable) {
1108 			/*
1109 			 * Enable PM for 2200 based HBAs only.
1110 			 */
1111 			if (ha->device_id != 0x2200) {
1112 				ha->pm_capable = 0;
1113 			}
1114 		}
1115 
1116 		if (ha->pm_capable) {
1117 			ha->pm_capable = ql_enable_pm;
1118 		}
1119 
1120 		if (ha->pm_capable) {
1121 			/*
1122 			 * Initialize power management bookkeeping;
1123 			 * components are created idle.
1124 			 */
1125 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1126 			pmcomps[0] = buf;
1127 
1128 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1129 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1130 			    dip, "pm-components", pmcomps,
1131 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1132 			    DDI_PROP_SUCCESS) {
1133 				cmn_err(CE_WARN, "%s(%d): failed to create"
1134 				    " pm-components property", QL_NAME,
1135 				    instance);
1136 
1137 				/* Initialize adapter. */
1138 				ha->power_level = PM_LEVEL_D0;
1139 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1140 					cmn_err(CE_WARN, "%s(%d): failed to"
1141 					    " initialize adapter", QL_NAME,
1142 					    instance);
1143 					goto attach_failed;
1144 				}
1145 			} else {
1146 				ha->power_level = PM_LEVEL_D3;
1147 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1148 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1149 					cmn_err(CE_WARN, "%s(%d): failed to"
1150 					    " raise power or initialize"
1151 					    " adapter", QL_NAME, instance);
1152 				}
1153 				ASSERT(ha->power_level == PM_LEVEL_D0);
1154 			}
1155 		} else {
1156 			/* Initialize adapter. */
1157 			ha->power_level = PM_LEVEL_D0;
1158 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1159 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1160 				    " adapter", QL_NAME, instance);
1161 			}
1162 		}
1163 
1164 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1165 		    ha->fw_subminor_version == 0) {
1166 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1167 			    QL_NAME, ha->instance);
1168 		} else {
1169 			cmn_err(CE_NOTE, "!%s(%d): Firmware version %d.%d.%d",
1170 			    QL_NAME, ha->instance, ha->fw_major_version,
1171 			    ha->fw_minor_version, ha->fw_subminor_version);
1172 		}
1173 
1174 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1175 		    "controller", KSTAT_TYPE_RAW,
1176 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1177 		if (ha->k_stats == NULL) {
1178 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1179 			    QL_NAME, instance);
1180 			goto attach_failed;
1181 		}
1182 		progress |= QL_KSTAT_CREATED;
1183 
1184 		ha->adapter_stats->version = 1;
1185 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1186 		ha->k_stats->ks_private = ha;
1187 		ha->k_stats->ks_update = ql_kstat_update;
1188 		ha->k_stats->ks_ndata = 1;
1189 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1190 		kstat_install(ha->k_stats);
1191 
1192 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1193 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1194 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1195 			    QL_NAME, instance);
1196 			goto attach_failed;
1197 		}
1198 		progress |= QL_MINOR_NODE_CREATED;
1199 
1200 		/* Allocate a transport structure for this instance */
1201 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1202 		ASSERT(tran != NULL);
1203 
1204 		progress |= QL_FCA_TRAN_ALLOCED;
1205 
1206 		/* fill in the structure */
1207 		tran->fca_numports = 1;
1208 		tran->fca_version = FCTL_FCA_MODREV_5;
1209 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1210 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1211 		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1212 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1213 		}
1214 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1215 		    tran->fca_perm_pwwn.raw_wwn, 8);
1216 
1217 		EL(ha, "FCA version %d\n", tran->fca_version);
1218 
1219 		/* Specify the amount of space needed in each packet */
1220 		tran->fca_pkt_size = sizeof (ql_srb_t);
1221 
1222 		/* command limits are usually dictated by hardware */
1223 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1224 
1225 		/* dmaattr are static, set elsewhere. */
1226 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1227 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1228 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1229 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1230 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1231 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1232 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1233 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1234 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1235 		} else {
1236 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1237 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1238 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1239 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1240 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1241 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1242 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1243 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1244 		}
1245 
1246 		tran->fca_acc_attr = &ql_dev_acc_attr;
1247 		tran->fca_iblock = &(ha->iblock_cookie);
1248 
1249 		/* the remaining values are simply function vectors */
1250 		tran->fca_bind_port = ql_bind_port;
1251 		tran->fca_unbind_port = ql_unbind_port;
1252 		tran->fca_init_pkt = ql_init_pkt;
1253 		tran->fca_un_init_pkt = ql_un_init_pkt;
1254 		tran->fca_els_send = ql_els_send;
1255 		tran->fca_get_cap = ql_get_cap;
1256 		tran->fca_set_cap = ql_set_cap;
1257 		tran->fca_getmap = ql_getmap;
1258 		tran->fca_transport = ql_transport;
1259 		tran->fca_ub_alloc = ql_ub_alloc;
1260 		tran->fca_ub_free = ql_ub_free;
1261 		tran->fca_ub_release = ql_ub_release;
1262 		tran->fca_abort = ql_abort;
1263 		tran->fca_reset = ql_reset;
1264 		tran->fca_port_manage = ql_port_manage;
1265 		tran->fca_get_device = ql_get_device;
1266 
1267 		/* give it to the FC transport */
1268 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1269 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1270 			    instance);
1271 			goto attach_failed;
1272 		}
1273 		progress |= QL_FCA_ATTACH_DONE;
1274 
1275 		/* Stash the structure so it can be freed at detach */
1276 		ha->tran = tran;
1277 
1278 		/* Acquire global state lock. */
1279 		GLOBAL_STATE_LOCK();
1280 
1281 		/* Add adapter structure to link list. */
1282 		ql_add_link_b(&ql_hba, &ha->hba);
1283 
1284 		/* Start one second driver timer. */
1285 		if (ql_timer_timeout_id == NULL) {
1286 			ql_timer_ticks = drv_usectohz(1000000);
1287 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1288 			    ql_timer_ticks);
1289 		}
1290 
1291 		/* Release global state lock. */
1292 		GLOBAL_STATE_UNLOCK();
1293 
1294 		/* Determine and populate HBA fru info */
1295 		ql_setup_fruinfo(ha);
1296 
1297 		/* Setup task_daemon thread. */
1298 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1299 		    0, &p0, TS_RUN, minclsyspri);
1300 
1301 		progress |= QL_TASK_DAEMON_STARTED;
1302 
1303 		ddi_report_dev(dip);
1304 
1305 		/* Disable link reset in panic path */
1306 		ha->lip_on_panic = 1;
1307 
1308 		rval = DDI_SUCCESS;
1309 		break;
1310 
1311 attach_failed:
1312 		if (progress & QL_FCA_ATTACH_DONE) {
1313 			(void) fc_fca_detach(dip);
1314 			progress &= ~QL_FCA_ATTACH_DONE;
1315 		}
1316 
1317 		if (progress & QL_FCA_TRAN_ALLOCED) {
1318 			kmem_free(tran, sizeof (fc_fca_tran_t));
1319 			progress &= ~QL_FCA_TRAN_ALLOCED;
1320 		}
1321 
1322 		if (progress & QL_MINOR_NODE_CREATED) {
1323 			ddi_remove_minor_node(dip, "devctl");
1324 			progress &= ~QL_MINOR_NODE_CREATED;
1325 		}
1326 
1327 		if (progress & QL_KSTAT_CREATED) {
1328 			kstat_delete(ha->k_stats);
1329 			progress &= ~QL_KSTAT_CREATED;
1330 		}
1331 
1332 		if (progress & QL_N_PORT_INFO_CREATED) {
1333 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1334 			progress &= ~QL_N_PORT_INFO_CREATED;
1335 		}
1336 
1337 		if (progress & QL_TASK_DAEMON_STARTED) {
1338 			TASK_DAEMON_LOCK(ha);
1339 
1340 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1341 
1342 			cv_signal(&ha->cv_task_daemon);
1343 
1344 			/* Release task daemon lock. */
1345 			TASK_DAEMON_UNLOCK(ha);
1346 
			/* Wait for the task daemon to stop running. */
1348 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1349 				ql_delay(ha, 10000);
1350 			}
1351 			progress &= ~QL_TASK_DAEMON_STARTED;
1352 		}
1353 
1354 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1355 			ddi_regs_map_free(&ha->iomap_dev_handle);
1356 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1357 		}
1358 
1359 		if (progress & QL_CONFIG_SPACE_SETUP) {
1360 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1361 				ddi_regs_map_free(&ha->sbus_config_handle);
1362 			} else {
1363 				pci_config_teardown(&ha->pci_handle);
1364 			}
1365 			progress &= ~QL_CONFIG_SPACE_SETUP;
1366 		}
1367 
1368 		if (progress & QL_INTR_ADDED) {
1369 			ql_disable_intr(ha);
1370 			ql_release_intr(ha);
1371 			progress &= ~QL_INTR_ADDED;
1372 		}
1373 
1374 		if (progress & QL_MUTEX_CV_INITED) {
1375 			ql_destroy_mutex(ha);
1376 			progress &= ~QL_MUTEX_CV_INITED;
1377 		}
1378 
1379 		if (progress & QL_HBA_BUFFER_SETUP) {
1380 			ql_free_phys(ha, &ha->hba_buf);
1381 			progress &= ~QL_HBA_BUFFER_SETUP;
1382 		}
1383 
1384 		if (progress & QL_REGS_MAPPED) {
1385 			ddi_regs_map_free(&ha->dev_handle);
1386 			if (ha->sbus_fpga_iobase != NULL) {
1387 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1388 			}
1389 			progress &= ~QL_REGS_MAPPED;
1390 		}
1391 
1392 		if (progress & QL_SOFT_STATE_ALLOCED) {
1393 
1394 			ql_fcache_rel(ha->fcache);
1395 
1396 			ASSERT(ha->dev && ha->outstanding_cmds &&
1397 			    ha->ub_array && ha->adapter_stats);
1398 
1399 			kmem_free(ha->adapter_stats,
1400 			    sizeof (*ha->adapter_stats));
1401 
1402 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1403 			    QL_UB_LIMIT);
1404 
1405 			kmem_free(ha->outstanding_cmds,
1406 			    sizeof (*ha->outstanding_cmds) *
1407 			    MAX_OUTSTANDING_COMMANDS);
1408 
1409 			if (ha->devpath != NULL) {
1410 				kmem_free(ha->devpath,
1411 				    strlen(ha->devpath) + 1);
1412 			}
1413 
1414 			kmem_free(ha->dev, sizeof (*ha->dev) *
1415 			    DEVICE_HEAD_LIST_SIZE);
1416 
1417 			if (ha->xioctl != NULL) {
1418 				ql_free_xioctl_resource(ha);
1419 			}
1420 
1421 			if (ha->fw_module != NULL) {
1422 				(void) ddi_modclose(ha->fw_module);
1423 			}
1424 
1425 			ddi_soft_state_free(ql_state, instance);
1426 			progress &= ~QL_SOFT_STATE_ALLOCED;
1427 		}
1428 		ASSERT(progress == 0);
1429 
1430 		ddi_prop_remove_all(dip);
1431 		rval = DDI_FAILURE;
1432 		break;
1433 
1434 	case DDI_RESUME:
1435 		rval = DDI_FAILURE;
1436 
1437 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1438 		if (ha == NULL) {
1439 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1440 			    QL_NAME, instance);
1441 			break;
1442 		}
1443 
1444 		ha->power_level = PM_LEVEL_D3;
1445 		if (ha->pm_capable) {
1446 			/*
1447 			 * Get ql_power to do power on initialization
1448 			 */
1449 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1450 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1451 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1452 				    " power", QL_NAME, instance);
1453 			}
1454 		}
1455 
1456 		/*
1457 		 * There is a bug in DR that prevents PM framework
1458 		 * from calling ql_power.
1459 		 */
1460 		if (ha->power_level == PM_LEVEL_D3) {
1461 			ha->power_level = PM_LEVEL_D0;
1462 
1463 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1464 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1465 				    " adapter", QL_NAME, instance);
1466 			}
1467 
1468 			/* Wake up task_daemon. */
1469 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1470 			    0);
1471 		}
1472 
1473 		/* Acquire global state lock. */
1474 		GLOBAL_STATE_LOCK();
1475 
1476 		/* Restart driver timer. */
1477 		if (ql_timer_timeout_id == NULL) {
1478 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1479 			    ql_timer_ticks);
1480 		}
1481 
1482 		/* Release global state lock. */
1483 		GLOBAL_STATE_UNLOCK();
1484 
1485 		/* Wake up command start routine. */
1486 		ADAPTER_STATE_LOCK(ha);
1487 		ha->flags &= ~ADAPTER_SUSPENDED;
1488 		ADAPTER_STATE_UNLOCK(ha);
1489 
1490 		/*
1491 		 * Transport doesn't make FC discovery in polled
1492 		 * mode; So we need the daemon thread's services
1493 		 * right here.
1494 		 */
1495 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1496 
1497 		rval = DDI_SUCCESS;
1498 
1499 		/* Restart IP if it was running. */
1500 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1501 			(void) ql_initialize_ip(ha);
1502 			ql_isp_rcvbuf(ha);
1503 		}
1504 		break;
1505 
1506 	default:
1507 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1508 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1509 		rval = DDI_FAILURE;
1510 		break;
1511 	}
1512 
1513 	kmem_free(buf, MAXPATHLEN);
1514 
1515 	if (rval != DDI_SUCCESS) {
1516 		/*EMPTY*/
1517 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1518 		    ddi_get_instance(dip), rval);
1519 	} else {
1520 		/*EMPTY*/
1521 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1522 	}
1523 
1524 	return (rval);
1525 }
1526 
1527 /*
1528  * ql_detach
1529  *	Used to remove all the states associated with a given
1530  *	instances of a device node prior to the removal of that
1531  *	instance from the system.
1532  *
1533  * Input:
1534  *	dip = pointer to device information structure.
1535  *	cmd = type of detach.
1536  *
1537  * Returns:
1538  *	DDI_SUCCESS or DDI_FAILURE.
1539  *
1540  * Context:
1541  *	Kernel context.
1542  */
1543 static int
1544 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1545 {
1546 	ql_adapter_state_t	*ha, *vha;
1547 	ql_tgt_t		*tq;
1548 	int			try;
1549 	uint16_t		index;
1550 	ql_link_t		*link;
1551 	char			*buf;
1552 	timeout_id_t		timer_id = NULL;
1553 	int			rval = DDI_SUCCESS;
1554 
1555 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1556 	if (ha == NULL) {
1557 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1558 		    ddi_get_instance(dip));
1559 		return (DDI_FAILURE);
1560 	}
1561 
1562 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1563 
1564 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1565 
1566 	switch (cmd) {
1567 	case DDI_DETACH:
1568 		ADAPTER_STATE_LOCK(ha);
1569 		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1570 		ADAPTER_STATE_UNLOCK(ha);
1571 
1572 		/* Acquire task daemon lock. */
1573 		TASK_DAEMON_LOCK(ha);
1574 
1575 		ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1576 		cv_signal(&ha->cv_task_daemon);
1577 
1578 		/* Release task daemon lock. */
1579 		TASK_DAEMON_UNLOCK(ha);
1580 
1581 		/*
1582 		 * Wait for task daemon to stop running.
1583 		 * Internal command timeout is approximately
1584 		 * 30 seconds, so it would help in some corner
1585 		 * cases to wait that long
1586 		 */
1587 		try = 0;
1588 		while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) &&
1589 		    try < 3000) {
1590 			ql_delay(ha, 10000);
1591 			try++;
1592 		}
1593 
1594 		TASK_DAEMON_LOCK(ha);
1595 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1596 			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1597 			TASK_DAEMON_UNLOCK(ha);
1598 			EL(ha, "failed, could not stop task daemon\n");
1599 			return (DDI_FAILURE);
1600 		}
1601 		TASK_DAEMON_UNLOCK(ha);
1602 
1603 		/* Acquire global state lock. */
1604 		GLOBAL_STATE_LOCK();
1605 
1606 		/* Disable driver timer if no adapters. */
1607 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1608 		    ql_hba.last == &ha->hba) {
1609 			timer_id = ql_timer_timeout_id;
1610 			ql_timer_timeout_id = NULL;
1611 		}
1612 		ql_remove_link(&ql_hba, &ha->hba);
1613 
1614 		GLOBAL_STATE_UNLOCK();
1615 
1616 		if (timer_id) {
1617 			(void) untimeout(timer_id);
1618 		}
1619 
1620 		if (ha->pm_capable) {
1621 			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1622 			    PM_LEVEL_D3) != DDI_SUCCESS) {
1623 				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1624 				    " power", QL_NAME, ha->instance);
1625 			}
1626 		}
1627 
1628 		/*
1629 		 * If pm_lower_power shutdown the adapter, there
1630 		 * isn't much else to do
1631 		 */
1632 		if (ha->power_level != PM_LEVEL_D3) {
1633 			ql_halt(ha, PM_LEVEL_D3);
1634 		}
1635 
1636 		/* Remove virtual ports. */
1637 		while ((vha = ha->vp_next) != NULL) {
1638 			ql_vport_destroy(vha);
1639 		}
1640 
1641 		/* Free target queues. */
1642 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1643 			link = ha->dev[index].first;
1644 			while (link != NULL) {
1645 				tq = link->base_address;
1646 				link = link->next;
1647 				ql_dev_free(ha, tq);
1648 			}
1649 		}
1650 
1651 		/*
1652 		 * Free unsolicited buffers.
1653 		 * If we are here then there are no ULPs still
1654 		 * alive that wish to talk to ql so free up
1655 		 * any SRB_IP_UB_UNUSED buffers that are
1656 		 * lingering around
1657 		 */
1658 		QL_UB_LOCK(ha);
1659 		for (index = 0; index < QL_UB_LIMIT; index++) {
1660 			fc_unsol_buf_t *ubp = ha->ub_array[index];
1661 
1662 			if (ubp != NULL) {
1663 				ql_srb_t *sp = ubp->ub_fca_private;
1664 
1665 				sp->flags |= SRB_UB_FREE_REQUESTED;
1666 
1667 				while (!(sp->flags & SRB_UB_IN_FCA) ||
1668 				    (sp->flags & (SRB_UB_CALLBACK |
1669 				    SRB_UB_ACQUIRED))) {
1670 					QL_UB_UNLOCK(ha);
1671 					delay(drv_usectohz(100000));
1672 					QL_UB_LOCK(ha);
1673 				}
1674 				ha->ub_array[index] = NULL;
1675 
1676 				QL_UB_UNLOCK(ha);
1677 				ql_free_unsolicited_buffer(ha, ubp);
1678 				QL_UB_LOCK(ha);
1679 			}
1680 		}
1681 		QL_UB_UNLOCK(ha);
1682 
1683 		/* Free any saved RISC code. */
1684 		if (ha->risc_code != NULL) {
1685 			kmem_free(ha->risc_code, ha->risc_code_size);
1686 			ha->risc_code = NULL;
1687 			ha->risc_code_size = 0;
1688 		}
1689 
1690 		if (ha->fw_module != NULL) {
1691 			(void) ddi_modclose(ha->fw_module);
1692 			ha->fw_module = NULL;
1693 		}
1694 
1695 		/* Free resources. */
1696 		ddi_prop_remove_all(dip);
1697 		(void) fc_fca_detach(dip);
1698 		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1699 		ddi_remove_minor_node(dip, "devctl");
1700 		if (ha->k_stats != NULL) {
1701 			kstat_delete(ha->k_stats);
1702 		}
1703 
1704 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1705 			ddi_regs_map_free(&ha->sbus_config_handle);
1706 		} else {
1707 			ddi_regs_map_free(&ha->iomap_dev_handle);
1708 			pci_config_teardown(&ha->pci_handle);
1709 		}
1710 
1711 		ql_disable_intr(ha);
1712 		ql_release_intr(ha);
1713 
1714 		ql_free_xioctl_resource(ha);
1715 
1716 		ql_destroy_mutex(ha);
1717 
1718 		ql_free_phys(ha, &ha->hba_buf);
1719 		ql_free_phys(ha, &ha->fwexttracebuf);
1720 		ql_free_phys(ha, &ha->fwfcetracebuf);
1721 
1722 		ddi_regs_map_free(&ha->dev_handle);
1723 		if (ha->sbus_fpga_iobase != NULL) {
1724 			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1725 		}
1726 
1727 		ql_fcache_rel(ha->fcache);
1728 		if (ha->vcache != NULL) {
1729 			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1730 		}
1731 
1732 		if (ha->pi_attrs != NULL) {
1733 			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1734 		}
1735 
1736 		ASSERT(ha->dev && ha->outstanding_cmds && ha->ub_array &&
1737 		    ha->adapter_stats);
1738 
1739 		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1740 
1741 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1742 
1743 		kmem_free(ha->outstanding_cmds,
1744 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1745 
1746 		if (ha->n_port != NULL) {
1747 			kmem_free(&ha->n_port, sizeof (ql_n_port_info_t));
1748 		}
1749 
1750 		if (ha->devpath != NULL) {
1751 			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1752 		}
1753 
1754 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1755 
1756 		EL(ha, "detached\n");
1757 
1758 		ddi_soft_state_free(ql_state, (int)ha->instance);
1759 
1760 		break;
1761 
1762 	case DDI_SUSPEND:
1763 		ADAPTER_STATE_LOCK(ha);
1764 
1765 		try = 0;
1766 		ha->flags |= ADAPTER_SUSPENDED;
1767 		while (ha->flags & ADAPTER_TIMER_BUSY && try++ < 10) {
1768 			ADAPTER_STATE_UNLOCK(ha);
1769 			delay(drv_usectohz(1000000));
1770 			ADAPTER_STATE_LOCK(ha);
1771 		}
1772 		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1773 			ha->flags &= ~ADAPTER_SUSPENDED;
1774 			ADAPTER_STATE_UNLOCK(ha);
1775 			rval = DDI_FAILURE;
1776 			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1777 			    " busy %xh flags %xh", QL_NAME, ha->instance,
1778 			    ha->busy, ha->flags);
1779 			break;
1780 		}
1781 
1782 		ADAPTER_STATE_UNLOCK(ha);
1783 
1784 		if (ha->flags & IP_INITIALIZED) {
1785 			(void) ql_shutdown_ip(ha);
1786 		}
1787 
1788 		try = ql_suspend_adapter(ha);
1789 		if (try != QL_SUCCESS) {
1790 			ADAPTER_STATE_LOCK(ha);
1791 			ha->flags &= ~ADAPTER_SUSPENDED;
1792 			ADAPTER_STATE_UNLOCK(ha);
1793 			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
1794 			    QL_NAME, ha->instance, try);
1795 
1796 			/* Restart IP if it was running. */
1797 			if (ha->flags & IP_ENABLED &&
1798 			    !(ha->flags & IP_INITIALIZED)) {
1799 				(void) ql_initialize_ip(ha);
1800 				ql_isp_rcvbuf(ha);
1801 			}
1802 			rval = DDI_FAILURE;
1803 			break;
1804 		}
1805 
1806 		/* Acquire global state lock. */
1807 		GLOBAL_STATE_LOCK();
1808 
1809 		/* Disable driver timer if last adapter. */
1810 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1811 		    ql_hba.last == &ha->hba) {
1812 			timer_id = ql_timer_timeout_id;
1813 			ql_timer_timeout_id = NULL;
1814 		}
1815 		GLOBAL_STATE_UNLOCK();
1816 
1817 		if (timer_id) {
1818 			(void) untimeout(timer_id);
1819 		}
1820 
1821 		break;
1822 
1823 	default:
1824 		rval = DDI_FAILURE;
1825 		break;
1826 	}
1827 
1828 	kmem_free(buf, MAXPATHLEN);
1829 
1830 	if (rval != DDI_SUCCESS) {
1831 		if (ha != NULL) {
1832 			EL(ha, "failed, rval = %xh\n", rval);
1833 		} else {
1834 			/*EMPTY*/
1835 			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1836 			    ddi_get_instance(dip), rval);
1837 		}
1838 	} else {
1839 		/*EMPTY*/
1840 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1841 	}
1842 
1843 	return (rval);
1844 }
1845 
1846 /*
1847  * ql_power
1848  *	Power a device attached to the system.
1849  *
1850  * Input:
1851  *	dip = pointer to device information structure.
1852  *	component = device.
1853  *	level = power level.
1854  *
1855  * Returns:
1856  *	DDI_SUCCESS or DDI_FAILURE.
1857  *
1858  * Context:
1859  *	Kernel context.
1860  */
1861 /* ARGSUSED */
1862 static int
1863 ql_power(dev_info_t *dip, int component, int level)
1864 {
1865 	int			rval = DDI_FAILURE;
1866 	off_t			csr;
1867 	uint8_t			saved_pm_val;
1868 	ql_adapter_state_t	*ha;
1869 	char			*buf;
1870 	char			*path;
1871 
1872 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1873 	if (ha == NULL || ha->pm_capable == 0) {
1874 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1875 		    ddi_get_instance(dip));
1876 		return (rval);
1877 	}
1878 
1879 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1880 
1881 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1882 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1883 
1884 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1885 	    level != PM_LEVEL_D3)) {
1886 		EL(ha, "invalid, component=%xh or level=%xh\n",
1887 		    component, level);
1888 		return (rval);
1889 	}
1890 
1891 	GLOBAL_HW_LOCK();
1892 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1893 	GLOBAL_HW_UNLOCK();
1894 
1895 	ASSERT(csr == QL_PM_CS_REG);
1896 
1897 	(void) snprintf(buf, sizeof (buf),
1898 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1899 	    ddi_pathname(dip, path));
1900 
1901 	switch (level) {
1902 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1903 
1904 		QL_PM_LOCK(ha);
1905 		if (ha->power_level == PM_LEVEL_D0) {
1906 			QL_PM_UNLOCK(ha);
1907 			rval = DDI_SUCCESS;
1908 			break;
1909 		}
1910 
1911 		/*
1912 		 * Enable interrupts now
1913 		 */
1914 		saved_pm_val = ha->power_level;
1915 		ha->power_level = PM_LEVEL_D0;
1916 		QL_PM_UNLOCK(ha);
1917 
1918 		GLOBAL_HW_LOCK();
1919 
1920 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1921 
1922 		/*
1923 		 * Delay after reset, for chip to recover.
1924 		 * Otherwise causes system PANIC
1925 		 */
1926 		drv_usecwait(200000);
1927 
1928 		GLOBAL_HW_UNLOCK();
1929 
1930 		if (ha->config_saved) {
1931 			ha->config_saved = 0;
1932 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1933 				QL_PM_LOCK(ha);
1934 				ha->power_level = saved_pm_val;
1935 				QL_PM_UNLOCK(ha);
1936 				cmn_err(CE_WARN, "%s failed to restore "
1937 				    "config regs", buf);
1938 				break;
1939 			}
1940 		}
1941 
1942 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1943 			cmn_err(CE_WARN, "%s adapter initialization failed",
1944 			    buf);
1945 		}
1946 
1947 		/* Wake up task_daemon. */
1948 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1949 		    TASK_DAEMON_SLEEPING_FLG, 0);
1950 
1951 		/* Restart IP if it was running. */
1952 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1953 			(void) ql_initialize_ip(ha);
1954 			ql_isp_rcvbuf(ha);
1955 		}
1956 
1957 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1958 		    ha->instance, QL_NAME);
1959 
1960 		rval = DDI_SUCCESS;
1961 		break;
1962 
1963 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1964 
1965 		QL_PM_LOCK(ha);
1966 
1967 		if (ha->busy || ((ha->task_daemon_flags &
1968 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1969 			QL_PM_UNLOCK(ha);
1970 			break;
1971 		}
1972 
1973 		if (ha->power_level == PM_LEVEL_D3) {
1974 			rval = DDI_SUCCESS;
1975 			QL_PM_UNLOCK(ha);
1976 			break;
1977 		}
1978 		QL_PM_UNLOCK(ha);
1979 
1980 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1981 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
1982 			    " config regs", QL_NAME, ha->instance, buf);
1983 			break;
1984 		}
1985 		ha->config_saved = 1;
1986 
1987 		/*
1988 		 * Don't enable interrupts. Running mailbox commands with
1989 		 * interrupts enabled could cause hangs since pm_run_scan()
1990 		 * runs out of a callout thread and on single cpu systems
1991 		 * cv_timedwait(), called from ql_mailbox_command(), would
1992 		 * not get to run.
1993 		 */
1994 		TASK_DAEMON_LOCK(ha);
1995 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
1996 		TASK_DAEMON_UNLOCK(ha);
1997 
1998 		ql_halt(ha, PM_LEVEL_D3);
1999 
2000 		/*
2001 		 * Setup ql_intr to ignore interrupts from here on.
2002 		 */
2003 		QL_PM_LOCK(ha);
2004 		ha->power_level = PM_LEVEL_D3;
2005 		QL_PM_UNLOCK(ha);
2006 
2007 		/*
2008 		 * Wait for ISR to complete.
2009 		 */
2010 		INTR_LOCK(ha);
2011 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2012 		INTR_UNLOCK(ha);
2013 
2014 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2015 		    ha->instance, QL_NAME);
2016 
2017 		rval = DDI_SUCCESS;
2018 		break;
2019 	}
2020 
2021 	kmem_free(buf, MAXPATHLEN);
2022 	kmem_free(path, MAXPATHLEN);
2023 
2024 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2025 
2026 	return (rval);
2027 }
2028 
2029 /*
2030  * ql_quiesce
2031  *	quiesce a device attached to the system.
2032  *
2033  * Input:
2034  *	dip = pointer to device information structure.
2035  *
2036  * Returns:
2037  *	DDI_SUCCESS
2038  *
2039  * Context:
2040  *	Kernel context.
2041  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * 24xx/25xx/81xx: request the RISC firmware to stop via
		 * the MBC_STOP_FIRMWARE mailbox command, then poll for
		 * completion (30000 * 100us = ~3 seconds worst case).
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, intr_info_lo);
			/* BIT_15 indicates an interrupt is pending. */
			if (stat & BIT_15) {
				/*
				 * Status codes below 0x12 are treated as
				 * mailbox command completion; other codes
				 * are cleared and polling continues.
				 */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Legacy ISP2xxx quiesce: reset the RISC directly. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
2097 
2098 /* ************************************************************************ */
2099 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2100 /* ************************************************************************ */
2101 
2102 /*
2103  * ql_bind_port
2104  *	Handling port binding. The FC Transport attempts to bind an FCA port
2105  *	when it is ready to start transactions on the port. The FC Transport
2106  *	will call the fca_bind_port() function specified in the fca_transport
2107  *	structure it receives. The FCA must fill in the port_info structure
2108  *	passed in the call and also stash the information for future calls.
2109  *
2110  * Input:
2111  *	dip = pointer to FCA information structure.
2112  *	port_info = pointer to port information structure.
2113  *	bind_info = pointer to bind information structure.
2114  *
2115  * Returns:
2116  *	NULL = failure
2117  *
2118  * Context:
2119  *	Kernel context.
2120  */
2121 static opaque_t
2122 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2123     fc_fca_bind_info_t *bind_info)
2124 {
2125 	ql_adapter_state_t	*ha, *vha;
2126 	opaque_t		fca_handle = NULL;
2127 	port_id_t		d_id;
2128 	int			port_npiv = bind_info->port_npiv;
2129 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2130 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2131 
2132 	/* get state info based on the dip */
2133 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2134 	if (ha == NULL) {
2135 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2136 		    ddi_get_instance(dip));
2137 		return (NULL);
2138 	}
2139 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2140 
2141 	/* Verify port number is supported. */
2142 	if (port_npiv != 0) {
2143 		if (!(ha->flags & VP_ENABLED)) {
2144 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2145 			    ha->instance);
2146 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2147 			return (NULL);
2148 		}
2149 		if (!(ha->flags & POINT_TO_POINT)) {
2150 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2151 			    ha->instance);
2152 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2153 			return (NULL);
2154 		}
2155 		if (!(ha->flags & FDISC_ENABLED)) {
2156 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2157 			    "FDISC\n", ha->instance);
2158 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2159 			return (NULL);
2160 		}
2161 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2162 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2163 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2164 			    "FC_OUTOFBOUNDS\n", ha->instance);
2165 			port_info->pi_error = FC_OUTOFBOUNDS;
2166 			return (NULL);
2167 		}
2168 	} else if (bind_info->port_num != 0) {
2169 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2170 		    "supported\n", ha->instance, bind_info->port_num);
2171 		port_info->pi_error = FC_OUTOFBOUNDS;
2172 		return (NULL);
2173 	}
2174 
2175 	/* Locate port context. */
2176 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2177 		if (vha->vp_index == bind_info->port_num) {
2178 			break;
2179 		}
2180 	}
2181 
2182 	/* If virtual port does not exist. */
2183 	if (vha == NULL) {
2184 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2185 	}
2186 
2187 	/* make sure this port isn't already bound */
2188 	if (vha->flags & FCA_BOUND) {
2189 		port_info->pi_error = FC_ALREADY;
2190 	} else {
2191 		if (vha->vp_index != 0) {
2192 			bcopy(port_nwwn,
2193 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2194 			bcopy(port_pwwn,
2195 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2196 		}
2197 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2198 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2199 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2200 				    "virtual port=%d\n", ha->instance,
2201 				    vha->vp_index);
2202 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2203 				return (NULL);
2204 			}
2205 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2206 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2207 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2208 			    QL_NAME, ha->instance, vha->vp_index,
2209 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2210 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2211 			    port_pwwn[6], port_pwwn[7],
2212 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2213 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2214 			    port_nwwn[6], port_nwwn[7]);
2215 		}
2216 
2217 		/* stash the bind_info supplied by the FC Transport */
2218 		vha->bind_info.port_handle = bind_info->port_handle;
2219 		vha->bind_info.port_statec_cb =
2220 		    bind_info->port_statec_cb;
2221 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2222 
2223 		/* Set port's source ID. */
2224 		port_info->pi_s_id.port_id = vha->d_id.b24;
2225 
2226 		/* copy out the default login parameters */
2227 		bcopy((void *)&vha->loginparams,
2228 		    (void *)&port_info->pi_login_params,
2229 		    sizeof (la_els_logi_t));
2230 
2231 		/* Set port's hard address if enabled. */
2232 		port_info->pi_hard_addr.hard_addr = 0;
2233 		if (bind_info->port_num == 0) {
2234 			d_id.b24 = ha->d_id.b24;
2235 			if (CFG_IST(ha, CFG_CTRL_242581)) {
2236 				if (ha->init_ctrl_blk.cb24.
2237 				    firmware_options_1[0] & BIT_0) {
2238 					d_id.b.al_pa = ql_index_to_alpa[ha->
2239 					    init_ctrl_blk.cb24.
2240 					    hard_address[0]];
2241 					port_info->pi_hard_addr.hard_addr =
2242 					    d_id.b24;
2243 				}
2244 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2245 			    BIT_0) {
2246 				d_id.b.al_pa = ql_index_to_alpa[ha->
2247 				    init_ctrl_blk.cb.hard_address[0]];
2248 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2249 			}
2250 
2251 			/* Set the node id data */
2252 			if (ql_get_rnid_params(ha,
2253 			    sizeof (port_info->pi_rnid_params.params),
2254 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2255 			    QL_SUCCESS) {
2256 				port_info->pi_rnid_params.status = FC_SUCCESS;
2257 			} else {
2258 				port_info->pi_rnid_params.status = FC_FAILURE;
2259 			}
2260 
2261 			/* Populate T11 FC-HBA details */
2262 			ql_populate_hba_fru_details(ha, port_info);
2263 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2264 			    KM_SLEEP);
2265 			if (ha->pi_attrs != NULL) {
2266 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2267 				    sizeof (fca_port_attrs_t));
2268 			}
2269 		} else {
2270 			port_info->pi_rnid_params.status = FC_FAILURE;
2271 			if (ha->pi_attrs != NULL) {
2272 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2273 				    sizeof (fca_port_attrs_t));
2274 			}
2275 		}
2276 
2277 		/* Generate handle for this FCA. */
2278 		fca_handle = (opaque_t)vha;
2279 
2280 		ADAPTER_STATE_LOCK(ha);
2281 		vha->flags |= FCA_BOUND;
2282 		ADAPTER_STATE_UNLOCK(ha);
2283 		/* Set port's current state. */
2284 		port_info->pi_port_state = vha->state;
2285 	}
2286 
2287 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2288 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2289 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2290 
2291 	return (fca_handle);
2292 }
2293 
2294 /*
2295  * ql_unbind_port
2296  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2297  *
2298  * Input:
2299  *	fca_handle = handle setup by ql_bind_port().
2300  *
2301  * Context:
2302  *	Kernel context.
2303  */
2304 static void
2305 ql_unbind_port(opaque_t fca_handle)
2306 {
2307 	ql_adapter_state_t	*ha;
2308 	ql_tgt_t		*tq;
2309 	uint32_t		flgs;
2310 
2311 	ha = ql_fca_handle_to_state(fca_handle);
2312 	if (ha == NULL) {
2313 		/*EMPTY*/
2314 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2315 		    (void *)fca_handle);
2316 	} else {
2317 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2318 		    ha->vp_index);
2319 
2320 		if (!(ha->flags & FCA_BOUND)) {
2321 			/*EMPTY*/
2322 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2323 			    ha->instance, ha->vp_index);
2324 		} else {
2325 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2326 				if ((tq = ql_loop_id_to_queue(ha,
2327 				    FL_PORT_24XX_HDL)) != NULL) {
2328 					(void) ql_logout_fabric_port(ha, tq);
2329 				}
2330 				(void) ql_vport_control(ha, (uint8_t)
2331 				    (CFG_IST(ha, CFG_CTRL_2425) ?
2332 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2333 				flgs = FCA_BOUND | VP_ENABLED;
2334 			} else {
2335 				flgs = FCA_BOUND;
2336 			}
2337 			ADAPTER_STATE_LOCK(ha);
2338 			ha->flags &= ~flgs;
2339 			ADAPTER_STATE_UNLOCK(ha);
2340 		}
2341 
2342 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2343 		    ha->vp_index);
2344 	}
2345 }
2346 
2347 /*
2348  * ql_init_pkt
2349  *	Initialize FCA portion of packet.
2350  *
2351  * Input:
2352  *	fca_handle = handle setup by ql_bind_port().
2353  *	pkt = pointer to fc_packet.
2354  *
2355  * Returns:
2356  *	FC_SUCCESS - the packet has successfully been initialized.
2357  *	FC_UNBOUND - the fca_handle specified is not bound.
2358  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2359  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2360  *
2361  * Context:
2362  *	Kernel context.
2363  */
2364 /* ARGSUSED */
2365 static int
2366 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2367 {
2368 	ql_adapter_state_t	*ha;
2369 	ql_srb_t		*sp;
2370 
2371 	ha = ql_fca_handle_to_state(fca_handle);
2372 	if (ha == NULL) {
2373 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2374 		    (void *)fca_handle);
2375 		return (FC_UNBOUND);
2376 	}
2377 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2378 
2379 	ASSERT(ha->power_level == PM_LEVEL_D0);
2380 
2381 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2382 	sp->flags = 0;
2383 
2384 	/* init cmd links */
2385 	sp->cmd.base_address = sp;
2386 	sp->cmd.prev = NULL;
2387 	sp->cmd.next = NULL;
2388 	sp->cmd.head = NULL;
2389 
2390 	/* init watchdog links */
2391 	sp->wdg.base_address = sp;
2392 	sp->wdg.prev = NULL;
2393 	sp->wdg.next = NULL;
2394 	sp->wdg.head = NULL;
2395 	sp->pkt = pkt;
2396 	sp->ha = ha;
2397 	sp->magic_number = QL_FCA_BRAND;
2398 
2399 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2400 
2401 	return (FC_SUCCESS);
2402 }
2403 
2404 /*
2405  * ql_un_init_pkt
2406  *	Release all local resources bound to packet.
2407  *
2408  * Input:
2409  *	fca_handle = handle setup by ql_bind_port().
2410  *	pkt = pointer to fc_packet.
2411  *
2412  * Returns:
2413  *	FC_SUCCESS - the packet has successfully been invalidated.
2414  *	FC_UNBOUND - the fca_handle specified is not bound.
2415  *	FC_BADPACKET - the packet has not been initialized or has
2416  *			already been freed by this FCA.
2417  *
2418  * Context:
2419  *	Kernel context.
2420  */
2421 static int
2422 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2423 {
2424 	ql_adapter_state_t *ha;
2425 	int rval;
2426 	ql_srb_t *sp;
2427 
2428 	ha = ql_fca_handle_to_state(fca_handle);
2429 	if (ha == NULL) {
2430 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2431 		    (void *)fca_handle);
2432 		return (FC_UNBOUND);
2433 	}
2434 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2435 
2436 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2437 	ASSERT(sp->magic_number == QL_FCA_BRAND);
2438 
2439 	if (sp->magic_number != QL_FCA_BRAND) {
2440 		EL(ha, "failed, FC_BADPACKET\n");
2441 		rval = FC_BADPACKET;
2442 	} else {
2443 		sp->magic_number = NULL;
2444 
2445 		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
2446 		    SRB_IN_TOKEN_ARRAY)) == 0);
2447 
2448 		rval = FC_SUCCESS;
2449 	}
2450 
2451 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2452 
2453 	return (rval);
2454 }
2455 
2456 /*
2457  * ql_els_send
2458  *	Issue a extended link service request.
2459  *
2460  * Input:
2461  *	fca_handle = handle setup by ql_bind_port().
2462  *	pkt = pointer to fc_packet.
2463  *
2464  * Returns:
2465  *	FC_SUCCESS - the command was successful.
2466  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2467  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2468  *	FC_TRANSPORT_ERROR - a transport error occurred.
2469  *	FC_UNBOUND - the fca_handle specified is not bound.
2470  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2471  *
2472  * Context:
2473  *	Kernel context.
2474  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer;
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/*
	 * Wait for suspension to end.  If the driver stays suspended for
	 * more than 30 seconds, fail the request with FC_TRAN_BUSY so the
	 * transport can retry later.
	 */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Swap source/destination: the response flows back to the caller. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear stale packet-type flags before marking this as an ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch each supported ELS to its handler. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/* RJT/ACC need no work here; report success directly. */
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2658 
2659 /*
2660  * ql_get_cap
2661  *	Export FCA hardware and software capabilities.
2662  *
2663  * Input:
2664  *	fca_handle = handle setup by ql_bind_port().
2665  *	cap = pointer to the capabilities string.
2666  *	ptr = buffer pointer for return capability.
2667  *
2668  * Returns:
2669  *	FC_CAP_ERROR - no such capability
2670  *	FC_CAP_FOUND - the capability was returned and cannot be set
2671  *	FC_CAP_SETTABLE - the capability was returned and can be set
2672  *	FC_UNBOUND - the fca_handle specified is not bound.
2673  *
2674  * Context:
2675  *	Kernel context.
2676  */
2677 static int
2678 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2679 {
2680 	ql_adapter_state_t	*ha;
2681 	int			rval;
2682 	uint32_t		*rptr = (uint32_t *)ptr;
2683 
2684 	ha = ql_fca_handle_to_state(fca_handle);
2685 	if (ha == NULL) {
2686 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2687 		    (void *)fca_handle);
2688 		return (FC_UNBOUND);
2689 	}
2690 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2691 
2692 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2693 		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2694 		    ptr, 8);
2695 		rval = FC_CAP_FOUND;
2696 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2697 		bcopy((void *)&ha->loginparams, ptr,
2698 		    sizeof (la_els_logi_t));
2699 		rval = FC_CAP_FOUND;
2700 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2701 		*rptr = (uint32_t)QL_UB_LIMIT;
2702 		rval = FC_CAP_FOUND;
2703 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2704 
2705 		dev_info_t	*psydip = NULL;
2706 #ifdef __sparc
2707 		/*
2708 		 * Disable streaming for certain 2 chip adapters
2709 		 * below Psycho to handle Psycho byte hole issue.
2710 		 */
2711 		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2712 		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
2713 			for (psydip = ddi_get_parent(ha->dip); psydip;
2714 			    psydip = ddi_get_parent(psydip)) {
2715 				if (strcmp(ddi_driver_name(psydip),
2716 				    "pcipsy") == 0) {
2717 					break;
2718 				}
2719 			}
2720 		}
2721 #endif	/* __sparc */
2722 
2723 		if (psydip) {
2724 			*rptr = (uint32_t)FC_NO_STREAMING;
2725 			EL(ha, "No Streaming\n");
2726 		} else {
2727 			*rptr = (uint32_t)FC_ALLOW_STREAMING;
2728 			EL(ha, "Allow Streaming\n");
2729 		}
2730 		rval = FC_CAP_FOUND;
2731 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2732 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2733 			*rptr = (uint32_t)CHAR_TO_SHORT(
2734 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
2735 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
2736 		} else {
2737 			*rptr = (uint32_t)CHAR_TO_SHORT(
2738 			    ha->init_ctrl_blk.cb.max_frame_length[0],
2739 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
2740 		}
2741 		rval = FC_CAP_FOUND;
2742 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2743 		*rptr = FC_RESET_RETURN_ALL;
2744 		rval = FC_CAP_FOUND;
2745 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2746 		*rptr = FC_NO_DVMA_SPACE;
2747 		rval = FC_CAP_FOUND;
2748 	} else {
2749 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2750 		rval = FC_CAP_ERROR;
2751 	}
2752 
2753 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2754 
2755 	return (rval);
2756 }
2757 
2758 /*
2759  * ql_set_cap
2760  *	Allow the FC Transport to set FCA capabilities if possible.
2761  *
2762  * Input:
2763  *	fca_handle = handle setup by ql_bind_port().
2764  *	cap = pointer to the capabilities string.
2765  *	ptr = buffer pointer for capability.
2766  *
2767  * Returns:
2768  *	FC_CAP_ERROR - no such capability
2769  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2770  *	FC_CAP_SETTABLE - the capability was successfully set.
2771  *	FC_UNBOUND - the fca_handle specified is not bound.
2772  *
2773  * Context:
2774  *	Kernel context.
2775  */
2776 /* ARGSUSED */
2777 static int
2778 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2779 {
2780 	ql_adapter_state_t	*ha;
2781 	int			rval;
2782 
2783 	ha = ql_fca_handle_to_state(fca_handle);
2784 	if (ha == NULL) {
2785 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2786 		    (void *)fca_handle);
2787 		return (FC_UNBOUND);
2788 	}
2789 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2790 
2791 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2792 		rval = FC_CAP_FOUND;
2793 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2794 		rval = FC_CAP_FOUND;
2795 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2796 		rval = FC_CAP_FOUND;
2797 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2798 		rval = FC_CAP_FOUND;
2799 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2800 		rval = FC_CAP_FOUND;
2801 	} else {
2802 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2803 		rval = FC_CAP_ERROR;
2804 	}
2805 
2806 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2807 
2808 	return (rval);
2809 }
2810 
2811 /*
2812  * ql_getmap
2813  *	Request of Arbitrated Loop (AL-PA) map.
2814  *
2815  * Input:
2816  *	fca_handle = handle setup by ql_bind_port().
2817  *	mapbuf= buffer pointer for map.
2818  *
2819  * Returns:
2820  *	FC_OLDPORT - the specified port is not operating in loop mode.
2821  *	FC_OFFLINE - the specified port is not online.
2822  *	FC_NOMAP - there is no loop map available for this port.
2823  *	FC_UNBOUND - the fca_handle specified is not bound.
2824  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2825  *
2826  * Context:
2827  *	Kernel context.
2828  */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer;
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/* Fill in the fixed map header before fetching the AL-PA list. */
	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/*
	 * Wait for suspension to end; give up with FC_TRAN_BUSY after
	 * 30 seconds so the transport can retry.
	 */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this an
		 * offline condition, let's wait a few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
2902 
2903 /*
2904  * ql_transport
2905  *	Issue an I/O request. Handles all regular requests.
2906  *
2907  * Input:
2908  *	fca_handle = handle setup by ql_bind_port().
2909  *	pkt = pointer to fc_packet.
2910  *
2911  * Returns:
2912  *	FC_SUCCESS - the packet was accepted for transport.
2913  *	FC_TRANSPORT_ERROR - a transport error occurred.
2914  *	FC_BADPACKET - the packet to be transported had not been
2915  *			initialized by this FCA.
2916  *	FC_UNBOUND - the fca_handle specified is not bound.
2917  *
2918  * Context:
2919  *	Kernel context.
2920  */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif
	/*
	 * NOTE(review): a command on a suspended adapter is presumably
	 * only legitimate from the crash-dump path (FC_TRAN_DUMPING) --
	 * the ASSERT documents that expectation rather than enforcing
	 * it in production builds.
	 */
	if (ha->flags & ADAPTER_SUSPENDED) {
		ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
	}

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/*
	 * Reset SRB flags.  The SRB is reused across transport calls, so
	 * every per-command state bit must be cleared before dispatch.
	 */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/*
	 * Pre-build the response frame header by mirroring the command
	 * header (source/destination IDs swapped, status routing control).
	 */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Dispatch on the frame's routing control / FC-4 type. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			/* IP-over-FC datagram. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			/* Generic services (e.g. name server) request. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			/* Unsupported routing control: reject locally. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
3013 
3014 /*
3015  * ql_ub_alloc
3016  *	Allocate buffers for unsolicited exchanges.
3017  *
3018  * Input:
3019  *	fca_handle = handle setup by ql_bind_port().
3020  *	tokens = token array for each buffer.
3021  *	size = size of each buffer.
3022  *	count = pointer to number of buffers.
3023  *	type = the FC-4 type the buffers are reserved for.
3024  *		1 = Extended Link Services, 5 = LLC/SNAP
3025  *
3026  * Returns:
3027  *	FC_FAILURE - buffers could not be allocated.
3028  *	FC_TOOMANY - the FCA could not allocate the requested
3029  *			number of buffers.
3030  *	FC_SUCCESS - unsolicited buffers were allocated.
3031  *	FC_UNBOUND - the fca_handle specified is not bound.
3032  *
3033  * Context:
3034  *	Kernel context.
3035  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse allocation unless the adapter is fully powered (D0). */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check the count.  Setting rval here also skips the allocation
	 * loop below since its condition requires rval == FC_SUCCESS.
	 */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/*
		 * Allocate all memory needed.  NOTE(review): with KM_SLEEP
		 * kmem_zalloc() blocks rather than returning NULL, so the
		 * NULL checks below are defensive dead code -- harmless,
		 * kept as-is.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				/*
				 * IP (SNAP) buffers need DMA-able memory;
				 * DMA byte order is platform dependent.
				 */
				if (type == FC_TYPE_IS8802_SNAP) {
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP buffers use plain kmem. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/*
			 * Find next available slot.  The slot index doubles
			 * as the token handed back to the transport.  The
			 * limit check above (under ADAPTER_STATE_LOCK)
			 * guarantees a free slot exists.
			 */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * IP buffer.  If any SNAP buffers were installed (and the chip
	 * family supports IP-over-FC), enable IP and, on first use,
	 * program the IP initialization control block before handing
	 * receive buffers to the ISP.
	 */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/* 24xx chips use the cb24 control block. */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					/* Grow the container count only. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					/* Grow the container count only. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Hand the new receive buffers to the ISP. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3257 
3258 /*
3259  * ql_ub_free
3260  *	Free unsolicited buffers.
3261  *
3262  * Input:
3263  *	fca_handle = handle setup by ql_bind_port().
3264  *	count = number of buffers.
3265  *	tokens = token array for each buffer.
3266  *
3267  * Returns:
3268  *	FC_SUCCESS - the requested buffers have been freed.
3269  *	FC_UNBOUND - the fca_handle specified is not bound.
3270  *	FC_UB_BADTOKEN - an invalid token was encountered.
3271  *			 No buffers have been released.
3272  *
3273  * Context:
3274  *	Kernel context.
3275  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.  NOTE(review): unlike ql_ub_release(),
	 * tokens are validated and freed one at a time, so buffers freed
	 * before a bad token is hit stay freed.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/*
		 * Check the state of the unsolicited buffer.  Mark it
		 * free-requested so other paths stop handing it out.
		 */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll until the buffer is back in the FCA and not held by
		 * a callback or the firmware.  Both locks are dropped
		 * around the delay so the owning path can make progress;
		 * flags are re-read each iteration under QL_UB_LOCK.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		/* Detach from the token array, then release the memory. */
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3357 
3358 /*
3359  * ql_ub_release
3360  *	Release unsolicited buffers from FC Transport
3361  *	to FCA for future use.
3362  *
3363  * Input:
3364  *	fca_handle = handle setup by ql_bind_port().
3365  *	count = number of buffers.
3366  *	tokens = token array for each buffer.
3367  *
3368  * Returns:
3369  *	FC_SUCCESS - the requested buffers have been released.
3370  *	FC_UNBOUND - the fca_handle specified is not bound.
3371  *	FC_UB_BADTOKEN - an invalid token was encountered.
3372  *		No buffers have been released.
3373  *
3374  * Context:
3375  *	Kernel context.
3376  */
3377 static int
3378 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3379 {
3380 	ql_adapter_state_t	*ha;
3381 	ql_srb_t		*sp;
3382 	uint32_t		index;
3383 	uint64_t		ub_array_index;
3384 	int			rval = FC_SUCCESS;
3385 	int			ub_ip_updated = FALSE;
3386 
3387 	/* Check handle. */
3388 	ha = ql_fca_handle_to_state(fca_handle);
3389 	if (ha == NULL) {
3390 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3391 		    (void *)fca_handle);
3392 		return (FC_UNBOUND);
3393 	}
3394 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3395 
3396 	/* Acquire adapter state lock. */
3397 	ADAPTER_STATE_LOCK(ha);
3398 	QL_UB_LOCK(ha);
3399 
3400 	/* Check all returned tokens. */
3401 	for (index = 0; index < count; index++) {
3402 		/* Check the token range. */
3403 		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3404 			EL(ha, "failed, FC_UB_BADTOKEN\n");
3405 			rval = FC_UB_BADTOKEN;
3406 			break;
3407 		}
3408 
3409 		/* Check the unsolicited buffer array. */
3410 		if (ha->ub_array[ub_array_index] == NULL) {
3411 			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3412 			rval = FC_UB_BADTOKEN;
3413 			break;
3414 		}
3415 
3416 		/* Check the state of the unsolicited buffer. */
3417 		sp = ha->ub_array[ub_array_index]->ub_fca_private;
3418 		if (sp->flags & SRB_UB_IN_FCA) {
3419 			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3420 			rval = FC_UB_BADTOKEN;
3421 			break;
3422 		}
3423 	}
3424 
3425 	/* If all tokens checkout, release the buffers. */
3426 	if (rval == FC_SUCCESS) {
3427 		/* Check all returned tokens. */
3428 		for (index = 0; index < count; index++) {
3429 			fc_unsol_buf_t	*ubp;
3430 
3431 			ub_array_index = tokens[index];
3432 			ubp = ha->ub_array[ub_array_index];
3433 			sp = ubp->ub_fca_private;
3434 
3435 			ubp->ub_resp_flags = 0;
3436 			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3437 			sp->flags |= SRB_UB_IN_FCA;
3438 
3439 			/* IP buffer. */
3440 			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3441 				ub_ip_updated = TRUE;
3442 			}
3443 		}
3444 	}
3445 
3446 	QL_UB_UNLOCK(ha);
3447 	/* Release adapter state lock. */
3448 	ADAPTER_STATE_UNLOCK(ha);
3449 
3450 	/*
3451 	 * XXX: We should call ql_isp_rcvbuf() to return a
3452 	 * buffer to ISP only if the number of buffers fall below
3453 	 * the low water mark.
3454 	 */
3455 	if (ub_ip_updated) {
3456 		ql_isp_rcvbuf(ha);
3457 	}
3458 
3459 	if (rval != FC_SUCCESS) {
3460 		EL(ha, "failed, rval = %xh\n", rval);
3461 	} else {
3462 		/*EMPTY*/
3463 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3464 	}
3465 	return (rval);
3466 }
3467 
3468 /*
3469  * ql_abort
3470  *	Abort a packet.
3471  *
3472  * Input:
3473  *	fca_handle = handle setup by ql_bind_port().
3474  *	pkt = pointer to fc_packet.
3475  *	flags = KM_SLEEP flag.
3476  *
3477  * Returns:
3478  *	FC_SUCCESS - the packet has successfully aborted.
3479  *	FC_ABORTED - the packet has successfully aborted.
3480  *	FC_ABORTING - the packet is being aborted.
3481  *	FC_ABORT_FAILED - the packet could not be aborted.
3482  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3483  *		to abort the packet.
3484  *	FC_BADEXCHANGE - no packet found.
3485  *	FC_UNBOUND - the fca_handle specified is not bound.
3486  *
3487  * Context:
3488  *	Kernel context.
3489  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	ASSERT(pha->power_level == PM_LEVEL_D0);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target or loop down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/*
	 * Acquire target queue lock.  Lock order: DEVICE_QUEUE_LOCK
	 * before REQUEST_RING_LOCK; both must be held to examine the
	 * SRB's started state consistently.
	 */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Check pending queue for command.  `sp' is reused as the
		 * scan cursor; it ends NULL if the SRB was not found.
		 */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			/* Never reached the ISP: complete it as aborted. */
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command is with the ISP.  Invalidate its request-ring
		 * entry (if still present) and then ask the firmware to
		 * abort the outstanding exchange.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3637 
3638 /*
3639  * ql_reset
3640  *	Reset link or hardware.
3641  *
3642  * Input:
3643  *	fca_handle = handle setup by ql_bind_port().
3644  *	cmd = reset type command.
3645  *
3646  * Returns:
3647  *	FC_SUCCESS - reset has successfully finished.
3648  *	FC_UNBOUND - the fca_handle specified is not bound.
3649  *	FC_FAILURE - reset failed.
3650  *
3651  * Context:
3652  *	Kernel context.
3653  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	switch (cmd) {
	case FC_FCA_CORE:
		/*
		 * dump firmware core if specified; only meaningful on the
		 * physical port (vp_index 0).
		 */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the link only if it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/*
		 * Dump firmware core first if specified.  A virtual port
		 * settles for a loop reset (skipped when already down).
		 */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/*
		 * Free up all unsolicited buffers: report a
		 * reset-requested state change so the transport returns
		 * them before the hardware is reset.
		 */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits of the port state. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Let the task daemon deliver the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3768 
3769 /*
3770  * ql_port_manage
3771  *	Perform port management or diagnostics.
3772  *
3773  * Input:
3774  *	fca_handle = handle setup by ql_bind_port().
3775  *	cmd = pointer to command structure.
3776  *
3777  * Returns:
3778  *	FC_SUCCESS - the request completed successfully.
3779  *	FC_FAILURE - the request did not complete successfully.
3780  *	FC_UNBOUND - the fca_handle specified is not bound.
3781  *
3782  * Context:
3783  *	Kernel context.
3784  */
3785 static int
3786 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3787 {
3788 	clock_t			timer;
3789 	uint16_t		index;
3790 	uint32_t		*bp;
3791 	port_id_t		d_id;
3792 	ql_link_t		*link;
3793 	ql_adapter_state_t	*ha, *pha;
3794 	ql_tgt_t		*tq;
3795 	dma_mem_t		buffer_xmt, buffer_rcv;
3796 	size_t			length;
3797 	uint32_t		cnt;
3798 	char			buf[80];
3799 	lbp_t			*lb;
3800 	ql_mbx_data_t		mr;
3801 	app_mbx_cmd_t		*mcp;
3802 	int			i0;
3803 	uint8_t			*bptr;
3804 	int			rval2, rval = FC_SUCCESS;
3805 	uint32_t		opcode;
3806 
3807 	ha = ql_fca_handle_to_state(fca_handle);
3808 	if (ha == NULL) {
3809 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3810 		    (void *)fca_handle);
3811 		return (FC_UNBOUND);
3812 	}
3813 	pha = ha->pha;
3814 
3815 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3816 	    cmd->pm_cmd_code);
3817 
3818 	ASSERT(pha->power_level == PM_LEVEL_D0);
3819 
3820 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3821 
3822 	/*
3823 	 * Wait for all outstanding commands to complete
3824 	 */
3825 	index = (uint16_t)ql_wait_outstanding(ha);
3826 
3827 	if (index != MAX_OUTSTANDING_COMMANDS) {
3828 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3829 		ql_restart_queues(ha);
3830 		EL(ha, "failed, FC_TRAN_BUSY\n");
3831 		return (FC_TRAN_BUSY);
3832 	}
3833 
3834 	switch (cmd->pm_cmd_code) {
3835 	case FC_PORT_BYPASS:
3836 		d_id.b24 = *cmd->pm_cmd_buf;
3837 		tq = ql_d_id_to_queue(ha, d_id);
3838 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3839 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3840 			rval = FC_FAILURE;
3841 		}
3842 		break;
3843 	case FC_PORT_UNBYPASS:
3844 		d_id.b24 = *cmd->pm_cmd_buf;
3845 		tq = ql_d_id_to_queue(ha, d_id);
3846 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3847 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3848 			rval = FC_FAILURE;
3849 		}
3850 		break;
3851 	case FC_PORT_GET_FW_REV:
3852 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3853 		    pha->fw_minor_version, pha->fw_subminor_version);
3854 		length = strlen(buf) + 1;
3855 		if (cmd->pm_data_len < length) {
3856 			cmd->pm_data_len = length;
3857 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3858 			rval = FC_FAILURE;
3859 		} else {
3860 			(void) strcpy(cmd->pm_data_buf, buf);
3861 		}
3862 		break;
3863 
3864 	case FC_PORT_GET_FCODE_REV: {
3865 		caddr_t		fcode_ver_buf = NULL;
3866 
3867 		i0 = 0;
3868 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3869 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3870 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3871 		    (caddr_t)&fcode_ver_buf, &i0);
3872 		length = (uint_t)i0;
3873 
3874 		if (rval2 != DDI_PROP_SUCCESS) {
3875 			EL(ha, "failed, getting version = %xh\n", rval2);
3876 			length = 20;
3877 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3878 			if (fcode_ver_buf != NULL) {
3879 				(void) sprintf(fcode_ver_buf,
3880 				    "NO FCODE FOUND");
3881 			}
3882 		}
3883 
3884 		if (cmd->pm_data_len < length) {
3885 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3886 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3887 			cmd->pm_data_len = length;
3888 			rval = FC_FAILURE;
3889 		} else if (fcode_ver_buf != NULL) {
3890 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3891 			    length);
3892 		}
3893 
3894 		if (fcode_ver_buf != NULL) {
3895 			kmem_free(fcode_ver_buf, length);
3896 		}
3897 		break;
3898 	}
3899 
3900 	case FC_PORT_GET_DUMP:
3901 		QL_DUMP_LOCK(pha);
3902 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3903 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3904 			    "length=%lxh\n", cmd->pm_data_len);
3905 			cmd->pm_data_len = pha->risc_dump_size;
3906 			rval = FC_FAILURE;
3907 		} else if (pha->ql_dump_state & QL_DUMPING) {
3908 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3909 			rval = FC_TRAN_BUSY;
3910 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3911 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3912 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3913 		} else {
3914 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3915 			rval = FC_FAILURE;
3916 		}
3917 		QL_DUMP_UNLOCK(pha);
3918 		break;
3919 	case FC_PORT_FORCE_DUMP:
3920 		PORTMANAGE_LOCK(ha);
3921 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3922 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3923 			rval = FC_FAILURE;
3924 		}
3925 		PORTMANAGE_UNLOCK(ha);
3926 		break;
3927 	case FC_PORT_DOWNLOAD_FW:
3928 		PORTMANAGE_LOCK(ha);
3929 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3930 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3931 			    (uint32_t)cmd->pm_data_len,
3932 			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
3933 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3934 				rval = FC_FAILURE;
3935 			}
3936 			ql_reset_chip(ha);
3937 			(void) ql_abort_isp(ha);
3938 		} else {
3939 			/* Save copy of the firmware. */
3940 			if (pha->risc_code != NULL) {
3941 				kmem_free(pha->risc_code, pha->risc_code_size);
3942 				pha->risc_code = NULL;
3943 				pha->risc_code_size = 0;
3944 			}
3945 
3946 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3947 			    KM_SLEEP);
3948 			if (pha->risc_code != NULL) {
3949 				pha->risc_code_size =
3950 				    (uint32_t)cmd->pm_data_len;
3951 				bcopy(cmd->pm_data_buf, pha->risc_code,
3952 				    cmd->pm_data_len);
3953 
3954 				/* Do abort to force reload. */
3955 				ql_reset_chip(ha);
3956 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3957 					kmem_free(pha->risc_code,
3958 					    pha->risc_code_size);
3959 					pha->risc_code = NULL;
3960 					pha->risc_code_size = 0;
3961 					ql_reset_chip(ha);
3962 					(void) ql_abort_isp(ha);
3963 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3964 					    " FC_FAILURE\n");
3965 					rval = FC_FAILURE;
3966 				}
3967 			}
3968 		}
3969 		PORTMANAGE_UNLOCK(ha);
3970 		break;
3971 	case FC_PORT_GET_DUMP_SIZE:
3972 		bp = (uint32_t *)cmd->pm_data_buf;
3973 		*bp = pha->risc_dump_size;
3974 		break;
3975 	case FC_PORT_DIAG:
3976 		/*
3977 		 * Prevents concurrent diags
3978 		 */
3979 		PORTMANAGE_LOCK(ha);
3980 
3981 		/* Wait for suspension to end. */
3982 		for (timer = 0; timer < 3000 &&
3983 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3984 			ql_delay(ha, 10000);
3985 		}
3986 
3987 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3988 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3989 			rval = FC_TRAN_BUSY;
3990 			PORTMANAGE_UNLOCK(ha);
3991 			break;
3992 		}
3993 
3994 		switch (cmd->pm_cmd_flags) {
3995 		case QL_DIAG_EXEFMW:
3996 			if (ql_start_firmware(ha) != QL_SUCCESS) {
3997 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
3998 				rval = FC_FAILURE;
3999 			}
4000 			break;
4001 		case QL_DIAG_CHKCMDQUE:
4002 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4003 			    i0++) {
4004 				cnt += (pha->outstanding_cmds[i0] != NULL);
4005 			}
4006 			if (cnt != 0) {
4007 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4008 				    "FC_FAILURE\n");
4009 				rval = FC_FAILURE;
4010 			}
4011 			break;
4012 		case QL_DIAG_FMWCHKSUM:
4013 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4014 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4015 				    "FC_FAILURE\n");
4016 				rval = FC_FAILURE;
4017 			}
4018 			break;
4019 		case QL_DIAG_SLFTST:
4020 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4021 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4022 				rval = FC_FAILURE;
4023 			}
4024 			ql_reset_chip(ha);
4025 			(void) ql_abort_isp(ha);
4026 			break;
4027 		case QL_DIAG_REVLVL:
4028 			if (cmd->pm_stat_len <
4029 			    sizeof (ql_adapter_revlvl_t)) {
4030 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4031 				    "slen=%lxh, rlvllen=%lxh\n",
4032 				    cmd->pm_stat_len,
4033 				    sizeof (ql_adapter_revlvl_t));
4034 				rval = FC_NOMEM;
4035 			} else {
4036 				bcopy((void *)&(pha->adapter_stats->revlvl),
4037 				    cmd->pm_stat_buf,
4038 				    (size_t)cmd->pm_stat_len);
4039 				cmd->pm_stat_len =
4040 				    sizeof (ql_adapter_revlvl_t);
4041 			}
4042 			break;
4043 		case QL_DIAG_LPBMBX:
4044 
4045 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4046 				EL(ha, "failed, QL_DIAG_LPBMBX "
4047 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4048 				    "reqd=%lxh\n", cmd->pm_data_len,
4049 				    sizeof (struct app_mbx_cmd));
4050 				rval = FC_INVALID_REQUEST;
4051 				break;
4052 			}
4053 			/*
4054 			 * Don't do the wrap test on a 2200 when the
4055 			 * firmware is running.
4056 			 */
4057 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4058 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4059 				mr.mb[1] = mcp->mb[1];
4060 				mr.mb[2] = mcp->mb[2];
4061 				mr.mb[3] = mcp->mb[3];
4062 				mr.mb[4] = mcp->mb[4];
4063 				mr.mb[5] = mcp->mb[5];
4064 				mr.mb[6] = mcp->mb[6];
4065 				mr.mb[7] = mcp->mb[7];
4066 
4067 				bcopy(&mr.mb[0], &mr.mb[10],
4068 				    sizeof (uint16_t) * 8);
4069 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4070 					EL(ha, "failed, QL_DIAG_LPBMBX "
4071 					    "FC_FAILURE\n");
4072 					rval = FC_FAILURE;
4073 					break;
4074 				}
4075 				if (mr.mb[i0] != mr.mb[i0 + 10]) {
4076 					EL(ha, "failed, QL_DIAG_LPBMBX "
4077 					    "FC_FAILURE-2\n");
4078 
4079 					(void) ql_flash_errlog(ha,
4080 					    FLASH_ERRLOG_ISP_ERR, 0,
4081 					    RD16_IO_REG(ha, hccr),
4082 					    RD16_IO_REG(ha, istatus));
4083 
4084 					rval = FC_FAILURE;
4085 					break;
4086 				}
4087 			}
4088 			(void) ql_abort_isp(ha);
4089 			break;
4090 		case QL_DIAG_LPBDTA:
4091 			/*
4092 			 * For loopback data, we receive the
4093 			 * data back in pm_stat_buf. This provides
4094 			 * the user an opportunity to compare the
4095 			 * transmitted and received data.
4096 			 *
4097 			 * NB: lb->options are:
4098 			 *	0 --> Ten bit loopback
4099 			 *	1 --> One bit loopback
4100 			 *	2 --> External loopback
4101 			 */
4102 			if (cmd->pm_data_len > 65536) {
4103 				rval = FC_TOOMANY;
4104 				EL(ha, "failed, QL_DIAG_LPBDTA "
4105 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4106 				break;
4107 			}
4108 			if (ql_get_dma_mem(ha, &buffer_xmt,
4109 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4110 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4111 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4112 				rval = FC_NOMEM;
4113 				break;
4114 			}
4115 			if (ql_get_dma_mem(ha, &buffer_rcv,
4116 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4117 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4118 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4119 				rval = FC_NOMEM;
4120 				break;
4121 			}
4122 			ddi_rep_put8(buffer_xmt.acc_handle,
4123 			    (uint8_t *)cmd->pm_data_buf,
4124 			    (uint8_t *)buffer_xmt.bp,
4125 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4126 
4127 			/* 22xx's adapter must be in loop mode for test. */
4128 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4129 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4130 				if (ha->flags & POINT_TO_POINT ||
4131 				    (ha->task_daemon_flags & LOOP_DOWN &&
4132 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4133 					cnt = *bptr;
4134 					*bptr = (uint8_t)
4135 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4136 					(void) ql_abort_isp(ha);
4137 					*bptr = (uint8_t)cnt;
4138 				}
4139 			}
4140 
4141 			/* Shutdown IP. */
4142 			if (pha->flags & IP_INITIALIZED) {
4143 				(void) ql_shutdown_ip(pha);
4144 			}
4145 
4146 			lb = (lbp_t *)cmd->pm_cmd_buf;
4147 			lb->transfer_count =
4148 			    (uint32_t)cmd->pm_data_len;
4149 			lb->transfer_segment_count = 0;
4150 			lb->receive_segment_count = 0;
4151 			lb->transfer_data_address =
4152 			    buffer_xmt.cookie.dmac_address;
4153 			lb->receive_data_address =
4154 			    buffer_rcv.cookie.dmac_address;
4155 
4156 			if ((lb->options & 7) == 2 &&
4157 			    pha->task_daemon_flags &
4158 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4159 				/* Loop must be up for external */
4160 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4161 				rval = FC_TRAN_BUSY;
4162 			} else if (ql_loop_back(ha, lb,
4163 			    buffer_xmt.cookie.dmac_notused,
4164 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4165 				bzero((void *)cmd->pm_stat_buf,
4166 				    cmd->pm_stat_len);
4167 				ddi_rep_get8(buffer_rcv.acc_handle,
4168 				    (uint8_t *)cmd->pm_stat_buf,
4169 				    (uint8_t *)buffer_rcv.bp,
4170 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4171 			} else {
4172 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4173 				rval = FC_FAILURE;
4174 			}
4175 
4176 			ql_free_phys(ha, &buffer_xmt);
4177 			ql_free_phys(ha, &buffer_rcv);
4178 
4179 			/* Needed to recover the f/w */
4180 			(void) ql_abort_isp(ha);
4181 
4182 			/* Restart IP if it was shutdown. */
4183 			if (pha->flags & IP_ENABLED &&
4184 			    !(pha->flags & IP_INITIALIZED)) {
4185 				(void) ql_initialize_ip(pha);
4186 				ql_isp_rcvbuf(pha);
4187 			}
4188 
4189 			break;
4190 		case QL_DIAG_ECHO: {
4191 			/*
4192 			 * issue an echo command with a user supplied
4193 			 * data pattern and destination address
4194 			 */
4195 			echo_t		echo;		/* temp echo struct */
4196 
4197 			/* Setup echo cmd & adjust for platform */
4198 			opcode = QL_ECHO_CMD;
4199 			BIG_ENDIAN_32(&opcode);
4200 
4201 			/*
4202 			 * due to limitations in the ql
4203 			 * firmaware the echo data field is
4204 			 * limited to 220
4205 			 */
4206 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4207 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4208 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4209 				    "cmdl1=%lxh, statl2=%lxh\n",
4210 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4211 				rval = FC_TOOMANY;
4212 				break;
4213 			}
4214 
4215 			/*
4216 			 * the input data buffer has the user
4217 			 * supplied data pattern.  The "echoed"
4218 			 * data will be DMAed into the output
4219 			 * data buffer.  Therefore the length
4220 			 * of the output buffer must be equal
4221 			 * to or greater then the input buffer
4222 			 * length
4223 			 */
4224 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4225 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4226 				    " cmdl1=%lxh, statl2=%lxh\n",
4227 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4228 				rval = FC_TOOMANY;
4229 				break;
4230 			}
4231 			/* add four bytes for the opcode */
4232 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4233 
4234 			/*
4235 			 * are we 32 or 64 bit addressed???
4236 			 * We need to get the appropriate
4237 			 * DMA and set the command options;
4238 			 * 64 bit (bit 6) or 32 bit
4239 			 * (no bit 6) addressing.
4240 			 * while we are at it lets ask for
4241 			 * real echo (bit 15)
4242 			 */
4243 			echo.options = BIT_15;
4244 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4245 				echo.options = (uint16_t)
4246 				    (echo.options | BIT_6);
4247 			}
4248 
4249 			/*
4250 			 * Set up the DMA mappings for the
4251 			 * output and input data buffers.
4252 			 * First the output buffer
4253 			 */
4254 			if (ql_get_dma_mem(ha, &buffer_xmt,
4255 			    (uint32_t)(cmd->pm_data_len + 4),
4256 			    LITTLE_ENDIAN_DMA,
4257 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4258 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4259 				rval = FC_NOMEM;
4260 				break;
4261 			}
4262 			echo.transfer_data_address = buffer_xmt.cookie;
4263 
4264 			/* Next the input buffer */
4265 			if (ql_get_dma_mem(ha, &buffer_rcv,
4266 			    (uint32_t)(cmd->pm_data_len + 4),
4267 			    LITTLE_ENDIAN_DMA,
4268 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4269 				/*
4270 				 * since we could not allocate
4271 				 * DMA space for the input
4272 				 * buffer we need to clean up
4273 				 * by freeing the DMA space
4274 				 * we allocated for the output
4275 				 * buffer
4276 				 */
4277 				ql_free_phys(ha, &buffer_xmt);
4278 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4279 				rval = FC_NOMEM;
4280 				break;
4281 			}
4282 			echo.receive_data_address = buffer_rcv.cookie;
4283 
4284 			/*
4285 			 * copy the 4 byte ECHO op code to the
4286 			 * allocated DMA space
4287 			 */
4288 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4289 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4290 
4291 			/*
4292 			 * copy the user supplied data to the
4293 			 * allocated DMA space
4294 			 */
4295 			ddi_rep_put8(buffer_xmt.acc_handle,
4296 			    (uint8_t *)cmd->pm_cmd_buf,
4297 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4298 			    DDI_DEV_AUTOINCR);
4299 
4300 			/* Shutdown IP. */
4301 			if (pha->flags & IP_INITIALIZED) {
4302 				(void) ql_shutdown_ip(pha);
4303 			}
4304 
4305 			/* send the echo */
4306 			if (ql_echo(ha, &echo) == QL_SUCCESS) {
4307 				ddi_rep_put8(buffer_rcv.acc_handle,
4308 				    (uint8_t *)buffer_rcv.bp + 4,
4309 				    (uint8_t *)cmd->pm_stat_buf,
4310 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4311 			} else {
4312 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4313 				rval = FC_FAILURE;
4314 			}
4315 
4316 			/* Restart IP if it was shutdown. */
4317 			if (pha->flags & IP_ENABLED &&
4318 			    !(pha->flags & IP_INITIALIZED)) {
4319 				(void) ql_initialize_ip(pha);
4320 				ql_isp_rcvbuf(pha);
4321 			}
4322 			/* free up our DMA buffers */
4323 			ql_free_phys(ha, &buffer_xmt);
4324 			ql_free_phys(ha, &buffer_rcv);
4325 			break;
4326 		}
4327 		default:
4328 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4329 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4330 			rval = FC_INVALID_REQUEST;
4331 			break;
4332 		}
4333 		PORTMANAGE_UNLOCK(ha);
4334 		break;
4335 	case FC_PORT_LINK_STATE:
4336 		/* Check for name equal to null. */
4337 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4338 		    index++) {
4339 			if (cmd->pm_cmd_buf[index] != 0) {
4340 				break;
4341 			}
4342 		}
4343 
4344 		/* If name not null. */
4345 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4346 			/* Locate device queue. */
4347 			tq = NULL;
4348 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4349 			    tq == NULL; index++) {
4350 				for (link = ha->dev[index].first; link != NULL;
4351 				    link = link->next) {
4352 					tq = link->base_address;
4353 
4354 					if (bcmp((void *)&tq->port_name[0],
4355 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4356 						break;
4357 					} else {
4358 						tq = NULL;
4359 					}
4360 				}
4361 			}
4362 
4363 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4364 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4365 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4366 			} else {
4367 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4368 				    FC_STATE_OFFLINE;
4369 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4370 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4371 			}
4372 		} else {
4373 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4374 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4375 		}
4376 		break;
4377 	case FC_PORT_INITIALIZE:
4378 		if (cmd->pm_cmd_len >= 8) {
4379 			tq = NULL;
4380 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4381 			    tq == NULL; index++) {
4382 				for (link = ha->dev[index].first; link != NULL;
4383 				    link = link->next) {
4384 					tq = link->base_address;
4385 
4386 					if (bcmp((void *)&tq->port_name[0],
4387 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4388 						if (!VALID_DEVICE_ID(ha,
4389 						    tq->loop_id)) {
4390 							tq = NULL;
4391 						}
4392 						break;
4393 					} else {
4394 						tq = NULL;
4395 					}
4396 				}
4397 			}
4398 
4399 			if (tq == NULL || ql_target_reset(ha, tq,
4400 			    ha->loop_reset_delay) != QL_SUCCESS) {
4401 				EL(ha, "failed, FC_PORT_INITIALIZE "
4402 				    "FC_FAILURE\n");
4403 				rval = FC_FAILURE;
4404 			}
4405 		} else {
4406 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4407 			    "clen=%lxh\n", cmd->pm_cmd_len);
4408 
4409 			rval = FC_FAILURE;
4410 		}
4411 		break;
4412 	case FC_PORT_RLS:
4413 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4414 			EL(ha, "failed, buffer size passed: %lxh, "
4415 			    "req: %lxh\n", cmd->pm_data_len,
4416 			    (sizeof (fc_rls_acc_t)));
4417 			rval = FC_FAILURE;
4418 		} else if (LOOP_NOT_READY(pha)) {
4419 			EL(ha, "loop NOT ready\n");
4420 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4421 		} else if (ql_get_link_status(ha, ha->loop_id,
4422 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4423 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4424 			rval = FC_FAILURE;
4425 #ifdef _BIG_ENDIAN
4426 		} else {
4427 			fc_rls_acc_t		*rls;
4428 
4429 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4430 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4431 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4432 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4433 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4434 #endif /* _BIG_ENDIAN */
4435 		}
4436 		break;
4437 	case FC_PORT_GET_NODE_ID:
4438 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4439 		    cmd->pm_data_buf) != QL_SUCCESS) {
4440 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4441 			rval = FC_FAILURE;
4442 		}
4443 		break;
4444 	case FC_PORT_SET_NODE_ID:
4445 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4446 		    cmd->pm_data_buf) != QL_SUCCESS) {
4447 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4448 			rval = FC_FAILURE;
4449 		}
4450 		break;
4451 	case FC_PORT_DOWNLOAD_FCODE:
4452 		PORTMANAGE_LOCK(ha);
4453 		if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
4454 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4455 			    (uint32_t)cmd->pm_data_len);
4456 		} else {
4457 			if (cmd->pm_data_buf[0] == 4 &&
4458 			    cmd->pm_data_buf[8] == 0 &&
4459 			    cmd->pm_data_buf[9] == 0x10 &&
4460 			    cmd->pm_data_buf[10] == 0 &&
4461 			    cmd->pm_data_buf[11] == 0) {
4462 				rval = ql_24xx_load_flash(ha,
4463 				    (uint8_t *)cmd->pm_data_buf,
4464 				    (uint32_t)cmd->pm_data_len,
4465 				    ha->flash_fw_addr << 2);
4466 			} else {
4467 				rval = ql_24xx_load_flash(ha,
4468 				    (uint8_t *)cmd->pm_data_buf,
4469 				    (uint32_t)cmd->pm_data_len, 0);
4470 			}
4471 		}
4472 
4473 		if (rval != QL_SUCCESS) {
4474 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4475 			rval = FC_FAILURE;
4476 		} else {
4477 			rval = FC_SUCCESS;
4478 		}
4479 		ql_reset_chip(ha);
4480 		(void) ql_abort_isp(ha);
4481 		PORTMANAGE_UNLOCK(ha);
4482 		break;
4483 	default:
4484 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4485 		rval = FC_BADCMD;
4486 		break;
4487 	}
4488 
4489 	/* Wait for suspension to end. */
4490 	ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4491 	timer = 0;
4492 
4493 	while (timer++ < 3000 &&
4494 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4495 		ql_delay(ha, 10000);
4496 	}
4497 
4498 	ql_restart_queues(ha);
4499 
4500 	if (rval != FC_SUCCESS) {
4501 		EL(ha, "failed, rval = %xh\n", rval);
4502 	} else {
4503 		/*EMPTY*/
4504 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4505 	}
4506 
4507 	return (rval);
4508 }
4509 
4510 static opaque_t
4511 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4512 {
4513 	port_id_t		id;
4514 	ql_adapter_state_t	*ha;
4515 	ql_tgt_t		*tq;
4516 
4517 	id.r.rsvd_1 = 0;
4518 	id.b24 = d_id.port_id;
4519 
4520 	ha = ql_fca_handle_to_state(fca_handle);
4521 	if (ha == NULL) {
4522 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4523 		    (void *)fca_handle);
4524 		return (NULL);
4525 	}
4526 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4527 
4528 	tq = ql_d_id_to_queue(ha, id);
4529 
4530 	if (tq == NULL) {
4531 		EL(ha, "failed, tq=NULL\n");
4532 	} else {
4533 		/*EMPTY*/
4534 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4535 	}
4536 	return (tq);
4537 }
4538 
4539 /* ************************************************************************ */
4540 /*			FCA Driver Local Support Functions.		    */
4541 /* ************************************************************************ */
4542 
4543 /*
4544  * ql_cmd_setup
4545  *	Verifies proper command.
4546  *
4547  * Input:
4548  *	fca_handle = handle setup by ql_bind_port().
4549  *	pkt = pointer to fc_packet.
4550  *	rval = pointer for return value.
4551  *
4552  * Returns:
4553  *	Adapter state pointer, NULL = failure.
4554  *
4555  * Context:
4556  *	Kernel context.
4557  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	/* Shared (physical) adapter state carries the instance-wide flags. */
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* In panic/crash-dump context skip all state checks; accept the cmd. */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		/*
		 * FCP command: resolve the target queue, using the one
		 * cached in the packet when it is still valid.
		 */
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/* Cache the lookup result for subsequent commands. */
			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			/*
			 * Busy-reject while an RSCN or re-authentication
			 * is pending against this target.
			 */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * (The bitwise '&' of the three side-effect-free conditions is
	 * deliberate -- it compiles to a single branch.)
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this driver's brand or the packet isn't ours. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4686 
4687 /*
4688  * ql_els_plogi
 *	Issue an extended link service port login request.
4690  *
4691  * Input:
4692  *	ha = adapter state pointer.
4693  *	pkt = pointer to fc_packet.
4694  *
4695  * Returns:
4696  *	FC_SUCCESS - the packet was accepted for transport.
4697  *	FC_TRANSPORT_ERROR - a transport error occurred.
4698  *
4699  * Context:
4700  *	Kernel context.
4701  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	/* acc doubles as the ACC or RJT payload returned to the ULP. */
	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology he sends a PLOGI after determining
		 * he has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/* ql_p2p_plogi() queued the iocb itself; nothing more to do here. */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	/* Log the remote port into the firmware. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* The stale loop ID was released; retry the login once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database entry (with ADISC). */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		/* Receive buffer size comes from the chip-specific ICB. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Echo the target's class 3 service parameters. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Target is busy; convert the ACC into an RJT. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				/* Renegotiate iiDMA speed after login. */
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware error to an FC packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4876 
4877 /*
4878  * ql_p2p_plogi
4879  *	Start an extended link service port login request using
4880  *	an ELS Passthru iocb.
4881  *
4882  * Input:
4883  *	ha = adapter state pointer.
4884  *	pkt = pointer to fc_packet.
4885  *
4886  * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
4888  *
4889  * Context:
4890  *	Kernel context.
4891  */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	/* Scratch target entry used only to read port database entries. */
	ql_tgt_t	tmp;
	ql_tgt_t	*tq = &tmp;
	int		rval;

	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				/* A PLOGI is already pending; adopt it. */
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Current handle collides with this
					 * unusable entry; advance it via a
					 * temporary (the read/modify/write
					 * split avoids a lint error).
					 * NOTE(review): the handle is bumped
					 * by two -- rationale undocumented,
					 * confirm before changing.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* Logged-in target (non-initiator) entry is usable. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the PLOGI payload to memory before handing it to the HBA. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* Queue the ELS passthru iocb; completion is asynchronous. */
	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);

	return (QL_CONSUMED);
}
4968 
4969 
4970 /*
4971  * ql_els_flogi
 *	Issue an extended link service fabric login request.
4973  *
4974  * Input:
4975  *	ha = adapter state pointer.
4976  *	pkt = pointer to fc_packet.
4977  *
4978  * Returns:
4979  *	FC_SUCCESS - the packet was accepted for transport.
4980  *	FC_TRANSPORT_ERROR - a transport error occurred.
4981  *
4982  * Context:
4983  *	Kernel context.
4984  */
4985 static int
4986 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4987 {
4988 	ql_tgt_t		*tq = NULL;
4989 	port_id_t		d_id;
4990 	la_els_logi_t		acc;
4991 	class_svc_param_t	*class3_param;
4992 	int			rval = FC_SUCCESS;
4993 	int			accept = 0;
4994 
4995 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4996 	    pkt->pkt_cmd_fhdr.d_id);
4997 
4998 	bzero(&acc, sizeof (acc));
4999 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5000 
5001 	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5002 		/*
5003 		 * d_id of zero in a FLOGI accept response in a point to point
5004 		 * topology triggers evulation of N Port login initiative.
5005 		 */
5006 		pkt->pkt_resp_fhdr.d_id = 0;
5007 		/*
5008 		 * An N_Port already logged in with the firmware
5009 		 * will have the only database entry.
5010 		 */
5011 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5012 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5013 		}
5014 
5015 		if (tq != NULL) {
5016 			/*
5017 			 * If the target port has initiative send
5018 			 * up a PLOGI about the new device.
5019 			 */
5020 			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5021 			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5022 			    &ha->init_ctrl_blk.cb24.port_name[0] :
5023 			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5024 				ha->send_plogi_timer = 3;
5025 			} else {
5026 				ha->send_plogi_timer = 0;
5027 			}
5028 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5029 		} else {
5030 			/*
5031 			 * An N_Port not logged in with the firmware will not
5032 			 * have a database entry.  We accept anyway and rely
5033 			 * on a PLOGI from the upper layers to set the d_id
5034 			 * and s_id.
5035 			 */
5036 			accept = 1;
5037 		}
5038 	} else {
5039 		tq = ql_d_id_to_queue(ha, d_id);
5040 	}
5041 	if ((tq != NULL) || (accept != NULL)) {
5042 		/* Build ACC. */
5043 		pkt->pkt_state = FC_PKT_SUCCESS;
5044 		class3_param = (class_svc_param_t *)&acc.class_3;
5045 
5046 		acc.ls_code.ls_code = LA_ELS_ACC;
5047 		acc.common_service.fcph_version = 0x2006;
5048 		if (ha->topology & QL_N_PORT) {
5049 			/* clear F_Port indicator */
5050 			acc.common_service.cmn_features = 0x0800;
5051 		} else {
5052 			acc.common_service.cmn_features = 0x1b00;
5053 		}
5054 		CFG_IST(ha, CFG_CTRL_242581) ?
5055 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5056 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5057 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5058 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5059 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5060 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5061 		acc.common_service.conc_sequences = 0xff;
5062 		acc.common_service.relative_offset = 0x03;
5063 		acc.common_service.e_d_tov = 0x7d0;
5064 		if (accept) {
5065 			/* Use the saved N_Port WWNN and WWPN */
5066 			if (ha->n_port != NULL) {
5067 				bcopy((void *)&ha->n_port->port_name[0],
5068 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5069 				bcopy((void *)&ha->n_port->node_name[0],
5070 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5071 				/* mark service options invalid */
5072 				class3_param->class_valid_svc_opt = 0x0800;
5073 			} else {
5074 				EL(ha, "ha->n_port is NULL\n");
5075 				/* Build RJT. */
5076 				acc.ls_code.ls_code = LA_ELS_RJT;
5077 
5078 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5079 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5080 			}
5081 		} else {
5082 			bcopy((void *)&tq->port_name[0],
5083 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5084 			bcopy((void *)&tq->node_name[0],
5085 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5086 
5087 			class3_param = (class_svc_param_t *)&acc.class_3;
5088 			class3_param->class_valid_svc_opt = 0x8800;
5089 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5090 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5091 			class3_param->conc_sequences =
5092 			    tq->class3_conc_sequences;
5093 			class3_param->open_sequences_per_exch =
5094 			    tq->class3_open_sequences_per_exch;
5095 		}
5096 	} else {
5097 		/* Build RJT. */
5098 		acc.ls_code.ls_code = LA_ELS_RJT;
5099 
5100 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5101 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5102 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5103 	}
5104 
5105 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5106 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5107 
5108 	if (rval != FC_SUCCESS) {
5109 		EL(ha, "failed, rval = %xh\n", rval);
5110 	} else {
5111 		/*EMPTY*/
5112 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5113 	}
5114 	return (rval);
5115 }
5116 
5117 /*
5118  * ql_els_logo
5119  *	Issue a extended link service logout request.
5120  *
5121  * Input:
5122  *	ha = adapter state pointer.
5123  *	pkt = pointer to fc_packet.
5124  *
5125  * Returns:
5126  *	FC_SUCCESS - the packet was accepted for transport.
5127  *	FC_TRANSPORT_ERROR - a transport error occurred.
5128  *
5129  * Context:
5130  *	Kernel context.
5131  */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			/* Never log out the broadcast address. */
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh PLOGI before this target is used again. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		do {
			/*
			 * Drop the queue lock while aborting so that
			 * command completions can decrement tq->outcnt.
			 */
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy ACC/RJT payload into the caller's response DMA buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5197 
5198 /*
5199  * ql_els_prli
5200  *	Issue a extended link service process login request.
5201  *
5202  * Input:
5203  *	ha = adapter state pointer.
5204  *	pkt = pointer to fc_packet.
5205  *
5206  * Returns:
5207  *	FC_SUCCESS - the packet was accepted for transport.
5208  *	FC_TRANSPORT_ERROR - a transport error occurred.
5209  *
5210  * Context:
5211  *	Kernel context.
5212  */
static int
ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq;
	port_id_t		d_id;
	la_els_prli_t		acc;
	prli_svc_param_t	*param;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		/* Refresh the firmware's port database entry. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		if ((ha->topology & QL_N_PORT) &&
		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
			/*
			 * N_Port with PLOGI complete: hand the PRLI to the
			 * firmware as an IOCB; the caller must not complete
			 * the packet itself (QL_CONSUMED).
			 */
			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
			rval = QL_CONSUMED;
		} else {
			/* Build ACC locally from cached PRLI parameters. */
			bzero(&acc, sizeof (acc));
			acc.ls_code = LA_ELS_ACC;
			acc.page_length = 0x10;
			acc.payload_length = tq->prli_payload_length;

			param = (prli_svc_param_t *)&acc.service_params[0];
			param->type = 0x08;
			param->rsvd = 0x00;
			param->process_assoc_flags = tq->prli_svc_param_word_0;
			param->process_flags = tq->prli_svc_param_word_3;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		la_els_rjt_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* QL_CONSUMED is a success path (packet handed to firmware). */
	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5277 
5278 /*
5279  * ql_els_prlo
5280  *	Issue a extended link service process logout request.
5281  *
5282  * Input:
5283  *	ha = adapter state pointer.
5284  *	pkt = pointer to fc_packet.
5285  *
5286  * Returns:
5287  *	FC_SUCCESS - the packet was accepted for transport.
5288  *	FC_TRANSPORT_ERROR - a transport error occurred.
5289  *
5290  * Context:
5291  *	Kernel context.
5292  */
5293 /* ARGSUSED */
5294 static int
5295 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5296 {
5297 	la_els_prli_t	acc;
5298 	int		rval = FC_SUCCESS;
5299 
5300 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5301 	    pkt->pkt_cmd_fhdr.d_id);
5302 
5303 	/* Build ACC. */
5304 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5305 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5306 
5307 	acc.ls_code = LA_ELS_ACC;
5308 	acc.service_params[2] = 1;
5309 
5310 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5311 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5312 
5313 	pkt->pkt_state = FC_PKT_SUCCESS;
5314 
5315 	if (rval != FC_SUCCESS) {
5316 		EL(ha, "failed, rval = %xh\n", rval);
5317 	} else {
5318 		/*EMPTY*/
5319 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5320 	}
5321 	return (rval);
5322 }
5323 
5324 /*
5325  * ql_els_adisc
5326  *	Issue a extended link service address discovery request.
5327  *
5328  * Input:
5329  *	ha = adapter state pointer.
5330  *	pkt = pointer to fc_packet.
5331  *
5332  * Returns:
5333  *	FC_SUCCESS - the packet was accepted for transport.
5334  *	FC_TRANSPORT_ERROR - a transport error occurred.
5335  *
5336  * Context:
5337  *	Kernel context.
5338  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Look up the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * If the target's loop ID is stale, rescan the firmware's
	 * device ID list to recover the current loop ID for this d_id.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the entry count returned by F/W. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no valid loop ID after rescan: reject below. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Restart any queued commands on each LUN. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
					/*
					 * NOTE(review): re-acquire here looks
					 * like ql_next drops the device queue
					 * lock — confirm against ql_next.
					 */
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy ACC/RJT payload into the caller's response DMA buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5456 
5457 /*
5458  * ql_els_linit
5459  *	Issue a extended link service loop initialize request.
5460  *
5461  * Input:
5462  *	ha = adapter state pointer.
5463  *	pkt = pointer to fc_packet.
5464  *
5465  * Returns:
5466  *	FC_SUCCESS - the packet was accepted for transport.
5467  *	FC_TRANSPORT_ERROR - a transport error occurred.
5468  *
5469  * Context:
5470  *	Kernel context.
5471  */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		/* Pull the LINIT request out of the command DMA buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Convert the response buffer DMA address to little-endian
		 * byte order for the LFA command block.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 4;
		/* Destination port ID, little-endian, low 3 bytes only. */
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x70;	/* LFA subcommand: LINIT */
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Not a fabric: LINIT cannot be forwarded. Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5551 
5552 /*
5553  * ql_els_lpc
5554  *	Issue a extended link service loop control request.
5555  *
5556  * Input:
5557  *	ha = adapter state pointer.
5558  *	pkt = pointer to fc_packet.
5559  *
5560  * Returns:
5561  *	FC_SUCCESS - the packet was accepted for transport.
5562  *	FC_TRANSPORT_ERROR - a transport error occurred.
5563  *
5564  * Context:
5565  *	Kernel context.
5566  */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		/* Pull the LPC request out of the command DMA buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Convert the response buffer DMA address to little-endian
		 * byte order for the LFA command block.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 20;
		/* Destination port ID, little-endian, low 3 bytes only. */
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x71;	/* LFA subcommand: LPC */
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Not a fabric: LPC cannot be forwarded. Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5644 
5645 /*
5646  * ql_els_lsts
5647  *	Issue a extended link service loop status request.
5648  *
5649  * Input:
5650  *	ha = adapter state pointer.
5651  *	pkt = pointer to fc_packet.
5652  *
5653  * Returns:
5654  *	FC_SUCCESS - the packet was accepted for transport.
5655  *	FC_TRANSPORT_ERROR - a transport error occurred.
5656  *
5657  * Context:
5658  *	Kernel context.
5659  */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		/* Pull the LSTS request out of the command DMA buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		/*
		 * Convert the response buffer DMA address to little-endian
		 * byte order for the LFA command block.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 2;
		/* Destination port ID, little-endian, low 3 bytes only. */
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x72;	/* LFA subcommand: LSTS */

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Not a fabric: LSTS cannot be forwarded. Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5735 
5736 /*
5737  * ql_els_scr
5738  *	Issue a extended link service state change registration request.
5739  *
5740  * Input:
5741  *	ha = adapter state pointer.
5742  *	pkt = pointer to fc_packet.
5743  *
5744  * Returns:
5745  *	FC_SUCCESS - the packet was accepted for transport.
5746  *	FC_TRANSPORT_ERROR - a transport error occurred.
5747  *
5748  * Context:
5749  *	Kernel context.
5750  */
5751 static int
5752 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5753 {
5754 	fc_scr_resp_t	acc;
5755 	int		rval = FC_SUCCESS;
5756 
5757 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5758 
5759 	bzero(&acc, sizeof (acc));
5760 	if (ha->topology & QL_SNS_CONNECTION) {
5761 		fc_scr_req_t els;
5762 
5763 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5764 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5765 
5766 		if (ql_send_change_request(ha, els.scr_func) ==
5767 		    QL_SUCCESS) {
5768 			/* Build ACC. */
5769 			acc.scr_acc = LA_ELS_ACC;
5770 
5771 			pkt->pkt_state = FC_PKT_SUCCESS;
5772 		} else {
5773 			/* Build RJT. */
5774 			acc.scr_acc = LA_ELS_RJT;
5775 
5776 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5777 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5778 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5779 		}
5780 	} else {
5781 		/* Build RJT. */
5782 		acc.scr_acc = LA_ELS_RJT;
5783 
5784 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5785 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5786 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5787 	}
5788 
5789 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5790 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5791 
5792 	if (rval != FC_SUCCESS) {
5793 		EL(ha, "failed, rval = %xh\n", rval);
5794 	} else {
5795 		/*EMPTY*/
5796 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5797 	}
5798 	return (rval);
5799 }
5800 
5801 /*
5802  * ql_els_rscn
5803  *	Issue a extended link service register state
5804  *	change notification request.
5805  *
5806  * Input:
5807  *	ha = adapter state pointer.
5808  *	pkt = pointer to fc_packet.
5809  *
5810  * Returns:
5811  *	FC_SUCCESS - the packet was accepted for transport.
5812  *	FC_TRANSPORT_ERROR - a transport error occurred.
5813  *
5814  * Context:
5815  *	Kernel context.
5816  */
5817 static int
5818 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5819 {
5820 	ql_rscn_resp_t	acc;
5821 	int		rval = FC_SUCCESS;
5822 
5823 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5824 
5825 	bzero(&acc, sizeof (acc));
5826 	if (ha->topology & QL_SNS_CONNECTION) {
5827 		/* Build ACC. */
5828 		acc.scr_acc = LA_ELS_ACC;
5829 
5830 		pkt->pkt_state = FC_PKT_SUCCESS;
5831 	} else {
5832 		/* Build RJT. */
5833 		acc.scr_acc = LA_ELS_RJT;
5834 
5835 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5836 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5837 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5838 	}
5839 
5840 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5841 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5842 
5843 	if (rval != FC_SUCCESS) {
5844 		EL(ha, "failed, rval = %xh\n", rval);
5845 	} else {
5846 		/*EMPTY*/
5847 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5848 	}
5849 	return (rval);
5850 }
5851 
5852 /*
5853  * ql_els_farp_req
5854  *	Issue FC Address Resolution Protocol (FARP)
5855  *	extended link service request.
5856  *
5857  *	Note: not supported.
5858  *
5859  * Input:
5860  *	ha = adapter state pointer.
5861  *	pkt = pointer to fc_packet.
5862  *
5863  * Returns:
5864  *	FC_SUCCESS - the packet was accepted for transport.
5865  *	FC_TRANSPORT_ERROR - a transport error occurred.
5866  *
5867  * Context:
5868  *	Kernel context.
5869  */
5870 static int
5871 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5872 {
5873 	ql_acc_rjt_t	acc;
5874 	int		rval = FC_SUCCESS;
5875 
5876 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5877 
5878 	bzero(&acc, sizeof (acc));
5879 
5880 	/* Build ACC. */
5881 	acc.ls_code.ls_code = LA_ELS_ACC;
5882 
5883 	pkt->pkt_state = FC_PKT_SUCCESS;
5884 
5885 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5886 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5887 
5888 	if (rval != FC_SUCCESS) {
5889 		EL(ha, "failed, rval = %xh\n", rval);
5890 	} else {
5891 		/*EMPTY*/
5892 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5893 	}
5894 	return (rval);
5895 }
5896 
5897 /*
5898  * ql_els_farp_reply
5899  *	Issue FC Address Resolution Protocol (FARP)
5900  *	extended link service reply.
5901  *
5902  *	Note: not supported.
5903  *
5904  * Input:
5905  *	ha = adapter state pointer.
5906  *	pkt = pointer to fc_packet.
5907  *
5908  * Returns:
5909  *	FC_SUCCESS - the packet was accepted for transport.
5910  *	FC_TRANSPORT_ERROR - a transport error occurred.
5911  *
5912  * Context:
5913  *	Kernel context.
5914  */
5915 /* ARGSUSED */
5916 static int
5917 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5918 {
5919 	ql_acc_rjt_t	acc;
5920 	int		rval = FC_SUCCESS;
5921 
5922 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5923 
5924 	bzero(&acc, sizeof (acc));
5925 
5926 	/* Build ACC. */
5927 	acc.ls_code.ls_code = LA_ELS_ACC;
5928 
5929 	pkt->pkt_state = FC_PKT_SUCCESS;
5930 
5931 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5932 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5933 
5934 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5935 
5936 	return (rval);
5937 }
5938 
/*
 * ql_els_rnid
 *	Handle a Request Node Identification Data (RNID) ELS by sending
 *	an RNID to the target via firmware and relaying the result.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - RNID data returned in the response buffer.
 *	FC_FAILURE - device unknown or the firmware request failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* Look up the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	/* KM_SLEEP never returns NULL; the ASSERT is belt-and-braces. */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
	ASSERT(rnid_acc != NULL);

	bzero(&acc, sizeof (acc));

	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/*
	 * NOTE(review): copies req_len bytes into acc.hdr — presumably
	 * la_els_rnid_acc_t is sized to hold FCIO_RNID_MAX_DATA_LEN of
	 * payload after the header; confirm against the struct definition.
	 */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6003 
/*
 * ql_els_rls
 *	Handle a Read Link Error Status Block (RLS) ELS by fetching the
 *	link error counters from firmware and returning them in the ACC.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - link status returned in the response buffer.
 *	FC_FAILURE - device unknown or the firmware request failed.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* Look up the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	/* KM_SLEEP never returns NULL; the ASSERT is belt-and-braces. */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
	ASSERT(rls_acc != NULL);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Firmware returns the counters little-endian; convert in place. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6074 
/*
 * ql_busy_plogi
 *	Determine whether a PLOGI to the target must be deferred because
 *	commands are still outstanding or still queued for callback.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet (the PLOGI).
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - target still busy; transport should retry.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop lock while sleeping so completions proceed. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/* Unsolicited buffers carry the peer's s_id instead. */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6158 
6159 /*
6160  * ql_login_port
6161  *	Logs in a device if not already logged in.
6162  *
6163  * Input:
6164  *	ha = adapter state pointer.
6165  *	d_id = 24 bit port ID.
6166  *	DEVICE_QUEUE_LOCK must be released.
6167  *
6168  * Returns:
6169  *	QL local function return status code.
6170  *
6171  * Context:
6172  *	Kernel context.
6173  */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			/* Remember current loop ID; NULL tq if no match. */
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/*
	 * A local-loop device whose loop ID is marked lost can try to
	 * re-login with its previous ID (strip the lost marker).
	 */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		/* 24xx/25xx/81xx use a dedicated SNS handle. */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		rval = ql_login_fabric_port(ha, tq, loop_id);
		if (rval == QL_SUCCESS) {
			tq->loop_id = loop_id;
			tq->flags |= TQF_FABRIC_DEVICE;
			(void) ql_get_port_database(ha, tq, PDF_NONE);
			/* Talking to the SNS implies a fabric topology. */
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			/* Refresh fabric login; ID-in-use means still valid. */
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_PORT_ID_USED) {
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			/* Initiators get no PLOGI; targets do. */
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		/* Try at most one full pass over the handle range. */
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			/* Wrap the candidate back into [first, last]. */
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			/*
			 * vha != NULL means some virtual port already owns
			 * this loop ID; also skip reserved IDs and our own.
			 */
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock across the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				/* index = 1 makes this the last iteration. */
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				/* Collision: try the next free loop ID. */
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				/* Unrecoverable login failure; stop. */
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6365 
6366 /*
6367  * ql_login_fabric_port
6368  *	Issue login fabric port mailbox command.
6369  *
6370  * Input:
6371  *	ha:		adapter state pointer.
6372  *	tq:		target queue pointer.
6373  *	loop_id:	FC Loop ID.
6374  *
6375  * Returns:
6376  *	ql local function return status code.
6377  *
6378  * Context:
6379  *	Kernel context.
6380  */
6381 static int
6382 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6383 {
6384 	int		rval;
6385 	int		index;
6386 	int		retry = 0;
6387 	port_id_t	d_id;
6388 	ql_tgt_t	*newq;
6389 	ql_mbx_data_t	mr;
6390 
6391 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6392 	    tq->d_id.b24);
6393 
6394 	/*
6395 	 * QL_PARAMETER_ERROR also means the firmware is
6396 	 * not able to allocate PCB entry due to resource
6397 	 * issues, or collision.
6398 	 */
6399 	do {
6400 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6401 		if ((rval == QL_PARAMETER_ERROR) ||
6402 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6403 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6404 			retry++;
6405 			drv_usecwait(10 * MILLISEC);
6406 		} else {
6407 			break;
6408 		}
6409 	} while (retry < 5);
6410 
6411 	switch (rval) {
6412 	case QL_SUCCESS:
6413 		tq->loop_id = loop_id;
6414 		break;
6415 
6416 	case QL_PORT_ID_USED:
6417 		/*
6418 		 * This Loop ID should NOT be in use in drivers
6419 		 */
6420 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6421 
6422 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6423 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6424 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6425 			    newq->loop_id, newq->d_id.b24);
6426 			ql_send_logo(ha, newq, NULL);
6427 		}
6428 
6429 		tq->loop_id = mr.mb[1];
6430 		break;
6431 
6432 	case QL_LOOP_ID_USED:
6433 		d_id.b.al_pa = LSB(mr.mb[2]);
6434 		d_id.b.area = MSB(mr.mb[2]);
6435 		d_id.b.domain = LSB(mr.mb[1]);
6436 
6437 		newq = ql_d_id_to_queue(ha, d_id);
6438 		if (newq && (newq->loop_id != loop_id)) {
6439 			/*
6440 			 * This should NEVER ever happen; but this
6441 			 * code is needed to bail out when the worst
6442 			 * case happens - or as used to happen before
6443 			 */
6444 			ASSERT(newq->d_id.b24 == d_id.b24);
6445 
6446 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6447 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6448 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6449 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6450 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6451 			    newq->d_id.b24, loop_id);
6452 
6453 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6454 				ADAPTER_STATE_LOCK(ha);
6455 
6456 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6457 				ql_add_link_b(&ha->dev[index], &newq->device);
6458 
6459 				newq->d_id.b24 = d_id.b24;
6460 
6461 				index = ql_alpa_to_index[d_id.b.al_pa];
6462 				ql_add_link_b(&ha->dev[index], &newq->device);
6463 
6464 				ADAPTER_STATE_UNLOCK(ha);
6465 			}
6466 
6467 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6468 
6469 		}
6470 
6471 		/*
6472 		 * Invalidate the loop ID for the
6473 		 * us to obtain a new one.
6474 		 */
6475 		tq->loop_id = PORT_NO_LOOP_ID;
6476 		break;
6477 
6478 	case QL_ALL_IDS_IN_USE:
6479 		rval = QL_FUNCTION_FAILED;
6480 		EL(ha, "no loop id's available\n");
6481 		break;
6482 
6483 	default:
6484 		if (rval == QL_COMMAND_ERROR) {
6485 			switch (mr.mb[1]) {
6486 			case 2:
6487 			case 3:
6488 				rval = QL_MEMORY_ALLOC_FAILED;
6489 				break;
6490 
6491 			case 4:
6492 				rval = QL_FUNCTION_TIMEOUT;
6493 				break;
6494 			case 7:
6495 				rval = QL_FABRIC_NOT_INITIALIZED;
6496 				break;
6497 			default:
6498 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6499 				break;
6500 			}
6501 		} else {
6502 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6503 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6504 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6505 		}
6506 		break;
6507 	}
6508 
6509 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6510 	    rval != QL_LOOP_ID_USED) {
6511 		EL(ha, "failed=%xh\n", rval);
6512 	} else {
6513 		/*EMPTY*/
6514 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6515 	}
6516 	return (rval);
6517 }
6518 
6519 /*
6520  * ql_logout_port
6521  *	Logs out a device if possible.
6522  *
6523  * Input:
6524  *	ha:	adapter state pointer.
6525  *	d_id:	24 bit port ID.
6526  *
6527  * Returns:
6528  *	QL local function return status code.
6529  *
6530  * Context:
6531  *	Kernel context.
6532  */
6533 static int
6534 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6535 {
6536 	ql_link_t	*link;
6537 	ql_tgt_t	*tq;
6538 	uint16_t	index;
6539 
6540 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6541 
6542 	/* Get head queue index. */
6543 	index = ql_alpa_to_index[d_id.b.al_pa];
6544 
6545 	/* Get device queue. */
6546 	tq = NULL;
6547 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6548 		tq = link->base_address;
6549 		if (tq->d_id.b24 == d_id.b24) {
6550 			break;
6551 		} else {
6552 			tq = NULL;
6553 		}
6554 	}
6555 
6556 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6557 		(void) ql_logout_fabric_port(ha, tq);
6558 		tq->loop_id = PORT_NO_LOOP_ID;
6559 	}
6560 
6561 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6562 
6563 	return (QL_SUCCESS);
6564 }
6565 
6566 /*
6567  * ql_dev_init
6568  *	Initialize/allocate device queue.
6569  *
6570  * Input:
6571  *	ha:		adapter state pointer.
6572  *	d_id:		device destination ID
6573  *	loop_id:	device loop ID
6574  *	ADAPTER_STATE_LOCK must be already obtained.
6575  *
6576  * Returns:
6577  *	NULL = failure
6578  *
6579  * Context:
6580  *	Kernel context.
6581  */
6582 ql_tgt_t *
6583 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6584 {
6585 	ql_link_t	*link;
6586 	uint16_t	index;
6587 	ql_tgt_t	*tq;
6588 
6589 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6590 	    ha->instance, d_id.b24, loop_id);
6591 
6592 	index = ql_alpa_to_index[d_id.b.al_pa];
6593 
6594 	/* If device queue exists, set proper loop ID. */
6595 	tq = NULL;
6596 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6597 		tq = link->base_address;
6598 		if (tq->d_id.b24 == d_id.b24) {
6599 			tq->loop_id = loop_id;
6600 
6601 			/* Reset port down retry count. */
6602 			tq->port_down_retry_count = ha->port_down_retry_count;
6603 			tq->qfull_retry_count = ha->qfull_retry_count;
6604 
6605 			break;
6606 		} else {
6607 			tq = NULL;
6608 		}
6609 	}
6610 
6611 	/* If device does not have queue. */
6612 	if (tq == NULL) {
6613 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6614 		if (tq != NULL) {
6615 			/*
6616 			 * mutex to protect the device queue,
6617 			 * does not block interrupts.
6618 			 */
6619 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6620 			    (ha->iflags & IFLG_INTR_AIF) ?
6621 			    (void *)(uintptr_t)ha->intr_pri :
6622 			    (void *)(uintptr_t)ha->iblock_cookie);
6623 
6624 			tq->d_id.b24 = d_id.b24;
6625 			tq->loop_id = loop_id;
6626 			tq->device.base_address = tq;
6627 			tq->iidma_rate = IIDMA_RATE_INIT;
6628 
6629 			/* Reset port down retry count. */
6630 			tq->port_down_retry_count = ha->port_down_retry_count;
6631 			tq->qfull_retry_count = ha->qfull_retry_count;
6632 
6633 			/* Add device to device queue. */
6634 			ql_add_link_b(&ha->dev[index], &tq->device);
6635 		}
6636 	}
6637 
6638 	if (tq == NULL) {
6639 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6640 	} else {
6641 		/*EMPTY*/
6642 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6643 	}
6644 	return (tq);
6645 }
6646 
6647 /*
6648  * ql_dev_free
6649  *	Remove queue from device list and frees resources used by queue.
6650  *
6651  * Input:
6652  *	ha:	adapter state pointer.
6653  *	tq:	target queue pointer.
6654  *	ADAPTER_STATE_LOCK must be already obtained.
6655  *
6656  * Context:
6657  *	Kernel context.
6658  */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bail out if any LUN still has commands queued. */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only free when no commands are outstanding on the target. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				/* Unlink the target from its hash chain. */
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue; advance 'link'
				 * before ql_remove_link/kmem_free so the
				 * walk never touches freed memory.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6702 
6703 /*
6704  * ql_lun_queue
6705  *	Allocate LUN queue if does not exists.
6706  *
6707  * Input:
6708  *	ha:	adapter state pointer.
6709  *	tq:	target queue.
6710  *	lun:	LUN number.
6711  *
6712  * Returns:
6713  *	NULL = failure
6714  *
6715  * Context:
6716  *	Kernel context.
6717  */
6718 static ql_lun_t *
6719 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6720 {
6721 	ql_lun_t	*lq;
6722 	ql_link_t	*link;
6723 
6724 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6725 
6726 	/* Fast path. */
6727 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6728 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6729 		return (tq->last_lun_queue);
6730 	}
6731 
6732 	if (lun >= MAX_LUNS) {
6733 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6734 		return (NULL);
6735 	}
6736 	/* If device queue exists, set proper loop ID. */
6737 	lq = NULL;
6738 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6739 		lq = link->base_address;
6740 		if (lq->lun_no == lun) {
6741 			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6742 			tq->last_lun_queue = lq;
6743 			return (lq);
6744 		}
6745 	}
6746 
6747 	/* If queue does exist. */
6748 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6749 
6750 	/* Initialize LUN queue. */
6751 	if (lq != NULL) {
6752 		lq->link.base_address = lq;
6753 
6754 		lq->lun_no = lun;
6755 		lq->target_queue = tq;
6756 
6757 		DEVICE_QUEUE_LOCK(tq);
6758 		ql_add_link_b(&tq->lun_queues, &lq->link);
6759 		DEVICE_QUEUE_UNLOCK(tq);
6760 		tq->last_lun_queue = lq;
6761 	}
6762 
6763 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6764 
6765 	return (lq);
6766 }
6767 
6768 /*
6769  * ql_fcp_scsi_cmd
6770  *	Process fibre channel (FCP) SCSI protocol commands.
6771  *
6772  * Input:
6773  *	ha = adapter state pointer.
6774  *	pkt = pointer to fc_packet.
6775  *	sp = srb pointer.
6776  *
6777  * Returns:
6778  *	FC_SUCCESS - the packet was accepted for transport.
6779  *	FC_TRANSPORT_ERROR - a transport error occurred.
6780  *
6781  * Context:
6782  *	Kernel context.
6783  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Prefer the cached device; fall back to a D_ID lookup. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/*
	 * Extract the LUN from the FCP entity address.
	 * NOTE(review): byte-swaps ent_addr_0 via lobyte/hibyte — presumably
	 * converting the big-endian FCP LUN field; confirm against fcp.h.
	 */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Normal SCSI command: account I/O statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				/*
				 * One command IOCB covers ha->cmd_segs
				 * cookies; each continuation IOCB covers
				 * ha->cmd_cont_segs more.
				 */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						/* Partial continuation. */
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No device/LUN queue: reject back to the ULP. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		/* Complete asynchronously via the task daemon if allowed. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6876 
6877 /*
6878  * ql_task_mgmt
6879  *	Task management function processor.
6880  *
6881  * Input:
6882  *	ha:	adapter state pointer.
6883  *	tq:	target queue pointer.
6884  *	pkt:	pointer to fc_packet.
6885  *	sp:	SRB pointer.
6886  *
6887  * Context:
6888  *	Kernel context.
6889  */
6890 static void
6891 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6892     ql_srb_t *sp)
6893 {
6894 	fcp_rsp_t		*fcpr;
6895 	struct fcp_rsp_info	*rsp;
6896 	uint16_t		lun;
6897 
6898 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6899 
6900 	ASSERT(pkt->pkt_cmd_dma == NULL && pkt->pkt_resp_dma == NULL);
6901 
6902 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6903 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6904 
6905 	bzero(fcpr, pkt->pkt_rsplen);
6906 
6907 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6908 	fcpr->fcp_response_len = 8;
6909 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6910 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6911 
6912 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6913 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6914 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6915 		}
6916 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6917 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6918 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6919 		}
6920 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6921 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6922 		    QL_SUCCESS) {
6923 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6924 		}
6925 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6926 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6927 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6928 		}
6929 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6930 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6931 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6932 		}
6933 	} else {
6934 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6935 	}
6936 
6937 	pkt->pkt_state = FC_PKT_SUCCESS;
6938 
6939 	/* Do command callback. */
6940 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6941 		ql_awaken_task_daemon(ha, sp, 0, 0);
6942 	}
6943 
6944 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6945 }
6946 
6947 /*
6948  * ql_fcp_ip_cmd
6949  *	Process fibre channel (FCP) Internet (IP) protocols commands.
6950  *
6951  * Input:
6952  *	ha:	adapter state pointer.
6953  *	pkt:	pointer to fc_packet.
6954  *	sp:	SRB pointer.
6955  *
6956  * Returns:
6957  *	FC_SUCCESS - the packet was accepted for transport.
6958  *	FC_TRANSPORT_ERROR - a transport error occurred.
6959  *
6960  * Context:
6961  *	Kernel context.
6962  */
6963 static int
6964 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6965 {
6966 	port_id_t	d_id;
6967 	ql_tgt_t	*tq;
6968 
6969 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6970 
6971 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6972 	if (tq == NULL) {
6973 		d_id.r.rsvd_1 = 0;
6974 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6975 		tq = ql_d_id_to_queue(ha, d_id);
6976 	}
6977 
6978 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6979 		/*
6980 		 * IP data is bound to pkt_cmd_dma
6981 		 */
6982 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6983 		    0, 0, DDI_DMA_SYNC_FORDEV);
6984 
6985 		/* Setup IOCB count. */
6986 		sp->iocb = ha->ip_cmd;
6987 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6988 			uint32_t	cnt;
6989 
6990 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6991 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6992 			if (cnt % ha->cmd_cont_segs) {
6993 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6994 			} else {
6995 				sp->req_cnt++;
6996 			}
6997 		} else {
6998 			sp->req_cnt = 1;
6999 		}
7000 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7001 
7002 		return (ql_start_cmd(ha, tq, pkt, sp));
7003 	} else {
7004 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7005 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7006 
7007 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7008 			ql_awaken_task_daemon(ha, sp, 0, 0);
7009 	}
7010 
7011 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7012 
7013 	return (FC_SUCCESS);
7014 }
7015 
7016 /*
7017  * ql_fc_services
7018  *	Process fibre channel services (name server).
7019  *
7020  * Input:
7021  *	ha:	adapter state pointer.
7022  *	pkt:	pointer to fc_packet.
7023  *
7024  * Returns:
7025  *	FC_SUCCESS - the packet was accepted for transport.
7026  *	FC_TRANSPORT_ERROR - a transport error occurred.
7027  *
7028  * Context:
7029  *	Kernel context.
7030  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the DMA command buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	/*
	 * ct_aiusize is in 4-byte words; total CT IU must fit the
	 * response buffer.
	 * NOTE(review): the bound checked is pkt_rsplen, not pkt_cmdlen —
	 * presumably intentional since the reply lands in pkt_resp; verify.
	 */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	ASSERT(cnt <= (uint32_t)pkt->pkt_rsplen);
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/* No queue for the service's D_ID: reject. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		/*
		 * One MS IOCB covers MS_DATA_SEGMENTS response cookies;
		 * each continuation covers CONT_TYPE_1_DATA_SEGMENTS more.
		 */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		/* Deliver the reject payload into the response buffer. */
		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7135 
7136 /*
7137  * ql_cthdr_endian
7138  *	Change endianess of ct passthrough header and payload.
7139  *
7140  * Input:
7141  *	acc_handle:	DMA buffer access handle.
7142  *	ct_hdr:		Pointer to header.
7143  *	restore:	Restore first flag.
7144  *
7145  * Context:
7146  *	Interrupt or Kernel context, no mailbox commands allowed.
7147  */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Work on a local copy of the CT header from the DMA buffer. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * restore == B_TRUE: header is currently big endian; swap it to
	 * host order FIRST so the ct_fcstype/ct_cmdrsp tests below read
	 * meaningful values.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Swap the payload fields whose layout depends on the command. */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		/* Payload starts right after the CT header in the buffer. */
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		/* Commands whose payload begins with one 32-bit port ID. */
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			BIG_ENDIAN_32(bp);
			break;
		/* Register commands: port ID followed by a 32-bit value. */
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		/* Payload is a 16-byte node name / IP address. */
		case NS_GNN_IP:
		case NS_GIPA_IP:
			BIG_ENDIAN(bp, 16);
			break;
		/* 8-byte WWN, then a 16-byte IP address to swap. */
		case NS_RIP_NN:
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		/* 8-byte WWN, then a 64-bit initial process associator. */
		case NS_RIPA_NN:
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/*
	 * restore == B_FALSE: header was host order on entry; convert it
	 * to big endian now that it has been inspected.
	 */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (possibly swapped) header back to the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7219 
7220 /*
7221  * ql_start_cmd
7222  *	Finishes starting fibre channel protocol (FCP) command.
7223  *
7224  * Input:
7225  *	ha:	adapter state pointer.
7226  *	tq:	target queue pointer.
7227  *	pkt:	pointer to fc_packet.
7228  *	sp:	SRB pointer.
7229  *
7230  * Context:
7231  *	Kernel context.
7232  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Not yet in the outstanding-command (token) array. */
	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/*
	 * If a polling command setup wait time.
	 * With the watchdog enabled, allow two extra watchdog periods
	 * beyond the srb's own timer so the watchdog fires first.
	 */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
		ASSERT(poll_wait != 0);
	}

	/*
	 * Loop-down timeout expired with link-down reporting enabled:
	 * fail the command immediately with CS_PORT_UNAVAILABLE instead
	 * of queueing it.
	 */
	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		/*
		 * Panic dump path: bypass the device queue and issue the
		 * IOCB directly. On the first panic command only, reset
		 * the ISP so polled-mode logins can be redone.
		 */
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			ASSERT(ha->pha->outstanding_cmds[0] == NULL);

			DEVICE_QUEUE_UNLOCK(tq);

			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/*
			 * Check whether next message can be processed.
			 * ql_next() releases the DEVICE_QUEUE_LOCK.
			 */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		ASSERT(sp->flags & SRB_POLL);

		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/*
			 * Timed out: try to abort; if the abort itself
			 * fails, pull the srb off the device queue by hand.
			 */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				ASSERT(res == FC_OFFLINE ||
				    res == FC_ABORT_FAILED);

				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
		    SRB_IN_TOKEN_ARRAY)) == 0);

		/* During panic, re-login the port if the command failed. */
		if (ddi_in_panic()) {
			ASSERT(ha->pha->outstanding_cmds[0] == NULL);
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7404 
7405 /*
7406  * ql_poll_cmd
7407  *	Polls commands for completion.
7408  *
7409  * Input:
 *	vha = adapter state pointer.
7411  *	sp = SRB command pointer.
7412  *	poll_wait = poll wait time in seconds.
7413  *
7414  * Returns:
7415  *	QL local function return status code.
7416  *
7417  * Context:
7418  *	Kernel context.
7419  */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	/*
	 * NOTE(review): poll_wait is documented as seconds, but the
	 * budget below is poll_wait * 100 in units drained 10 per
	 * ql_delay(10000) call — i.e. poll_wait * 100 ms if ql_delay()
	 * takes microseconds. Confirm the intended scale (vs. * 1000).
	 */
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Spin until the ISR (or this loop) clears SRB_POLL. */
	while (sp->flags & SRB_POLL) {

		/*
		 * If interrupts are off, the adapter has gone idle too
		 * long, or we are panicking, service the hardware and
		 * the task daemon by hand.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Poll istatus and invoke the ISR directly. */
			if ((CFG_IST(ha, CFG_CTRL_242581) ?
			    RD32_IO_REG(ha, istatus) :
			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		/* Out of budget for another 10ms tick: give up. */
		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7492 
7493 /*
7494  * ql_next
7495  *	Retrieve and process next job in the device queue.
7496  *
7497  * Input:
 *	vha:	adapter state pointer.
7499  *	lq:	LUN queue pointer.
7500  *	DEVICE_QUEUE_LOCK must be already obtained.
7501  *
7502  * Output:
7503  *	Releases DEVICE_QUEUE_LOCK upon exit.
7504  *
7505  * Context:
7506  *	Interrupt or Kernel context, no mailbox commands allowed.
7507  */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* During panic the IOCB is issued directly; just drop the lock. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN queue while commands may still be started. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Command now counts against the target, not the queue. */
		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7584 
7585 /*
7586  * ql_done
7587  *	Process completed commands.
7588  *
7589  * Input:
7590  *	link:	first command link in chain.
7591  *
7592  * Context:
7593  *	Interrupt or Kernel context, no mailbox commands allowed.
7594  */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Walk the chain; each link is embedded in its srb. */
	for (; link != NULL; link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		/* Unsolicited-buffer completion path. */
		if (sp->flags & SRB_UB_CALLBACK) {
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				QL_UB_UNLOCK(ha);
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}

			/* Place request back on top of target command queue */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_242581) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				/* Requeue at head; ql_next drops the lock. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				/*
				 * Either path releases DEVICE_QUEUE_LOCK:
				 * ql_next() does so internally.
				 */
				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					/*
					 * NOTE(review): FC_PKT_TRAN_ERROR is
					 * a pkt_state-style constant being
					 * stored into pkt_reason; a
					 * FC_REASON_* value would be expected
					 * here — confirm against fc_packet(9S).
					 */
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
7826 
7827 /*
7828  * ql_awaken_task_daemon
7829  *	Adds command completion callback to callback queue and/or
7830  *	awakens task daemon thread.
7831  *
7832  * Input:
 *	vha:		adapter state pointer.
7834  *	sp:		srb pointer.
7835  *	set_flags:	task daemon flags to set.
7836  *	reset_flags:	task daemon flags to reset.
7837  *
7838  * Context:
7839  *	Interrupt or Kernel context, no mailbox commands allowed.
7840  */
7841 void
7842 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
7843     uint32_t set_flags, uint32_t reset_flags)
7844 {
7845 	ql_adapter_state_t	*ha = vha->pha;
7846 
7847 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7848 
7849 	/* Acquire task daemon lock. */
7850 	TASK_DAEMON_LOCK(ha);
7851 
7852 	if (set_flags & ISP_ABORT_NEEDED) {
7853 		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
7854 			set_flags &= ~ISP_ABORT_NEEDED;
7855 		}
7856 	}
7857 
7858 	ha->task_daemon_flags |= set_flags;
7859 	ha->task_daemon_flags &= ~reset_flags;
7860 
7861 	if (QL_DAEMON_SUSPENDED(ha)) {
7862 		if (sp != NULL) {
7863 			TASK_DAEMON_UNLOCK(ha);
7864 
7865 			/* Do callback. */
7866 			if (sp->flags & SRB_UB_CALLBACK) {
7867 				ql_unsol_callback(sp);
7868 			} else {
7869 				(*sp->pkt->pkt_comp)(sp->pkt);
7870 			}
7871 		} else {
7872 			if (!(curthread->t_flag & T_INTR_THREAD) &&
7873 			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
7874 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7875 				ql_task_thread(ha);
7876 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7877 			}
7878 
7879 			TASK_DAEMON_UNLOCK(ha);
7880 		}
7881 	} else {
7882 		if (sp != NULL) {
7883 			ql_add_link_b(&ha->callback_queue, &sp->cmd);
7884 		}
7885 
7886 		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
7887 			cv_broadcast(&ha->cv_task_daemon);
7888 		}
7889 		TASK_DAEMON_UNLOCK(ha);
7890 	}
7891 
7892 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7893 }
7894 
7895 /*
7896  * ql_task_daemon
7897  *	Thread that is awaken by the driver when a
7898  *	background needs to be done.
7899  *
7900  * Input:
7901  *	arg = adapter state pointer.
7902  *
7903  * Context:
7904  *	Kernel context.
7905  */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with the CPR (suspend/resume) framework. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	/* Main loop: process work, then sleep until awakened. */
	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT releases task_daemon_mutex. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
7963 
7964 /*
7965  * ql_task_thread
7966  *	Thread run by daemon.
7967  *
7968  * Input:
7969  *	ha = adapter state pointer.
7970  *	TASK_DAEMON_LOCK must be acquired prior to call.
7971  *
7972  * Context:
7973  *	Kernel context.
7974  */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	int			loop_again, rval;
	ql_srb_t		*sp;
	ql_head_t		*head;
	ql_link_t		*link;
	caddr_t			msg;
	ql_adapter_state_t	*vha;

	/*
	 * Caller holds TASK_DAEMON_LOCK.  Each action below that calls
	 * out of this function drops the lock, does the work, then
	 * reacquires it and sets loop_again to re-scan the flags.
	 */
	do {
		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
		    ha->instance, ha->task_daemon_flags);

		loop_again = FALSE;

		/* Stall all work while not at full power. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		/* IDC acknowledge needed. */
		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			switch (ha->idc_mb[2]) {
			case IDC_OPC_DRV_START:
				if (ha->idc_restart_mpi != 0) {
					ha->idc_restart_mpi--;
					if (ha->idc_restart_mpi == 0) {
						ha->restart_mpi_timer = 0;
						ha->task_daemon_flags &=
						    ~TASK_DAEMON_STALLED_FLG;
					}
				}
				if (ha->idc_flash_acc != 0) {
					ha->idc_flash_acc--;
					if (ha->idc_flash_acc == 0) {
						ha->flash_acc_timer = 0;
						GLOBAL_HW_LOCK();
					}
				}
				break;
			case IDC_OPC_FLASH_ACC:
				ha->flash_acc_timer = 30;
				if (ha->idc_flash_acc == 0) {
					GLOBAL_HW_UNLOCK();
				}
				ha->idc_flash_acc++;
				break;
			case IDC_OPC_RESTART_MPI:
				ha->restart_mpi_timer = 30;
				ha->idc_restart_mpi++;
				ha->task_daemon_flags |=
				    TASK_DAEMON_STALLED_FLG;
				break;
			default:
				EL(ha, "Unknown IDC opcode=%xh\n",
				    ha->idc_mb[2]);
				break;
			}
			ADAPTER_STATE_UNLOCK(ha);

			/* Send the IDC ack if the peer requires one. */
			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
				TASK_DAEMON_UNLOCK(ha);
				rval = ql_idc_ack(ha);
				if (rval != QL_SUCCESS) {
					EL(ha, "idc_ack status=%xh\n", rval);
				}
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Stall when suspended, stopping, stalled, or offline. */
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
		    DRIVER_STALL) ||
		    (ha->flags & ONLINE) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		/* Report the port offline before the ISP abort below. */
		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_abort_queues(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Not suspended, awaken waiting routines. */
		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = TRUE;
		}

		/* Handle RSCN changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = TRUE;
			}
		}

		/* Handle state changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				/* Wait for outstanding commands first. */
				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
					}
				}

				/* Pick a console message for the transition. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the bound ULP of the new state. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb state=%xh\n", ha->instance,
					    vha->vp_index, vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
				}
				loop_again = TRUE;
			}
		}

		/* Propagate a LIP reset to every bound virtual port. */
		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
			EL(ha, "processing LIP reset\n");
			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
			TASK_DAEMON_UNLOCK(ha);
			for (vha = ha; vha != NULL; vha = vha->vp_next) {
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
					    "cb reset\n", ha->instance,
					    vha->vp_index);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    FC_STATE_TARGET_PORT_RESET);
				}
			}
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
		    FIRMWARE_UP)) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		/* Perform the deferred ISP abort. */
		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = TRUE;
		}

		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
		    COMMAND_WAIT_NEEDED))) {
			/* Reset-aen markers for all ports, once at a time. */
			if (QL_IS_SET(ha->task_daemon_flags,
			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
					ha->task_daemon_flags |= RESET_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						ql_rst_aen(vha);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~RESET_ACTIVE;
					loop_again = TRUE;
				}
			}

			/* Loop resync, guarded against re-entry. */
			if (QL_IS_SET(ha->task_daemon_flags,
			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					(void) ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;

			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* N_Port point-to-point login needed. */
		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
		}

		/* Run one queued completion callback per pass. */
		head = &ha->callback_queue;
		if (head->first != NULL) {
			sp = head->first->base_address;
			link = &sp->cmd;

			/* Dequeue command. */
			ql_remove_link(head, link);

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
			    SRB_IN_TOKEN_ARRAY)) == 0);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}

			/* Acquire task daemon lock. */
			TASK_DAEMON_LOCK(ha);

			loop_again = TRUE;
		}

	} while (loop_again);
}
8326 
8327 /*
8328  * ql_idle_check
8329  *	Test for adapter is alive and well.
8330  *
8331  * Input:
8332  *	ha:	adapter state pointer.
8333  *
8334  * Context:
8335  *	Kernel context.
8336  */
8337 static void
8338 ql_idle_check(ql_adapter_state_t *ha)
8339 {
8340 	ddi_devstate_t	state;
8341 	int		rval;
8342 	ql_mbx_data_t	mr;
8343 
8344 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8345 
8346 	/* Firmware Ready Test. */
8347 	rval = ql_get_firmware_state(ha, &mr);
8348 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8349 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8350 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8351 		state = ddi_get_devstate(ha->dip);
8352 		if (state == DDI_DEVSTATE_UP) {
8353 			/*EMPTY*/
8354 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8355 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8356 		}
8357 		TASK_DAEMON_LOCK(ha);
8358 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8359 			EL(ha, "fstate_ready, isp_abort_needed\n");
8360 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8361 		}
8362 		TASK_DAEMON_UNLOCK(ha);
8363 	}
8364 
8365 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8366 }
8367 
8368 /*
8369  * ql_unsol_callback
8370  *	Handle unsolicited buffer callbacks.
8371  *
8372  * Input:
 *	sp = srb pointer; the adapter state is taken from sp->ha.
8375  *
8376  * Context:
8377  *	Kernel context.
8378  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Recover the unsolicited buffer and its frame routing fields. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	/*
	 * If the buffer is being torn down or the adapter is powering
	 * down, hand the buffer back to the FCA pool without notifying
	 * the transport.
	 */
	QL_UB_LOCK(ha);
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* Affected-ID payload follows the 4-byte RSCN header. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Requeue this callback so it is retried later. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* If target I/O is still outstanding, defer as for RSCN. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* Sync IP buffer DMA contents for CPU access before handoff. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the buffer up to the transport's unsolicited callback. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8480 
8481 /*
8482  * ql_send_logo
8483  *
8484  * Input:
 *	vha:	adapter state pointer.
8486  *	tq:	target queue pointer.
8487  *	done_q:	done queue pointer.
8488  *
8489  * Context:
8490  *	Interrupt or Kernel context, no mailbox commands allowed.
8491  */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Skip unassigned (0) and broadcast (0xffffff) addresses. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Only emulate a LOGO when the target is quiescent: no RSCN or
	 * PLOGI in progress, no logout already sent, and the loop is up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		/* Mark the target as needing re-login before more I/O. */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Attach a LUN queue; create one if none exists yet. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Deliver via caller's done queue or wake the daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8570 
8571 static int
8572 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8573 {
8574 	port_id_t	d_id;
8575 	ql_srb_t	*sp;
8576 	ql_link_t	*link;
8577 	int		sendup = 1;
8578 
8579 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8580 
8581 	DEVICE_QUEUE_LOCK(tq);
8582 	if (tq->outcnt) {
8583 		DEVICE_QUEUE_UNLOCK(tq);
8584 		sendup = 0;
8585 		(void) ql_abort_device(ha, tq, 1);
8586 		ql_delay(ha, 10000);
8587 	} else {
8588 		DEVICE_QUEUE_UNLOCK(tq);
8589 		TASK_DAEMON_LOCK(ha);
8590 
8591 		for (link = ha->pha->callback_queue.first; link != NULL;
8592 		    link = link->next) {
8593 			sp = link->base_address;
8594 			if (sp->flags & SRB_UB_CALLBACK) {
8595 				continue;
8596 			}
8597 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8598 
8599 			if (tq->d_id.b24 == d_id.b24) {
8600 				sendup = 0;
8601 				break;
8602 			}
8603 		}
8604 
8605 		TASK_DAEMON_UNLOCK(ha);
8606 	}
8607 
8608 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8609 
8610 	return (sendup);
8611 }
8612 
8613 static int
8614 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8615 {
8616 	fc_unsol_buf_t		*ubp;
8617 	ql_srb_t		*sp;
8618 	la_els_logi_t		*payload;
8619 	class_svc_param_t	*class3_param;
8620 
8621 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8622 
8623 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8624 	    LOOP_DOWN)) {
8625 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8626 		return (QL_FUNCTION_FAILED);
8627 	}
8628 
8629 	/* Locate a buffer to use. */
8630 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8631 	if (ubp == NULL) {
8632 		EL(ha, "Failed\n");
8633 		return (QL_FUNCTION_FAILED);
8634 	}
8635 
8636 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8637 	    ha->instance, tq->d_id.b24);
8638 
8639 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8640 
8641 	sp = ubp->ub_fca_private;
8642 
8643 	/* Set header. */
8644 	ubp->ub_frame.d_id = ha->d_id.b24;
8645 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8646 	ubp->ub_frame.s_id = tq->d_id.b24;
8647 	ubp->ub_frame.rsvd = 0;
8648 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8649 	    F_CTL_SEQ_INITIATIVE;
8650 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8651 	ubp->ub_frame.seq_cnt = 0;
8652 	ubp->ub_frame.df_ctl = 0;
8653 	ubp->ub_frame.seq_id = 0;
8654 	ubp->ub_frame.rx_id = 0xffff;
8655 	ubp->ub_frame.ox_id = 0xffff;
8656 
8657 	/* set payload. */
8658 	payload = (la_els_logi_t *)ubp->ub_buffer;
8659 	bzero(payload, sizeof (payload));
8660 
8661 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8662 	payload->common_service.fcph_version = 0x2006;
8663 	payload->common_service.cmn_features = 0x8800;
8664 
8665 	CFG_IST(ha, CFG_CTRL_242581) ?
8666 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8667 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8668 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8669 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8670 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8671 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8672 
8673 	payload->common_service.conc_sequences = 0xff;
8674 	payload->common_service.relative_offset = 0x03;
8675 	payload->common_service.e_d_tov = 0x7d0;
8676 
8677 	bcopy((void *)&tq->port_name[0],
8678 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8679 
8680 	bcopy((void *)&tq->node_name[0],
8681 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8682 
8683 	class3_param = (class_svc_param_t *)&payload->class_3;
8684 	class3_param->class_valid_svc_opt = 0x8000;
8685 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8686 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8687 	class3_param->conc_sequences = tq->class3_conc_sequences;
8688 	class3_param->open_sequences_per_exch =
8689 	    tq->class3_open_sequences_per_exch;
8690 
8691 	QL_UB_LOCK(ha);
8692 	sp->flags |= SRB_UB_CALLBACK;
8693 	QL_UB_UNLOCK(ha);
8694 
8695 	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8696 
8697 	if (done_q) {
8698 		ql_add_link_b(done_q, &sp->cmd);
8699 	} else {
8700 		ql_awaken_task_daemon(ha, sp, 0, 0);
8701 	}
8702 
8703 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8704 
8705 	return (QL_SUCCESS);
8706 }
8707 
8708 /*
8709  * Abort outstanding commands in the Firmware, clear internally
8710  * queued commands in the driver, Synchronize the target with
8711  * the Firmware
8712  */
8713 int
8714 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8715 {
8716 	ql_link_t	*link, *link2;
8717 	ql_lun_t	*lq;
8718 	int		rval = QL_SUCCESS;
8719 	ql_srb_t	*sp;
8720 	ql_head_t	done_q = { NULL, NULL };
8721 
8722 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8723 
8724 	/*
8725 	 * First clear, internally queued commands
8726 	 */
8727 	DEVICE_QUEUE_LOCK(tq);
8728 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8729 		lq = link->base_address;
8730 
8731 		link2 = lq->cmd.first;
8732 		while (link2 != NULL) {
8733 			sp = link2->base_address;
8734 			link2 = link2->next;
8735 
8736 			if (sp->flags & SRB_ABORT) {
8737 				continue;
8738 			}
8739 
8740 			/* Remove srb from device command queue. */
8741 			ql_remove_link(&lq->cmd, &sp->cmd);
8742 			sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8743 
8744 			/* Set ending status. */
8745 			sp->pkt->pkt_reason = CS_ABORTED;
8746 
8747 			/* Call done routine to handle completions. */
8748 			ql_add_link_b(&done_q, &sp->cmd);
8749 		}
8750 	}
8751 	DEVICE_QUEUE_UNLOCK(tq);
8752 
8753 	if (done_q.first != NULL) {
8754 		ql_done(done_q.first);
8755 	}
8756 
8757 	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8758 		rval = ql_abort_target(ha, tq, 0);
8759 	}
8760 
8761 	if (rval != QL_SUCCESS) {
8762 		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8763 	} else {
8764 		/*EMPTY*/
8765 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8766 		    ha->vp_index);
8767 	}
8768 
8769 	return (rval);
8770 }
8771 
8772 /*
8773  * ql_rcv_rscn_els
8774  *	Processes received RSCN extended link service.
8775  *
8776  * Input:
8777  *	ha:	adapter state pointer.
8778  *	mb:	array containing input mailbox registers.
8779  *	done_q:	done queue pointer.
8780  *
8781  * Context:
8782  *	Interrupt or Kernel context, no mailbox commands allowed.
8783  */
8784 void
8785 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8786 {
8787 	fc_unsol_buf_t		*ubp;
8788 	ql_srb_t		*sp;
8789 	fc_rscn_t		*rn;
8790 	fc_affected_id_t	*af;
8791 	port_id_t		d_id;
8792 
8793 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8794 
8795 	/* Locate a buffer to use. */
8796 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8797 	if (ubp != NULL) {
8798 		sp = ubp->ub_fca_private;
8799 
8800 		/* Set header. */
8801 		ubp->ub_frame.d_id = ha->d_id.b24;
8802 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8803 		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8804 		ubp->ub_frame.rsvd = 0;
8805 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8806 		    F_CTL_SEQ_INITIATIVE;
8807 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8808 		ubp->ub_frame.seq_cnt = 0;
8809 		ubp->ub_frame.df_ctl = 0;
8810 		ubp->ub_frame.seq_id = 0;
8811 		ubp->ub_frame.rx_id = 0xffff;
8812 		ubp->ub_frame.ox_id = 0xffff;
8813 
8814 		/* set payload. */
8815 		rn = (fc_rscn_t *)ubp->ub_buffer;
8816 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8817 
8818 		rn->rscn_code = LA_ELS_RSCN;
8819 		rn->rscn_len = 4;
8820 		rn->rscn_payload_len = 8;
8821 		d_id.b.al_pa = LSB(mb[2]);
8822 		d_id.b.area = MSB(mb[2]);
8823 		d_id.b.domain =	LSB(mb[1]);
8824 		af->aff_d_id = d_id.b24;
8825 		af->aff_format = MSB(mb[1]);
8826 
8827 		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8828 		    af->aff_d_id);
8829 
8830 		ql_update_rscn(ha, af);
8831 
8832 		QL_UB_LOCK(ha);
8833 		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8834 		QL_UB_UNLOCK(ha);
8835 		ql_add_link_b(done_q, &sp->cmd);
8836 	}
8837 
8838 	if (ubp == NULL) {
8839 		EL(ha, "Failed, get_unsolicited_buffer\n");
8840 	} else {
8841 		/*EMPTY*/
8842 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8843 	}
8844 }
8845 
8846 /*
8847  * ql_update_rscn
8848  *	Update devices from received RSCN.
8849  *
8850  * Input:
8851  *	ha:	adapter state pointer.
8852  *	af:	pointer to RSCN data.
8853  *
8854  * Context:
8855  *	Interrupt or Kernel context, no mailbox commands allowed.
8856  */
static void
ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single-port RSCN: flag just the one target, if known. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
			DEVICE_QUEUE_LOCK(tq);
			tq->flags |= TQF_RSCN_RCVD;
			DEVICE_QUEUE_UNLOCK(tq);
		}
		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
		    ha->instance);

		return;
	}

	/*
	 * Fabric/area/domain RSCN: walk every device queue and flag
	 * each target that falls inside the affected address range.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			default:
				break;
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8928 
8929 /*
8930  * ql_process_rscn
8931  *
8932  * Input:
8933  *	ha:	adapter state pointer.
8934  *	af:	RSCN payload pointer.
8935  *
8936  * Context:
8937  *	Kernel context.
8938  */
8939 static int
8940 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8941 {
8942 	int		sendit;
8943 	int		sendup = 1;
8944 	ql_link_t	*link;
8945 	uint16_t	index;
8946 	ql_tgt_t	*tq;
8947 
8948 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8949 
8950 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8951 		port_id_t d_id;
8952 
8953 		d_id.r.rsvd_1 = 0;
8954 		d_id.b24 = af->aff_d_id;
8955 
8956 		tq = ql_d_id_to_queue(ha, d_id);
8957 		if (tq) {
8958 			sendup = ql_process_rscn_for_device(ha, tq);
8959 		}
8960 
8961 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8962 
8963 		return (sendup);
8964 	}
8965 
8966 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8967 		for (link = ha->dev[index].first; link != NULL;
8968 		    link = link->next) {
8969 
8970 			tq = link->base_address;
8971 			if (tq == NULL) {
8972 				continue;
8973 			}
8974 
8975 			switch (af->aff_format) {
8976 			case FC_RSCN_FABRIC_ADDRESS:
8977 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8978 					sendit = ql_process_rscn_for_device(
8979 					    ha, tq);
8980 					if (sendup) {
8981 						sendup = sendit;
8982 					}
8983 				}
8984 				break;
8985 
8986 			case FC_RSCN_AREA_ADDRESS:
8987 				if ((tq->d_id.b24 & 0xffff00) ==
8988 				    af->aff_d_id) {
8989 					sendit = ql_process_rscn_for_device(
8990 					    ha, tq);
8991 
8992 					if (sendup) {
8993 						sendup = sendit;
8994 					}
8995 				}
8996 				break;
8997 
8998 			case FC_RSCN_DOMAIN_ADDRESS:
8999 				if ((tq->d_id.b24 & 0xff0000) ==
9000 				    af->aff_d_id) {
9001 					sendit = ql_process_rscn_for_device(
9002 					    ha, tq);
9003 
9004 					if (sendup) {
9005 						sendup = sendit;
9006 					}
9007 				}
9008 				break;
9009 
9010 			default:
9011 				break;
9012 			}
9013 		}
9014 	}
9015 
9016 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9017 
9018 	return (sendup);
9019 }
9020 
9021 /*
9022  * ql_process_rscn_for_device
9023  *
9024  * Input:
9025  *	ha:	adapter state pointer.
9026  *	tq:	target queue pointer.
9027  *
9028  * Context:
9029  *	Kernel context.
9030  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* 1 = post RSCN to transport now; 0 = defer until I/O drains. */
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Mailbox command; must run without the device lock. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		DEVICE_QUEUE_UNLOCK(tq);

		/* Abort and drain the target's outstanding commands. */
		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Still-outstanding I/O forces the caller to defer. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* No login; nothing to recover, just clear the flag. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9088 
9089 static int
9090 ql_handle_rscn_update(ql_adapter_state_t *ha)
9091 {
9092 	int			rval;
9093 	ql_tgt_t		*tq;
9094 	uint16_t		index, loop_id;
9095 	ql_dev_id_list_t	*list;
9096 	uint32_t		list_size;
9097 	port_id_t		d_id;
9098 	ql_mbx_data_t		mr;
9099 	ql_head_t		done_q = { NULL, NULL };
9100 
9101 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9102 
9103 	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9104 	list = kmem_zalloc(list_size, KM_SLEEP);
9105 	if (list == NULL) {
9106 		rval = QL_MEMORY_ALLOC_FAILED;
9107 		EL(ha, "kmem_zalloc failed=%xh\n", rval);
9108 		return (rval);
9109 	}
9110 
9111 	/*
9112 	 * Get data from RISC code d_id list to init each device queue.
9113 	 */
9114 	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9115 	if (rval != QL_SUCCESS) {
9116 		kmem_free(list, list_size);
9117 		EL(ha, "get_id_list failed=%xh\n", rval);
9118 		return (rval);
9119 	}
9120 
9121 	/* Acquire adapter state lock. */
9122 	ADAPTER_STATE_LOCK(ha);
9123 
9124 	/* Check for new devices */
9125 	for (index = 0; index < mr.mb[1]; index++) {
9126 		ql_dev_list(ha, list, index, &d_id, &loop_id);
9127 
9128 		if (VALID_DEVICE_ID(ha, loop_id)) {
9129 			d_id.r.rsvd_1 = 0;
9130 
9131 			tq = ql_d_id_to_queue(ha, d_id);
9132 			if (tq != NULL) {
9133 				continue;
9134 			}
9135 
9136 			tq = ql_dev_init(ha, d_id, loop_id);
9137 
9138 			/* Test for fabric device. */
9139 			if (d_id.b.domain != ha->d_id.b.domain ||
9140 			    d_id.b.area != ha->d_id.b.area) {
9141 				tq->flags |= TQF_FABRIC_DEVICE;
9142 			}
9143 
9144 			ADAPTER_STATE_UNLOCK(ha);
9145 			if (ql_get_port_database(ha, tq, PDF_NONE) !=
9146 			    QL_SUCCESS) {
9147 				tq->loop_id = PORT_NO_LOOP_ID;
9148 			}
9149 			ADAPTER_STATE_LOCK(ha);
9150 
9151 			/*
9152 			 * Send up a PLOGI about the new device
9153 			 */
9154 			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9155 				(void) ql_send_plogi(ha, tq, &done_q);
9156 			}
9157 		}
9158 	}
9159 
9160 	/* Release adapter state lock. */
9161 	ADAPTER_STATE_UNLOCK(ha);
9162 
9163 	if (done_q.first != NULL) {
9164 		ql_done(done_q.first);
9165 	}
9166 
9167 	kmem_free(list, list_size);
9168 
9169 	if (rval != QL_SUCCESS) {
9170 		EL(ha, "failed=%xh\n", rval);
9171 	} else {
9172 		/*EMPTY*/
9173 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9174 	}
9175 
9176 	return (rval);
9177 }
9178 
9179 /*
9180  * ql_free_unsolicited_buffer
9181  *	Frees allocated buffer.
9182  *
9183  * Input:
9184  *	ha = adapter state pointer.
9185  *	index = buffer array index.
9186  *	ADAPTER_STATE_LOCK must be already obtained.
9187  *
9188  * Context:
9189  *	Kernel context.
9190  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* Mailbox work; drop the state lock around it. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/* Leave the buffer allocated on failure. */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers use DMA memory; others use kmem. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Keep the outstanding-buffer count consistent. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9230 
9231 /*
9232  * ql_get_unsolicited_buffer
9233  *	Locates a free unsolicited buffer.
9234  *
9235  * Input:
9236  *	ha = adapter state pointer.
9237  *	type = buffer type.
9238  *
9239  * Returns:
9240  *	Unsolicited buffer pointer.
9241  *
9242  * Context:
9243  *	Interrupt or Kernel context, no mailbox commands allowed.
9244  */
9245 fc_unsol_buf_t *
9246 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9247 {
9248 	fc_unsol_buf_t	*ubp;
9249 	ql_srb_t	*sp;
9250 	uint16_t	index;
9251 
9252 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9253 
9254 	/* Locate a buffer to use. */
9255 	ubp = NULL;
9256 
9257 	QL_UB_LOCK(ha);
9258 	for (index = 0; index < QL_UB_LIMIT; index++) {
9259 		ubp = ha->ub_array[index];
9260 		if (ubp != NULL) {
9261 			sp = ubp->ub_fca_private;
9262 			if ((sp->ub_type == type) &&
9263 			    (sp->flags & SRB_UB_IN_FCA) &&
9264 			    (!(sp->flags & (SRB_UB_CALLBACK |
9265 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9266 				sp->flags |= SRB_UB_ACQUIRED;
9267 				ubp->ub_resp_flags = 0;
9268 				break;
9269 			}
9270 			ubp = NULL;
9271 		}
9272 	}
9273 	QL_UB_UNLOCK(ha);
9274 
9275 	if (ubp) {
9276 		ubp->ub_resp_token = NULL;
9277 		ubp->ub_class = FC_TRAN_CLASS3;
9278 	}
9279 
9280 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9281 
9282 	return (ubp);
9283 }
9284 
9285 /*
9286  * ql_ub_frame_hdr
9287  *	Processes received unsolicited buffers from ISP.
9288  *
9289  * Input:
9290  *	ha:	adapter state pointer.
9291  *	tq:	target queue pointer.
9292  *	index:	unsolicited buffer array index.
9293  *	done_q:	done queue pointer.
9294  *
9295  * Returns:
9296  *	ql local function return status code.
9297  *
9298  * Context:
9299  *	Interrupt or Kernel context, no mailbox commands allowed.
9300  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* If the buffer is being freed, return it to the FCA pool. */
	sp = ubp->ub_fca_private;
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only accept an untouched IP buffer owned by the ISP. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Synthesize the FC frame header for this sequence frame. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Frame carries at most the remaining sequence length. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance per-target sequence accounting. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Set F_CTL first/end-of-sequence bits for this frame. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Queue the buffer for the transport callback. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Buffer is not usable; log which precondition failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9424 
9425 /*
9426  * ql_timer
9427  *	One second timer function.
9428  *
9429  * Input:
9430  *	ql_hba.first = first link in adapter list.
9431  *
9432  * Context:
9433  *	Interrupt context, no mailbox commands allowed.
9434  */
9435 static void
9436 ql_timer(void *arg)
9437 {
9438 	ql_link_t		*link;
9439 	uint32_t		set_flags;
9440 	uint32_t		reset_flags;
9441 	ql_adapter_state_t	*ha = NULL, *vha;
9442 
9443 	QL_PRINT_6(CE_CONT, "started\n");
9444 
9445 	/* Acquire global state lock. */
9446 	GLOBAL_STATE_LOCK();
9447 	if (ql_timer_timeout_id == NULL) {
9448 		/* Release global state lock. */
9449 		GLOBAL_STATE_UNLOCK();
9450 		return;
9451 	}
9452 
9453 	for (link = ql_hba.first; link != NULL; link = link->next) {
9454 		ha = link->base_address;
9455 
9456 		/* Skip adapter if suspended of stalled. */
9457 		ADAPTER_STATE_LOCK(ha);
9458 		if (ha->flags & ADAPTER_SUSPENDED ||
9459 		    ha->task_daemon_flags & DRIVER_STALL) {
9460 			ADAPTER_STATE_UNLOCK(ha);
9461 			continue;
9462 		}
9463 		ha->flags |= ADAPTER_TIMER_BUSY;
9464 		ADAPTER_STATE_UNLOCK(ha);
9465 
9466 		QL_PM_LOCK(ha);
9467 		if (ha->power_level != PM_LEVEL_D0) {
9468 			QL_PM_UNLOCK(ha);
9469 
9470 			ADAPTER_STATE_LOCK(ha);
9471 			ha->flags &= ~ADAPTER_TIMER_BUSY;
9472 			ADAPTER_STATE_UNLOCK(ha);
9473 			continue;
9474 		}
9475 		ha->busy++;
9476 		QL_PM_UNLOCK(ha);
9477 
9478 		set_flags = 0;
9479 		reset_flags = 0;
9480 
9481 		/* Port retry timer handler. */
9482 		if (LOOP_READY(ha)) {
9483 			ADAPTER_STATE_LOCK(ha);
9484 			if (ha->port_retry_timer != 0) {
9485 				ha->port_retry_timer--;
9486 				if (ha->port_retry_timer == 0) {
9487 					set_flags |= PORT_RETRY_NEEDED;
9488 				}
9489 			}
9490 			ADAPTER_STATE_UNLOCK(ha);
9491 		}
9492 
9493 		/* Loop down timer handler. */
9494 		if (LOOP_RECONFIGURE(ha) == 0) {
9495 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9496 				ha->loop_down_timer--;
9497 				/*
9498 				 * give the firmware loop down dump flag
9499 				 * a chance to work.
9500 				 */
9501 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9502 					if (CFG_IST(ha,
9503 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9504 						(void) ql_binary_fw_dump(ha,
9505 						    TRUE);
9506 					}
9507 					EL(ha, "loop_down_reset, "
9508 					    "isp_abort_needed\n");
9509 					set_flags |= ISP_ABORT_NEEDED;
9510 				}
9511 			}
9512 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9513 				/* Command abort time handler. */
9514 				if (ha->loop_down_timer ==
9515 				    ha->loop_down_abort_time) {
9516 					ADAPTER_STATE_LOCK(ha);
9517 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9518 					ADAPTER_STATE_UNLOCK(ha);
9519 					set_flags |= ABORT_QUEUES_NEEDED;
9520 					EL(ha, "loop_down_abort_time, "
9521 					    "abort_queues_needed\n");
9522 				}
9523 
9524 				/* Watchdog timer handler. */
9525 				if (ha->watchdog_timer == 0) {
9526 					ha->watchdog_timer = WATCHDOG_TIME;
9527 				} else if (LOOP_READY(ha)) {
9528 					ha->watchdog_timer--;
9529 					if (ha->watchdog_timer == 0) {
9530 						for (vha = ha; vha != NULL;
9531 						    vha = vha->vp_next) {
9532 							ql_watchdog(vha,
9533 							    &set_flags,
9534 							    &reset_flags);
9535 						}
9536 						ha->watchdog_timer =
9537 						    WATCHDOG_TIME;
9538 					}
9539 				}
9540 			}
9541 		}
9542 
9543 		/* Idle timer handler. */
9544 		if (!DRIVER_SUSPENDED(ha)) {
9545 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9546 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9547 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9548 #endif
9549 				ha->idle_timer = 0;
9550 			}
9551 			if (ha->send_plogi_timer != NULL) {
9552 				ha->send_plogi_timer--;
9553 				if (ha->send_plogi_timer == NULL) {
9554 					set_flags |= SEND_PLOGI;
9555 				}
9556 			}
9557 		}
9558 		ADAPTER_STATE_LOCK(ha);
9559 		if (ha->restart_mpi_timer != 0) {
9560 			ha->restart_mpi_timer--;
9561 			if (ha->restart_mpi_timer == 0 &&
9562 			    ha->idc_restart_mpi != 0) {
9563 				ha->idc_restart_mpi = 0;
9564 				reset_flags |= TASK_DAEMON_STALLED_FLG;
9565 			}
9566 		}
9567 		if (ha->flash_acc_timer != 0) {
9568 			ha->flash_acc_timer--;
9569 			if (ha->flash_acc_timer == 0 &&
9570 			    ha->idc_flash_acc != 0) {
9571 				ha->idc_flash_acc = 1;
9572 				ha->idc_mb[1] = 0;
9573 				ha->idc_mb[2] = IDC_OPC_DRV_START;
9574 				set_flags |= IDC_ACK_NEEDED;
9575 			}
9576 		}
9577 		ADAPTER_STATE_UNLOCK(ha);
9578 
9579 		if (set_flags != 0 || reset_flags != 0) {
9580 			ql_awaken_task_daemon(ha, NULL, set_flags,
9581 			    reset_flags);
9582 		}
9583 
9584 		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9585 			ql_blink_led(ha);
9586 		}
9587 
9588 		/* Update the IO stats */
9589 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9590 			ha->xioctl->IOInputMByteCnt +=
9591 			    (ha->xioctl->IOInputByteCnt / 0x100000);
9592 			ha->xioctl->IOInputByteCnt %= 0x100000;
9593 		}
9594 
9595 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9596 			ha->xioctl->IOOutputMByteCnt +=
9597 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9598 			ha->xioctl->IOOutputByteCnt %= 0x100000;
9599 		}
9600 
9601 		ADAPTER_STATE_LOCK(ha);
9602 		ha->flags &= ~ADAPTER_TIMER_BUSY;
9603 		ADAPTER_STATE_UNLOCK(ha);
9604 
9605 		QL_PM_LOCK(ha);
9606 		ha->busy--;
9607 		QL_PM_UNLOCK(ha);
9608 	}
9609 
9610 	/* Restart timer, if not being stopped. */
9611 	if (ql_timer_timeout_id != NULL) {
9612 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9613 	}
9614 
9615 	/* Release global state lock. */
9616 	GLOBAL_STATE_UNLOCK();
9617 
9618 	QL_PRINT_6(CE_CONT, "done\n");
9619 }
9620 
9621 /*
9622  * ql_timeout_insert
9623  *	Function used to insert a command block onto the
9624  *	watchdog timer queue.
9625  *
 *	Note: Must ensure that pkt_time is not zero
 *			before calling ql_timeout_insert.
9628  *
9629  * Input:
9630  *	ha:	adapter state pointer.
9631  *	tq:	target queue pointer.
9632  *	sp:	SRB pointer.
9633  *	DEVICE_QUEUE_LOCK must be already obtained.
9634  *
9635  * Context:
9636  *	Kernel context.
9637  */
9638 /* ARGSUSED */
9639 static void
9640 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9641 {
9642 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9643 
9644 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9645 		/* Make sure timeout >= 2 * R_A_TOV */
9646 		sp->isp_timeout = (uint16_t)
9647 		    (sp->pkt->pkt_timeout < ha->r_a_tov ? ha->r_a_tov :
9648 		    sp->pkt->pkt_timeout);
9649 
9650 		/*
9651 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9652 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9653 		 * will expire in the next watchdog call, which could be in
9654 		 * 1 microsecond.
9655 		 *
9656 		 */
9657 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9658 		    WATCHDOG_TIME;
9659 		/*
9660 		 * Added an additional 10 to account for the
9661 		 * firmware timer drift which can occur with
9662 		 * very long timeout values.
9663 		 */
9664 		sp->wdg_q_time += 10;
9665 
9666 		/*
9667 		 * Add 6 more to insure watchdog does not timeout at the same
9668 		 * time as ISP RISC code timeout.
9669 		 */
9670 		sp->wdg_q_time += 6;
9671 
9672 		/* Save initial time for resetting watchdog time. */
9673 		sp->init_wdg_q_time = sp->wdg_q_time;
9674 
9675 		/* Insert command onto watchdog queue. */
9676 		ql_add_link_b(&tq->wdg, &sp->wdg);
9677 
9678 		sp->flags |= SRB_WATCHDOG_ENABLED;
9679 	} else {
9680 		sp->isp_timeout = 0;
9681 		sp->wdg_q_time = 0;
9682 		sp->init_wdg_q_time = 0;
9683 	}
9684 
9685 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9686 }
9687 
9688 /*
9689  * ql_watchdog
9690  *	Timeout handler that runs in interrupt context. The
9691  *	ql_adapter_state_t * argument is the parameter set up when the
9692  *	timeout was initialized (state structure pointer).
9693  *	Function used to update timeout values and if timeout
9694  *	has occurred command will be aborted.
9695  *
9696  * Input:
9697  *	ha:		adapter state pointer.
9698  *	set_flags:	task daemon flags to set.
9699  *	reset_flags:	task daemon flags to reset.
9700  *
9701  * Context:
9702  *	Interrupt context, no mailbox commands allowed.
9703  */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/* Try to acquire device queue lock. */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				/*
				 * Lock contended: setting next_device to
				 * NULL ends the scan of this hash chain
				 * rather than blocking in timer context.
				 */
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			/*
			 * Without link-down reporting, a target whose
			 * port-down retries are exhausted is left alone.
			 */
			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/*
			 * Loop through commands on watchdog queue.
			 * NOTE: "link" is reused here for the command
			 * list; the outer loop continues from the saved
			 * next_device pointer.
			 */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off the wdg list. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						/*
						 * Command was handed to the
						 * ISP: ql_cmd_timeout will
						 * request an ISP abort, so
						 * abandon the entire scan.
						 * Forcing index to the list
						 * size terminates the outer
						 * for loop as well.
						 */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						/*
						 * Still driver-internal:
						 * complete it and keep
						 * scanning this target.
						 */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9803 
9804 /*
9805  * ql_cmd_timeout
9806  *	Command timeout handler.
9807  *
9808  * Input:
9809  *	ha:		adapter state pointer.
9810  *	tq:		target queue pointer.
9811  *	sp:		SRB pointer.
9812  *	set_flags:	task daemon flags to set.
9813  *	reset_flags:	task daemon flags to reset.
9814  *
9815  * Context:
9816  *	Interrupt context, no mailbox commands allowed.
9817  */
9818 /* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Command timed out while still queued inside the driver
		 * (never sent to the ISP): complete it locally with a
		 * timeout status.  No ISP reset is needed in this case.
		 */

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		/*
		 * DEVICE_QUEUE_LOCK is held by the caller (ql_watchdog);
		 * drop it around ql_done() and re-take it before return
		 * so the caller's lock state is unchanged.
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);
	} else {
		/*
		 * Command timed out inside the ISP firmware: the only
		 * safe recovery is a full ISP abort/reset, requested via
		 * *set_flags for the task daemon to perform.
		 */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally capture a firmware dump for diagnosis. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9883 
9884 /*
9885  * ql_rst_aen
9886  *	Processes asynchronous reset.
9887  *
9888  * Input:
9889  *	ha = adapter state pointer.
9890  *
9891  * Context:
9892  *	Kernel context.
9893  */
static void
ql_rst_aen(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Issue marker command: a sync-all marker so the firmware
	 * resynchronizes all targets/LUNs following the reset AEN.
	 * The return status is deliberately ignored (best effort).
	 */
	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9904 
9905 /*
9906  * ql_cmd_wait
9907  *	Stall driver until all outstanding commands are returned.
9908  *
9909  * Input:
9910  *	ha = adapter state pointer.
9911  *
9912  * Context:
9913  *	Kernel context.
9914  */
9915 void
9916 ql_cmd_wait(ql_adapter_state_t *ha)
9917 {
9918 	uint16_t		index;
9919 	ql_link_t		*link;
9920 	ql_tgt_t		*tq;
9921 	ql_adapter_state_t	*vha;
9922 
9923 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9924 
9925 	/* Wait for all outstanding commands to be returned. */
9926 	(void) ql_wait_outstanding(ha);
9927 
9928 	/*
9929 	 * clear out internally queued commands
9930 	 */
9931 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9932 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9933 			for (link = vha->dev[index].first; link != NULL;
9934 			    link = link->next) {
9935 				tq = link->base_address;
9936 				if (tq &&
9937 				    (!(tq->prli_svc_param_word_3 &
9938 				    PRLI_W3_RETRY))) {
9939 					(void) ql_abort_device(vha, tq, 0);
9940 				}
9941 			}
9942 		}
9943 	}
9944 
9945 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9946 }
9947 
9948 /*
9949  * ql_wait_outstanding
9950  *	Wait for all outstanding commands to complete.
9951  *
9952  * Input:
9953  *	ha = adapter state pointer.
9954  *
9955  * Returns:
9956  *	index - the index for ql_srb into outstanding_cmds.
9957  *
9958  * Context:
9959  *	Kernel context.
9960  */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Overall wait budget: 3000 delay iterations of
	 * ql_delay(ha, 10000) each (presumably microseconds,
	 * i.e. ~30 seconds total — confirm ql_delay units).
	 */
	count = 3000;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/* Push any driver-pending commands out to the ISP first. */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			/* Restart the scan from the first handle slot. */
			index = 1;
		}
		/*
		 * An occupied slot whose command has not already been
		 * declared timed out is still owned by the ISP: wait,
		 * then rescan from the beginning (index = 0 here becomes
		 * 1 after the loop increment).
		 */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				index = 0;
			} else {
				/* Budget exhausted; give up on this slot. */
				EL(ha, "failed, sp=%ph\n", (void *)sp);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Index of the stuck command, or MAX_OUTSTANDING_COMMANDS if none. */
	return (index);
}
9991 
9992 /*
9993  * ql_restart_queues
9994  *	Restart device queues.
9995  *
9996  * Input:
9997  *	ha = adapter state pointer.
9998  *	DEVICE_QUEUE_LOCK must be released.
9999  *
10000  * Context:
10001  *	Interrupt or Kernel context, no mailbox commands allowed.
10002  */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk the physical port and all its virtual ports. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				/* Lift any suspension on this target queue. */
				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					/* Restart any LUN with queued cmds. */
					if (lq->cmd.first != NULL) {
						ql_next(vha, lq);
						/*
						 * NOTE(review): ql_next()
						 * appears to return with the
						 * device queue lock dropped,
						 * hence this re-acquire —
						 * confirm against ql_next.
						 */
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10043 
10044 /*
10045  * ql_iidma
10046  *	Setup iiDMA parameters to firmware
10047  *
10048  * Input:
10049  *	ha = adapter state pointer.
10050  *	DEVICE_QUEUE_LOCK must be released.
10051  *
10052  * Context:
10053  *	Interrupt or Kernel context, no mailbox commands allowed.
10054  */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA applies only to 24xx/25xx/81xx class adapters. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* Only touch targets flagged for an iiDMA update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip fabric-service handles and undefined rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Get the iiDMA persistent data */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				/*
				 * Property name is keyed by the target's
				 * 8-byte port WWN: "iidma-rate-<wwn>".
				 */
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				/* 0xffffffff means the property is absent. */
				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/*
						 * Only 25xx supports 8Gb;
						 * otherwise clamp to 4Gb.
						 */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/* Set the firmware's iiDMA rate (81xx excluded). */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10150 
10151 /*
10152  * ql_abort_queues
10153  *	Abort all commands on device queues.
10154  *
10155  * Input:
10156  *	ha = adapter state pointer.
10157  *
10158  * Context:
10159  *	Interrupt or Kernel context, no mailbox commands allowed.
10160  */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link1, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/* First drain anything queued pending ring space. */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			/* Restart the slot scan from the beginning. */
			index = 1;
		}
		sp = ha->outstanding_cmds[index];
		if (sp != NULL) {
			/* Detach the SRB from the handle slot. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* Drop the lock around the completion callback. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush commands still on device queues, on every port. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/*
				 * Set port unavailable status for
				 * all commands on device queue.
				 */
				DEVICE_QUEUE_LOCK(tq);

				for (link1 = tq->lun_queues.first;
				    link1 != NULL; link1 = link1->next) {
					lq = link1->base_address;

					link2 = lq->cmd.first;
					while (link2 != NULL) {
						sp = link2->base_address;

						/* Leave aborts in place. */
						if (sp->flags & SRB_ABORT) {
							link2 = link2->next;
							continue;
						}

						/* Rem srb from dev cmd q. */
						ql_remove_link(&lq->cmd,
						    &sp->cmd);
						sp->flags &=
						    ~SRB_IN_DEVICE_QUEUE;

						/* Release device queue lock */
						DEVICE_QUEUE_UNLOCK(tq);

						/* Set ending status. */
						sp->pkt->pkt_reason =
						    CS_PORT_UNAVAILABLE;

						/*
						 * Call done routine to handle
						 * completions.
						 */
						ql_done(&sp->cmd);

						/* Delay for system */
						ql_delay(ha, 10000);

						/*
						 * Acquire device queue lock.
						 * The list may have changed
						 * while unlocked, so restart
						 * from the head.
						 */
						DEVICE_QUEUE_LOCK(tq);
						link2 = lq->cmd.first;
					}
				}
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10269 
10270 /*
10271  * ql_loop_resync
10272  *	Resync with fibre channel devices.
10273  *
10274  * Input:
10275  *	ha = adapter state pointer.
10276  *	DEVICE_QUEUE_LOCK must be released.
10277  *
10278  * Returns:
10279  *	ql local function return status code.
10280  *
10281  * Context:
10282  *	Kernel context.
10283  */
10284 static int
10285 ql_loop_resync(ql_adapter_state_t *ha)
10286 {
10287 	int rval;
10288 
10289 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10290 
10291 	if (ha->flags & IP_INITIALIZED) {
10292 		(void) ql_shutdown_ip(ha);
10293 	}
10294 
10295 	rval = ql_fw_ready(ha, 10);
10296 
10297 	TASK_DAEMON_LOCK(ha);
10298 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10299 	TASK_DAEMON_UNLOCK(ha);
10300 
10301 	/* Set loop online, if it really is. */
10302 	if (rval == QL_SUCCESS) {
10303 		ql_loop_online(ha);
10304 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10305 	} else {
10306 		EL(ha, "failed, rval = %xh\n", rval);
10307 	}
10308 
10309 	return (rval);
10310 }
10311 
10312 /*
10313  * ql_loop_online
10314  *	Set loop online status if it really is online.
10315  *
10316  * Input:
10317  *	ha = adapter state pointer.
10318  *	DEVICE_QUEUE_LOCK must be released.
10319  *
10320  * Context:
10321  *	Kernel context.
10322  */
10323 void
10324 ql_loop_online(ql_adapter_state_t *ha)
10325 {
10326 	ql_adapter_state_t	*vha;
10327 
10328 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10329 
10330 	/* Inform the FC Transport that the hardware is online. */
10331 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10332 		if (!(vha->task_daemon_flags &
10333 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10334 			/* Restart IP if it was shutdown. */
10335 			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10336 			    !(vha->flags & IP_INITIALIZED)) {
10337 				(void) ql_initialize_ip(vha);
10338 				ql_isp_rcvbuf(vha);
10339 			}
10340 
10341 			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10342 			    FC_PORT_STATE_MASK(vha->state) !=
10343 			    FC_STATE_ONLINE) {
10344 				vha->state = FC_PORT_SPEED_MASK(vha->state);
10345 				if (vha->topology & QL_LOOP_CONNECTION) {
10346 					vha->state |= FC_STATE_LOOP;
10347 				} else {
10348 					vha->state |= FC_STATE_ONLINE;
10349 				}
10350 				TASK_DAEMON_LOCK(ha);
10351 				vha->task_daemon_flags |= FC_STATE_CHANGE;
10352 				TASK_DAEMON_UNLOCK(ha);
10353 			}
10354 		}
10355 	}
10356 
10357 	ql_awaken_task_daemon(ha, NULL, 0, 0);
10358 
10359 	/* Restart device queues that may have been stopped. */
10360 	ql_restart_queues(ha);
10361 
10362 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10363 }
10364 
10365 /*
10366  * ql_fca_handle_to_state
10367  *	Verifies handle to be correct.
10368  *
10369  * Input:
10370  *	fca_handle = pointer to state structure.
10371  *
10372  * Returns:
10373  *	NULL = failure
10374  *
10375  * Context:
10376  *	Kernel context.
10377  */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef	QL_DEBUG_ROUTINES
	/*
	 * Debug builds only: verify the opaque handle actually matches
	 * a known adapter (physical or virtual port) on the global HBA
	 * list before trusting the cast below.
	 */
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		/* Check the virtual ports hanging off this adapter. */
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(CE_CONT, "failed\n");
	}

	ASSERT(ha != NULL);
#endif /* QL_DEBUG_ROUTINES */

	/* Non-debug builds: the handle is trusted and simply cast. */
	return ((ql_adapter_state_t *)fca_handle);
}
10411 
10412 /*
10413  * ql_d_id_to_queue
10414  *	Locate device queue that matches destination ID.
10415  *
10416  * Input:
10417  *	ha = adapter state pointer.
10418  *	d_id = destination ID
10419  *
10420  * Returns:
10421  *	NULL = failure
10422  *
10423  * Context:
10424  *	Interrupt or Kernel context, no mailbox commands allowed.
10425  */
10426 ql_tgt_t *
10427 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10428 {
10429 	uint16_t	index;
10430 	ql_tgt_t	*tq;
10431 	ql_link_t	*link;
10432 
10433 	/* Get head queue index. */
10434 	index = ql_alpa_to_index[d_id.b.al_pa];
10435 
10436 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10437 		tq = link->base_address;
10438 		if (tq->d_id.b24 == d_id.b24 &&
10439 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10440 			return (tq);
10441 		}
10442 	}
10443 
10444 	return (NULL);
10445 }
10446 
10447 /*
10448  * ql_loop_id_to_queue
10449  *	Locate device queue that matches loop ID.
10450  *
10451  * Input:
10452  *	ha:		adapter state pointer.
10453  *	loop_id:	destination ID
10454  *
10455  * Returns:
10456  *	NULL = failure
10457  *
10458  * Context:
10459  *	Interrupt or Kernel context, no mailbox commands allowed.
10460  */
10461 ql_tgt_t *
10462 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10463 {
10464 	uint16_t	index;
10465 	ql_tgt_t	*tq;
10466 	ql_link_t	*link;
10467 
10468 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10469 		for (link = ha->dev[index].first; link != NULL;
10470 		    link = link->next) {
10471 			tq = link->base_address;
10472 			if (tq->loop_id == loop_id) {
10473 				return (tq);
10474 			}
10475 		}
10476 	}
10477 
10478 	return (NULL);
10479 }
10480 
10481 /*
10482  * ql_kstat_update
10483  *	Updates kernel statistics.
10484  *
10485  * Input:
10486  *	ksp - driver kernel statistics structure pointer.
10487  *	rw - function to perform
10488  *
10489  * Returns:
10490  *	0 or EACCES
10491  *
10492  * Context:
10493  *	Kernel context.
10494  */
10495 /* ARGSUSED */
10496 static int
10497 ql_kstat_update(kstat_t *ksp, int rw)
10498 {
10499 	int			rval;
10500 
10501 	QL_PRINT_3(CE_CONT, "started\n");
10502 
10503 	if (rw == KSTAT_WRITE) {
10504 		rval = EACCES;
10505 	} else {
10506 		rval = 0;
10507 	}
10508 
10509 	if (rval != 0) {
10510 		/*EMPTY*/
10511 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10512 	} else {
10513 		/*EMPTY*/
10514 		QL_PRINT_3(CE_CONT, "done\n");
10515 	}
10516 	return (rval);
10517 }
10518 
10519 /*
10520  * ql_load_flash
10521  *	Loads flash.
10522  *
10523  * Input:
10524  *	ha:	adapter state pointer.
10525  *	dp:	data pointer.
10526  *	size:	data length.
10527  *
10528  * Returns:
10529  *	ql local function return status code.
10530  *
10531  * Context:
10532  *	Kernel context.
10533  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* 24xx/25xx/81xx parts use a different flash programming path. */
	if (CFG_IST(ha, CFG_CTRL_242581)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Default: image must fit in 0x20000 bytes at offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/*
			 * Half-chip update: upper half (0x40000) holds
			 * the FPGA image when ql_flash_sbus_fpga is set.
			 */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash, one byte at a time. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always restore the flash to its protected state. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10605 
10606 /*
10607  * ql_program_flash_address
10608  *	Program flash address.
10609  *
10610  * Input:
10611  *	ha = adapter state pointer.
10612  *	addr = flash byte address.
10613  *	data = data to be written to flash.
10614  *
10615  * Returns:
10616  *	ql local function return status code.
10617  *
10618  * Context:
10619  *	Kernel context.
10620  */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	int rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS parts take the program command without the unlock. */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/*
		 * Write Program Command Sequence: standard JEDEC
		 * unlock (0x5555/0xaa, 0x2aaa/0x55) + program (0xa0),
		 * then the target byte itself.
		 */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10650 
10651 /*
10652  * ql_erase_flash
10653  *	Erases entire flash.
10654  *
10655  * Input:
10656  *	ha = adapter state pointer.
10657  *
10658  * Returns:
10659  *	ql local function return status code.
10660  *
10661  * Context:
10662  *	Kernel context.
10663  */
10664 int
10665 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10666 {
10667 	int		rval;
10668 	uint32_t	erase_delay = 2000000;
10669 	uint32_t	sStartAddr;
10670 	uint32_t	ssize;
10671 	uint32_t	cnt;
10672 	uint8_t		*bfp;
10673 	uint8_t		*tmp;
10674 
10675 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10676 
10677 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10678 
10679 		if (ql_flash_sbus_fpga == 1) {
10680 			ssize = QL_SBUS_FCODE_SIZE;
10681 			sStartAddr = QL_FCODE_OFFSET;
10682 		} else {
10683 			ssize = QL_FPGA_SIZE;
10684 			sStartAddr = QL_FPGA_OFFSET;
10685 		}
10686 
10687 		erase_delay = 20000000;
10688 
10689 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10690 
10691 		/* Save the section of flash we're not updating to buffer */
10692 		tmp = bfp;
10693 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10694 			/* Allow other system activity. */
10695 			if (cnt % 0x1000 == 0) {
10696 				ql_delay(ha, 10000);
10697 			}
10698 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10699 		}
10700 	}
10701 
10702 	/* Chip Erase Command Sequence */
10703 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10704 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10705 	ql_write_flash_byte(ha, 0x5555, 0x80);
10706 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10707 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10708 	ql_write_flash_byte(ha, 0x5555, 0x10);
10709 
10710 	ql_delay(ha, erase_delay);
10711 
10712 	/* Wait for erase to complete. */
10713 	rval = ql_poll_flash(ha, 0, 0x80);
10714 
10715 	if (rval != QL_SUCCESS) {
10716 		EL(ha, "failed=%xh\n", rval);
10717 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10718 			kmem_free(bfp, ssize);
10719 		}
10720 		return (rval);
10721 	}
10722 
10723 	/* restore the section we saved in the buffer */
10724 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10725 		/* Restore the section we saved off */
10726 		tmp = bfp;
10727 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10728 			/* Allow other system activity. */
10729 			if (cnt % 0x1000 == 0) {
10730 				ql_delay(ha, 10000);
10731 			}
10732 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10733 			if (rval != QL_SUCCESS) {
10734 				break;
10735 			}
10736 		}
10737 
10738 		kmem_free(bfp, ssize);
10739 	}
10740 
10741 	if (rval != QL_SUCCESS) {
10742 		EL(ha, "failed=%xh\n", rval);
10743 	} else {
10744 		/*EMPTY*/
10745 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10746 	}
10747 	return (rval);
10748 }
10749 
10750 /*
10751  * ql_poll_flash
10752  *	Polls flash for completion.
10753  *
10754  * Input:
10755  *	ha = adapter state pointer.
10756  *	addr = flash byte address.
10757  *	data = data to be polled.
10758  *
10759  * Returns:
10760  *	ql local function return status code.
10761  *
10762  * Context:
10763  *	Kernel context.
10764  */
10765 int
10766 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10767 {
10768 	uint8_t		flash_data;
10769 	uint32_t	cnt;
10770 	int		rval = QL_FUNCTION_FAILED;
10771 
10772 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10773 
10774 	poll_data = (uint8_t)(poll_data & BIT_7);
10775 
10776 	/* Wait for 30 seconds for command to finish. */
10777 	for (cnt = 30000000; cnt; cnt--) {
10778 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10779 
10780 		if ((flash_data & BIT_7) == poll_data) {
10781 			rval = QL_SUCCESS;
10782 			break;
10783 		}
10784 		if (flash_data & BIT_5 && cnt > 2) {
10785 			cnt = 2;
10786 		}
10787 		drv_usecwait(1);
10788 	}
10789 
10790 	if (rval != QL_SUCCESS) {
10791 		EL(ha, "failed=%xh\n", rval);
10792 	} else {
10793 		/*EMPTY*/
10794 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10795 	}
10796 	return (rval);
10797 }
10798 
10799 /*
10800  * ql_flash_enable
10801  *	Setup flash for reading/writing.
10802  *
10803  * Input:
10804  *	ha = adapter state pointer.
10805  *
10806  * Context:
10807  *	Kernel context.
10808  */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the write-enable bit in the FPGA config register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Set the flash-enable bit in the ISP control/status reg. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Throw-away read; result intentionally discarded. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10842 
10843 /*
10844  * ql_flash_disable
10845  *	Disable flash and allow RISC to run.
10846  *
10847  * Input:
10848  *	ha = adapter state pointer.
10849  *
10850  * Context:
10851  *	Kernel context.
10852  */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the write-enable bit in the FPGA config register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the flash-enable bit; lets the RISC run again. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10880 
10881 /*
10882  * ql_write_flash_byte
10883  *	Write byte to flash.
10884  *
10885  * Input:
10886  *	ha = adapter state pointer.
10887  *	addr = flash byte address.
10888  *	data = data to be written.
10889  *
10890  * Context:
10891  *	Kernel context.
10892  */
void
ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS FPGA path: latch low/high address, then the data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
		    (uint16_t)data);
	} else {
		uint16_t bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);

		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits come from addr bits 16-19. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it changes. */
			if (addr & BIT_16 && !(bank_select &
			    ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) && bank_select &
			    ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this inner CFG_SBUS_CARD test can never be
		 * true here -- the enclosing else already established the
		 * card is not SBUS, so only the IOMAP branch executes.
		 * Confirm whether a different condition was intended.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			WRT16_IOMAP_REG(ha, flash_data, data);
		}
	}
}
10940 
10941 /*
10942  * ql_read_flash_byte
10943  *	Reads byte from flash, but must read a word from chip.
10944  *
10945  * Input:
10946  *	ha = adapter state pointer.
10947  *	addr = flash byte address.
10948  *
10949  * Returns:
10950  *	byte from flash.
10951  *
10952  * Context:
10953  *	Kernel context.
10954  */
uint8_t
ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
{
	uint8_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS FPGA path: latch low/high address, then read data. */
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
		    LSW(addr));
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
		    MSW(addr));
		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
	} else {
		uint16_t	bank_select;

		/* Setup bit 16 of flash address. */
		bank_select = RD16_IO_REG(ha, ctrl_status);
		if (CFG_IST(ha, CFG_CTRL_6322)) {
			/* 6322: bank bits come from addr bits 16-19. */
			bank_select = (uint16_t)(bank_select & ~0xf0);
			bank_select = (uint16_t)(bank_select |
			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
			WRT16_IO_REG(ha, ctrl_status, bank_select);
		} else {
			/* Toggle the 64K bank bit only when it changes. */
			if (addr & BIT_16 &&
			    !(bank_select & ISP_FLASH_64K_BANK)) {
				bank_select = (uint16_t)(bank_select |
				    ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			} else if (!(addr & BIT_16) &&
			    bank_select & ISP_FLASH_64K_BANK) {
				bank_select = (uint16_t)(bank_select &
				    ~ISP_FLASH_64K_BANK);
				WRT16_IO_REG(ha, ctrl_status, bank_select);
			}
		}

		/*
		 * NOTE(review): this inner CFG_SBUS_CARD test can never be
		 * true here -- the enclosing else already established the
		 * card is not SBUS, so only the IOMAP branch executes.
		 * Confirm whether a different condition was intended.
		 */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			WRT16_IO_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IO_REG(ha, flash_data);
		} else {
			WRT16_IOMAP_REG(ha, flash_address, addr);
			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
		}
	}

	return (data);
}
11004 
11005 /*
11006  * ql_24xx_flash_id
11007  *	Get flash IDs.
11008  *
11009  * Input:
11010  *	ha:		adapter state pointer.
11011  *
11012  * Returns:
11013  *	ql local function return status code.
11014  *
11015  * Context:
11016  *	Kernel context.
11017  */
11018 int
11019 ql_24xx_flash_id(ql_adapter_state_t *vha)
11020 {
11021 	int			rval;
11022 	uint32_t		fdata = 0;
11023 	ql_adapter_state_t	*ha = vha->pha;
11024 	ql_xioctl_t		*xp = ha->xioctl;
11025 
11026 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11027 
11028 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11029 
11030 	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11031 		fdata = 0;
11032 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11033 		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11034 	}
11035 
11036 	if (rval != QL_SUCCESS) {
11037 		EL(ha, "24xx read_flash failed=%xh\n", rval);
11038 	} else if (fdata != 0) {
11039 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11040 		xp->fdesc.flash_id = MSB(LSW(fdata));
11041 		xp->fdesc.flash_len = LSB(MSW(fdata));
11042 	} else {
11043 		xp->fdesc.flash_manuf = ATMEL_FLASH;
11044 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11045 		xp->fdesc.flash_len = 0;
11046 	}
11047 
11048 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11049 
11050 	return (rval);
11051 }
11052 
11053 /*
11054  * ql_24xx_load_flash
11055  *	Loads flash.
11056  *
11057  * Input:
11058  *	ha = adapter state pointer.
11059  *	dp = data pointer.
11060  *	size = data length in bytes.
11061  *	faddr = 32bit word flash byte address.
11062  *
11063  * Returns:
11064  *	ql local function return status code.
11065  *
11066  * Context:
11067  *	Kernel context.
11068  */
int
ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
    uint32_t faddr)
{
	int			rval;
	uint32_t		cnt, rest_addr, fdata, wc;
	dma_mem_t		dmabuf = {0};
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
	    ha->instance, faddr, size);

	/* start address must be 32 bit word aligned */
	if ((faddr & 0x3) != 0) {
		EL(ha, "incorrect buffer size alignment\n");
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	/* Allocate DMA buffer; only 25xx/81xx use the DMA burst path. */
	if (CFG_IST(ha, CFG_CTRL_2581)) {
		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
		    QL_SUCCESS) {
			EL(ha, "dma alloc failed, rval=%xh\n", rval);
			return (rval);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable flash write */
	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
		GLOBAL_HW_UNLOCK();
		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
		ql_free_phys(ha, &dmabuf);
		return (rval);
	}

	/* setup mask of address range within a sector */
	rest_addr = (xp->fdesc.block_size - 1) >> 2;

	faddr = faddr >> 2;	/* flash gets 32 bit words */

	/*
	 * Write data to flash.
	 */
	cnt = 0;
	size = (size + 3) >> 2;	/* Round up & convert to dwords */

	while (cnt < size) {
		/* Beginning of a sector? Erase it before programming. */
		if ((faddr & rest_addr) == 0) {
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				/* 81xx: erase via Flash Access Control mbx. */
				fdata = ha->flash_data_addr | faddr;
				rval = ql_flash_access(ha,
				    FAC_ERASE_SECTOR, fdata, fdata +
				    rest_addr, 0);
				if (rval != QL_SUCCESS) {
					EL(ha, "erase sector status="
					    "%xh, start=%xh, end=%xh"
					    "\n", rval, fdata,
					    fdata + rest_addr);
					break;
				}
			} else {
				/* Byte-swap the sector address for the cmd. */
				fdata = (faddr & ~rest_addr) << 2;
				fdata = (fdata & 0xff00) |
				    (fdata << 16 & 0xff0000) |
				    (fdata >> 16 & 0xff);

				if (rest_addr == 0x1fff) {
					/* 32kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x0352,
					    fdata);
				} else {
					/* 64kb sector block erase */
					rval = ql_24xx_write_flash(ha,
					    FLASH_CONF_ADDR | 0x03d8,
					    fdata);
				}
				if (rval != QL_SUCCESS) {
					EL(ha, "Unable to flash sector"
					    ": address=%xh\n", faddr);
					break;
				}
			}
		}

		/* Write data */
		if (CFG_IST(ha, CFG_CTRL_2581) &&
		    ((faddr & 0x3f) == 0)) {
			/*
			 * Limit write up to sector boundary.
			 */
			wc = ((~faddr & (rest_addr>>1)) + 1);

			if (size - cnt < wc) {
				wc = size - cnt;
			}

			/* Stage the chunk in the DMA buffer. */
			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
			    (uint8_t *)dmabuf.bp, wc<<2,
			    DDI_DEV_AUTOINCR);

			/* Burst-program wc dwords via RISC RAM write. */
			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
			    faddr, dmabuf.cookie.dmac_laddress, wc);
			if (rval != QL_SUCCESS) {
				EL(ha, "unable to dma to flash "
				    "address=%xh\n", faddr << 2);
				break;
			}

			cnt += wc;
			faddr += wc;
			dp += wc << 2;
		} else {
			/* Assemble one little-endian dword and program it. */
			fdata = *dp++;
			fdata |= *dp++ << 8;
			fdata |= *dp++ << 16;
			fdata |= *dp++ << 24;
			rval = ql_24xx_write_flash(ha,
			    ha->flash_data_addr | faddr, fdata);
			if (rval != QL_SUCCESS) {
				EL(ha, "Unable to program flash "
				    "address=%xh data=%xh\n", faddr,
				    *dp);
				break;
			}
			cnt++;
			faddr++;

			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
		}
	}

	/* Re-protect and release resources regardless of outcome. */
	ql_24xx_protect_flash(ha);

	ql_free_phys(ha, &dmabuf);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
11223 
11224 /*
11225  * ql_24xx_read_flash
11226  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11227  *
11228  * Input:
11229  *	ha:	adapter state pointer.
11230  *	faddr:	NVRAM/FLASH address.
11231  *	bp:	data pointer.
11232  *
11233  * Returns:
11234  *	ql local function return status code.
11235  *
11236  * Context:
11237  *	Kernel context.
11238  */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Writing the address with FLASH_DATA_FLAG clear starts a read. */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete (up to 300000 x 10us = 3s). */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/*
	 * The data register is read unconditionally; its contents are
	 * unreliable when rval != QL_SUCCESS.
	 */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11272 
11273 /*
11274  * ql_24xx_write_flash
11275  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11276  *
11277  * Input:
11278  *	ha:	adapter state pointer.
11279  *	addr:	NVRAM/FLASH address.
11280  *	value:	data.
11281  *
11282  * Returns:
11283  *	ql local function return status code.
11284  *
11285  * Context:
11286  *	Kernel context.
11287  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Data first; writing the address with FLASH_DATA_FLAG starts it. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete (up to 3000000 x 10us = 30s). */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/* Config writes: poll the busy status bit. */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11329 /*
11330  * ql_24xx_unprotect_flash
11331  *	Enable writes
11332  *
11333  * Input:
11334  *	ha:	adapter state pointer.
11335  *
11336  * Returns:
11337  *	ql local function return status code.
11338  *
11339  * Context:
11340  *	Kernel context.
11341  */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with running firmware: use Flash Access Control. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
		/*
		 * NOTE(review): 81xx without FIRMWARE_UP falls through to
		 * the register writes below without setting
		 * ISP_FLASH_ENABLE -- confirm this is intended.
		 */
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/*
	 * Individual unprotect writes above are deliberately unchecked;
	 * this path always reports success.
	 */
	return (QL_SUCCESS);
}
11395 
11396 /*
11397  * ql_24xx_protect_flash
11398  *	Disable writes
11399  *
11400  * Input:
11401  *	ha:	adapter state pointer.
11402  *
11403  * Context:
11404  *	Kernel context.
11405  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* 81xx with running firmware: use Flash Access Control. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11467 
11468 /*
11469  * ql_dump_firmware
11470  *	Save RISC code state information.
11471  *
11472  * Input:
11473  *	ha = adapter state pointer.
11474  *
11475  * Returns:
11476  *	QL local function return status code.
11477  *
11478  * Context:
11479  *	Kernel context.
11480  */
static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/*
	 * Nothing to do when a dump is in progress or a valid dump is
	 * already waiting to be uploaded.
	 */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Stall the driver before taking the dump. */
	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);

	/*
	 * Wait for all outstanding commands to complete
	 */
	(void) ql_wait_outstanding(ha);

	/* Dump firmware. */
	rval = ql_binary_fw_dump(ha, TRUE);

	/* Do abort to force restart. */
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a prior dump is already held -- not an error. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
11549 
11550 /*
11551  * ql_binary_fw_dump
11552  *	Dumps binary data from firmware.
11553  *
11554  * Input:
11555  *	ha = adapter state pointer.
11556  *	lock_needed = mailbox lock needed.
11557  *
11558  * Returns:
11559  *	ql local function return status code.
11560  *
11561  * Context:
11562  *	Interrupt or Kernel context, no mailbox commands allowed.
11563  */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	clock_t			timer;
	mbx_cmd_t		mc;
	mbx_cmd_t		*mcp = &mc;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/* Refuse if a dump is in progress or one is waiting for upload. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Mark dump-in-progress before dropping the lock. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {

		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			timer = ddi_get_lbolt();
			timer += (ha->mcp->timeout + 2) *
			    drv_usectohz(1000000);
			if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);
		mcp->timeout = 120;
		ha->mcp = mcp;

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/* Size the dump buffer by chip family. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
	    NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
	} else {
		/* Dispatch to the chip-specific binary dump routine. */
		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else {
			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	if (rval != QL_SUCCESS) {
		/* Failed: release the buffer and clear all dump state. */
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/* Success: dump is valid and awaiting upload. */
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
11695 
11696 /*
11697  * ql_ascii_fw_dump
11698  *	Converts firmware binary dump to ascii.
11699  *
11700  * Input:
11701  *	ha = adapter state pointer.
11702  *	bptr = buffer pointer.
11703  *
11704  * Returns:
11705  *	Amount of data buffer used.
11706  *
11707  * Context:
11708  *	Kernel context.
11709  */
11710 size_t
11711 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11712 {
11713 	uint32_t		cnt;
11714 	caddr_t			bp;
11715 	int			mbox_cnt;
11716 	ql_adapter_state_t	*ha = vha->pha;
11717 	ql_fw_dump_t		*fw = ha->ql_dump_ptr;
11718 
11719 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11720 		return (ql_24xx_ascii_fw_dump(ha, bufp));
11721 	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11722 		return (ql_25xx_ascii_fw_dump(ha, bufp));
11723 	}
11724 
11725 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11726 
11727 	if (CFG_IST(ha, CFG_CTRL_2300)) {
11728 		(void) sprintf(bufp, "\nISP 2300IP ");
11729 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
11730 		(void) sprintf(bufp, "\nISP 6322FLX ");
11731 	} else {
11732 		(void) sprintf(bufp, "\nISP 2200IP ");
11733 	}
11734 
11735 	bp = bufp + strlen(bufp);
11736 	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
11737 	    ha->fw_major_version, ha->fw_minor_version,
11738 	    ha->fw_subminor_version);
11739 
11740 	(void) strcat(bufp, "\nPBIU Registers:");
11741 	bp = bufp + strlen(bufp);
11742 	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
11743 		if (cnt % 8 == 0) {
11744 			*bp++ = '\n';
11745 		}
11746 		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
11747 		bp = bp + 6;
11748 	}
11749 
11750 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11751 		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
11752 		    "registers:");
11753 		bp = bufp + strlen(bufp);
11754 		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
11755 			if (cnt % 8 == 0) {
11756 				*bp++ = '\n';
11757 			}
11758 			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
11759 			bp = bp + 6;
11760 		}
11761 	}
11762 
11763 	(void) strcat(bp, "\n\nMailbox Registers:");
11764 	bp = bufp + strlen(bufp);
11765 	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
11766 	for (cnt = 0; cnt < mbox_cnt; cnt++) {
11767 		if (cnt % 8 == 0) {
11768 			*bp++ = '\n';
11769 		}
11770 		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
11771 		bp = bp + 6;
11772 	}
11773 
11774 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11775 		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
11776 		bp = bufp + strlen(bufp);
11777 		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
11778 			if (cnt % 8 == 0) {
11779 				*bp++ = '\n';
11780 			}
11781 			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
11782 			bp = bp + 6;
11783 		}
11784 	}
11785 
11786 	(void) strcat(bp, "\n\nDMA Registers:");
11787 	bp = bufp + strlen(bufp);
11788 	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
11789 		if (cnt % 8 == 0) {
11790 			*bp++ = '\n';
11791 		}
11792 		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
11793 		bp = bp + 6;
11794 	}
11795 
11796 	(void) strcat(bp, "\n\nRISC Hardware Registers:");
11797 	bp = bufp + strlen(bufp);
11798 	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
11799 		if (cnt % 8 == 0) {
11800 			*bp++ = '\n';
11801 		}
11802 		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
11803 		bp = bp + 6;
11804 	}
11805 
11806 	(void) strcat(bp, "\n\nRISC GP0 Registers:");
11807 	bp = bufp + strlen(bufp);
11808 	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
11809 		if (cnt % 8 == 0) {
11810 			*bp++ = '\n';
11811 		}
11812 		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
11813 		bp = bp + 6;
11814 	}
11815 
11816 	(void) strcat(bp, "\n\nRISC GP1 Registers:");
11817 	bp = bufp + strlen(bufp);
11818 	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
11819 		if (cnt % 8 == 0) {
11820 			*bp++ = '\n';
11821 		}
11822 		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
11823 		bp = bp + 6;
11824 	}
11825 
11826 	(void) strcat(bp, "\n\nRISC GP2 Registers:");
11827 	bp = bufp + strlen(bufp);
11828 	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
11829 		if (cnt % 8 == 0) {
11830 			*bp++ = '\n';
11831 		}
11832 		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
11833 		bp = bp + 6;
11834 	}
11835 
11836 	(void) strcat(bp, "\n\nRISC GP3 Registers:");
11837 	bp = bufp + strlen(bufp);
11838 	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
11839 		if (cnt % 8 == 0) {
11840 			*bp++ = '\n';
11841 		}
11842 		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
11843 		bp = bp + 6;
11844 	}
11845 
11846 	(void) strcat(bp, "\n\nRISC GP4 Registers:");
11847 	bp = bufp + strlen(bufp);
11848 	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
11849 		if (cnt % 8 == 0) {
11850 			*bp++ = '\n';
11851 		}
11852 		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
11853 		bp = bp + 6;
11854 	}
11855 
11856 	(void) strcat(bp, "\n\nRISC GP5 Registers:");
11857 	bp = bufp + strlen(bufp);
11858 	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
11859 		if (cnt % 8 == 0) {
11860 			*bp++ = '\n';
11861 		}
11862 		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
11863 		bp = bp + 6;
11864 	}
11865 
11866 	(void) strcat(bp, "\n\nRISC GP6 Registers:");
11867 	bp = bufp + strlen(bufp);
11868 	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
11869 		if (cnt % 8 == 0) {
11870 			*bp++ = '\n';
11871 		}
11872 		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
11873 		bp = bp + 6;
11874 	}
11875 
11876 	(void) strcat(bp, "\n\nRISC GP7 Registers:");
11877 	bp = bufp + strlen(bufp);
11878 	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
11879 		if (cnt % 8 == 0) {
11880 			*bp++ = '\n';
11881 		}
11882 		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
11883 		bp = bp + 6;
11884 	}
11885 
11886 	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
11887 	bp = bufp + strlen(bufp);
11888 	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
11889 		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
11890 		    CFG_CTRL_6322)) == 0))) {
11891 			break;
11892 		}
11893 		if (cnt % 8 == 0) {
11894 			*bp++ = '\n';
11895 		}
11896 		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
11897 		bp = bp + 6;
11898 	}
11899 
11900 	(void) strcat(bp, "\n\nFPM B0 Registers:");
11901 	bp = bufp + strlen(bufp);
11902 	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
11903 		if (cnt % 8 == 0) {
11904 			*bp++ = '\n';
11905 		}
11906 		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
11907 		bp = bp + 6;
11908 	}
11909 
11910 	(void) strcat(bp, "\n\nFPM B1 Registers:");
11911 	bp = bufp + strlen(bufp);
11912 	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
11913 		if (cnt % 8 == 0) {
11914 			*bp++ = '\n';
11915 		}
11916 		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
11917 		bp = bp + 6;
11918 	}
11919 
11920 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11921 		(void) strcat(bp, "\n\nCode RAM Dump:");
11922 		bp = bufp + strlen(bufp);
11923 		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
11924 			if (cnt % 8 == 0) {
11925 				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
11926 				bp = bp + 8;
11927 			}
11928 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11929 			bp = bp + 6;
11930 		}
11931 
11932 		(void) strcat(bp, "\n\nStack RAM Dump:");
11933 		bp = bufp + strlen(bufp);
11934 		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
11935 			if (cnt % 8 == 0) {
11936 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
11937 				bp = bp + 8;
11938 			}
11939 			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
11940 			bp = bp + 6;
11941 		}
11942 
11943 		(void) strcat(bp, "\n\nData RAM Dump:");
11944 		bp = bufp + strlen(bufp);
11945 		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
11946 			if (cnt % 8 == 0) {
11947 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
11948 				bp = bp + 8;
11949 			}
11950 			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
11951 			bp = bp + 6;
11952 		}
11953 	} else {
11954 		(void) strcat(bp, "\n\nRISC SRAM:");
11955 		bp = bufp + strlen(bufp);
11956 		for (cnt = 0; cnt < 0xf000; cnt++) {
11957 			if (cnt % 8 == 0) {
11958 				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
11959 				bp = bp + 7;
11960 			}
11961 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11962 			bp = bp + 6;
11963 		}
11964 	}
11965 
11966 	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
11967 	bp += strlen(bp);
11968 
11969 	(void) sprintf(bp, "\n\nRequest Queue");
11970 	bp += strlen(bp);
11971 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
11972 		if (cnt % 8 == 0) {
11973 			(void) sprintf(bp, "\n%08x: ", cnt);
11974 			bp += strlen(bp);
11975 		}
11976 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
11977 		bp += strlen(bp);
11978 	}
11979 
11980 	(void) sprintf(bp, "\n\nResponse Queue");
11981 	bp += strlen(bp);
11982 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
11983 		if (cnt % 8 == 0) {
11984 			(void) sprintf(bp, "\n%08x: ", cnt);
11985 			bp += strlen(bp);
11986 		}
11987 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
11988 		bp += strlen(bp);
11989 	}
11990 
11991 	(void) sprintf(bp, "\n");
11992 
11993 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11994 
11995 	return (strlen(bufp));
11996 }
11997 
11998 /*
11999  * ql_24xx_ascii_fw_dump
12000  *	Converts ISP24xx firmware binary dump to ascii.
12001  *
12002  * Input:
12003  *	ha = adapter state pointer.
12004  *	bptr = buffer pointer.
12005  *
12006  * Returns:
12007  *	Amount of data buffer used.
12008  *
12009  * Context:
12010  *	Kernel context.
12011  */
12012 static size_t
12013 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12014 {
12015 	uint32_t		cnt;
12016 	caddr_t			bp = bufp;
12017 	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12018 
12019 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12020 
12021 	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12022 	    ha->fw_major_version, ha->fw_minor_version,
12023 	    ha->fw_subminor_version, ha->fw_attributes);
12024 	bp += strlen(bp);
12025 
12026 	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12027 
12028 	(void) strcat(bp, "\nHost Interface Registers");
12029 	bp += strlen(bp);
12030 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12031 		if (cnt % 8 == 0) {
12032 			(void) sprintf(bp++, "\n");
12033 		}
12034 
12035 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12036 		bp += 9;
12037 	}
12038 
12039 	(void) sprintf(bp, "\n\nMailbox Registers");
12040 	bp += strlen(bp);
12041 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12042 		if (cnt % 16 == 0) {
12043 			(void) sprintf(bp++, "\n");
12044 		}
12045 
12046 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12047 		bp += 5;
12048 	}
12049 
12050 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12051 	bp += strlen(bp);
12052 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12053 		if (cnt % 8 == 0) {
12054 			(void) sprintf(bp++, "\n");
12055 		}
12056 
12057 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12058 		bp += 9;
12059 	}
12060 
12061 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12062 	bp += strlen(bp);
12063 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12064 		if (cnt % 8 == 0) {
12065 			(void) sprintf(bp++, "\n");
12066 		}
12067 
12068 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12069 		bp += 9;
12070 	}
12071 
12072 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12073 	bp += strlen(bp);
12074 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12075 		if (cnt % 8 == 0) {
12076 			(void) sprintf(bp++, "\n");
12077 		}
12078 
12079 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12080 		bp += 9;
12081 	}
12082 
12083 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12084 	bp += strlen(bp);
12085 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12086 		if (cnt % 8 == 0) {
12087 			(void) sprintf(bp++, "\n");
12088 		}
12089 
12090 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12091 		bp += 9;
12092 	}
12093 
12094 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12095 	bp += strlen(bp);
12096 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12097 		if (cnt % 8 == 0) {
12098 			(void) sprintf(bp++, "\n");
12099 		}
12100 
12101 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12102 		bp += 9;
12103 	}
12104 
12105 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12106 	bp += strlen(bp);
12107 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12108 		if (cnt % 8 == 0) {
12109 			(void) sprintf(bp++, "\n");
12110 		}
12111 
12112 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12113 		bp += 9;
12114 	}
12115 
12116 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12117 	bp += strlen(bp);
12118 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12119 		if (cnt % 8 == 0) {
12120 			(void) sprintf(bp++, "\n");
12121 		}
12122 
12123 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12124 		bp += 9;
12125 	}
12126 
12127 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12128 	bp += strlen(bp);
12129 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12130 		if (cnt % 8 == 0) {
12131 			(void) sprintf(bp++, "\n");
12132 		}
12133 
12134 		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12135 		bp += 9;
12136 	}
12137 
12138 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12139 	bp += strlen(bp);
12140 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12141 		if (cnt % 8 == 0) {
12142 			(void) sprintf(bp++, "\n");
12143 		}
12144 
12145 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12146 		bp += 9;
12147 	}
12148 
12149 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12150 	bp += strlen(bp);
12151 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12152 		if (cnt % 8 == 0) {
12153 			(void) sprintf(bp++, "\n");
12154 		}
12155 
12156 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12157 		bp += 9;
12158 	}
12159 
12160 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12161 	bp += strlen(bp);
12162 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12163 		if (cnt % 8 == 0) {
12164 			(void) sprintf(bp++, "\n");
12165 		}
12166 
12167 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12168 		bp += 9;
12169 	}
12170 
12171 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12172 	bp += strlen(bp);
12173 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12174 		if (cnt % 8 == 0) {
12175 			(void) sprintf(bp++, "\n");
12176 		}
12177 
12178 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12179 		bp += 9;
12180 	}
12181 
12182 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12183 	bp += strlen(bp);
12184 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12185 		if (cnt % 8 == 0) {
12186 			(void) sprintf(bp++, "\n");
12187 		}
12188 
12189 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12190 		bp += 9;
12191 	}
12192 
12193 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12194 	bp += strlen(bp);
12195 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12196 		if (cnt % 8 == 0) {
12197 			(void) sprintf(bp++, "\n");
12198 		}
12199 
12200 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12201 		bp += 9;
12202 	}
12203 
12204 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12205 	bp += strlen(bp);
12206 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12207 		if (cnt % 8 == 0) {
12208 			(void) sprintf(bp++, "\n");
12209 		}
12210 
12211 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12212 		bp += 9;
12213 	}
12214 
12215 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12216 	bp += strlen(bp);
12217 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12218 		if (cnt % 8 == 0) {
12219 			(void) sprintf(bp++, "\n");
12220 		}
12221 
12222 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12223 		bp += 9;
12224 	}
12225 
12226 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12227 	bp += strlen(bp);
12228 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12229 		if (cnt % 8 == 0) {
12230 			(void) sprintf(bp++, "\n");
12231 		}
12232 
12233 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12234 		bp += 9;
12235 	}
12236 
12237 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12238 	bp += strlen(bp);
12239 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12240 		if (cnt % 8 == 0) {
12241 			(void) sprintf(bp++, "\n");
12242 		}
12243 
12244 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12245 		bp += 9;
12246 	}
12247 
12248 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12249 	bp += strlen(bp);
12250 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12251 		if (cnt % 8 == 0) {
12252 			(void) sprintf(bp++, "\n");
12253 		}
12254 
12255 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12256 		bp += 9;
12257 	}
12258 
12259 	(void) sprintf(bp, "\n\nRISC GP Registers");
12260 	bp += strlen(bp);
12261 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12262 		if (cnt % 8 == 0) {
12263 			(void) sprintf(bp++, "\n");
12264 		}
12265 
12266 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12267 		bp += 9;
12268 	}
12269 
12270 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12271 	bp += strlen(bp);
12272 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12273 		if (cnt % 8 == 0) {
12274 			(void) sprintf(bp++, "\n");
12275 		}
12276 
12277 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12278 		bp += 9;
12279 	}
12280 
12281 	(void) sprintf(bp, "\n\nLMC Registers");
12282 	bp += strlen(bp);
12283 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12284 		if (cnt % 8 == 0) {
12285 			(void) sprintf(bp++, "\n");
12286 		}
12287 
12288 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12289 		bp += 9;
12290 	}
12291 
12292 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12293 	bp += strlen(bp);
12294 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12295 		if (cnt % 8 == 0) {
12296 			(void) sprintf(bp++, "\n");
12297 		}
12298 
12299 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12300 		bp += 9;
12301 	}
12302 
12303 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12304 	bp += strlen(bp);
12305 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12306 		if (cnt % 8 == 0) {
12307 			(void) sprintf(bp++, "\n");
12308 		}
12309 
12310 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12311 		bp += 9;
12312 	}
12313 
12314 	(void) sprintf(bp, "\n\nCode RAM");
12315 	bp += strlen(bp);
12316 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12317 		if (cnt % 8 == 0) {
12318 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12319 			bp += 11;
12320 		}
12321 
12322 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12323 		bp += 9;
12324 	}
12325 
12326 	(void) sprintf(bp, "\n\nExternal Memory");
12327 	bp += strlen(bp);
12328 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12329 		if (cnt % 8 == 0) {
12330 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12331 			bp += 11;
12332 		}
12333 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12334 		bp += 9;
12335 	}
12336 
12337 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12338 	bp += strlen(bp);
12339 
12340 	(void) sprintf(bp, "\n\nRequest Queue");
12341 	bp += strlen(bp);
12342 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12343 		if (cnt % 8 == 0) {
12344 			(void) sprintf(bp, "\n%08x: ", cnt);
12345 			bp += strlen(bp);
12346 		}
12347 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12348 		bp += strlen(bp);
12349 	}
12350 
12351 	(void) sprintf(bp, "\n\nResponse Queue");
12352 	bp += strlen(bp);
12353 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12354 		if (cnt % 8 == 0) {
12355 			(void) sprintf(bp, "\n%08x: ", cnt);
12356 			bp += strlen(bp);
12357 		}
12358 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12359 		bp += strlen(bp);
12360 	}
12361 
12362 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12363 	    (ha->fwexttracebuf.bp != NULL)) {
12364 		uint32_t cnt_b = 0;
12365 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12366 
12367 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12368 		bp += strlen(bp);
12369 		/* show data address as a byte address, data as long words */
12370 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12371 			cnt_b = cnt * 4;
12372 			if (cnt_b % 32 == 0) {
12373 				(void) sprintf(bp, "\n%08x: ",
12374 				    (int)(w64 + cnt_b));
12375 				bp += 11;
12376 			}
12377 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12378 			bp += 9;
12379 		}
12380 	}
12381 
12382 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12383 	    (ha->fwfcetracebuf.bp != NULL)) {
12384 		uint32_t cnt_b = 0;
12385 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12386 
12387 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12388 		bp += strlen(bp);
12389 		/* show data address as a byte address, data as long words */
12390 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12391 			cnt_b = cnt * 4;
12392 			if (cnt_b % 32 == 0) {
12393 				(void) sprintf(bp, "\n%08x: ",
12394 				    (int)(w64 + cnt_b));
12395 				bp += 11;
12396 			}
12397 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12398 			bp += 9;
12399 		}
12400 	}
12401 
12402 	(void) sprintf(bp, "\n\n");
12403 	bp += strlen(bp);
12404 
12405 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12406 
12407 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12408 
12409 	return (cnt);
12410 }
12411 
12412 /*
12413  * ql_25xx_ascii_fw_dump
12414  *	Converts ISP25xx firmware binary dump to ascii.
12415  *
12416  * Input:
12417  *	ha = adapter state pointer.
12418  *	bptr = buffer pointer.
12419  *
12420  * Returns:
12421  *	Amount of data buffer used.
12422  *
12423  * Context:
12424  *	Kernel context.
12425  */
12426 static size_t
12427 ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12428 {
12429 	uint32_t		cnt;
12430 	caddr_t			bp = bufp;
12431 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12432 
12433 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12434 
12435 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12436 	    ha->fw_major_version, ha->fw_minor_version,
12437 	    ha->fw_subminor_version, ha->fw_attributes);
12438 	bp += strlen(bp);
12439 
12440 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12441 	bp += strlen(bp);
12442 
12443 	(void) sprintf(bp, "\nHostRisc Registers");
12444 	bp += strlen(bp);
12445 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12446 		if (cnt % 8 == 0) {
12447 			(void) sprintf(bp++, "\n");
12448 		}
12449 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12450 		bp += 9;
12451 	}
12452 
12453 	(void) sprintf(bp, "\n\nPCIe Registers");
12454 	bp += strlen(bp);
12455 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12456 		if (cnt % 8 == 0) {
12457 			(void) sprintf(bp++, "\n");
12458 		}
12459 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12460 		bp += 9;
12461 	}
12462 
12463 	(void) strcat(bp, "\n\nHost Interface Registers");
12464 	bp += strlen(bp);
12465 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12466 		if (cnt % 8 == 0) {
12467 			(void) sprintf(bp++, "\n");
12468 		}
12469 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12470 		bp += 9;
12471 	}
12472 
12473 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12474 	bp += strlen(bp);
12475 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12476 		if (cnt % 8 == 0) {
12477 			(void) sprintf(bp++, "\n");
12478 		}
12479 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12480 		bp += 9;
12481 	}
12482 
12483 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12484 	    fw->risc_io);
12485 	bp += strlen(bp);
12486 
12487 	(void) sprintf(bp, "\n\nMailbox Registers");
12488 	bp += strlen(bp);
12489 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12490 		if (cnt % 16 == 0) {
12491 			(void) sprintf(bp++, "\n");
12492 		}
12493 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12494 		bp += 5;
12495 	}
12496 
12497 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12498 	bp += strlen(bp);
12499 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12500 		if (cnt % 8 == 0) {
12501 			(void) sprintf(bp++, "\n");
12502 		}
12503 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12504 		bp += 9;
12505 	}
12506 
12507 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12508 	bp += strlen(bp);
12509 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12510 		if (cnt % 8 == 0) {
12511 			(void) sprintf(bp++, "\n");
12512 		}
12513 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12514 		bp += 9;
12515 	}
12516 
12517 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12518 	bp += strlen(bp);
12519 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12520 		if (cnt % 8 == 0) {
12521 			(void) sprintf(bp++, "\n");
12522 		}
12523 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12524 		bp += 9;
12525 	}
12526 
12527 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12528 	bp += strlen(bp);
12529 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12530 		if (cnt % 8 == 0) {
12531 			(void) sprintf(bp++, "\n");
12532 		}
12533 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12534 		bp += 9;
12535 	}
12536 
12537 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12538 	bp += strlen(bp);
12539 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12540 		if (cnt % 8 == 0) {
12541 			(void) sprintf(bp++, "\n");
12542 		}
12543 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12544 		bp += 9;
12545 	}
12546 
12547 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12548 	bp += strlen(bp);
12549 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12550 		if (cnt % 8 == 0) {
12551 			(void) sprintf(bp++, "\n");
12552 		}
12553 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12554 		bp += 9;
12555 	}
12556 
12557 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12558 	bp += strlen(bp);
12559 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12560 		if (cnt % 8 == 0) {
12561 			(void) sprintf(bp++, "\n");
12562 		}
12563 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12564 		bp += 9;
12565 	}
12566 
12567 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12568 	bp += strlen(bp);
12569 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12570 		if (cnt % 8 == 0) {
12571 			(void) sprintf(bp++, "\n");
12572 		}
12573 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12574 		bp += 9;
12575 	}
12576 
12577 	(void) sprintf(bp, "\n\nASEQ-0 GP Registers");
12578 	bp += strlen(bp);
12579 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12580 		if (cnt % 8 == 0) {
12581 			(void) sprintf(bp++, "\n");
12582 		}
12583 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12584 		bp += 9;
12585 	}
12586 
12587 	(void) sprintf(bp, "\n\nASEQ-1 GP Registers");
12588 	bp += strlen(bp);
12589 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12590 		if (cnt % 8 == 0) {
12591 			(void) sprintf(bp++, "\n");
12592 		}
12593 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12594 		bp += 9;
12595 	}
12596 
12597 	(void) sprintf(bp, "\n\nASEQ-2 GP Registers");
12598 	bp += strlen(bp);
12599 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12600 		if (cnt % 8 == 0) {
12601 			(void) sprintf(bp++, "\n");
12602 		}
12603 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12604 		bp += 9;
12605 	}
12606 
12607 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12608 	bp += strlen(bp);
12609 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12610 		if (cnt % 8 == 0) {
12611 			(void) sprintf(bp++, "\n");
12612 		}
12613 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12614 		bp += 9;
12615 	}
12616 
12617 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12618 	bp += strlen(bp);
12619 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12620 		if (cnt % 8 == 0) {
12621 			(void) sprintf(bp++, "\n");
12622 		}
12623 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12624 		bp += 9;
12625 	}
12626 
12627 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12628 	bp += strlen(bp);
12629 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12630 		if (cnt % 8 == 0) {
12631 			(void) sprintf(bp++, "\n");
12632 		}
12633 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12634 		bp += 9;
12635 	}
12636 
12637 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12638 	bp += strlen(bp);
12639 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12640 		if (cnt % 8 == 0) {
12641 			(void) sprintf(bp++, "\n");
12642 		}
12643 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12644 		bp += 9;
12645 	}
12646 
12647 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12648 	bp += strlen(bp);
12649 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12650 		if (cnt % 8 == 0) {
12651 			(void) sprintf(bp++, "\n");
12652 		}
12653 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12654 		bp += 9;
12655 	}
12656 
12657 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12658 	bp += strlen(bp);
12659 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12660 		if (cnt % 8 == 0) {
12661 			(void) sprintf(bp++, "\n");
12662 		}
12663 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12664 		bp += 9;
12665 	}
12666 
12667 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12668 	bp += strlen(bp);
12669 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12670 		if (cnt % 8 == 0) {
12671 			(void) sprintf(bp++, "\n");
12672 		}
12673 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12674 		bp += 9;
12675 	}
12676 
12677 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12678 	bp += strlen(bp);
12679 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12680 		if (cnt % 8 == 0) {
12681 			(void) sprintf(bp++, "\n");
12682 		}
12683 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12684 		bp += 9;
12685 	}
12686 
12687 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12688 	bp += strlen(bp);
12689 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12690 		if (cnt % 8 == 0) {
12691 			(void) sprintf(bp++, "\n");
12692 		}
12693 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12694 		bp += 9;
12695 	}
12696 
12697 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12698 	bp += strlen(bp);
12699 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12700 		if (cnt % 8 == 0) {
12701 			(void) sprintf(bp++, "\n");
12702 		}
12703 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12704 		bp += 9;
12705 	}
12706 
12707 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12708 	bp += strlen(bp);
12709 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12710 		if (cnt % 8 == 0) {
12711 			(void) sprintf(bp++, "\n");
12712 		}
12713 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12714 		bp += 9;
12715 	}
12716 
12717 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12718 	bp += strlen(bp);
12719 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12720 		if (cnt % 8 == 0) {
12721 			(void) sprintf(bp++, "\n");
12722 		}
12723 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12724 		bp += 9;
12725 	}
12726 
12727 	(void) sprintf(bp, "\n\nRISC GP Registers");
12728 	bp += strlen(bp);
12729 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12730 		if (cnt % 8 == 0) {
12731 			(void) sprintf(bp++, "\n");
12732 		}
12733 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12734 		bp += 9;
12735 	}
12736 
12737 	(void) sprintf(bp, "\n\nLMC Registers");
12738 	bp += strlen(bp);
12739 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12740 		if (cnt % 8 == 0) {
12741 			(void) sprintf(bp++, "\n");
12742 		}
12743 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12744 		bp += 9;
12745 	}
12746 
12747 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12748 	bp += strlen(bp);
12749 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12750 		if (cnt % 8 == 0) {
12751 			(void) sprintf(bp++, "\n");
12752 		}
12753 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12754 		bp += 9;
12755 	}
12756 
12757 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12758 	bp += strlen(bp);
12759 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12760 		if (cnt % 8 == 0) {
12761 			(void) sprintf(bp++, "\n");
12762 		}
12763 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12764 		bp += 9;
12765 	}
12766 
12767 	(void) sprintf(bp, "\n\nCode RAM");
12768 	bp += strlen(bp);
12769 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12770 		if (cnt % 8 == 0) {
12771 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12772 			bp += 11;
12773 		}
12774 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12775 		bp += 9;
12776 	}
12777 
12778 	(void) sprintf(bp, "\n\nExternal Memory");
12779 	bp += strlen(bp);
12780 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12781 		if (cnt % 8 == 0) {
12782 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12783 			bp += 11;
12784 		}
12785 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12786 		bp += 9;
12787 	}
12788 
12789 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12790 	bp += strlen(bp);
12791 
12792 	(void) sprintf(bp, "\n\nRequest Queue");
12793 	bp += strlen(bp);
12794 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12795 		if (cnt % 8 == 0) {
12796 			(void) sprintf(bp, "\n%08x: ", cnt);
12797 			bp += strlen(bp);
12798 		}
12799 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12800 		bp += strlen(bp);
12801 	}
12802 
12803 	(void) sprintf(bp, "\n\nResponse Queue");
12804 	bp += strlen(bp);
12805 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12806 		if (cnt % 8 == 0) {
12807 			(void) sprintf(bp, "\n%08x: ", cnt);
12808 			bp += strlen(bp);
12809 		}
12810 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12811 		bp += strlen(bp);
12812 	}
12813 
12814 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12815 	    (ha->fwexttracebuf.bp != NULL)) {
12816 		uint32_t cnt_b = 0;
12817 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12818 
12819 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12820 		bp += strlen(bp);
12821 		/* show data address as a byte address, data as long words */
12822 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12823 			cnt_b = cnt * 4;
12824 			if (cnt_b % 32 == 0) {
12825 				(void) sprintf(bp, "\n%08x: ",
12826 				    (int)(w64 + cnt_b));
12827 				bp += 11;
12828 			}
12829 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12830 			bp += 9;
12831 		}
12832 	}
12833 
12834 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12835 	    (ha->fwfcetracebuf.bp != NULL)) {
12836 		uint32_t cnt_b = 0;
12837 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12838 
12839 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12840 		bp += strlen(bp);
12841 		/* show data address as a byte address, data as long words */
12842 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12843 			cnt_b = cnt * 4;
12844 			if (cnt_b % 32 == 0) {
12845 				(void) sprintf(bp, "\n%08x: ",
12846 				    (int)(w64 + cnt_b));
12847 				bp += 11;
12848 			}
12849 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12850 			bp += 9;
12851 		}
12852 	}
12853 
12854 	(void) sprintf(bp, "\n\n");
12855 	bp += strlen(bp);
12856 
12857 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12858 
12859 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12860 
12861 	return (cnt);
12862 }
12863 
12864 /*
12865  * ql_2200_binary_fw_dump
12866  *
12867  * Input:
12868  *	ha:	adapter state pointer.
12869  *	fw:	firmware dump context pointer.
12870  *
12871  * Returns:
12872  *	ql local function return status code.
12873  *
12874  * Context:
12875  *	Interrupt or Kernel context, no mailbox commands allowed.
12876  */
12877 static int
12878 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
12879 {
12880 	uint32_t	cnt;
12881 	uint16_t	risc_address;
12882 	clock_t		timer;
12883 	mbx_cmd_t	mc;
12884 	mbx_cmd_t	*mcp = &mc;
12885 	int		rval = QL_SUCCESS;
12886 
12887 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12888 
12889 	/* Disable ISP interrupts. */
12890 	WRT16_IO_REG(ha, ictrl, 0);
12891 	ADAPTER_STATE_LOCK(ha);
12892 	ha->flags &= ~INTERRUPTS_ENABLED;
12893 	ADAPTER_STATE_UNLOCK(ha);
12894 
12895 	/* Release mailbox registers. */
12896 	WRT16_IO_REG(ha, semaphore, 0);
12897 
12898 	/* Pause RISC. */
12899 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
12900 	timer = 30000;
12901 	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
12902 		if (timer-- != 0) {
12903 			drv_usecwait(MILLISEC);
12904 		} else {
12905 			rval = QL_FUNCTION_TIMEOUT;
12906 			break;
12907 		}
12908 	}
12909 
12910 	if (rval == QL_SUCCESS) {
12911 		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
12912 		    sizeof (fw->pbiu_reg) / 2, 16);
12913 
12914 		/* In 2200 we only read 8 mailboxes */
12915 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
12916 		    8, 16);
12917 
12918 		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
12919 		    sizeof (fw->dma_reg) / 2, 16);
12920 
12921 		WRT16_IO_REG(ha, ctrl_status, 0);
12922 		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
12923 		    sizeof (fw->risc_hdw_reg) / 2, 16);
12924 
12925 		WRT16_IO_REG(ha, pcr, 0x2000);
12926 		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
12927 		    sizeof (fw->risc_gp0_reg) / 2, 16);
12928 
12929 		WRT16_IO_REG(ha, pcr, 0x2100);
12930 		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
12931 		    sizeof (fw->risc_gp1_reg) / 2, 16);
12932 
12933 		WRT16_IO_REG(ha, pcr, 0x2200);
12934 		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
12935 		    sizeof (fw->risc_gp2_reg) / 2, 16);
12936 
12937 		WRT16_IO_REG(ha, pcr, 0x2300);
12938 		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
12939 		    sizeof (fw->risc_gp3_reg) / 2, 16);
12940 
12941 		WRT16_IO_REG(ha, pcr, 0x2400);
12942 		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
12943 		    sizeof (fw->risc_gp4_reg) / 2, 16);
12944 
12945 		WRT16_IO_REG(ha, pcr, 0x2500);
12946 		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
12947 		    sizeof (fw->risc_gp5_reg) / 2, 16);
12948 
12949 		WRT16_IO_REG(ha, pcr, 0x2600);
12950 		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
12951 		    sizeof (fw->risc_gp6_reg) / 2, 16);
12952 
12953 		WRT16_IO_REG(ha, pcr, 0x2700);
12954 		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
12955 		    sizeof (fw->risc_gp7_reg) / 2, 16);
12956 
12957 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12958 		/* 2200 has only 16 registers */
12959 		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
12960 		    ha->iobase + 0x80, 16, 16);
12961 
12962 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12963 		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
12964 		    sizeof (fw->fpm_b0_reg) / 2, 16);
12965 
12966 		WRT16_IO_REG(ha, ctrl_status, 0x30);
12967 		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
12968 		    sizeof (fw->fpm_b1_reg) / 2, 16);
12969 
12970 		/* Select FPM registers. */
12971 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12972 
12973 		/* FPM Soft Reset. */
12974 		WRT16_IO_REG(ha, fpm_diag_config, 0x100);
12975 
12976 		/* Select frame buffer registers. */
12977 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12978 
12979 		/* Reset frame buffer FIFOs. */
12980 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
12981 
12982 		/* Select RISC module registers. */
12983 		WRT16_IO_REG(ha, ctrl_status, 0);
12984 
12985 		/* Reset RISC module. */
12986 		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
12987 
12988 		/* Reset ISP semaphore. */
12989 		WRT16_IO_REG(ha, semaphore, 0);
12990 
12991 		/* Release RISC module. */
12992 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
12993 
12994 		/* Wait for RISC to recover from reset. */
12995 		timer = 30000;
12996 		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
12997 			if (timer-- != 0) {
12998 				drv_usecwait(MILLISEC);
12999 			} else {
13000 				rval = QL_FUNCTION_TIMEOUT;
13001 				break;
13002 			}
13003 		}
13004 
13005 		/* Disable RISC pause on FPM parity error. */
13006 		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13007 	}
13008 
13009 	if (rval == QL_SUCCESS) {
13010 		/* Pause RISC. */
13011 		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13012 		timer = 30000;
13013 		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13014 			if (timer-- != 0) {
13015 				drv_usecwait(MILLISEC);
13016 			} else {
13017 				rval = QL_FUNCTION_TIMEOUT;
13018 				break;
13019 			}
13020 		}
13021 	}
13022 
13023 	if (rval == QL_SUCCESS) {
13024 		/* Set memory configuration and timing. */
13025 		WRT16_IO_REG(ha, mctr, 0xf2);
13026 
13027 		/* Release RISC. */
13028 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13029 
13030 		/* Get RISC SRAM. */
13031 		risc_address = 0x1000;
13032 		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
13033 		for (cnt = 0; cnt < 0xf000; cnt++) {
13034 			WRT16_IO_REG(ha, mailbox[1], risc_address++);
13035 			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
13036 			for (timer = 6000000; timer != 0; timer--) {
13037 				/* Check for pending interrupts. */
13038 				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
13039 					if (RD16_IO_REG(ha, semaphore) &
13040 					    BIT_0) {
13041 						WRT16_IO_REG(ha, hccr,
13042 						    HC_CLR_RISC_INT);
13043 						mcp->mb[0] = RD16_IO_REG(ha,
13044 						    mailbox[0]);
13045 						fw->risc_ram[cnt] =
13046 						    RD16_IO_REG(ha,
13047 						    mailbox[2]);
13048 						WRT16_IO_REG(ha,
13049 						    semaphore, 0);
13050 						break;
13051 					}
13052 					WRT16_IO_REG(ha, hccr,
13053 					    HC_CLR_RISC_INT);
13054 				}
13055 				drv_usecwait(5);
13056 			}
13057 
13058 			if (timer == 0) {
13059 				rval = QL_FUNCTION_TIMEOUT;
13060 			} else {
13061 				rval = mcp->mb[0];
13062 			}
13063 
13064 			if (rval != QL_SUCCESS) {
13065 				break;
13066 			}
13067 		}
13068 	}
13069 
13070 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13071 
13072 	return (rval);
13073 }
13074 
13075 /*
13076  * ql_2300_binary_fw_dump
13077  *
13078  * Input:
13079  *	ha:	adapter state pointer.
13080  *	fw:	firmware dump context pointer.
13081  *
13082  * Returns:
13083  *	ql local function return status code.
13084  *
13085  * Context:
13086  *	Interrupt or Kernel context, no mailbox commands allowed.
13087  */
13088 static int
13089 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13090 {
13091 	clock_t	timer;
13092 	int	rval = QL_SUCCESS;
13093 
13094 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13095 
13096 	/* Disable ISP interrupts. */
13097 	WRT16_IO_REG(ha, ictrl, 0);
13098 	ADAPTER_STATE_LOCK(ha);
13099 	ha->flags &= ~INTERRUPTS_ENABLED;
13100 	ADAPTER_STATE_UNLOCK(ha);
13101 
13102 	/* Release mailbox registers. */
13103 	WRT16_IO_REG(ha, semaphore, 0);
13104 
13105 	/* Pause RISC. */
13106 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13107 	timer = 30000;
13108 	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13109 		if (timer-- != 0) {
13110 			drv_usecwait(MILLISEC);
13111 		} else {
13112 			rval = QL_FUNCTION_TIMEOUT;
13113 			break;
13114 		}
13115 	}
13116 
13117 	if (rval == QL_SUCCESS) {
13118 		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13119 		    sizeof (fw->pbiu_reg) / 2, 16);
13120 
13121 		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13122 		    sizeof (fw->risc_host_reg) / 2, 16);
13123 
13124 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13125 		    sizeof (fw->mailbox_reg) / 2, 16);
13126 
13127 		WRT16_IO_REG(ha, ctrl_status, 0x40);
13128 		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13129 		    sizeof (fw->resp_dma_reg) / 2, 16);
13130 
13131 		WRT16_IO_REG(ha, ctrl_status, 0x50);
13132 		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13133 		    sizeof (fw->dma_reg) / 2, 16);
13134 
13135 		WRT16_IO_REG(ha, ctrl_status, 0);
13136 		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13137 		    sizeof (fw->risc_hdw_reg) / 2, 16);
13138 
13139 		WRT16_IO_REG(ha, pcr, 0x2000);
13140 		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13141 		    sizeof (fw->risc_gp0_reg) / 2, 16);
13142 
13143 		WRT16_IO_REG(ha, pcr, 0x2200);
13144 		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13145 		    sizeof (fw->risc_gp1_reg) / 2, 16);
13146 
13147 		WRT16_IO_REG(ha, pcr, 0x2400);
13148 		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13149 		    sizeof (fw->risc_gp2_reg) / 2, 16);
13150 
13151 		WRT16_IO_REG(ha, pcr, 0x2600);
13152 		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13153 		    sizeof (fw->risc_gp3_reg) / 2, 16);
13154 
13155 		WRT16_IO_REG(ha, pcr, 0x2800);
13156 		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13157 		    sizeof (fw->risc_gp4_reg) / 2, 16);
13158 
13159 		WRT16_IO_REG(ha, pcr, 0x2A00);
13160 		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13161 		    sizeof (fw->risc_gp5_reg) / 2, 16);
13162 
13163 		WRT16_IO_REG(ha, pcr, 0x2C00);
13164 		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13165 		    sizeof (fw->risc_gp6_reg) / 2, 16);
13166 
13167 		WRT16_IO_REG(ha, pcr, 0x2E00);
13168 		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13169 		    sizeof (fw->risc_gp7_reg) / 2, 16);
13170 
13171 		WRT16_IO_REG(ha, ctrl_status, 0x10);
13172 		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13173 		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13174 
13175 		WRT16_IO_REG(ha, ctrl_status, 0x20);
13176 		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13177 		    sizeof (fw->fpm_b0_reg) / 2, 16);
13178 
13179 		WRT16_IO_REG(ha, ctrl_status, 0x30);
13180 		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13181 		    sizeof (fw->fpm_b1_reg) / 2, 16);
13182 
13183 		/* Select FPM registers. */
13184 		WRT16_IO_REG(ha, ctrl_status, 0x20);
13185 
13186 		/* FPM Soft Reset. */
13187 		WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13188 
13189 		/* Select frame buffer registers. */
13190 		WRT16_IO_REG(ha, ctrl_status, 0x10);
13191 
13192 		/* Reset frame buffer FIFOs. */
13193 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
13194 
13195 		/* Select RISC module registers. */
13196 		WRT16_IO_REG(ha, ctrl_status, 0);
13197 
13198 		/* Reset RISC module. */
13199 		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13200 
13201 		/* Reset ISP semaphore. */
13202 		WRT16_IO_REG(ha, semaphore, 0);
13203 
13204 		/* Release RISC module. */
13205 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13206 
13207 		/* Wait for RISC to recover from reset. */
13208 		timer = 30000;
13209 		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
13210 			if (timer-- != 0) {
13211 				drv_usecwait(MILLISEC);
13212 			} else {
13213 				rval = QL_FUNCTION_TIMEOUT;
13214 				break;
13215 			}
13216 		}
13217 
13218 		/* Disable RISC pause on FPM parity error. */
13219 		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13220 	}
13221 
13222 	/* Get RISC SRAM. */
13223 	if (rval == QL_SUCCESS) {
13224 		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13225 	}
13226 	/* Get STACK SRAM. */
13227 	if (rval == QL_SUCCESS) {
13228 		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13229 	}
13230 	/* Get DATA SRAM. */
13231 	if (rval == QL_SUCCESS) {
13232 		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13233 	}
13234 
13235 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13236 
13237 	return (rval);
13238 }
13239 
13240 /*
13241  * ql_24xx_binary_fw_dump
13242  *
13243  * Input:
13244  *	ha:	adapter state pointer.
13245  *	fw:	firmware dump context pointer.
13246  *
13247  * Returns:
13248  *	ql local function return status code.
13249  *
13250  * Context:
13251  *	Interrupt or Kernel context, no mailbox commands allowed.
13252  */
13253 static int
13254 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13255 {
13256 	uint32_t	*reg32;
13257 	void		*bp;
13258 	clock_t		timer;
13259 	int		rval = QL_SUCCESS;
13260 
13261 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13262 
13263 	fw->hccr = RD32_IO_REG(ha, hccr);
13264 
13265 	/* Pause RISC. */
13266 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13267 		/* Disable ISP interrupts. */
13268 		WRT16_IO_REG(ha, ictrl, 0);
13269 
13270 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13271 		for (timer = 30000;
13272 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13273 		    rval == QL_SUCCESS; timer--) {
13274 			if (timer) {
13275 				drv_usecwait(100);
13276 			} else {
13277 				rval = QL_FUNCTION_TIMEOUT;
13278 			}
13279 		}
13280 	}
13281 
13282 	if (rval == QL_SUCCESS) {
13283 		/* Host interface registers. */
13284 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13285 		    sizeof (fw->host_reg) / 4, 32);
13286 
13287 		/* Disable ISP interrupts. */
13288 		WRT32_IO_REG(ha, ictrl, 0);
13289 		RD32_IO_REG(ha, ictrl);
13290 		ADAPTER_STATE_LOCK(ha);
13291 		ha->flags &= ~INTERRUPTS_ENABLED;
13292 		ADAPTER_STATE_UNLOCK(ha);
13293 
13294 		/* Shadow registers. */
13295 
13296 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13297 		RD32_IO_REG(ha, io_base_addr);
13298 
13299 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13300 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13301 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13302 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13303 
13304 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13305 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13306 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13307 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13308 
13309 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13310 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13311 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13312 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13313 
13314 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13315 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13316 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13317 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13318 
13319 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13320 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13321 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13322 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13323 
13324 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13325 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13326 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13327 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13328 
13329 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13330 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13331 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13332 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13333 
13334 		/* Mailbox registers. */
13335 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13336 		    sizeof (fw->mailbox_reg) / 2, 16);
13337 
13338 		/* Transfer sequence registers. */
13339 
13340 		/* XSEQ GP */
13341 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13342 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13343 		    16, 32);
13344 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13345 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13346 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13347 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13348 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13349 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13350 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13351 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13352 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13353 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13354 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13355 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13356 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13357 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13358 
13359 		/* XSEQ-0 */
13360 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13361 		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13362 		    sizeof (fw->xseq_0_reg) / 4, 32);
13363 
13364 		/* XSEQ-1 */
13365 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13366 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13367 		    sizeof (fw->xseq_1_reg) / 4, 32);
13368 
13369 		/* Receive sequence registers. */
13370 
13371 		/* RSEQ GP */
13372 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13373 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13374 		    16, 32);
13375 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13376 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13377 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13378 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13379 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13380 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13381 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13382 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13383 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13384 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13385 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13386 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13387 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13388 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13389 
13390 		/* RSEQ-0 */
13391 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13392 		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13393 		    sizeof (fw->rseq_0_reg) / 4, 32);
13394 
13395 		/* RSEQ-1 */
13396 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13397 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13398 		    sizeof (fw->rseq_1_reg) / 4, 32);
13399 
13400 		/* RSEQ-2 */
13401 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13402 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13403 		    sizeof (fw->rseq_2_reg) / 4, 32);
13404 
13405 		/* Command DMA registers. */
13406 
13407 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13408 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13409 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13410 
13411 		/* Queues. */
13412 
13413 		/* RequestQ0 */
13414 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13415 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13416 		    8, 32);
13417 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13418 
13419 		/* ResponseQ0 */
13420 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13421 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13422 		    8, 32);
13423 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13424 
13425 		/* RequestQ1 */
13426 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13427 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13428 		    8, 32);
13429 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13430 
13431 		/* Transmit DMA registers. */
13432 
13433 		/* XMT0 */
13434 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13435 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13436 		    16, 32);
13437 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13438 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13439 
13440 		/* XMT1 */
13441 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13442 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13443 		    16, 32);
13444 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13445 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13446 
13447 		/* XMT2 */
13448 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13449 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13450 		    16, 32);
13451 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13452 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13453 
13454 		/* XMT3 */
13455 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13456 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13457 		    16, 32);
13458 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13459 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13460 
13461 		/* XMT4 */
13462 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13463 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13464 		    16, 32);
13465 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13466 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13467 
13468 		/* XMT Common */
13469 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13470 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13471 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13472 
13473 		/* Receive DMA registers. */
13474 
13475 		/* RCVThread0 */
13476 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
13477 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13478 		    ha->iobase + 0xC0, 16, 32);
13479 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
13480 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13481 
13482 		/* RCVThread1 */
13483 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
13484 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13485 		    ha->iobase + 0xC0, 16, 32);
13486 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
13487 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13488 
13489 		/* RISC registers. */
13490 
13491 		/* RISC GP */
13492 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13493 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13494 		    16, 32);
13495 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13496 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13497 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13498 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13499 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13500 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13501 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13502 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13503 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13504 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13505 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13506 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13507 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13508 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13509 
13510 		/* Local memory controller registers. */
13511 
13512 		/* LMC */
13513 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
13514 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13515 		    16, 32);
13516 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
13517 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13518 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
13519 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13520 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
13521 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13522 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
13523 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13524 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
13525 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13526 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
13527 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13528 
13529 		/* Fibre Protocol Module registers. */
13530 
13531 		/* FPM hardware */
13532 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
13533 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13534 		    16, 32);
13535 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
13536 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13537 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
13538 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13539 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
13540 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13541 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
13542 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13543 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
13544 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13545 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
13546 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13547 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
13548 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13549 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
13550 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13551 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
13552 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13553 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13554 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13555 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13556 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13557 
13558 		/* Frame Buffer registers. */
13559 
13560 		/* FB hardware */
13561 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
13562 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13563 		    16, 32);
13564 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
13565 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13566 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
13567 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13568 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
13569 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13570 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
13571 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13572 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
13573 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13574 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
13575 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13576 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
13577 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13578 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
13579 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13580 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
13581 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13582 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13583 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13584 	}
13585 
13586 	/* Get the request queue */
13587 	if (rval == QL_SUCCESS) {
13588 		uint32_t	cnt;
13589 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
13590 
13591 		/* Sync DMA buffer. */
13592 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13593 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13594 		    DDI_DMA_SYNC_FORKERNEL);
13595 
13596 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13597 			fw->req_q[cnt] = *w32++;
13598 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13599 		}
13600 	}
13601 
13602 	/* Get the response queue */
13603 	if (rval == QL_SUCCESS) {
13604 		uint32_t	cnt;
13605 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
13606 
13607 		/* Sync DMA buffer. */
13608 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13609 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13610 		    DDI_DMA_SYNC_FORKERNEL);
13611 
13612 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13613 			fw->rsp_q[cnt] = *w32++;
13614 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13615 		}
13616 	}
13617 
13618 	/* Reset RISC. */
13619 	ql_reset_chip(ha);
13620 
13621 	/* Memory. */
13622 	if (rval == QL_SUCCESS) {
13623 		/* Code RAM. */
13624 		rval = ql_read_risc_ram(ha, 0x20000,
13625 		    sizeof (fw->code_ram) / 4, fw->code_ram);
13626 	}
13627 	if (rval == QL_SUCCESS) {
13628 		/* External Memory. */
13629 		rval = ql_read_risc_ram(ha, 0x100000,
13630 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
13631 	}
13632 
13633 	/* Get the extended trace buffer */
13634 	if (rval == QL_SUCCESS) {
13635 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13636 		    (ha->fwexttracebuf.bp != NULL)) {
13637 			uint32_t	cnt;
13638 			uint32_t	*w32 = ha->fwexttracebuf.bp;
13639 
13640 			/* Sync DMA buffer. */
13641 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13642 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13643 
13644 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13645 				fw->ext_trace_buf[cnt] = *w32++;
13646 			}
13647 		}
13648 	}
13649 
13650 	/* Get the FC event trace buffer */
13651 	if (rval == QL_SUCCESS) {
13652 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13653 		    (ha->fwfcetracebuf.bp != NULL)) {
13654 			uint32_t	cnt;
13655 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
13656 
13657 			/* Sync DMA buffer. */
13658 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13659 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13660 
13661 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13662 				fw->fce_trace_buf[cnt] = *w32++;
13663 			}
13664 		}
13665 	}
13666 
13667 	if (rval != QL_SUCCESS) {
13668 		EL(ha, "failed=%xh\n", rval);
13669 	} else {
13670 		/*EMPTY*/
13671 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13672 	}
13673 
13674 	return (rval);
13675 }
13676 
13677 /*
13678  * ql_25xx_binary_fw_dump
13679  *
13680  * Input:
13681  *	ha:	adapter state pointer.
13682  *	fw:	firmware dump context pointer.
13683  *
13684  * Returns:
13685  *	ql local function return status code.
13686  *
13687  * Context:
13688  *	Interrupt or Kernel context, no mailbox commands allowed.
13689  */
static int
ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture the risc-to-host status before disturbing the RISC. */
	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll up to ~3s (30000 * 100us) for the pause to latch. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/* Host Interface registers */

		/*
		 * Each register bank below is selected by writing its base
		 * into io_base_addr and then read through the window at
		 * iobase + 0xC0; ql_read_regs() returns the advanced buffer
		 * pointer so consecutive windows append into the same array.
		 */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Shadow registers. */

		/*
		 * Each shadow register is read by writing a 0xB0n00000
		 * select value at iobase + 0xF0 and reading the result
		 * at iobase + 0xFC.
		 */
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers. */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		/* The 81xx maps the frame buffer at different bank bases. */
		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			WRT32_IO_REG(ha, io_base_addr, 0x40C0);
		} else {
			WRT32_IO_REG(ha, io_base_addr, 0x6000);
		}
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);

		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			WRT32_IO_REG(ha, io_base_addr, 0x40D0);
		} else {
			WRT32_IO_REG(ha, io_base_addr, 0x6010);
		}
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			WRT32_IO_REG(ha, io_base_addr, 0x61C0);
		} else {
			WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		}
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Queue entries are little-endian in host DMA memory. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */

	ql_reset_chip(ha);

	/* Memory. */

	/* RISC RAM is read via mailbox commands after the chip reset. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
14228 
14229 /*
14230  * ql_read_risc_ram
14231  *	Reads RISC RAM one word at a time.
14232  *	Risc interrupts must be disabled when this routine is called.
14233  *
14234  * Input:
14235  *	ha:	adapter state pointer.
14236  *	risc_address:	RISC code start address.
14237  *	len:		Number of words.
14238  *	buf:		buffer pointer.
14239  *
14240  * Returns:
14241  *	ql local function return status code.
14242  *
14243  * Context:
14244  *	Interrupt or Kernel context, no mailbox commands allowed.
14245  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	/*
	 * Fetch one word per READ_RAM_EXTENDED mailbox command,
	 * polling for completion by hand since RISC interrupts are
	 * disabled while this routine runs.
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		/* 24xx/25xx/81xx parts use the 32-bit hccr register. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		/* Poll up to ~30s (6000000 * 5us) for completion. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				/* Status 1/0x10: command completed. */
				if ((stat == 1) || (stat == 0x10)) {
					if (CFG_IST(ha, CFG_CTRL_242581)) {
						/* 32-bit word in mbox 2/3. */
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				/* Status 2/0x11: command failed. */
				} else if ((stat == 2) || (stat == 0x11)) {
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				/* Unexpected interrupt; clear and re-poll. */
				if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Always clear the RISC interrupt before the next word. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		/* timer only reaches 0 when the poll loop was exhausted. */
		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
14311 
14312 /*
14313  * ql_read_regs
14314  *	Reads adapter registers to buffer.
14315  *
14316  * Input:
14317  *	ha:	adapter state pointer.
14318  *	buf:	buffer pointer.
14319  *	reg:	start address.
14320  *	count:	number of registers.
14321  *	wds:	register size.
14322  *
14323  * Context:
14324  *	Interrupt or Kernel context, no mailbox commands allowed.
14325  */
14326 static void *
14327 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14328     uint8_t wds)
14329 {
14330 	uint32_t	*bp32, *reg32;
14331 	uint16_t	*bp16, *reg16;
14332 	uint8_t		*bp8, *reg8;
14333 
14334 	switch (wds) {
14335 	case 32:
14336 		bp32 = buf;
14337 		reg32 = reg;
14338 		while (count--) {
14339 			*bp32++ = RD_REG_DWORD(ha, reg32++);
14340 		}
14341 		return (bp32);
14342 	case 16:
14343 		bp16 = buf;
14344 		reg16 = reg;
14345 		while (count--) {
14346 			*bp16++ = RD_REG_WORD(ha, reg16++);
14347 		}
14348 		return (bp16);
14349 	case 8:
14350 		bp8 = buf;
14351 		reg8 = reg;
14352 		while (count--) {
14353 			*bp8++ = RD_REG_BYTE(ha, reg8++);
14354 		}
14355 		return (bp8);
14356 	default:
14357 		EL(ha, "Unknown word size=%d\n", wds);
14358 		return (buf);
14359 	}
14360 }
14361 
14362 static int
14363 ql_save_config_regs(dev_info_t *dip)
14364 {
14365 	ql_adapter_state_t	*ha;
14366 	int			ret;
14367 	ql_config_space_t	chs;
14368 	caddr_t			prop = "ql-config-space";
14369 
14370 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14371 	ASSERT(ha != NULL);
14372 	if (ha == NULL) {
14373 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14374 		    ddi_get_instance(dip));
14375 		return (DDI_FAILURE);
14376 	}
14377 
14378 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14379 
14380 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14381 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
14382 	    1) {
14383 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14384 		return (DDI_SUCCESS);
14385 	}
14386 
14387 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
14388 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
14389 	    PCI_CONF_HEADER);
14390 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14391 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
14392 		    PCI_BCNF_BCNTRL);
14393 	}
14394 
14395 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
14396 	    PCI_CONF_CACHE_LINESZ);
14397 
14398 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14399 	    PCI_CONF_LATENCY_TIMER);
14400 
14401 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14402 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14403 		    PCI_BCNF_LATENCY_TIMER);
14404 	}
14405 
14406 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
14407 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
14408 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
14409 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
14410 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
14411 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
14412 
14413 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14414 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
14415 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
14416 
14417 	if (ret != DDI_PROP_SUCCESS) {
14418 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
14419 		    QL_NAME, ddi_get_instance(dip), prop);
14420 		return (DDI_FAILURE);
14421 	}
14422 
14423 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14424 
14425 	return (DDI_SUCCESS);
14426 }
14427 
14428 static int
14429 ql_restore_config_regs(dev_info_t *dip)
14430 {
14431 	ql_adapter_state_t	*ha;
14432 	uint_t			elements;
14433 	ql_config_space_t	*chs_p;
14434 	caddr_t			prop = "ql-config-space";
14435 
14436 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14437 	ASSERT(ha != NULL);
14438 	if (ha == NULL) {
14439 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
14440 		    ddi_get_instance(dip));
14441 		return (DDI_FAILURE);
14442 	}
14443 
14444 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14445 
14446 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14447 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
14448 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
14449 	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
14450 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14451 		return (DDI_FAILURE);
14452 	}
14453 
14454 	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
14455 
14456 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14457 		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
14458 		    chs_p->chs_bridge_control);
14459 	}
14460 
14461 	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
14462 	    chs_p->chs_cache_line_size);
14463 
14464 	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
14465 	    chs_p->chs_latency_timer);
14466 
14467 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14468 		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
14469 		    chs_p->chs_sec_latency_timer);
14470 	}
14471 
14472 	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
14473 	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
14474 	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
14475 	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
14476 	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
14477 	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
14478 
14479 	ddi_prop_free(chs_p);
14480 
14481 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14482 	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
14483 		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
14484 		    QL_NAME, ddi_get_instance(dip), prop);
14485 	}
14486 
14487 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14488 
14489 	return (DDI_SUCCESS);
14490 }
14491 
14492 uint8_t
14493 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
14494 {
14495 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14496 		return (ddi_get8(ha->sbus_config_handle,
14497 		    (uint8_t *)(ha->sbus_config_base + off)));
14498 	}
14499 
14500 #ifdef KERNEL_32
14501 	return (pci_config_getb(ha->pci_handle, off));
14502 #else
14503 	return (pci_config_get8(ha->pci_handle, off));
14504 #endif
14505 }
14506 
14507 uint16_t
14508 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
14509 {
14510 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14511 		return (ddi_get16(ha->sbus_config_handle,
14512 		    (uint16_t *)(ha->sbus_config_base + off)));
14513 	}
14514 
14515 #ifdef KERNEL_32
14516 	return (pci_config_getw(ha->pci_handle, off));
14517 #else
14518 	return (pci_config_get16(ha->pci_handle, off));
14519 #endif
14520 }
14521 
14522 uint32_t
14523 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
14524 {
14525 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14526 		return (ddi_get32(ha->sbus_config_handle,
14527 		    (uint32_t *)(ha->sbus_config_base + off)));
14528 	}
14529 
14530 #ifdef KERNEL_32
14531 	return (pci_config_getl(ha->pci_handle, off));
14532 #else
14533 	return (pci_config_get32(ha->pci_handle, off));
14534 #endif
14535 }
14536 
14537 void
14538 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
14539 {
14540 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14541 		ddi_put8(ha->sbus_config_handle,
14542 		    (uint8_t *)(ha->sbus_config_base + off), val);
14543 	} else {
14544 #ifdef KERNEL_32
14545 		pci_config_putb(ha->pci_handle, off, val);
14546 #else
14547 		pci_config_put8(ha->pci_handle, off, val);
14548 #endif
14549 	}
14550 }
14551 
14552 void
14553 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
14554 {
14555 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14556 		ddi_put16(ha->sbus_config_handle,
14557 		    (uint16_t *)(ha->sbus_config_base + off), val);
14558 	} else {
14559 #ifdef KERNEL_32
14560 		pci_config_putw(ha->pci_handle, off, val);
14561 #else
14562 		pci_config_put16(ha->pci_handle, off, val);
14563 #endif
14564 	}
14565 }
14566 
14567 void
14568 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
14569 {
14570 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14571 		ddi_put32(ha->sbus_config_handle,
14572 		    (uint32_t *)(ha->sbus_config_base + off), val);
14573 	} else {
14574 #ifdef KERNEL_32
14575 		pci_config_putl(ha->pci_handle, off, val);
14576 #else
14577 		pci_config_put32(ha->pci_handle, off, val);
14578 #endif
14579 	}
14580 }
14581 
14582 /*
14583  * ql_halt
14584  *	Waits for commands that are running to finish and
14585  *	if they do not, commands are aborted.
14586  *	Finally the adapter is reset.
14587  *
14588  * Input:
14589  *	ha:	adapter state pointer.
14590  *	pwr:	power state.
14591  *
14592  * Context:
14593  *	Kernel context.
14594  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* 3000 iterations x 10ms delay = 30 seconds. */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			/* cnt == 0 means the 30 second wait timed out. */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * Keep the pending queue draining
					 * while outstanding slots are
					 * scanned; restart the scan when
					 * new IOCBs were started.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Only reset the chip when powering all the way down. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
14679 
14680 /*
14681  * ql_get_dma_mem
14682  *	Function used to allocate dma memory.
14683  *
14684  * Input:
14685  *	ha:			adapter state pointer.
14686  *	mem:			pointer to dma memory object.
14687  *	size:			size of the request in bytes
14688  *
14689  * Returns:
 *	ql local function return status code.
14691  *
14692  * Context:
14693  *	Kernel context.
14694  */
14695 int
14696 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
14697     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
14698 {
14699 	int	rval;
14700 
14701 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14702 
14703 	mem->size = size;
14704 	mem->type = allocation_type;
14705 	mem->cookie_count = 1;
14706 
14707 	switch (alignment) {
14708 	case QL_DMA_DATA_ALIGN:
14709 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
14710 		break;
14711 	case QL_DMA_RING_ALIGN:
14712 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
14713 		break;
14714 	default:
14715 		EL(ha, "failed, unknown alignment type %x\n", alignment);
14716 		break;
14717 	}
14718 
14719 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
14720 		ql_free_phys(ha, mem);
14721 		EL(ha, "failed, alloc_phys=%xh\n", rval);
14722 	}
14723 
14724 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14725 
14726 	return (rval);
14727 }
14728 
14729 /*
14730  * ql_alloc_phys
14731  *	Function used to allocate memory and zero it.
14732  *	Memory is below 4 GB.
14733  *
14734  * Input:
14735  *	ha:			adapter state pointer.
14736  *	mem:			pointer to dma memory object.
14737  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14738  *	mem->cookie_count	number of segments allowed.
14739  *	mem->type		memory allocation type.
14740  *	mem->size		memory size.
14741  *	mem->alignment		memory alignment.
14742  *
14743  * Returns:
 *	ql local function return status code.
14745  *
14746  * Context:
14747  *	Kernel context.
14748  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Select 64 or 32 bit DMA attributes per adapter configuration. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel heap memory, no device access handle. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Default acc_attr is little endian; override as needed. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			/*
			 * NOTE(review): these compare non-pointer fields
			 * against NULL; that works because NULL is 0, but
			 * plain 0 would be clearer.
			 */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* A NULL bp covers both kmem_zalloc and ddi_dma_mem_alloc failure. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		/* ql_free_phys() releases both the memory and the handle. */
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
14844 
14845 /*
14846  * ql_free_phys
14847  *	Function used to free physical memory.
14848  *
14849  * Input:
14850  *	ha:	adapter state pointer.
14851  *	mem:	pointer to dma memory object.
14852  *
14853  * Context:
14854  *	Kernel context.
14855  */
14856 void
14857 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
14858 {
14859 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14860 
14861 	if (mem != NULL && mem->dma_handle != NULL) {
14862 		ql_unbind_dma_buffer(ha, mem);
14863 		switch (mem->type) {
14864 		case KERNEL_MEM:
14865 			if (mem->bp != NULL) {
14866 				kmem_free(mem->bp, mem->size);
14867 			}
14868 			break;
14869 		case LITTLE_ENDIAN_DMA:
14870 		case BIG_ENDIAN_DMA:
14871 		case NO_SWAP_DMA:
14872 			if (mem->acc_handle != NULL) {
14873 				ddi_dma_mem_free(&mem->acc_handle);
14874 				mem->acc_handle = NULL;
14875 			}
14876 			break;
14877 		default:
14878 			break;
14879 		}
14880 		mem->bp = NULL;
14881 		ddi_dma_free_handle(&mem->dma_handle);
14882 		mem->dma_handle = NULL;
14883 	}
14884 
14885 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14886 }
14887 
14888 /*
14889  * ql_alloc_dma_resouce.
14890  *	Allocates DMA resource for buffer.
14891  *
14892  * Input:
14893  *	ha:			adapter state pointer.
14894  *	mem:			pointer to dma memory object.
14895  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14896  *	mem->cookie_count	number of segments allowed.
14897  *	mem->type		memory allocation type.
14898  *	mem->size		memory size.
14899  *	mem->bp			pointer to memory or struct buf
14900  *
14901  * Returns:
 *	ql local function return status code.
14903  *
14904  * Context:
14905  *	Kernel context.
14906  */
14907 int
14908 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14909 {
14910 	ddi_dma_attr_t	dma_attr;
14911 
14912 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14913 
14914 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
14915 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
14916 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
14917 
14918 	/*
14919 	 * Allocate DMA handle for command.
14920 	 */
14921 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
14922 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
14923 	    DDI_SUCCESS) {
14924 		EL(ha, "failed, ddi_dma_alloc_handle\n");
14925 		mem->dma_handle = NULL;
14926 		return (QL_MEMORY_ALLOC_FAILED);
14927 	}
14928 
14929 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
14930 
14931 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
14932 		EL(ha, "failed, bind_dma_buffer\n");
14933 		ddi_dma_free_handle(&mem->dma_handle);
14934 		mem->dma_handle = NULL;
14935 		return (QL_MEMORY_ALLOC_FAILED);
14936 	}
14937 
14938 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14939 
14940 	return (QL_SUCCESS);
14941 }
14942 
14943 /*
14944  * ql_free_dma_resource
14945  *	Frees DMA resources.
14946  *
14947  * Input:
14948  *	ha:		adapter state pointer.
14949  *	mem:		pointer to dma memory object.
14950  *	mem->dma_handle	DMA memory handle.
14951  *
14952  * Context:
14953  *	Kernel context.
14954  */
void
ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * ql_free_phys() unbinds the buffer and releases the memory,
	 * access handle and DMA handle, which covers everything this
	 * object owns.
	 */
	ql_free_phys(ha, mem);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
14964 
14965 /*
14966  * ql_bind_dma_buffer
14967  *	Binds DMA buffer.
14968  *
14969  * Input:
14970  *	ha:			adapter state pointer.
14971  *	mem:			pointer to dma memory object.
14972  *	sleep:			KM_SLEEP or KM_NOSLEEP.
14973  *	mem->dma_handle		DMA memory handle.
14974  *	mem->cookie_count	number of segments allowed.
14975  *	mem->type		memory allocation type.
14976  *	mem->size		memory size.
14977  *	mem->bp			pointer to memory or struct buf
14978  *
14979  * Returns:
14980  *	mem->cookies		pointer to list of cookies.
14981  *	mem->cookie_count	number of cookies.
14982  *	status			success = DDI_DMA_MAPPED
14983  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
14984  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
14985  *				DDI_DMA_TOOBIG
14986  *
14987  * Context:
14988  *	Kernel context.
14989  */
static int
ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	int			rval;
	ddi_dma_cookie_t	*cookiep;
	uint32_t		cnt = mem->cookie_count;	/* max allowed */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* struct buf objects use the buf binding flavor of the DDI call. */
	if (mem->type == STRUCT_BUF_MEMORY) {
		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
	} else {
		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
		    &mem->cookie_count);
	}

	if (rval == DDI_DMA_MAPPED) {
		/* Reject binds that produced more segments than allowed. */
		if (mem->cookie_count > cnt) {
			(void) ddi_dma_unbind_handle(mem->dma_handle);
			EL(ha, "failed, cookie_count %d > %d\n",
			    mem->cookie_count, cnt);
			rval = DDI_DMA_TOOBIG;
		} else {
			if (mem->cookie_count > 1) {
				/*
				 * Multi-segment bind: copy the first cookie
				 * and walk the rest into an allocated list.
				 * Note: the assignment inside the condition
				 * is intentional (NULL check of the alloc).
				 */
				if (mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) {
					*mem->cookies = mem->cookie;
					cookiep = mem->cookies;
					for (cnt = 1; cnt < mem->cookie_count;
					    cnt++) {
						ddi_dma_nextcookie(
						    mem->dma_handle,
						    ++cookiep);
					}
				} else {
					(void) ddi_dma_unbind_handle(
					    mem->dma_handle);
					EL(ha, "failed, kmem_zalloc\n");
					rval = DDI_DMA_NORESOURCES;
				}
			} else {
				/*
				 * It has been reported that dmac_size at times
				 * may be incorrect on sparc machines so for
				 * sparc machines that only have one segment
				 * use the buffer size instead.
				 */
				mem->cookies = &mem->cookie;
				mem->cookies->dmac_size = mem->size;
			}
		}
	}

	if (rval != DDI_DMA_MAPPED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15057 
15058 /*
15059  * ql_unbind_dma_buffer
15060  *	Unbinds DMA buffer.
15061  *
15062  * Input:
15063  *	ha:			adapter state pointer.
15064  *	mem:			pointer to dma memory object.
15065  *	mem->dma_handle		DMA memory handle.
15066  *	mem->cookies		pointer to cookie list.
15067  *	mem->cookie_count	number of cookies.
15068  *
15069  * Context:
15070  *	Kernel context.
15071  */
15072 /* ARGSUSED */
15073 static void
15074 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15075 {
15076 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15077 
15078 	(void) ddi_dma_unbind_handle(mem->dma_handle);
15079 	if (mem->cookie_count > 1) {
15080 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15081 		    mem->cookie_count);
15082 		mem->cookies = NULL;
15083 	}
15084 	mem->cookie_count = 0;
15085 
15086 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15087 }
15088 
/*
 * ql_suspend_adapter
 *	Acquires mailbox ownership, waits for and halts outstanding
 *	commands, then disables adapter interrupts in preparation for
 *	a suspend/power-down.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS or QL_FUNCTION_TIMEOUT.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 30 seconds from now */
		/* NOTE(review): code actually waits 32 seconds — confirm. */
		timer = ddi_get_lbolt();
		timer += 32 * drv_usectohz(1000000);
		if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Let outstanding commands drain before halting the adapter. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15160 
15161 /*
15162  * ql_add_link_b
15163  *	Add link to the end of the chain.
15164  *
15165  * Input:
15166  *	head = Head of link list.
15167  *	link = link to be added.
15168  *	LOCK must be already obtained.
15169  *
15170  * Context:
15171  *	Interrupt or Kernel context, no mailbox commands allowed.
15172  */
15173 void
15174 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15175 {
15176 	ASSERT(link->base_address != NULL);
15177 
15178 	/* at the end there isn't a next */
15179 	link->next = NULL;
15180 
15181 	if ((link->prev = head->last) == NULL) {
15182 		head->first = link;
15183 	} else {
15184 		head->last->next = link;
15185 	}
15186 
15187 	head->last = link;
15188 	link->head = head;	/* the queue we're on */
15189 }
15190 
15191 /*
15192  * ql_add_link_t
15193  *	Add link to the beginning of the chain.
15194  *
15195  * Input:
15196  *	head = Head of link list.
15197  *	link = link to be added.
15198  *	LOCK must be already obtained.
15199  *
15200  * Context:
15201  *	Interrupt or Kernel context, no mailbox commands allowed.
15202  */
15203 void
15204 ql_add_link_t(ql_head_t *head, ql_link_t *link)
15205 {
15206 	ASSERT(link->base_address != NULL);
15207 
15208 	link->prev = NULL;
15209 
15210 	if ((link->next = head->first) == NULL)	{
15211 		head->last = link;
15212 	} else {
15213 		head->first->prev = link;
15214 	}
15215 
15216 	head->first = link;
15217 	link->head = head;	/* the queue we're on */
15218 }
15219 
15220 /*
15221  * ql_remove_link
15222  *	Remove a link from the chain.
15223  *
15224  * Input:
15225  *	head = Head of link list.
15226  *	link = link to be removed.
15227  *	LOCK must be already obtained.
15228  *
15229  * Context:
15230  *	Interrupt or Kernel context, no mailbox commands allowed.
15231  */
15232 void
15233 ql_remove_link(ql_head_t *head, ql_link_t *link)
15234 {
15235 	ASSERT(link->base_address != NULL);
15236 
15237 	if (link->prev != NULL) {
15238 		if ((link->prev->next = link->next) == NULL) {
15239 			head->last = link->prev;
15240 		} else {
15241 			link->next->prev = link->prev;
15242 		}
15243 	} else if ((head->first = link->next) == NULL) {
15244 		head->last = NULL;
15245 	} else {
15246 		head->first->prev = NULL;
15247 	}
15248 
15249 	/* not on a queue any more */
15250 	link->prev = link->next = NULL;
15251 	link->head = NULL;
15252 }
15253 
15254 /*
15255  * ql_chg_endian
15256  *	Change endianess of byte array.
15257  *
15258  * Input:
15259  *	buf = array pointer.
15260  *	size = size of array in bytes.
15261  *
15262  * Context:
15263  *	Interrupt or Kernel context, no mailbox commands allowed.
15264  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	front, back;
	uint8_t	tmp;

	/* Zero or one byte arrays are their own reversal. */
	if (size < 2) {
		return;
	}

	/* Swap bytes pairwise, walking inward from both ends. */
	front = 0;
	back = size - 1;
	while (front < back) {
		tmp = buf[front];
		buf[front] = buf[back];
		buf[back] = tmp;
		front++;
		back--;
	}
}
15280 
15281 /*
15282  * ql_bstr_to_dec
15283  *	Convert decimal byte string to number.
15284  *
15285  * Input:
15286  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
15288  *	size:	number of ascii bytes.
15289  *
15290  * Returns:
15291  *	success = number of ascii bytes processed.
15292  *
15293  * Context:
15294  *	Kernel/Interrupt context.
15295  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	char	*p;
	int	digit, weight, pos, processed;

	/* When the caller passes zero, count the leading decimal digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	processed = 0;
	while (size != 0 && *s != '\0') {
		/* Stop at the first non-digit character. */
		if (*s < '0' || *s > '9') {
			break;
		}
		digit = *s++ - '0';

		/* Positional weight is 10^(remaining digits - 1). */
		weight = 1;
		for (pos = 1; pos < (int)size; pos++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		processed++;
	}

	return (processed);
}
15325 
15326 /*
15327  * ql_delay
15328  *	Calls delay routine if threads are not suspended, otherwise, busy waits
15329  *	Minimum = 1 tick = 10ms
15330  *
15331  * Input:
15332  *	dly = delay time in microseconds.
15333  *
15334  * Context:
15335  *	Kernel or Interrupt context, no mailbox commands allowed.
15336  */
15337 void
15338 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15339 {
15340 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15341 		drv_usecwait(usecs);
15342 	} else {
15343 		delay(drv_usectohz(usecs));
15344 	}
15345 }
15346 
15347 /*
 * ql_stall_driver
15349  *	Stalls one or all driver instances, waits for 30 seconds.
15350  *
15351  * Input:
15352  *	ha:		adapter state pointer or NULL for all.
15353  *	options:	BIT_0 --> leave driver stalled on exit if
15354  *				  failed.
15355  *
15356  * Returns:
15357  *	ql local function return status code.
15358  *
15359  * Context:
15360  *	Kernel context.
15361  */
15362 int
15363 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15364 {
15365 	ql_link_t		*link;
15366 	ql_adapter_state_t	*ha2;
15367 	uint32_t		timer;
15368 
15369 	QL_PRINT_3(CE_CONT, "started\n");
15370 
15371 	/* Wait for 30 seconds for daemons unstall. */
15372 	timer = 3000;
15373 	link = ha == NULL ? ql_hba.first : &ha->hba;
15374 	while (link != NULL && timer) {
15375 		ha2 = link->base_address;
15376 
15377 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15378 
15379 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15380 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15381 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15382 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15383 			link = ha == NULL ? link->next : NULL;
15384 			continue;
15385 		}
15386 
15387 		ql_delay(ha, 10000);
15388 		timer--;
15389 		link = ha == NULL ? ql_hba.first : &ha->hba;
15390 	}
15391 
15392 	if (ha2 != NULL && timer == 0) {
15393 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15394 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15395 		    "unstalled"));
15396 		if (options & BIT_0) {
15397 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15398 		}
15399 		return (QL_FUNCTION_TIMEOUT);
15400 	}
15401 
15402 	QL_PRINT_3(CE_CONT, "done\n");
15403 
15404 	return (QL_SUCCESS);
15405 }
15406 
15407 /*
15408  * ql_restart_driver
15409  *	Restarts one or all driver instances.
15410  *
15411  * Input:
15412  *	ha:	adapter state pointer or NULL for all.
15413  *
15414  * Context:
15415  *	Kernel context.
15416  */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		/* NULL ha means walk the whole list; else just this one. */
		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	/* 3000 iterations x 10ms delay = 30 seconds. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * An instance is considered restarted when its daemon is
		 * gone, stopping, or no longer flagged as stalled.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		/* Still stalled; wait 10ms and rescan from the start. */
		ql_delay(ha, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
15462 
15463 /*
15464  * ql_setup_interrupts
15465  *	Sets up interrupts based on the HBA's and platform's
15466  *	capabilities (e.g., legacy / MSI / FIXED).
15467  *
15468  * Input:
15469  *	ha = adapter state pointer.
15470  *
15471  * Returns:
15472  *	DDI_SUCCESS or DDI_FAILURE.
15473  *
15474  * Context:
15475  *	Kernel context.
15476  */
15477 static int
15478 ql_setup_interrupts(ql_adapter_state_t *ha)
15479 {
15480 	int32_t		rval = DDI_FAILURE;
15481 	int32_t		i;
15482 	int32_t		itypes = 0;
15483 
15484 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15485 
15486 	/*
15487 	 * The Solaris Advanced Interrupt Functions (aif) are only
15488 	 * supported on s10U1 or greater.
15489 	 */
15490 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
15491 		EL(ha, "interrupt framework is not supported or is "
15492 		    "disabled, using legacy\n");
15493 		return (ql_legacy_intr(ha));
15494 	} else if (ql_os_release_level == 10) {
15495 		/*
15496 		 * See if the advanced interrupt functions (aif) are
15497 		 * in the kernel
15498 		 */
15499 		void	*fptr = (void *)&ddi_intr_get_supported_types;
15500 
15501 		if (fptr == NULL) {
15502 			EL(ha, "aif is not supported, using legacy "
15503 			    "interrupts (rev)\n");
15504 			return (ql_legacy_intr(ha));
15505 		}
15506 	}
15507 
15508 	/* See what types of interrupts this HBA and platform support */
15509 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
15510 	    DDI_SUCCESS) {
15511 		EL(ha, "get supported types failed, rval=%xh, "
15512 		    "assuming FIXED\n", i);
15513 		itypes = DDI_INTR_TYPE_FIXED;
15514 	}
15515 
15516 	EL(ha, "supported types are: %xh\n", itypes);
15517 
15518 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
15519 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
15520 		EL(ha, "successful MSI-X setup\n");
15521 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
15522 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
15523 		EL(ha, "successful MSI setup\n");
15524 	} else {
15525 		rval = ql_setup_fixed(ha);
15526 	}
15527 
15528 	if (rval != DDI_SUCCESS) {
15529 		EL(ha, "failed, aif, rval=%xh\n", rval);
15530 	} else {
15531 		/*EMPTY*/
15532 		QL_PRINT_3(CE_CONT, "(%d): done\n");
15533 	}
15534 
15535 	return (rval);
15536 }
15537 
15538 /*
15539  * ql_setup_msi
15540  *	Set up aif MSI interrupts
15541  *
15542  * Input:
15543  *	ha = adapter state pointer.
15544  *
15545  * Returns:
15546  *	DDI_SUCCESS or DDI_FAILURE.
15547  *
15548  * Context:
15549  *	Kernel context.
15550  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		/* ql_release_intr() frees the handle table on all exits. */
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	/* Mutexes need the interrupt priority obtained above. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	/* Block-capable interrupts are enabled as a group. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			/* Mutexes now exist, so tear them down too. */
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15657 
15658 /*
15659  * ql_setup_msix
15660  *	Set up aif MSI-X interrupts
15661  *
15662  * Input:
15663  *	ha = adapter state pointer.
15664  *
15665  * Returns:
15666  *	DDI_SUCCESS or DDI_FAILURE.
15667  *
15668  * Context:
15669  *	Kernel context.
15670  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Global tunable: the user may force MSI-X off entirely. */
	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors from PCI config
	 * space (offset differs per chip family); the low 10 bits of the
	 * word encode the vector count minus one.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/*
	 * Allocate space for interrupt handles.  Sized by the full h/w
	 * vector count (not 'count') so the sparc dup loop below has
	 * room.  NOTE: kmem_zalloc() with KM_SLEEP does not return NULL
	 * on illumos, so the failure branch is purely defensive.
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Mark MSI-X in use before any ql_release_intr() error path. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes (must exist before interrupts are enabled). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts: block-enable if supported, else one-by-one. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15836 
15837 /*
15838  * ql_setup_fixed
15839  *	Sets up aif FIXED interrupts
15840  *
15841  * Input:
15842  *	ha = adapter state pointer.
15843  *
15844  * Returns:
15845  *	DDI_SUCCESS or DDI_FAILURE.
15846  *
15847  * Context:
15848  *	Kernel context.
15849  */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Mark FIXED in use before any ql_release_intr() error path. */
	ha->iflags |= IFLG_INTR_FIXED;

	/*
	 * Allocate space for interrupt handles.
	 * kmem_zalloc() with KM_SLEEP cannot return NULL.
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/*
	 * Allocate the interrupts.  DDI_INTR_ALLOC_STRICT requires the
	 * full 'count' to be granted or the call fails.
	 */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; arg2 is the vector index. */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes (must exist before interrupts are enabled). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15926 
15927 /*
15928  * ql_disable_intr
15929  *	Disables interrupts
15930  *
15931  * Input:
15932  *	ha = adapter state pointer.
15933  *
15934  * Returns:
15935  *
15936  * Context:
15937  *	Kernel context.
15938  */
static void
ql_disable_intr(ql_adapter_state_t *ha)
{
	uint32_t	i, rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Three disable paths, chosen by how the interrupts were set up:
	 * legacy (non-AIF), AIF block-capable MSI/MSI-X, or AIF
	 * one-at-a-time.  Failures are logged, not propagated.
	 */
	if (!(ha->iflags & IFLG_INTR_AIF)) {

		/* Disable legacy interrupts */
		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);

	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {

		/* Remove AIF block interrupts (MSI) */
		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
		    != DDI_SUCCESS) {
			EL(ha, "failed intr block disable, rval=%x\n", rval);
		}

	} else {

		/* Remove AIF non-block interrupts (fixed).  */
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed intr disable, intr#=%xh, "
				    "rval=%xh\n", i, rval);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15974 
15975 /*
15976  * ql_release_intr
15977  *	Releases aif legacy interrupt resources
15978  *
15979  * Input:
15980  *	ha = adapter state pointer.
15981  *
15982  * Returns:
15983  *
15984  * Context:
15985  *	Kernel context.
15986  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Legacy (non-AIF) interrupts own no AIF resources. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Walk the whole handle table, last entry first. */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Only the first intr_cnt entries had handlers
			 * added; entries past that (sparc dup'd vectors)
			 * are disabled and freed without a remove.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all interrupt bookkeeping. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16028 
16029 /*
16030  * ql_legacy_intr
16031  *	Sets up legacy interrupts.
16032  *
16033  *	NB: Only to be used if AIF (Advanced Interupt Framework)
16034  *	    if NOT in the kernel.
16035  *
16036  * Input:
16037  *	ha = adapter state pointer.
16038  *
16039  * Returns:
16040  *	DDI_SUCCESS or DDI_FAILURE.
16041  *
16042  * Context:
16043  *	Kernel context.
16044  */
16045 static int
16046 ql_legacy_intr(ql_adapter_state_t *ha)
16047 {
16048 	int	rval = DDI_SUCCESS;
16049 
16050 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16051 
16052 	/* Setup mutexes */
16053 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16054 		EL(ha, "failed, mutex init\n");
16055 		return (DDI_FAILURE);
16056 	}
16057 
16058 	/* Setup standard/legacy interrupt handler */
16059 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16060 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16061 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16062 		    QL_NAME, ha->instance);
16063 		ql_destroy_mutex(ha);
16064 		rval = DDI_FAILURE;
16065 	}
16066 
16067 	if (rval == DDI_SUCCESS) {
16068 		ha->iflags |= IFLG_INTR_LEGACY;
16069 		EL(ha, "using legacy interrupts\n");
16070 	}
16071 
16072 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16073 
16074 	return (rval);
16075 }
16076 
16077 /*
16078  * ql_init_mutex
16079  *	Initializes mutex's
16080  *
16081  * Input:
16082  *	ha = adapter state pointer.
16083  *
16084  * Returns:
16085  *	DDI_SUCCESS or DDI_FAILURE.
16086  *
16087  * Context:
16088  *	Kernel context.
16089  */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Select the interrupt cookie used to initialize every driver
	 * mutex: AIF setups reuse the priority stored in ha->intr_pri;
	 * legacy setups fetch the iblock cookie from the DDI.
	 */
	if (ha->iflags & IFLG_INTR_AIF) {
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16154 
16155 /*
16156  * ql_destroy_mutex
16157  *	Destroys mutex's
16158  *
16159  * Input:
16160  *	ha = adapter state pointer.
16161  *
16162  * Returns:
16163  *
16164  * Context:
16165  *	Kernel context.
16166  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Tear down in the reverse order of ql_init_mutex(). */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16190 
16191 /*
16192  * ql_fwmodule_resolve
16193  *	Loads and resolves external firmware module and symbols
16194  *
16195  * Input:
16196  *	ha:		adapter state pointer.
16197  *
16198  * Returns:
16199  *	ql local function return status code:
16200  *		QL_SUCCESS - external f/w module module and symbols resolved
16201  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16202  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16203  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16204  * Context:
16205  *	Kernel context.
16206  *
16207  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
16208  * could switch to a tighter scope around acutal download (and add an extra
16209  * ddi_modopen for module opens that occur before root is mounted).
16210  *
16211  */
16212 uint32_t
16213 ql_fwmodule_resolve(ql_adapter_state_t *ha)
16214 {
16215 	int8_t			module[128];
16216 	int8_t			fw_version[128];
16217 	uint32_t		rval = QL_SUCCESS;
16218 	caddr_t			code, code02;
16219 	uint8_t			*p_ucfw;
16220 	uint16_t		*p_usaddr, *p_uslen;
16221 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
16222 	uint32_t		*p_uiaddr02, *p_uilen02;
16223 	struct fw_table		*fwt;
16224 	extern struct fw_table	fw_table[];
16225 
16226 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16227 
16228 	if (ha->fw_module != NULL) {
16229 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16230 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16231 		    ha->fw_subminor_version);
16232 		return (rval);
16233 	}
16234 
16235 	/* make sure the fw_class is in the fw_table of supported classes */
16236 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16237 		if (fwt->fw_class == ha->fw_class)
16238 			break;			/* match */
16239 	}
16240 	if (fwt->fw_version == NULL) {
16241 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16242 		    "in driver's fw_table", QL_NAME, ha->instance,
16243 		    ha->fw_class);
16244 		return (QL_FW_NOT_SUPPORTED);
16245 	}
16246 
16247 	/*
16248 	 * open the module related to the fw_class
16249 	 */
16250 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
16251 	    ha->fw_class);
16252 
16253 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
16254 	if (ha->fw_module == NULL) {
16255 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
16256 		    QL_NAME, ha->instance, module);
16257 		return (QL_FWMODLOAD_FAILED);
16258 	}
16259 
16260 	/*
16261 	 * resolve the fw module symbols, data types depend on fw_class
16262 	 */
16263 
16264 	switch (ha->fw_class) {
16265 	case 0x2200:
16266 	case 0x2300:
16267 	case 0x6322:
16268 
16269 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16270 		    NULL)) == NULL) {
16271 			rval = QL_FWSYM_NOT_FOUND;
16272 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16273 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16274 		    "risc_code_addr01", NULL)) == NULL) {
16275 			rval = QL_FWSYM_NOT_FOUND;
16276 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16277 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16278 		    "risc_code_length01", NULL)) == NULL) {
16279 			rval = QL_FWSYM_NOT_FOUND;
16280 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16281 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16282 		    "firmware_version", NULL)) == NULL) {
16283 			rval = QL_FWSYM_NOT_FOUND;
16284 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16285 		}
16286 
16287 		if (rval == QL_SUCCESS) {
16288 			ha->risc_fw[0].code = code;
16289 			ha->risc_fw[0].addr = *p_usaddr;
16290 			ha->risc_fw[0].length = *p_uslen;
16291 
16292 			(void) snprintf(fw_version, sizeof (fw_version),
16293 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16294 		}
16295 		break;
16296 
16297 	case 0x2400:
16298 	case 0x2500:
16299 	case 0x8100:
16300 
16301 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16302 		    NULL)) == NULL) {
16303 			rval = QL_FWSYM_NOT_FOUND;
16304 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16305 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16306 		    "risc_code_addr01", NULL)) == NULL) {
16307 			rval = QL_FWSYM_NOT_FOUND;
16308 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16309 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16310 		    "risc_code_length01", NULL)) == NULL) {
16311 			rval = QL_FWSYM_NOT_FOUND;
16312 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16313 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16314 		    "firmware_version", NULL)) == NULL) {
16315 			rval = QL_FWSYM_NOT_FOUND;
16316 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16317 		}
16318 
16319 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16320 		    NULL)) == NULL) {
16321 			rval = QL_FWSYM_NOT_FOUND;
16322 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16323 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16324 		    "risc_code_addr02", NULL)) == NULL) {
16325 			rval = QL_FWSYM_NOT_FOUND;
16326 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16327 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16328 		    "risc_code_length02", NULL)) == NULL) {
16329 			rval = QL_FWSYM_NOT_FOUND;
16330 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16331 		}
16332 
16333 		if (rval == QL_SUCCESS) {
16334 			ha->risc_fw[0].code = code;
16335 			ha->risc_fw[0].addr = *p_uiaddr;
16336 			ha->risc_fw[0].length = *p_uilen;
16337 			ha->risc_fw[1].code = code02;
16338 			ha->risc_fw[1].addr = *p_uiaddr02;
16339 			ha->risc_fw[1].length = *p_uilen02;
16340 
16341 			(void) snprintf(fw_version, sizeof (fw_version),
16342 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16343 		}
16344 		break;
16345 
16346 	default:
16347 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16348 		rval = QL_FW_NOT_SUPPORTED;
16349 	}
16350 
16351 	if (rval != QL_SUCCESS) {
16352 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16353 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16354 		if (ha->fw_module != NULL) {
16355 			(void) ddi_modclose(ha->fw_module);
16356 			ha->fw_module = NULL;
16357 		}
16358 	} else {
16359 		/*
16360 		 * check for firmware version mismatch between module and
16361 		 * compiled in fw_table version.
16362 		 */
16363 
16364 		if (strcmp(fwt->fw_version, fw_version) != 0) {
16365 
16366 			/*
16367 			 * If f/w / driver version mismatches then
16368 			 * return a successful status -- however warn
16369 			 * the user that this is NOT recommended.
16370 			 */
16371 
16372 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16373 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16374 			    ha->instance, ha->fw_class, fwt->fw_version,
16375 			    fw_version);
16376 
16377 			ha->cfg_flags |= CFG_FW_MISMATCH;
16378 		} else {
16379 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16380 		}
16381 	}
16382 
16383 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16384 
16385 	return (rval);
16386 }
16387 
16388 /*
16389  * ql_port_state
16390  *	Set the state on all adapter ports.
16391  *
16392  * Input:
16393  *	ha:	parent adapter state pointer.
16394  *	state:	port state.
16395  *	flags:	task daemon flags to set.
16396  *
16397  * Context:
16398  *	Interrupt or Kernel context, no mailbox commands allowed.
16399  */
void
ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	TASK_DAEMON_LOCK(ha);
	/* Apply the new state to the physical port and every vport. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (FC_PORT_STATE_MASK(vha->state) != state) {
			/* Keep the port-speed bits unless going offline. */
			vha->state = state != FC_STATE_OFFLINE ?
			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
			vha->task_daemon_flags |= flags;
		}
	}
	/* Only the LOOP_DOWN bit is forced onto the physical adapter. */
	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16420 
16421 /*
16422  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16423  *
16424  * Input:	Pointer to the adapter state structure.
16425  * Returns:	Success or Failure.
16426  * Context:	Kernel context.
16427  */
16428 int
16429 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16430 {
16431 	int	rval = DDI_SUCCESS;
16432 
16433 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16434 
16435 	ha->el_trace_desc =
16436 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16437 
16438 	if (ha->el_trace_desc == NULL) {
16439 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16440 		    QL_NAME, ha->instance);
16441 		rval = DDI_FAILURE;
16442 	} else {
16443 		ha->el_trace_desc->next		= 0;
16444 		ha->el_trace_desc->trace_buffer =
16445 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16446 
16447 		if (ha->el_trace_desc->trace_buffer == NULL) {
16448 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16449 			    QL_NAME, ha->instance);
16450 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16451 			rval = DDI_FAILURE;
16452 		} else {
16453 			ha->el_trace_desc->trace_buffer_size =
16454 			    EL_TRACE_BUF_SIZE;
16455 			mutex_init(&ha->el_trace_desc->mutex, NULL,
16456 			    MUTEX_DRIVER, NULL);
16457 		}
16458 	}
16459 
16460 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16461 
16462 	return (rval);
16463 }
16464 
16465 /*
16466  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
16467  *
16468  * Input:	Pointer to the adapter state structure.
16469  * Returns:	Success or Failure.
16470  * Context:	Kernel context.
16471  */
16472 int
16473 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
16474 {
16475 	int	rval = DDI_SUCCESS;
16476 
16477 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16478 
16479 	if (ha->el_trace_desc == NULL) {
16480 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
16481 		    QL_NAME, ha->instance);
16482 		rval = DDI_FAILURE;
16483 	} else {
16484 		if (ha->el_trace_desc->trace_buffer != NULL) {
16485 			kmem_free(ha->el_trace_desc->trace_buffer,
16486 			    ha->el_trace_desc->trace_buffer_size);
16487 		}
16488 		mutex_destroy(&ha->el_trace_desc->mutex);
16489 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16490 	}
16491 
16492 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16493 
16494 	return (rval);
16495 }
16496 
16497 /*
16498  * els_cmd_text	- Return a pointer to a string describing the command
16499  *
16500  * Input:	els_cmd = the els command opcode.
16501  * Returns:	pointer to a string.
16502  * Context:	Kernel context.
16503  */
16504 char *
16505 els_cmd_text(int els_cmd)
16506 {
16507 	cmd_table_t *entry = &els_cmd_tbl[0];
16508 
16509 	return (cmd_text(entry, els_cmd));
16510 }
16511 
16512 /*
16513  * mbx_cmd_text - Return a pointer to a string describing the command
16514  *
16515  * Input:	mbx_cmd = the mailbox command opcode.
16516  * Returns:	pointer to a string.
16517  * Context:	Kernel context.
16518  */
16519 char *
16520 mbx_cmd_text(int mbx_cmd)
16521 {
16522 	cmd_table_t *entry = &mbox_cmd_tbl[0];
16523 
16524 	return (cmd_text(entry, mbx_cmd));
16525 }
16526 
16527 /*
16528  * cmd_text	Return a pointer to a string describing the command
16529  *
16530  * Input:	entry = the command table
16531  *		cmd = the command.
16532  * Returns:	pointer to a string.
16533  * Context:	Kernel context.
16534  */
16535 char *
16536 cmd_text(cmd_table_t *entry, int cmd)
16537 {
16538 	for (; entry->cmd != 0; entry++) {
16539 		if (entry->cmd == cmd) {
16540 			break;
16541 		}
16542 	}
16543 	return (entry->string);
16544 }
16545 
16546 /*
16547  * ql_els_24xx_mbox_cmd_iocb - els request indication.
16548  *
16549  * Input:	ha = adapter state pointer.
16550  *		srb = scsi request block pointer.
16551  *		arg = els passthru entry iocb pointer.
16552  * Returns:
16553  * Context:	Kernel context.
16554  */
void
ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
{
	els_descriptor_t	els_desc;

	/* Extract the ELS information from the FC packet */
	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);

	/* Construct the passthru entry in the caller-supplied IOCB */
	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);

	/* Ensure correct endianness of the command payload */
	ql_isp_els_handle_cmd_endian(ha, srb);
}
16569 
16570 /*
16571  * ql_isp_els_request_map - Extract into an els descriptor the info required
16572  *			    to build an els_passthru iocb from an fc packet.
16573  *
16574  * Input:	ha = adapter state pointer.
16575  *		pkt = fc packet pointer
16576  *		els_desc = els descriptor pointer
16577  * Returns:
16578  * Context:	Kernel context.
16579  */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* The ELS opcode sits at the start of the command payload. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
16620 
16621 /*
16622  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
16623  * using the els descriptor.
16624  *
16625  * Input:	ha = adapter state pointer.
16626  *		els_desc = els descriptor pointer.
16627  *		els_entry = els passthru entry iocb pointer.
16628  * Returns:
16629  * Context:	Kernel context.
16630  */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.  All stores go through ddi_put*()
	 * against the descriptor's access handle.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* D_ID and S_ID are stored one byte at a time. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/*
	 * Load transmit data segments and count.  ptr32 starts at the
	 * xmt_dseg_0_address field and keeps advancing into the receive
	 * DSD words; this relies on those fields being laid out
	 * consecutively in els_passthru_entry_t.
	 */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
16678 
16679 /*
16680  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
16681  *				  in host memory.
16682  *
16683  * Input:	ha = adapter state pointer.
16684  *		srb = scsi request block
16685  * Returns:
16686  * Context:	Kernel context.
16687  */
16688 void
16689 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16690 {
16691 	ls_code_t	els;
16692 	fc_packet_t	*pkt;
16693 	uint8_t		*ptr;
16694 
16695 	pkt = srb->pkt;
16696 
16697 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16698 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16699 
16700 	ptr = (uint8_t *)pkt->pkt_cmd;
16701 
16702 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16703 }
16704 
16705 /*
16706  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
16707  *				  in host memory.
16708  * Input:	ha = adapter state pointer.
16709  *		srb = scsi request block
16710  * Returns:
16711  * Context:	Kernel context.
16712  */
16713 void
16714 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16715 {
16716 	ls_code_t	els;
16717 	fc_packet_t	*pkt;
16718 	uint8_t		*ptr;
16719 
16720 	pkt = srb->pkt;
16721 
16722 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16723 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16724 
16725 	ptr = (uint8_t *)pkt->pkt_resp;
16726 	BIG_ENDIAN_32(&els);
16727 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16728 }
16729 
16730 /*
16731  * ql_isp_els_handle_endian - els requests/responses must be in big endian
16732  *			      in host memory.
16733  * Input:	ha = adapter state pointer.
16734  *		ptr = els request/response buffer pointer.
16735  *		ls_code = els command code.
16736  * Returns:
16737  * Context:	Kernel context.
16738  */
16739 void
16740 ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
16741 {
16742 	switch (ls_code) {
16743 	case LA_ELS_PLOGI: {
16744 		BIG_ENDIAN_32(ptr);	/* Command Code */
16745 		ptr += 4;
16746 		BIG_ENDIAN_16(ptr);	/* FC-PH version */
16747 		ptr += 2;
16748 		BIG_ENDIAN_16(ptr);	/* b2b credit */
16749 		ptr += 2;
16750 		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
16751 		ptr += 2;
16752 		BIG_ENDIAN_16(ptr);	/* Rcv data size */
16753 		ptr += 2;
16754 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16755 		ptr += 2;
16756 		BIG_ENDIAN_16(ptr);	/* Rel offset */
16757 		ptr += 2;
16758 		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
16759 		ptr += 4;		/* Port Name */
16760 		ptr += 8;		/* Node Name */
16761 		ptr += 8;		/* Class 1 */
16762 		ptr += 16;		/* Class 2 */
16763 		ptr += 16;		/* Class 3 */
16764 		BIG_ENDIAN_16(ptr);	/* Service options */
16765 		ptr += 2;
16766 		BIG_ENDIAN_16(ptr);	/* Initiator control */
16767 		ptr += 2;
16768 		BIG_ENDIAN_16(ptr);	/* Recipient Control */
16769 		ptr += 2;
16770 		BIG_ENDIAN_16(ptr);	/* Rcv size */
16771 		ptr += 2;
16772 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16773 		ptr += 2;
16774 		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
16775 		ptr += 2;
16776 		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
16777 		break;
16778 	}
16779 	case LA_ELS_PRLI: {
16780 		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
16781 		ptr += 4;		/* Type */
16782 		ptr += 2;
16783 		BIG_ENDIAN_16(ptr);	/* Flags */
16784 		ptr += 2;
16785 		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
16786 		ptr += 4;
16787 		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
16788 		ptr += 4;
16789 		BIG_ENDIAN_32(ptr);	/* Flags */
16790 		break;
16791 	}
16792 	default:
16793 		EL(ha, "can't handle els code %x\n", ls_code);
16794 		break;
16795 	}
16796 }
16797 
16798 /*
16799  * ql_n_port_plogi
16800  *	In N port 2 N port topology where an N Port has logged in with the
16801  *	firmware because it has the N_Port login initiative, we send up
16802  *	a plogi by proxy which stimulates the login procedure to continue.
16803  *
16804  * Input:
16805  *	ha = adapter state pointer.
16806  * Returns:
16807  *
16808  * Context:
16809  *	Kernel context.
16810  */
16811 static int
16812 ql_n_port_plogi(ql_adapter_state_t *ha)
16813 {
16814 	int		rval;
16815 	ql_tgt_t	*tq;
16816 	ql_head_t done_q = { NULL, NULL };
16817 
16818 	rval = QL_SUCCESS;
16819 
16820 	if (ha->topology & QL_N_PORT) {
16821 		/* if we're doing this the n_port_handle must be good */
16822 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
16823 			tq = ql_loop_id_to_queue(ha,
16824 			    ha->n_port->n_port_handle);
16825 			if (tq != NULL) {
16826 				(void) ql_send_plogi(ha, tq, &done_q);
16827 			} else {
16828 				EL(ha, "n_port_handle = %x, tq = %x\n",
16829 				    ha->n_port->n_port_handle, tq);
16830 			}
16831 		} else {
16832 			EL(ha, "n_port_handle = %x, tq = %x\n",
16833 			    ha->n_port->n_port_handle, tq);
16834 		}
16835 		if (done_q.first != NULL) {
16836 			ql_done(done_q.first);
16837 		}
16838 	}
16839 	return (rval);
16840 }
16841 
16842 /*
16843  * Compare two WWNs. The NAA is omitted for comparison.
16844  *
16845  * Note particularly that the indentation used in this
16846  * function  isn't according to Sun recommendations. It
16847  * is indented to make reading a bit easy.
16848  *
16849  * Return Values:
16850  *   if first == second return  0
16851  *   if first > second  return  1
16852  *   if first < second  return -1
16853  */
16854 int
16855 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
16856 {
16857 	la_wwn_t t1, t2;
16858 	int rval;
16859 
16860 	EL(ha, "WWPN=%08x%08x\n",
16861 	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
16862 	EL(ha, "WWPN=%08x%08x\n",
16863 	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
16864 	/*
16865 	 * Fibre Channel protocol is big endian, so compare
16866 	 * as big endian values
16867 	 */
16868 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
16869 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
16870 
16871 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
16872 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
16873 
16874 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
16875 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
16876 			rval = 0;
16877 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
16878 			rval = 1;
16879 		} else {
16880 			rval = -1;
16881 		}
16882 	} else {
16883 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
16884 			rval = 1;
16885 		} else {
16886 			rval = -1;
16887 		}
16888 	}
16889 	return (rval);
16890 }
16891