1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2015 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
27  * Copyright (c) 2016 by Delphix. All rights reserved.
28  */
29 
30 /*
31  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32  *
33  * ***********************************************************************
34  * *									**
35  * *				NOTICE					**
36  * *		COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION		**
37  * *			ALL RIGHTS RESERVED				**
38  * *									**
39  * ***********************************************************************
40  *
41  */
42 
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_init.h>
47 #include <ql_iocb.h>
48 #include <ql_ioctl.h>
49 #include <ql_isr.h>
50 #include <ql_mbx.h>
51 #include <ql_nx.h>
52 #include <ql_xioctl.h>
53 #include <ql_fm.h>
54 
55 /*
56  * Solaris external defines.
57  */
58 extern pri_t minclsyspri;
59 extern pri_t maxclsyspri;
60 
61 /*
62  * dev_ops functions prototypes
63  */
64 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
65 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
66 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
67 static int ql_power(dev_info_t *, int, int);
68 static int ql_quiesce(dev_info_t *);
69 
70 /*
71  * FCA functions prototypes exported by means of the transport table
72  */
73 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
74     fc_fca_bind_info_t *);
75 static void ql_unbind_port(opaque_t);
76 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
77 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
78 static int ql_els_send(opaque_t, fc_packet_t *);
79 static int ql_get_cap(opaque_t, char *, void *);
80 static int ql_set_cap(opaque_t, char *, void *);
81 static int ql_getmap(opaque_t, fc_lilpmap_t *);
82 static int ql_transport(opaque_t, fc_packet_t *);
83 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
84 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
85 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
86 static int ql_abort(opaque_t, fc_packet_t *, int);
87 static int ql_reset(opaque_t, uint32_t);
88 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
89 static opaque_t ql_get_device(opaque_t, fc_portid_t);
90 
91 /*
92  * FCA Driver Support Function Prototypes.
93  */
94 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
95 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
96 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
97 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
98 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
99 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
100 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
101 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
102 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
103 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
104 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
105 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
106 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
107 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
108 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
112 static int ql_login_port(ql_adapter_state_t *, port_id_t);
113 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
114 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
115 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint64_t);
116 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
117 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
118     ql_srb_t *);
119 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
120 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
122     ql_srb_t *);
123 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
124 static void ql_task_daemon(void *);
125 static void ql_task_thread(ql_adapter_state_t *);
126 static void ql_idle_check(ql_adapter_state_t *);
127 static void ql_unsol_callback(ql_srb_t *);
128 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
129 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
130 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
131 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
132 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
133 static int ql_handle_rscn_update(ql_adapter_state_t *);
134 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
135     fc_unsol_buf_t *);
136 static void ql_timer(void *);
137 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
138 static void ql_watchdog(ql_adapter_state_t *);
139 static void ql_wdg_tq_list(ql_adapter_state_t *, ql_tgt_t *);
140 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *);
141 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
142 static void ql_iidma(ql_adapter_state_t *);
143 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
144 static void ql_loop_resync(ql_adapter_state_t *);
145 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
146 static int ql_kstat_update(kstat_t *, int);
147 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
148 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
149 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
150 static size_t ql_81xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
151 static size_t ql_8021_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
152 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
154 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
155 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
156 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
157 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
158     void *);
159 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
160     uint8_t);
161 static int ql_save_config_regs(dev_info_t *);
162 static int ql_restore_config_regs(dev_info_t *);
163 static void ql_halt(ql_adapter_state_t *, int);
164 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
165 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
166 static int ql_suspend_adapter(ql_adapter_state_t *);
167 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
168 static int ql_setup_interrupts(ql_adapter_state_t *);
169 static int ql_setup_msi(ql_adapter_state_t *);
170 static int ql_setup_msix(ql_adapter_state_t *);
171 static int ql_setup_fixed(ql_adapter_state_t *);
172 static void ql_release_intr(ql_adapter_state_t *);
173 static int ql_legacy_intr(ql_adapter_state_t *);
174 static int ql_init_mutex(ql_adapter_state_t *);
175 static void ql_destroy_mutex(ql_adapter_state_t *);
176 static void ql_fca_isp_els_request(ql_adapter_state_t *, ql_request_q_t *,
177     fc_packet_t *, els_descriptor_t *);
178 static void ql_isp_els_request_ctor(els_descriptor_t *,
179     els_passthru_entry_t *);
180 static int ql_n_port_plogi(ql_adapter_state_t *);
181 static int ql_create_queues(ql_adapter_state_t *);
182 static int ql_create_rsp_queue(ql_adapter_state_t *, uint16_t);
183 static void ql_delete_queues(ql_adapter_state_t *);
184 static int ql_multi_queue_support(ql_adapter_state_t *);
185 static int ql_map_mem_bar(ql_adapter_state_t *, ddi_acc_handle_t *, caddr_t *,
186     uint32_t, uint32_t);
187 static void ql_completion_thread(void *);
188 static void ql_process_comp_queue(void *);
189 static int ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *);
190 static void ql_idc(ql_adapter_state_t *);
191 static int ql_83xx_binary_fw_dump(ql_adapter_state_t *, ql_83xx_fw_dump_t *);
192 static size_t ql_83xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
193 static caddr_t ql_str_ptr(ql_adapter_state_t *, caddr_t, uint32_t *);
194 static int ql_27xx_binary_fw_dump(ql_adapter_state_t *);
195 static size_t ql_27xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
196 static uint32_t ql_2700_dmp_parse_template(ql_adapter_state_t *, ql_dt_hdr_t *,
197     uint8_t *, uint32_t);
198 static int ql_2700_dt_riob1(ql_adapter_state_t *, ql_dt_riob1_t *, uint8_t *,
199     uint8_t *);
200 static void ql_2700_dt_wiob1(ql_adapter_state_t *, ql_dt_wiob1_t *, uint8_t *,
201     uint8_t *);
202 static int ql_2700_dt_riob2(ql_adapter_state_t *, ql_dt_riob2_t *, uint8_t *,
203     uint8_t *);
204 static void ql_2700_dt_wiob2(ql_adapter_state_t *, ql_dt_wiob2_t *, uint8_t *,
205     uint8_t *);
206 static int ql_2700_dt_rpci(ql_adapter_state_t *, ql_dt_rpci_t *, uint8_t *,
207     uint8_t *);
208 static void ql_2700_dt_wpci(ql_adapter_state_t *, ql_dt_wpci_t *, uint8_t *,
209     uint8_t *);
210 static int ql_2700_dt_rram(ql_adapter_state_t *, ql_dt_rram_t *, uint8_t *,
211     uint8_t *);
212 static int ql_2700_dt_gque(ql_adapter_state_t *, ql_dt_gque_t *, uint8_t *,
213     uint8_t *);
214 static int ql_2700_dt_gfce(ql_adapter_state_t *, ql_dt_gfce_t *, uint8_t *,
215     uint8_t *);
216 static void ql_2700_dt_prisc(ql_adapter_state_t *, ql_dt_prisc_t *, uint8_t *,
217     uint8_t *);
218 static void ql_2700_dt_rrisc(ql_adapter_state_t *, ql_dt_rrisc_t *, uint8_t *,
219     uint8_t *);
220 static void ql_2700_dt_dint(ql_adapter_state_t *, ql_dt_dint_t *, uint8_t *,
221     uint8_t *);
222 static int ql_2700_dt_ghbd(ql_adapter_state_t *, ql_dt_ghbd_t *, uint8_t *,
223     uint8_t *);
224 static int ql_2700_dt_scra(ql_adapter_state_t *, ql_dt_scra_t *, uint8_t *,
225     uint8_t *);
226 static int ql_2700_dt_rrreg(ql_adapter_state_t *, ql_dt_rrreg_t *, uint8_t *,
227     uint8_t *);
228 static void ql_2700_dt_wrreg(ql_adapter_state_t *, ql_dt_wrreg_t *, uint8_t *,
229     uint8_t *);
230 static int ql_2700_dt_rrram(ql_adapter_state_t *, ql_dt_rrram_t *, uint8_t *,
231     uint8_t *);
232 static int ql_2700_dt_rpcic(ql_adapter_state_t *, ql_dt_rpcic_t *, uint8_t *,
233     uint8_t *);
234 static int ql_2700_dt_gques(ql_adapter_state_t *, ql_dt_gques_t *, uint8_t *,
235     uint8_t *);
236 static int ql_2700_dt_wdmp(ql_adapter_state_t *, ql_dt_wdmp_t *, uint8_t *,
237     uint8_t *);
238 static int ql_2700_dump_ram(ql_adapter_state_t *, uint16_t, uint32_t, uint32_t,
239     uint8_t *);
240 
241 /*
242  * Global data
243  */
244 static uint8_t	ql_enable_pm = 1;
245 static int	ql_flash_sbus_fpga = 0;
246 uint32_t	ql_os_release_level;
247 uint32_t	ql_disable_aif = 0;
248 uint32_t	ql_disable_intx = 0;
249 uint32_t	ql_disable_msi = 0;
250 uint32_t	ql_disable_msix = 0;
251 uint32_t	ql_enable_ets = 0;
252 uint16_t	ql_osc_wait_count = 1000;
253 uint32_t	ql_task_cb_dly = 64;
254 uint32_t	qlc_disable_load = 0;
255 
/* Timer routine variables. */
static timeout_id_t	ql_timer_timeout_id = NULL;	/* outstanding timeout(9F) id */
static clock_t		ql_timer_ticks;

/* Soft state head pointer; initialized via ddi_soft_state_init() in _init(). */
void *ql_state = NULL;

/* Head adapter link; both link pointers start NULL (empty list). */
ql_head_t ql_hba = {
	NULL,
	NULL
};

/* Global hba index */
uint32_t ql_gfru_hba_index = 1;

/*
 * Some IP defines and globals
 * (FC-IP unsolicited buffer tunables; exact use is in the IP paths -
 * NOTE(review): semantics inferred from names, confirm against readers)
 */
uint32_t	ql_ip_buffer_count = 128;
uint32_t	ql_ip_low_water = 10;
uint8_t		ql_ip_fast_post_count = 5;
static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
279 
/*
 * Device AL_PA to Device Head Queue index array.
 * 256 entries, indexed directly by the 8-bit AL_PA value.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
309 
/*
 * Device loop_id to ALPA array.
 * 126 entries, indexed by loop ID; yields the assigned AL_PA.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
326 
/*
 * 2200 register offsets
 * (a 0xff offset marks a register the 2200 does not implement; see the
 * per-field "n/a" annotations)
 */
static reg_off_t reg_off_2200 = {
	0x00,	/* flash_address */
	0x02,	/* flash_data */
	0x06,	/* ctrl_status */
	0x08,	/* ictrl */
	0x0a,	/* istatus */
	0x0c,	/* semaphore */
	0x0e,	/* nvram */
	0x18,	/* req_in */
	0x18,	/* req_out */
	0x1a,	/* resp_in */
	0x1a,	/* resp_out */
	0xff,	/* risc2host - n/a */
	24,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	/* Mailbox out register offsets 0 - 23 */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 - n/a */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

	0x96,	/* fpm_diag_config */
	0xa4,	/* pcr */
	0xb0,	/* mctr */
	0xb8,	/* fb_cmd */
	0xc0,	/* hccr */
	0xcc,	/* gpiod */
	0xce,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0xff,	/* pri_req_in - n/a */
	0xff,	/* pri_req_out - n/a */
	0xff,	/* atio_req_in - n/a */
	0xff,	/* atio_req_out - n/a */
	0xff,	/* io_base_addr - n/a */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};
373 
/*
 * 2300 register offsets
 * (a 0xff offset marks a register the 2300 does not implement; see the
 * per-field "n/a" annotations)
 */
static reg_off_t reg_off_2300 = {
	0x00,	/* flash_address */
	0x02,	/* flash_data */
	0x06,	/* ctrl_status */
	0x08,	/* ictrl */
	0x0a,	/* istatus */
	0x0c,	/* semaphore */
	0x0e,	/* nvram */
	0x10,	/* req_in */
	0x12,	/* req_out */
	0x14,	/* resp_in */
	0x16,	/* resp_out */
	0x18,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	/* Mailbox out register offsets 0 - 31 */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,

	0x96,	/* fpm_diag_config */
	0xa4,	/* pcr */
	0xb0,	/* mctr */
	0x80,	/* fb_cmd */
	0xc0,	/* hccr */
	0xcc,	/* gpiod */
	0xce,	/* gpioe */
	0x1c,	/* host_to_host_sema */
	0xff,	/* pri_req_in - n/a */
	0xff,	/* pri_req_out - n/a */
	0xff,	/* atio_req_in - n/a */
	0xff,	/* atio_req_out - n/a */
	0xff,	/* io_base_addr - n/a */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};
418 
/*
 * 2400/2500 register offsets
 * (a 0xff offset marks a register these chips do not implement; see the
 * per-field "n/a" annotations)
 */
reg_off_t reg_off_2400_2500 = {
	0x00,	/* flash_address */
	0x04,	/* flash_data */
	0x08,	/* ctrl_status */
	0x0c,	/* ictrl */
	0x10,	/* istatus */
	0xff,	/* semaphore - n/a */
	0xff,	/* nvram - n/a */
	0x1c,	/* req_in */
	0x20,	/* req_out */
	0x24,	/* resp_in */
	0x28,	/* resp_out */
	0x44,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	/* Mailbox out register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	0xff,	/* fpm_diag_config  - n/a */
	0xff,	/* pcr - n/a */
	0xff,	/* mctr - n/a */
	0xff,	/* fb_cmd - n/a */
	0x48,	/* hccr */
	0x4c,	/* gpiod */
	0x50,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0x2c,	/* pri_req_in */
	0x30,	/* pri_req_out */
	0x3c,	/* atio_req_in */
	0x40,	/* atio_req_out */
	0x54,	/* io_base_addr */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};
463 
/*
 * P3 (8021) register offsets
 * (a 0xff offset marks a register this chip does not implement; see the
 * per-field "n/a" annotations)
 */
static reg_off_t reg_off_8021 = {
	0x00,	/* flash_address */
	0x04,	/* flash_data */
	0x08,	/* ctrl_status */
	0x0c,	/* ictrl */
	0x10,	/* istatus */
	0xff,	/* semaphore - n/a */
	0xff,	/* nvram - n/a */
	0xff,	/* req_in - n/a */
	0x0,	/* req_out */
	0x100,	/* resp_in */
	0x200,	/* resp_out */
	0x500,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
	0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
	0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
	0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,

	/* Mailbox out register offsets 0 - 31 */
	0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
	0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
	0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
	0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,

	0xff,	/* fpm_diag_config  - n/a */
	0xff,	/* pcr - n/a */
	0xff,	/* mctr - n/a */
	0xff,	/* fb_cmd - n/a */
	0x48,	/* hccr */
	0x4c,	/* gpiod */
	0x50,	/* gpioe */
	0xff,	/* host_to_host_sema - n/a */
	0x2c,	/* pri_req_in */
	0x30,	/* pri_req_out */
	0x3c,	/* atio_req_in */
	0x40,	/* atio_req_out */
	0x54,	/* io_base_addr */
	0x380,	/* nx_host_int */
	0x504	/* nx_risc_int */
};
508 
/*
 * 2700/8300 register offsets
 * (a 0xff offset marks a register these chips do not implement; see the
 * per-field "n/a" annotations)
 */
static reg_off_t reg_off_2700_8300 = {
	0x00,	/* flash_address */
	0x04,	/* flash_data */
	0x08,	/* ctrl_status */
	0x0c,	/* ictrl */
	0x10,	/* istatus */
	0xff,	/* semaphore - n/a */
	0xff,	/* nvram - n/a */
	0xff,	/* req_in - n/a */
	0xff,	/* req_out - n/a */
	0xff,	/* resp_in - n/a */
	0xff,	/* resp_out - n/a */
	0x44,	/* risc2host */
	32,	/* Number of mailboxes */

	/* Mailbox in register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	/* Mailbox out register offsets 0 - 31 */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,

	0xff,	/* fpm_diag_config - n/a */
	0xff,	/* pcr - n/a */
	0xff,	/* mctr - n/a */
	0xff,	/* fb_cmd - n/a */
	0x48,	/* hccr */
	0x4c,	/* gpiod */
	0x50,	/* gpioe */
	0x58,	/* host_to_host_sema */
	0xff,	/* pri_req_in - n/a */
	0xff,	/* pri_req_out - n/a */
	0xff,	/* atio_req_in - n/a */
	0xff,	/* atio_req_out - n/a */
	0x54,	/* io_base_addr */
	0xff,	/* nx_host_int - n/a */
	0xff	/* nx_risc_int - n/a */
};
553 
/*
 * Mutexes protecting variables shared by all instances of the driver.
 * Initialized in _init() and destroyed in _fini().
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;
kmutex_t ql_global_timer_mutex;

/* DMA access attribute structure (device data is little-endian). */
ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * I/O DMA attributes structures.
 * The 64-bit and 32-bit variants differ only in the high DMA address
 * bound; _init() may lower dma_attr_count_max on old OS releases.
 */
ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
597 
/*
 * Static declarations of cb_ops entry point functions...
 * Only open, close and ioctl are implemented; the remaining entry
 * points are nodev/nochpoll stubs.
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};
619 
/*
 * Static declarations of dev_ops entry point functions...
 * Note: the FC transport may adjust this table via fc_fca_init(),
 * called from _init().
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};
635 
/* ELS command code to text converter */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string. */
char ql_driver_version[] = QL_VERSION;

/* NOTE(review): presumed count of extended-log entries - confirm use. */
uint32_t ql_log_entries = QL_LOG_ENTRIES;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL					/* list terminator */
};
660 
661 /* ************************************************************************ */
662 /*				Loadable Module Routines.		    */
663 /* ************************************************************************ */
664 
665 /*
666  * _init
667  *	Initializes a loadable module. It is called before any other
668  *	routine in a loadable module.
669  *
670  * Returns:
671  *	0 = success
672  *
673  * Context:
674  *	Kernel context.
675  */
676 int
_init(void)677 _init(void)
678 {
679 	uint16_t	w16;
680 	int		rval = 0;
681 
682 	if (qlc_disable_load) {
683 		cmn_err(CE_WARN, "%s load disabled", QL_NAME);
684 		return (EINVAL);
685 	}
686 
687 	/* Get OS major release level. */
688 	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
689 		if (utsname.release[w16] == '.') {
690 			w16++;
691 			break;
692 		}
693 	}
694 	if (w16 < sizeof (utsname.release)) {
695 		(void) ql_bstr_to_dec(&utsname.release[w16],
696 		    &ql_os_release_level, 0);
697 	} else {
698 		ql_os_release_level = 0;
699 	}
700 	if (ql_os_release_level < 6) {
701 		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
702 		    QL_NAME, ql_os_release_level);
703 		rval = EINVAL;
704 	}
705 	if (ql_os_release_level == 6) {
706 		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
707 		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
708 	}
709 
710 	if (rval == 0) {
711 		rval = ddi_soft_state_init(&ql_state,
712 		    sizeof (ql_adapter_state_t), 0);
713 	}
714 	if (rval == 0) {
715 		/* allow the FC Transport to tweak the dev_ops */
716 		fc_fca_init(&ql_devops);
717 
718 		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
719 		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
720 		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
721 		mutex_init(&ql_global_timer_mutex, NULL, MUTEX_DRIVER, NULL);
722 		rval = mod_install(&modlinkage);
723 		if (rval != 0) {
724 			mutex_destroy(&ql_global_timer_mutex);
725 			mutex_destroy(&ql_global_el_mutex);
726 			mutex_destroy(&ql_global_hw_mutex);
727 			mutex_destroy(&ql_global_mutex);
728 			ddi_soft_state_fini(&ql_state);
729 		}
730 	}
731 
732 	if (rval != 0) {
733 		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
734 		    QL_NAME);
735 	}
736 
737 	return (rval);
738 }
739 
740 /*
741  * _fini
742  *	Prepares a module for unloading. It is called when the system
743  *	wants to unload a module. If the module determines that it can
744  *	be unloaded, then _fini() returns the value returned by
745  *	mod_remove(). Upon successful return from _fini() no other
746  *	routine in the module will be called before _init() is called.
747  *
748  * Returns:
749  *	0 = success
750  *
751  * Context:
752  *	Kernel context.
753  */
754 int
_fini(void)755 _fini(void)
756 {
757 	int	rval;
758 
759 	rval = mod_remove(&modlinkage);
760 	if (rval == 0) {
761 		mutex_destroy(&ql_global_timer_mutex);
762 		mutex_destroy(&ql_global_el_mutex);
763 		mutex_destroy(&ql_global_hw_mutex);
764 		mutex_destroy(&ql_global_mutex);
765 		ddi_soft_state_fini(&ql_state);
766 	}
767 
768 	return (rval);
769 }
770 
771 /*
772  * _info
773  *	Returns information about loadable module.
774  *
775  * Input:
776  *	modinfo = pointer to module information structure.
777  *
778  * Returns:
779  *	Value returned by mod_info().
780  *
781  * Context:
782  *	Kernel context.
783  */
784 int
_info(struct modinfo * modinfop)785 _info(struct modinfo *modinfop)
786 {
787 	return (mod_info(&modlinkage, modinfop));
788 }
789 
790 /* ************************************************************************ */
791 /*			dev_ops functions				    */
792 /* ************************************************************************ */
793 
794 /*
795  * ql_getinfo
796  *	Returns the pointer associated with arg when cmd is
797  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
798  *	instance number associated with arg when cmd is set
799  *	to DDI_INFO_DEV2INSTANCE.
800  *
801  * Input:
802  *	dip = Do not use.
803  *	cmd = command argument.
804  *	arg = command specific argument.
805  *	resultp = pointer to where request information is stored.
806  *
807  * Returns:
808  *	DDI_SUCCESS or DDI_FAILURE.
809  *
810  * Context:
811  *	Kernel context.
812  */
813 /* ARGSUSED */
814 static int
ql_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** resultp)815 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
816 {
817 	ql_adapter_state_t	*ha;
818 	int			minor;
819 	int			rval = DDI_FAILURE;
820 
821 	minor = (int)(getminor((dev_t)arg));
822 	ha = ddi_get_soft_state(ql_state, minor);
823 	if (ha == NULL) {
824 		QL_PRINT_2(ha, "failed, unknown minor=%d\n",
825 		    getminor((dev_t)arg));
826 		*resultp = NULL;
827 		return (rval);
828 	}
829 
830 	QL_PRINT_3(ha, "started\n");
831 
832 	switch (cmd) {
833 	case DDI_INFO_DEVT2DEVINFO:
834 		*resultp = ha->dip;
835 		rval = DDI_SUCCESS;
836 		break;
837 	case DDI_INFO_DEVT2INSTANCE:
838 		*resultp = (void *)(uintptr_t)(ha->instance);
839 		rval = DDI_SUCCESS;
840 		break;
841 	default:
842 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
843 		rval = DDI_FAILURE;
844 		break;
845 	}
846 
847 	QL_PRINT_3(ha, "done\n");
848 
849 	return (rval);
850 }
851 
852 /*
853  * ql_attach
854  *	Configure and attach an instance of the driver
855  *	for a port.
856  *
857  * Input:
858  *	dip = pointer to device information structure.
859  *	cmd = attach type.
860  *
861  * Returns:
862  *	DDI_SUCCESS or DDI_FAILURE.
863  *
864  * Context:
865  *	Kernel context.
866  */
867 static int
ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
869 {
870 	off_t			regsize;
871 	uint32_t		size;
872 	int			rval, *ptr;
873 	uint_t			progress = 0;
874 	char			*buf, taskq_name[32];
875 	ushort_t		caps_ptr, cap;
876 	fc_fca_tran_t		*tran;
877 	ql_adapter_state_t	*ha = NULL;
878 	int			instance = ddi_get_instance(dip);
879 
880 	static char *pmcomps[] = {
881 		NULL,
882 		PM_LEVEL_D3_STR,		/* Device OFF */
883 		PM_LEVEL_D0_STR,		/* Device ON */
884 	};
885 
886 	QL_PRINT_3(NULL, "started, instance=%d, cmd=%xh\n",
887 	    ddi_get_instance(dip), cmd);
888 
889 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
890 
891 	switch (cmd) {
892 	case DDI_ATTACH:
893 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
894 		    QL_NAME, instance, QL_VERSION);
895 
896 		/* Correct OS version? */
897 		if (ql_os_release_level != 11) {
898 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
899 			    "11", QL_NAME, instance);
900 			goto attach_failed;
901 		}
902 
903 		/* Hardware is installed in a DMA-capable slot? */
904 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
905 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
906 			    instance);
907 			goto attach_failed;
908 		}
909 
910 		/* Allocate our per-device-instance structure */
911 		if (ddi_soft_state_zalloc(ql_state,
912 		    instance) != DDI_SUCCESS) {
913 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
914 			    QL_NAME, instance);
915 			goto attach_failed;
916 		}
917 
918 		ha = ddi_get_soft_state(ql_state, instance);
919 		if (ha == NULL) {
920 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
921 			    QL_NAME, instance);
922 			goto attach_failed;
923 		}
924 		ha->dip = dip;
925 		ha->instance = instance;
926 		ha->hba.base_address = ha;
927 		ha->pha = ha;
928 
929 		ha->bit32_io_dma_attr = ql_32bit_io_dma_attr;
930 		ha->bit64_io_dma_attr = ql_64bit_io_dma_attr;
931 
932 		(void) ql_el_trace_alloc(ha);
933 
934 		progress |= QL_SOFT_STATE_ALLOCED;
935 
936 		/* Get extended logging and dump flags. */
937 		ql_common_properties(ha);
938 
939 		qlc_fm_init(ha);
940 		progress |= QL_FCA_INIT_FM;
941 
942 		ha->io_dma_attr = ha->bit32_io_dma_attr;
943 
944 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
945 		    "sbus") == 0) {
946 			EL(ha, "%s SBUS card detected\n", QL_NAME);
947 			ha->cfg_flags |= CFG_SBUS_CARD;
948 		}
949 
950 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
951 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
952 
953 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
954 		    QL_UB_LIMIT, KM_SLEEP);
955 
956 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
957 		    KM_SLEEP);
958 
959 		(void) ddi_pathname(dip, buf);
960 		ha->devpath = kmem_zalloc(strlen(buf) + 1, KM_SLEEP);
961 		if (ha->devpath == NULL) {
962 			EL(ha, "devpath mem alloc failed\n");
963 		} else {
964 			(void) strcpy(ha->devpath, buf);
965 			EL(ha, "devpath is: %s\n", ha->devpath);
966 		}
967 
968 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
969 			/*
970 			 * For cards where PCI is mapped to sbus e.g. Ivory.
971 			 *
972 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
973 			 *	: 0x100 - 0x3FF PCI IO space for 2200
974 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
975 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
976 			 */
977 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
978 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
979 			    DDI_SUCCESS) {
980 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
981 				    " registers", QL_NAME, instance);
982 				goto attach_failed;
983 			}
984 			if (ddi_regs_map_setup(dip, 1,
985 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
986 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
987 			    DDI_SUCCESS) {
988 				/* We should not fail attach here */
989 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
990 				    QL_NAME, instance);
991 				ha->sbus_fpga_iobase = NULL;
992 			}
993 			progress |= QL_REGS_MAPPED;
994 
995 			/*
996 			 * We should map config space before adding interrupt
997 			 * So that the chip type (2200 or 2300) can be
998 			 * determined before the interrupt routine gets a
999 			 * chance to execute.
1000 			 */
1001 			if (ddi_regs_map_setup(dip, 0,
1002 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
1003 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
1004 			    DDI_SUCCESS) {
1005 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
1006 				    "config registers", QL_NAME, instance);
1007 				goto attach_failed;
1008 			}
1009 			progress |= QL_CONFIG_SPACE_SETUP;
1010 		} else {
1011 			/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
1012 			rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1013 			    DDI_PROP_DONTPASS, "reg", &ptr, &size);
1014 			if (rval != DDI_PROP_SUCCESS) {
1015 				cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
1016 				    "address registers", QL_NAME, instance);
1017 				goto attach_failed;
1018 			} else {
1019 				ha->pci_bus_addr = ptr[0];
1020 				ha->pci_function_number = (uint8_t)
1021 				    (ha->pci_bus_addr >> 8 & 7);
1022 				ddi_prop_free(ptr);
1023 			}
1024 
1025 			/*
1026 			 * We should map config space before adding interrupt
1027 			 * So that the chip type (2200 or 2300) can be
1028 			 * determined before the interrupt routine gets a
1029 			 * chance to execute.
1030 			 */
1031 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
1032 			    DDI_SUCCESS) {
1033 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
1034 				    "config space", QL_NAME, instance);
1035 				goto attach_failed;
1036 			}
1037 			progress |= QL_CONFIG_SPACE_SETUP;
1038 
1039 			/*
1040 			 * Setup the ISP2200 registers address mapping to be
1041 			 * accessed by this particular driver.
1042 			 * 0x0   Configuration Space
1043 			 * 0x1   I/O Space
1044 			 * 0x2   32-bit Memory Space address
1045 			 * 0x3   64-bit Memory Space address
1046 			 */
1047 			size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
1048 			    2 : 1;
1049 
1050 			if (qlc_fm_check_acc_handle(ha, ha->pci_handle)
1051 			    != DDI_FM_OK) {
1052 				qlc_fm_report_err_impact(ha,
1053 				    QL_FM_EREPORT_ACC_HANDLE_CHECK);
1054 				goto attach_failed;
1055 			}
1056 
1057 			if (ddi_dev_regsize(dip, size, &regsize) !=
1058 			    DDI_SUCCESS ||
1059 			    ddi_regs_map_setup(dip, size, &ha->iobase,
1060 			    0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1061 			    DDI_SUCCESS) {
1062 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1063 				    "failed", QL_NAME, instance);
1064 				goto attach_failed;
1065 			}
1066 			progress |= QL_REGS_MAPPED;
1067 
1068 			if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1069 			    != DDI_FM_OK) {
1070 				qlc_fm_report_err_impact(ha,
1071 				    QL_FM_EREPORT_ACC_HANDLE_CHECK);
1072 				goto attach_failed;
1073 			}
1074 
1075 			/*
1076 			 * We need I/O space mappings for 23xx HBAs for
1077 			 * loading flash (FCode). The chip has a bug due to
1078 			 * which loading flash fails through mem space
1079 			 * mappings in PCI-X mode.
1080 			 */
1081 			if (size == 1) {
1082 				ha->iomap_iobase = ha->iobase;
1083 				ha->iomap_dev_handle = ha->dev_handle;
1084 			} else {
1085 				if (ddi_dev_regsize(dip, 1, &regsize) !=
1086 				    DDI_SUCCESS ||
1087 				    ddi_regs_map_setup(dip, 1,
1088 				    &ha->iomap_iobase, 0, regsize,
1089 				    &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1090 				    DDI_SUCCESS) {
1091 					cmn_err(CE_WARN, "%s(%d): regs_map_"
1092 					    "setup(I/O) failed", QL_NAME,
1093 					    instance);
1094 					goto attach_failed;
1095 				}
1096 				progress |= QL_IOMAP_IOBASE_MAPPED;
1097 
1098 				if (qlc_fm_check_acc_handle(ha,
1099 				    ha->iomap_dev_handle) != DDI_FM_OK) {
1100 					qlc_fm_report_err_impact(ha,
1101 					    QL_FM_EREPORT_ACC_HANDLE_CHECK);
1102 					goto attach_failed;
1103 				}
1104 			}
1105 		}
1106 
1107 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1108 		    PCI_CONF_SUBSYSID);
1109 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1110 		    PCI_CONF_SUBVENID);
1111 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1112 		    PCI_CONF_VENID);
1113 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1114 		    PCI_CONF_DEVID);
1115 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1116 		    PCI_CONF_REVID);
1117 
1118 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1119 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1120 		    ha->subven_id, ha->subsys_id);
1121 
1122 		switch (ha->device_id) {
1123 		case 0x2300:
1124 		case 0x2312:
1125 		case 0x2322:
1126 		case 0x6312:
1127 		case 0x6322:
1128 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1129 				ha->function_number = 1;
1130 			}
1131 			if (ha->device_id == 0x2322 ||
1132 			    ha->device_id == 0x6322) {
1133 				ha->cfg_flags |= CFG_CTRL_63XX;
1134 				ha->fw_class = 0x6322;
1135 				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1136 			} else {
1137 				ha->cfg_flags |= CFG_CTRL_23XX;
1138 				ha->fw_class = 0x2300;
1139 				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1140 			}
1141 			ha->reg_off = &reg_off_2300;
1142 			ha->interrupt_count = 1;
1143 			ha->osc_max_cnt = 1024;
1144 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1145 				goto attach_failed;
1146 			}
1147 			ha->fcp_cmd = ql_command_iocb;
1148 			ha->ip_cmd = ql_ip_iocb;
1149 			ha->ms_cmd = ql_ms_iocb;
1150 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1151 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1152 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1153 			} else {
1154 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1155 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1156 			}
1157 			break;
1158 
1159 		case 0x2200:
1160 			ha->cfg_flags |= CFG_CTRL_22XX;
1161 			ha->reg_off = &reg_off_2200;
1162 			ha->interrupt_count = 1;
1163 			ha->osc_max_cnt = 1024;
1164 			ha->fw_class = 0x2200;
1165 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1166 				goto attach_failed;
1167 			}
1168 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1169 			ha->fcp_cmd = ql_command_iocb;
1170 			ha->ip_cmd = ql_ip_iocb;
1171 			ha->ms_cmd = ql_ms_iocb;
1172 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1173 				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1174 				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1175 			} else {
1176 				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1177 				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1178 			}
1179 			break;
1180 
1181 		case 0x2422:
1182 		case 0x2432:
1183 		case 0x5422:
1184 		case 0x5432:
1185 		case 0x8432:
1186 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1187 				ha->function_number = 1;
1188 			}
1189 			ha->cfg_flags |= CFG_CTRL_24XX;
1190 			if (ha->device_id == 0x8432) {
1191 				ha->cfg_flags |= CFG_CTRL_MENLO;
1192 			} else {
1193 				ha->flags |= VP_ENABLED;
1194 				ha->max_vports = MAX_24_VIRTUAL_PORTS;
1195 			}
1196 
1197 			ha->reg_off = &reg_off_2400_2500;
1198 			ha->interrupt_count = 2;
1199 			ha->osc_max_cnt = 2048;
1200 			ha->fw_class = 0x2400;
1201 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1202 				goto attach_failed;
1203 			}
1204 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1205 			ha->fcp_cmd = ql_command_24xx_iocb;
1206 			ha->ip_cmd = ql_ip_24xx_iocb;
1207 			ha->ms_cmd = ql_ms_24xx_iocb;
1208 			ha->els_cmd = ql_els_24xx_iocb;
1209 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1210 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1211 			break;
1212 
1213 		case 0x2522:
1214 		case 0x2532:
1215 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1216 				ha->function_number = 1;
1217 			}
1218 			ha->cfg_flags |= CFG_CTRL_25XX;
1219 			ha->flags |= VP_ENABLED;
1220 			ha->max_vports = MAX_25_VIRTUAL_PORTS;
1221 			ha->reg_off = &reg_off_2400_2500;
1222 			ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1223 			ha->interrupt_count = 2;
1224 			ha->osc_max_cnt = 2048;
1225 			ha->fw_class = 0x2500;
1226 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1227 				goto attach_failed;
1228 			}
1229 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1230 			ha->fcp_cmd = ql_command_24xx_iocb;
1231 			ha->ms_cmd = ql_ms_24xx_iocb;
1232 			ha->els_cmd = ql_els_24xx_iocb;
1233 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1234 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1235 			if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1236 				ha->flags |= MULTI_QUEUE;
1237 			}
1238 			break;
1239 
1240 		case 0x2031:
1241 			/* Get queue pointer memory mapped registers */
1242 			if (ddi_dev_regsize(dip, 3, &regsize) != DDI_SUCCESS ||
1243 			    ddi_regs_map_setup(dip, 3, &ha->mbar,
1244 			    0, regsize, &ql_dev_acc_attr,
1245 			    &ha->mbar_dev_handle) != DDI_SUCCESS) {
1246 				cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1247 				    "(mbar) failed", QL_NAME, instance);
1248 				goto attach_failed;
1249 			}
1250 			ha->mbar_size = (uint32_t)regsize;
1251 
1252 			if (ha->pci_function_number != 0 &&
1253 			    ha->pci_function_number != 2) {
1254 				ha->function_number = 1;
1255 			}
1256 			ha->cfg_flags |= CFG_CTRL_83XX;
1257 			ha->flags |= VP_ENABLED | MULTI_QUEUE;
1258 			ha->max_vports = MAX_83_VIRTUAL_PORTS;
1259 			ha->reg_off = &reg_off_2700_8300;
1260 			ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1261 			ha->interrupt_count = 2;
1262 			ha->osc_max_cnt = 2048;
1263 			ha->fw_class = 0x8301fc;
1264 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1265 				goto attach_failed;
1266 			}
1267 			ha->risc_dump_size = QL_83XX_FW_DUMP_SIZE;
1268 			ha->fcp_cmd = ql_command_24xx_iocb;
1269 			ha->ms_cmd = ql_ms_24xx_iocb;
1270 			ha->els_cmd = ql_els_24xx_iocb;
1271 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1272 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1273 			break;
1274 
1275 		case 0x2071:
1276 		case 0x2261:
1277 		case 0x2271:
1278 			/* Get queue pointer memory mapped registers */
1279 			if (ddi_dev_regsize(dip, 3, &regsize) != DDI_SUCCESS ||
1280 			    ddi_regs_map_setup(dip, 3, &ha->mbar,
1281 			    0, regsize, &ql_dev_acc_attr,
1282 			    &ha->mbar_dev_handle) != DDI_SUCCESS) {
1283 				cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1284 				    "(mbar) failed", QL_NAME, instance);
1285 				goto attach_failed;
1286 			}
1287 			ha->mbar_size = (uint32_t)regsize;
1288 
1289 			ha->function_number = ha->pci_function_number;
1290 			ha->cfg_flags |= CFG_CTRL_27XX;
1291 			ha->flags |= VP_ENABLED | MULTI_QUEUE |
1292 			    QUEUE_SHADOW_PTRS;
1293 			ha->max_vports = MAX_27_VIRTUAL_PORTS;
1294 			ha->reg_off = &reg_off_2700_8300;
1295 			ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1296 			ha->interrupt_count = 2;
1297 			ha->osc_max_cnt = 2048;
1298 			ha->fw_class = 0x2700;
1299 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1300 				goto attach_failed;
1301 			}
1302 			ha->risc_dump_size = QL_27XX_FW_DUMP_SIZE;
1303 			ha->fcp_cmd = ql_command_24xx_iocb;
1304 			ha->ms_cmd = ql_ms_24xx_iocb;
1305 			ha->els_cmd = ql_els_24xx_iocb;
1306 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1307 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1308 			break;
1309 
1310 		case 0x8001:
1311 			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1312 				ha->function_number = 1;
1313 			}
1314 			ha->cfg_flags |= CFG_CTRL_81XX;
1315 			ha->flags |= VP_ENABLED;
1316 			ha->max_vports = MAX_81XX_VIRTUAL_PORTS;
1317 			ha->reg_off = &reg_off_2400_2500;
1318 			ha->mbar_queue_offset = MBAR2_REG_OFFSET;
1319 			ha->interrupt_count = 2;
1320 			ha->osc_max_cnt = 2048;
1321 			ha->fw_class = 0x8100;
1322 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1323 				goto attach_failed;
1324 			}
1325 			ha->risc_dump_size = QL_81XX_FW_DUMP_SIZE;
1326 			ha->fcp_cmd = ql_command_24xx_iocb;
1327 			ha->ms_cmd = ql_ms_24xx_iocb;
1328 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1329 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1330 			if (ql_multi_queue_support(ha) == QL_SUCCESS) {
1331 				ha->flags |= MULTI_QUEUE;
1332 			}
1333 			break;
1334 
1335 		case 0x8021:
1336 			if (ha->pci_function_number & BIT_0) {
1337 				ha->function_number = 1;
1338 			}
1339 			ha->cfg_flags |= CFG_CTRL_82XX;
1340 			ha->flags |= VP_ENABLED;
1341 			ha->max_vports = MAX_8021_VIRTUAL_PORTS;
1342 			ha->reg_off = &reg_off_8021;
1343 			ha->interrupt_count = 2;
1344 			ha->osc_max_cnt = 2048;
1345 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1346 			ha->fcp_cmd = ql_command_24xx_iocb;
1347 			ha->ms_cmd = ql_ms_24xx_iocb;
1348 			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1349 			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1350 			ha->io_dma_attr.dma_attr_flags |=
1351 			    DDI_DMA_RELAXED_ORDERING;
1352 
1353 			ha->nx_pcibase = ha->iobase;
1354 			ha->iobase += 0xBC000 + (ha->pci_function_number << 11);
1355 			ha->iomap_iobase += 0xBC000 +
1356 			    (ha->pci_function_number << 11);
1357 
1358 			/* map doorbell */
1359 			if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1360 			    ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1361 			    0, regsize, &ql_dev_acc_attr,
1362 			    &ha->db_dev_handle) !=
1363 			    DDI_SUCCESS) {
1364 				cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1365 				    "(doorbell) failed", QL_NAME, instance);
1366 				goto attach_failed;
1367 			}
1368 			progress |= QL_DB_IOBASE_MAPPED;
1369 
1370 			if (qlc_fm_check_acc_handle(ha, ha->db_dev_handle)
1371 			    != DDI_FM_OK) {
1372 				qlc_fm_report_err_impact(ha,
1373 				    QL_FM_EREPORT_ACC_HANDLE_CHECK);
1374 				goto attach_failed;
1375 			}
1376 
1377 			ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1378 			    (ha->pci_function_number << 12));
1379 			ha->db_read = ha->nx_pcibase + (512 * 1024) +
1380 			    (ha->pci_function_number * 8);
1381 
1382 			ql_8021_update_crb_int_ptr(ha);
1383 			ql_8021_set_drv_active(ha);
1384 			break;
1385 
1386 		default:
1387 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1388 			    QL_NAME, instance, ha->device_id);
1389 			goto attach_failed;
1390 		}
1391 
1392 		ha->outstanding_cmds = kmem_zalloc(
1393 		    sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt,
1394 		    KM_SLEEP);
1395 
1396 		/* Setup interrupts */
1397 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1398 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1399 			    "rval=%xh", QL_NAME, instance, rval);
1400 			goto attach_failed;
1401 		}
1402 
1403 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1404 
1405 		/* Setup hba buffer. */
1406 		if (ql_create_queues(ha) != QL_SUCCESS) {
1407 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1408 			    "alloc failed", QL_NAME, instance);
1409 			goto attach_failed;
1410 		}
1411 		progress |= QL_HBA_BUFFER_SETUP;
1412 
1413 		/* Allocate resource for QLogic IOCTL */
1414 		(void) ql_alloc_xioctl_resource(ha);
1415 
1416 
1417 		if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1418 			cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1419 			    QL_NAME, instance);
1420 			goto attach_failed;
1421 		}
1422 
1423 		progress |= QL_NVRAM_CACHE_CREATED;
1424 
1425 		if (ql_plogi_params_desc_ctor(ha) != DDI_SUCCESS) {
1426 			cmn_err(CE_WARN, "%s(%d): can't setup plogi params",
1427 			    QL_NAME, instance);
1428 			goto attach_failed;
1429 		}
1430 
1431 		progress |= QL_PLOGI_PARAMS_CREATED;
1432 
1433 		/*
1434 		 * Allocate an N Port information structure
1435 		 * for use when in P2P topology.
1436 		 */
1437 		ha->n_port = (ql_n_port_info_t *)
1438 		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1439 		if (ha->n_port == NULL) {
1440 			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1441 			    QL_NAME, instance);
1442 			goto attach_failed;
1443 		}
1444 
1445 		progress |= QL_N_PORT_INFO_CREATED;
1446 
1447 		/*
1448 		 * Determine support for Power Management
1449 		 */
1450 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1451 
1452 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1453 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1454 			if (cap == PCI_CAP_ID_PM) {
1455 				ha->pm_capable = 1;
1456 				break;
1457 			}
1458 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1459 			    PCI_CAP_NEXT_PTR);
1460 		}
1461 
1462 		if (ha->pm_capable) {
1463 			/*
1464 			 * Enable PM for 2200 based HBAs only.
1465 			 */
1466 			if (ha->device_id != 0x2200) {
1467 				ha->pm_capable = 0;
1468 			}
1469 		}
1470 
1471 		if (ha->pm_capable) {
1472 			ha->pm_capable = ql_enable_pm;
1473 		}
1474 
1475 		if (ha->pm_capable) {
1476 			/*
1477 			 * Initialize power management bookkeeping;
1478 			 * components are created idle.
1479 			 */
1480 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1481 			pmcomps[0] = buf;
1482 
1483 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1484 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1485 			    dip, "pm-components", pmcomps,
1486 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1487 			    DDI_PROP_SUCCESS) {
1488 				cmn_err(CE_WARN, "%s(%d): failed to create"
1489 				    " pm-components property", QL_NAME,
1490 				    instance);
1491 
1492 				/* Initialize adapter. */
1493 				ha->power_level = PM_LEVEL_D0;
1494 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1495 					cmn_err(CE_WARN, "%s(%d): failed to"
1496 					    " initialize adapter", QL_NAME,
1497 					    instance);
1498 					goto attach_failed;
1499 				}
1500 			} else {
1501 				ha->power_level = PM_LEVEL_D3;
1502 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1503 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1504 					cmn_err(CE_WARN, "%s(%d): failed to"
1505 					    " raise power or initialize"
1506 					    " adapter", QL_NAME, instance);
1507 				}
1508 			}
1509 		} else {
1510 			/* Initialize adapter. */
1511 			ha->power_level = PM_LEVEL_D0;
1512 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1513 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1514 				    " adapter", QL_NAME, instance);
1515 			}
1516 		}
1517 
1518 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1519 		    ha->fw_subminor_version == 0) {
1520 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1521 			    QL_NAME, ha->instance);
1522 		} else {
1523 			int	rval, rval1;
1524 			char	ver_fmt[256];
1525 
1526 			rval1 = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1527 			    "Firmware version %d.%d.%d", ha->fw_major_version,
1528 			    ha->fw_minor_version, ha->fw_subminor_version);
1529 
1530 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
1531 				rval = (int)snprintf(ver_fmt + rval1,
1532 				    (size_t)sizeof (ver_fmt),
1533 				    ", MPI fw version %d.%d.%d",
1534 				    ha->mpi_fw_major_version,
1535 				    ha->mpi_fw_minor_version,
1536 				    ha->mpi_fw_subminor_version);
1537 
1538 				if (ha->subsys_id == 0x17B ||
1539 				    ha->subsys_id == 0x17D) {
1540 					(void) snprintf(ver_fmt + rval1 + rval,
1541 					    (size_t)sizeof (ver_fmt),
1542 					    ", PHY fw version %d.%d.%d",
1543 					    ha->phy_fw_major_version,
1544 					    ha->phy_fw_minor_version,
1545 					    ha->phy_fw_subminor_version);
1546 				}
1547 			}
1548 			cmn_err(CE_NOTE, "!%s(%d): %s",
1549 			    QL_NAME, ha->instance, ver_fmt);
1550 		}
1551 
1552 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1553 		    "controller", KSTAT_TYPE_RAW,
1554 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1555 		if (ha->k_stats == NULL) {
1556 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1557 			    QL_NAME, instance);
1558 			goto attach_failed;
1559 		}
1560 		progress |= QL_KSTAT_CREATED;
1561 
1562 		ha->adapter_stats->version = 1;
1563 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1564 		ha->k_stats->ks_private = ha;
1565 		ha->k_stats->ks_update = ql_kstat_update;
1566 		ha->k_stats->ks_ndata = 1;
1567 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1568 		kstat_install(ha->k_stats);
1569 
1570 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1571 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1572 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1573 			    QL_NAME, instance);
1574 			goto attach_failed;
1575 		}
1576 		progress |= QL_MINOR_NODE_CREATED;
1577 
1578 		/* Allocate a transport structure for this instance */
1579 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1580 		if (tran == NULL) {
1581 			cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1582 			    QL_NAME, instance);
1583 			goto attach_failed;
1584 		}
1585 
1586 		progress |= QL_FCA_TRAN_ALLOCED;
1587 
1588 		/* fill in the structure */
1589 		tran->fca_numports = 1;
1590 		tran->fca_version = FCTL_FCA_MODREV_5;
1591 		tran->fca_num_npivports = ha->max_vports ?
1592 		    ha->max_vports - 1 : 0;
1593 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1594 		    tran->fca_perm_pwwn.raw_wwn, 8);
1595 
1596 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1597 			ha->io_dma_attr = ha->bit64_io_dma_attr;
1598 			ha->fcsm_cmd_dma_attr = ha->bit64_io_dma_attr;
1599 			ha->fcsm_rsp_dma_attr = ha->bit64_io_dma_attr;
1600 			ha->fcip_cmd_dma_attr = ha->bit64_io_dma_attr;
1601 			ha->fcip_rsp_dma_attr = ha->bit64_io_dma_attr;
1602 			ha->fcp_cmd_dma_attr = ha->bit64_io_dma_attr;
1603 			ha->fcp_rsp_dma_attr = ha->bit64_io_dma_attr;
1604 			ha->fcp_data_dma_attr = ha->bit64_io_dma_attr;
1605 		} else {
1606 			ha->io_dma_attr = ha->bit32_io_dma_attr;
1607 			ha->fcsm_cmd_dma_attr = ha->bit32_io_dma_attr;
1608 			ha->fcsm_rsp_dma_attr = ha->bit32_io_dma_attr;
1609 			ha->fcip_cmd_dma_attr = ha->bit32_io_dma_attr;
1610 			ha->fcip_rsp_dma_attr = ha->bit32_io_dma_attr;
1611 			ha->fcp_cmd_dma_attr = ha->bit32_io_dma_attr;
1612 			ha->fcp_rsp_dma_attr = ha->bit32_io_dma_attr;
1613 			ha->fcp_data_dma_attr = ha->bit32_io_dma_attr;
1614 		}
1615 		ha->fcsm_cmd_dma_attr.dma_attr_sgllen = QL_FCSM_CMD_SGLLEN;
1616 		ha->fcsm_rsp_dma_attr.dma_attr_sgllen = QL_FCSM_RSP_SGLLEN;
1617 		ha->fcip_cmd_dma_attr.dma_attr_sgllen = QL_FCIP_CMD_SGLLEN;
1618 		ha->fcip_rsp_dma_attr.dma_attr_sgllen = QL_FCIP_RSP_SGLLEN;
1619 		ha->fcp_cmd_dma_attr.dma_attr_sgllen = QL_FCP_CMD_SGLLEN;
1620 		ha->fcp_rsp_dma_attr.dma_attr_sgllen = QL_FCP_RSP_SGLLEN;
1621 		if (CFG_IST(ha, CFG_CTRL_82XX)) {
1622 			ha->io_dma_attr.dma_attr_flags |=
1623 			    DDI_DMA_RELAXED_ORDERING;
1624 			ha->fcsm_cmd_dma_attr.dma_attr_flags |=
1625 			    DDI_DMA_RELAXED_ORDERING;
1626 			ha->fcsm_rsp_dma_attr.dma_attr_flags |=
1627 			    DDI_DMA_RELAXED_ORDERING;
1628 			ha->fcip_cmd_dma_attr.dma_attr_flags |=
1629 			    DDI_DMA_RELAXED_ORDERING;
1630 			ha->fcip_rsp_dma_attr.dma_attr_flags |=
1631 			    DDI_DMA_RELAXED_ORDERING;
1632 			ha->fcp_cmd_dma_attr.dma_attr_flags |=
1633 			    DDI_DMA_RELAXED_ORDERING;
1634 			ha->fcp_rsp_dma_attr.dma_attr_flags |=
1635 			    DDI_DMA_RELAXED_ORDERING;
1636 			ha->fcp_data_dma_attr.dma_attr_flags |=
1637 			    DDI_DMA_RELAXED_ORDERING;
1638 		}
1639 
1640 		/* Specify the amount of space needed in each packet */
1641 		tran->fca_pkt_size = sizeof (ql_srb_t);
1642 
1643 		/* command limits are usually dictated by hardware */
1644 		tran->fca_cmd_max = ha->osc_max_cnt;
1645 
1646 		/* dmaattr are static, set elsewhere. */
1647 		tran->fca_dma_attr = &ha->io_dma_attr;
1648 		tran->fca_dma_fcp_cmd_attr = &ha->fcp_cmd_dma_attr;
1649 		tran->fca_dma_fcp_rsp_attr = &ha->fcp_rsp_dma_attr;
1650 		tran->fca_dma_fcp_data_attr = &ha->fcp_data_dma_attr;
1651 		tran->fca_dma_fcsm_cmd_attr = &ha->fcsm_cmd_dma_attr;
1652 		tran->fca_dma_fcsm_rsp_attr = &ha->fcsm_rsp_dma_attr;
1653 		tran->fca_dma_fcip_cmd_attr = &ha->fcip_cmd_dma_attr;
1654 		tran->fca_dma_fcip_rsp_attr = &ha->fcip_rsp_dma_attr;
1655 		tran->fca_acc_attr = &ql_dev_acc_attr;
1656 		tran->fca_iblock = &(ha->iblock_cookie);
1657 
1658 		/* the remaining values are simply function vectors */
1659 		tran->fca_bind_port = ql_bind_port;
1660 		tran->fca_unbind_port = ql_unbind_port;
1661 		tran->fca_init_pkt = ql_init_pkt;
1662 		tran->fca_un_init_pkt = ql_un_init_pkt;
1663 		tran->fca_els_send = ql_els_send;
1664 		tran->fca_get_cap = ql_get_cap;
1665 		tran->fca_set_cap = ql_set_cap;
1666 		tran->fca_getmap = ql_getmap;
1667 		tran->fca_transport = ql_transport;
1668 		tran->fca_ub_alloc = ql_ub_alloc;
1669 		tran->fca_ub_free = ql_ub_free;
1670 		tran->fca_ub_release = ql_ub_release;
1671 		tran->fca_abort = ql_abort;
1672 		tran->fca_reset = ql_reset;
1673 		tran->fca_port_manage = ql_port_manage;
1674 		tran->fca_get_device = ql_get_device;
1675 
1676 		EL(ha, "Transport interface setup. FCA version %d\n",
1677 		    tran->fca_version);
1678 
1679 		/* give it to the FC transport */
1680 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1681 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1682 			    instance);
1683 			goto attach_failed;
1684 		}
1685 		progress |= QL_FCA_ATTACH_DONE;
1686 
1687 		/* Stash the structure so it can be freed at detach */
1688 		ha->tran = tran;
1689 
1690 		/* Acquire global state lock. */
1691 		GLOBAL_STATE_LOCK();
1692 
1693 		/* Add adapter structure to link list. */
1694 		ql_add_link_b(&ql_hba, &ha->hba);
1695 
1696 		/* Determine and populate HBA fru info */
1697 		ql_setup_fruinfo(ha);
1698 
1699 		/* Release global state lock. */
1700 		GLOBAL_STATE_UNLOCK();
1701 
1702 		/* Start one second driver timer. */
1703 		GLOBAL_TIMER_LOCK();
1704 		if (ql_timer_timeout_id == NULL) {
1705 			ql_timer_ticks = drv_usectohz(1000000);
1706 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1707 			    ql_timer_ticks);
1708 		}
1709 		GLOBAL_TIMER_UNLOCK();
1710 
1711 		/* Setup task_daemon thread. */
1712 		(void) snprintf(taskq_name, sizeof (taskq_name),
1713 		    "qlc_%d_driver_thread", instance);
1714 		ha->driver_thread_taskq = ddi_taskq_create(NULL, taskq_name, 1,
1715 		    TASKQ_DEFAULTPRI, 0);
1716 		(void) ddi_taskq_dispatch(ha->driver_thread_taskq,
1717 		    ql_task_daemon, ha, DDI_SLEEP);
1718 		ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
1719 
1720 		(void) snprintf(taskq_name, sizeof (taskq_name),
1721 		    "qlc_%d_comp_thd", instance);
1722 		ha->completion_taskq = ddi_taskq_create(0, taskq_name,
1723 		    ha->completion_thds, maxclsyspri, 0);
1724 		for (size = 0; size < ha->completion_thds; size++) {
1725 			(void) ddi_taskq_dispatch(ha->completion_taskq,
1726 			    ql_completion_thread, ha, DDI_SLEEP);
1727 		}
1728 
1729 		progress |= QL_TASK_DAEMON_STARTED;
1730 
1731 		ddi_report_dev(dip);
1732 
1733 		/* Disable link reset in panic path */
1734 		ha->lip_on_panic = 1;
1735 
1736 		rval = DDI_SUCCESS;
1737 		break;
1738 
1739 attach_failed:
1740 		if (progress & QL_FCA_INIT_FM) {
1741 			qlc_fm_fini(ha);
1742 			progress &= ~QL_FCA_INIT_FM;
1743 		}
1744 
1745 		if (progress & QL_FCA_ATTACH_DONE) {
1746 			(void) fc_fca_detach(dip);
1747 			progress &= ~QL_FCA_ATTACH_DONE;
1748 		}
1749 
1750 		if (progress & QL_FCA_TRAN_ALLOCED) {
1751 			kmem_free(tran, sizeof (fc_fca_tran_t));
1752 			progress &= ~QL_FCA_TRAN_ALLOCED;
1753 		}
1754 
1755 		if (progress & QL_MINOR_NODE_CREATED) {
1756 			ddi_remove_minor_node(dip, "devctl");
1757 			progress &= ~QL_MINOR_NODE_CREATED;
1758 		}
1759 
1760 		if (progress & QL_KSTAT_CREATED) {
1761 			kstat_delete(ha->k_stats);
1762 			progress &= ~QL_KSTAT_CREATED;
1763 		}
1764 
1765 		if (progress & QL_N_PORT_INFO_CREATED) {
1766 			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1767 			progress &= ~QL_N_PORT_INFO_CREATED;
1768 		}
1769 
1770 		if (progress & QL_PLOGI_PARAMS_CREATED) {
1771 			(void) ql_plogi_params_desc_dtor(ha);
1772 			progress &= ~QL_PLOGI_PARAMS_CREATED;
1773 		}
1774 
1775 		if (progress & QL_NVRAM_CACHE_CREATED) {
1776 			(void) ql_nvram_cache_desc_dtor(ha);
1777 			progress &= ~QL_NVRAM_CACHE_CREATED;
1778 		}
1779 
1780 		if (progress & QL_TASK_DAEMON_STARTED) {
1781 			if (ha->driver_thread_taskq) {
1782 				while (ha->task_daemon_flags &
1783 				    TASK_DAEMON_ALIVE_FLG) {
1784 					/* Delay for 1 tick (10 ms). */
1785 					ql_awaken_task_daemon(ha, NULL,
1786 					    TASK_DAEMON_STOP_FLG, 0);
1787 					delay(1);
1788 				}
1789 				ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1790 
1791 				ddi_taskq_destroy(ha->driver_thread_taskq);
1792 				ha->driver_thread_taskq = NULL;
1793 			}
1794 			if (ha->completion_taskq) {
1795 				ADAPTER_STATE_LOCK(ha);
1796 				ha->flags |= COMP_THD_TERMINATE;
1797 				ADAPTER_STATE_UNLOCK(ha);
1798 
1799 				do {
1800 					COMP_Q_LOCK(ha);
1801 					cv_broadcast(&ha->cv_comp_thread);
1802 					COMP_Q_UNLOCK(ha);
1803 					ql_delay(ha, 10000);
1804 				} while (ha->comp_thds_active != 0);
1805 
1806 				ddi_taskq_destroy(ha->completion_taskq);
1807 				ha->completion_taskq = NULL;
1808 			}
1809 			progress &= ~QL_TASK_DAEMON_STARTED;
1810 		}
1811 
1812 		if (progress & QL_DB_IOBASE_MAPPED) {
1813 			ql_8021_clr_drv_active(ha);
1814 			ddi_regs_map_free(&ha->db_dev_handle);
1815 			progress &= ~QL_DB_IOBASE_MAPPED;
1816 		}
1817 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1818 			ddi_regs_map_free(&ha->iomap_dev_handle);
1819 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1820 		}
1821 		if (progress & QL_REGS_MAPPED) {
1822 			if (ha->mbar_dev_handle) {
1823 				ddi_regs_map_free(&ha->mbar_dev_handle);
1824 				ha->mbar_dev_handle = 0;
1825 			}
1826 		}
1827 
1828 		if (progress & QL_CONFIG_SPACE_SETUP) {
1829 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1830 				ddi_regs_map_free(&ha->sbus_config_handle);
1831 			} else {
1832 				pci_config_teardown(&ha->pci_handle);
1833 			}
1834 			progress &= ~QL_CONFIG_SPACE_SETUP;
1835 		}
1836 
1837 		if (progress & QL_INTR_ADDED) {
1838 			ql_disable_intr(ha);
1839 			ql_release_intr(ha);
1840 			progress &= ~QL_INTR_ADDED;
1841 		}
1842 
1843 		if (progress & QL_MUTEX_CV_INITED) {
1844 			ql_destroy_mutex(ha);
1845 			progress &= ~QL_MUTEX_CV_INITED;
1846 		}
1847 
1848 		if (progress & QL_HBA_BUFFER_SETUP) {
1849 			ql_delete_queues(ha);
1850 			progress &= ~QL_HBA_BUFFER_SETUP;
1851 		}
1852 
1853 		if (progress & QL_REGS_MAPPED) {
1854 			ddi_regs_map_free(&ha->dev_handle);
1855 			if (ha->sbus_fpga_iobase != NULL) {
1856 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1857 			}
1858 			progress &= ~QL_REGS_MAPPED;
1859 		}
1860 
1861 		if (progress & QL_SOFT_STATE_ALLOCED) {
1862 
1863 			ql_fcache_rel(ha->fcache);
1864 
1865 			kmem_free(ha->adapter_stats,
1866 			    sizeof (*ha->adapter_stats));
1867 
1868 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1869 			    QL_UB_LIMIT);
1870 
1871 			if (ha->outstanding_cmds != NULL) {
1872 				kmem_free(ha->outstanding_cmds,
1873 				    sizeof (*ha->outstanding_cmds) *
1874 				    ha->osc_max_cnt);
1875 			}
1876 
1877 			if (ha->devpath != NULL) {
1878 				kmem_free(ha->devpath,
1879 				    strlen(ha->devpath) + 1);
1880 			}
1881 
1882 			kmem_free(ha->dev, sizeof (*ha->dev) *
1883 			    DEVICE_HEAD_LIST_SIZE);
1884 
1885 			if (ha->xioctl != NULL) {
1886 				ql_free_xioctl_resource(ha);
1887 			}
1888 
1889 			if (ha->fw_module != NULL) {
1890 				(void) ddi_modclose(ha->fw_module);
1891 			}
1892 			(void) ql_el_trace_dealloc(ha);
1893 
1894 			ddi_soft_state_free(ql_state, instance);
1895 			progress &= ~QL_SOFT_STATE_ALLOCED;
1896 		}
1897 
1898 		ddi_prop_remove_all(dip);
1899 		rval = DDI_FAILURE;
1900 		break;
1901 
1902 	case DDI_RESUME:
1903 		rval = DDI_FAILURE;
1904 
1905 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1906 		if (ha == NULL) {
1907 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1908 			    QL_NAME, instance);
1909 			break;
1910 		}
1911 
1912 		ha->power_level = PM_LEVEL_D3;
1913 		if (ha->pm_capable) {
1914 			/*
1915 			 * Get ql_power to do power on initialization
1916 			 */
1917 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1918 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1919 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1920 				    " power", QL_NAME, instance);
1921 			}
1922 		}
1923 
1924 		/*
1925 		 * There is a bug in DR that prevents PM framework
1926 		 * from calling ql_power.
1927 		 */
1928 		if (ha->power_level == PM_LEVEL_D3) {
1929 			ha->power_level = PM_LEVEL_D0;
1930 
1931 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1932 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1933 				    " adapter", QL_NAME, instance);
1934 			}
1935 
1936 			/* Wake up task_daemon. */
1937 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1938 			    0);
1939 		}
1940 
1941 		/* Restart driver timer. */
1942 		GLOBAL_TIMER_LOCK();
1943 		if (ql_timer_timeout_id == NULL) {
1944 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1945 			    ql_timer_ticks);
1946 		}
1947 		GLOBAL_TIMER_LOCK();
1948 
1949 		/* Wake up command start routine. */
1950 		ADAPTER_STATE_LOCK(ha);
1951 		ha->flags &= ~ADAPTER_SUSPENDED;
1952 		ADAPTER_STATE_UNLOCK(ha);
1953 
1954 		rval = DDI_SUCCESS;
1955 
1956 		/* Restart IP if it was running. */
1957 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1958 			(void) ql_initialize_ip(ha);
1959 			ql_isp_rcvbuf(ha);
1960 		}
1961 		break;
1962 
1963 	default:
1964 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1965 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1966 		rval = DDI_FAILURE;
1967 		break;
1968 	}
1969 
1970 	kmem_free(buf, MAXPATHLEN);
1971 
1972 	if (rval != DDI_SUCCESS) {
1973 		/*EMPTY*/
1974 		QL_PRINT_2(ha, "failed instance=%d, rval = %xh\n",
1975 		    ddi_get_instance(dip), rval);
1976 	} else {
1977 		/*EMPTY*/
1978 		QL_PRINT_3(ha, "done\n");
1979 	}
1980 
1981 	return (rval);
1982 }
1983 
1984 /*
1985  * ql_detach
1986  *	Used to remove all the states associated with a given
1987  *	instances of a device node prior to the removal of that
1988  *	instance from the system.
1989  *
1990  * Input:
1991  *	dip = pointer to device information structure.
1992  *	cmd = type of detach.
1993  *
1994  * Returns:
1995  *	DDI_SUCCESS or DDI_FAILURE.
1996  *
1997  * Context:
1998  *	Kernel context.
1999  */
static int
ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	ql_adapter_state_t	*ha, *vha;
	ql_tgt_t		*tq;
	uint16_t		index;
	ql_link_t		*link;
	timeout_id_t		timer_id = NULL;
	int			suspend, rval = DDI_SUCCESS;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(NULL, "no adapter, instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(ha, "started, cmd=%xh\n", cmd);

	/*
	 * Note: a MAXPATHLEN scratch buffer was previously allocated and
	 * freed here without ever being used; the dead allocation has
	 * been removed.
	 */
	switch (cmd) {
	case DDI_DETACH:
		/*
		 * Mark the adapter suspended so no new commands start,
		 * and abort commands held for the loop-down timeout.
		 */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
		ADAPTER_STATE_UNLOCK(ha);

		/* Wait for task thread to see suspend flag. */
		while (!(ha->task_daemon_flags & TASK_DAEMON_STALLED_FLG) &&
		    ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
			ql_awaken_task_daemon(ha, NULL, 0, 0);
			/* Delay for 1 tick (10 milliseconds). */
			delay(1);
		}

		/* Stop the task daemon thread and destroy its taskq. */
		if (ha->driver_thread_taskq) {
			while (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
				/* Delay for 1 tick (10 milliseconds). */
				ql_awaken_task_daemon(ha, NULL,
				    TASK_DAEMON_STOP_FLG, 0);
				delay(1);
			}
			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;

			ddi_taskq_destroy(ha->driver_thread_taskq);
			ha->driver_thread_taskq = NULL;
		}

		/*
		 * Ask the completion threads to terminate and wait,
		 * broadcasting on their CV until all have exited.
		 */
		if (ha->completion_taskq) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= COMP_THD_TERMINATE;
			ADAPTER_STATE_UNLOCK(ha);

			do {
				COMP_Q_LOCK(ha);
				cv_broadcast(&ha->cv_comp_thread);
				COMP_Q_UNLOCK(ha);
				ql_delay(ha, 10000);
			} while (ha->comp_thds_active != 0);

			ddi_taskq_destroy(ha->completion_taskq);
			ha->completion_taskq = NULL;
		}

		/* Disable driver timer if no adapters. */
		GLOBAL_TIMER_LOCK();
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_TIMER_UNLOCK();

		if (timer_id) {
			(void) untimeout(timer_id);
		}

		/* Remove this adapter from the global HBA list. */
		GLOBAL_STATE_LOCK();
		ql_remove_link(&ql_hba, &ha->hba);
		GLOBAL_STATE_UNLOCK();

		if (ha->pm_capable) {
			if (pm_lower_power(dip, QL_POWER_COMPONENT,
			    PM_LEVEL_D3) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s(%d): failed to lower the"
				    " power", QL_NAME, ha->instance);
			}
		}

		/*
		 * If pm_lower_power shutdown the adapter, there
		 * isn't much else to do
		 */
		if (ha->power_level != PM_LEVEL_D3) {
			ql_halt(ha, PM_LEVEL_D3);
		}

		/* Remove virtual ports. */
		while ((vha = ha->vp_next) != NULL) {
			ql_vport_destroy(vha);
		}

		/* Free target queues. */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			link = ha->dev[index].first;
			while (link != NULL) {
				tq = link->base_address;
				link = link->next;
				ql_dev_free(ha, tq);
			}
		}

		/*
		 * Free unsolicited buffers.
		 * If we are here then there are no ULPs still
		 * alive that wish to talk to ql so free up
		 * any SRB_IP_UB_UNUSED buffers that are
		 * lingering around
		 */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			fc_unsol_buf_t *ubp = ha->ub_array[index];

			if (ubp != NULL) {
				ql_srb_t *sp = ubp->ub_fca_private;

				sp->flags |= SRB_UB_FREE_REQUESTED;

				/*
				 * Poll (100ms steps) until the buffer is
				 * back in the FCA and not in a callback.
				 */
				while (!(sp->flags & SRB_UB_IN_FCA) ||
				    (sp->flags & (SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED))) {
					QL_UB_UNLOCK(ha);
					delay(drv_usectohz(100000));
					QL_UB_LOCK(ha);
				}
				ha->ub_array[index] = NULL;

				QL_UB_UNLOCK(ha);
				ql_free_unsolicited_buffer(ha, ubp);
				QL_UB_LOCK(ha);
			}
		}
		QL_UB_UNLOCK(ha);

		/* Free any saved RISC code. */
		if (ha->risc_code != NULL) {
			kmem_free(ha->risc_code, ha->risc_code_size);
			ha->risc_code = NULL;
			ha->risc_code_size = 0;
		}

		if (ha->fw_module != NULL) {
			(void) ddi_modclose(ha->fw_module);
			ha->fw_module = NULL;
		}

		/* Free resources. */
		ddi_prop_remove_all(dip);
		(void) fc_fca_detach(dip);
		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
		ddi_remove_minor_node(dip, "devctl");
		if (ha->k_stats != NULL) {
			kstat_delete(ha->k_stats);
		}

		/* Unmap config space and secondary register windows. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ddi_regs_map_free(&ha->sbus_config_handle);
		} else {
			if (CFG_IST(ha, CFG_CTRL_82XX)) {
				ql_8021_clr_drv_active(ha);
				ddi_regs_map_free(&ha->db_dev_handle);
			}
			if (ha->iomap_dev_handle != ha->dev_handle) {
				ddi_regs_map_free(&ha->iomap_dev_handle);
			}
			pci_config_teardown(&ha->pci_handle);
		}

		ql_disable_intr(ha);
		ql_release_intr(ha);

		ql_free_xioctl_resource(ha);

		ql_destroy_mutex(ha);

		ql_delete_queues(ha);
		ql_free_phys(ha, &ha->fwexttracebuf);
		ql_free_phys(ha, &ha->fwfcetracebuf);

		ddi_regs_map_free(&ha->dev_handle);
		if (ha->sbus_fpga_iobase != NULL) {
			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
		}
		if (ha->mbar_dev_handle != NULL) {
			ddi_regs_map_free(&ha->mbar_dev_handle);
		}

		ql_fcache_rel(ha->fcache);
		if (ha->vcache != NULL) {
			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
		}

		if (ha->pi_attrs != NULL) {
			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
		}

		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));

		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);

		kmem_free(ha->outstanding_cmds,
		    sizeof (*ha->outstanding_cmds) * ha->osc_max_cnt);

		if (ha->n_port != NULL) {
			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
		}

		if (ha->devpath != NULL) {
			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
		}

		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);

		(void) ql_plogi_params_desc_dtor(ha);

		(void) ql_nvram_cache_desc_dtor(ha);

		(void) qlc_fm_fini(ha);

		EL(ha, "detached\n");

		(void) ql_el_trace_dealloc(ha);

		/*
		 * NOTE(review): ha is freed here; the trailing
		 * QL_PRINT_3(ha, ...) below relies on the debug macro
		 * not dereferencing ha in production builds — confirm.
		 */
		ddi_soft_state_free(ql_state, (int)ha->instance);

		rval = DDI_SUCCESS;

		break;

	case DDI_SUSPEND:
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= ADAPTER_SUSPENDED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Disable driver timer if last adapter. */
		GLOBAL_TIMER_LOCK();
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_TIMER_UNLOCK();

		if (timer_id) {
			(void) untimeout(timer_id);
		}

		if (ha->flags & IP_INITIALIZED) {
			(void) ql_shutdown_ip(ha);
		}

		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
			/* Suspend failed; resume normal operation. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
			    QL_NAME, ha->instance, suspend);

			/* Restart IP if it was running. */
			if (ha->flags & IP_ENABLED &&
			    !(ha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(ha);
				ql_isp_rcvbuf(ha);
			}
			rval = DDI_FAILURE;
			break;
		}

		EL(ha, "suspended\n");

		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	if (rval != DDI_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
2299 
2300 /*
2301  * ql_power
2302  *	Power a device attached to the system.
2303  *
2304  * Input:
2305  *	dip = pointer to device information structure.
2306  *	component = device.
2307  *	level = power level.
2308  *
2309  * Returns:
2310  *	DDI_SUCCESS or DDI_FAILURE.
2311  *
2312  * Context:
2313  *	Kernel context.
2314  */
/* ARGSUSED */
static int
ql_power(dev_info_t *dip, int component, int level)
{
	int			rval = DDI_FAILURE;
	off_t			csr;
	uint8_t			saved_pm_val;
	ql_adapter_state_t	*ha;
	char			*buf;
	char			*path;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL || ha->pm_capable == 0) {
		QL_PRINT_2(ha, "no hba or PM not supported\n");
		return (rval);
	}

	QL_PRINT_10(ha, "started\n");

	/*
	 * Validate the request before allocating the scratch buffers.
	 * The previous code allocated buf/path first and then returned
	 * from this check without freeing them, leaking 2 * MAXPATHLEN
	 * bytes on every invalid call.
	 */
	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
	    level != PM_LEVEL_D3)) {
		EL(ha, "invalid, component=%xh or level=%xh\n",
		    component, level);
		return (rval);
	}

	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));

	/* Locate the PCI PM CSR offset via the capabilities pointer. */
	GLOBAL_HW_LOCK();
	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
	GLOBAL_HW_UNLOCK();

	/* Build a message prefix used by the cmn_err calls below. */
	(void) snprintf(buf, MAXPATHLEN,
	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
	    ddi_pathname(dip, path));

	switch (level) {
	case PM_LEVEL_D0:	/* power up to D0 state - fully on */

		QL_PM_LOCK(ha);
		if (ha->power_level == PM_LEVEL_D0) {
			/* Already fully powered; nothing to do. */
			QL_PM_UNLOCK(ha);
			rval = DDI_SUCCESS;
			break;
		}

		/*
		 * Enable interrupts now
		 */
		saved_pm_val = ha->power_level;
		ha->power_level = PM_LEVEL_D0;
		QL_PM_UNLOCK(ha);

		GLOBAL_HW_LOCK();

		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);

		/*
		 * Delay after reset, for chip to recover.
		 * Otherwise causes system PANIC
		 */
		drv_usecwait(200000);

		GLOBAL_HW_UNLOCK();

		if (ha->config_saved) {
			ha->config_saved = 0;
			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
				/* Restore failed; revert power level. */
				QL_PM_LOCK(ha);
				ha->power_level = saved_pm_val;
				QL_PM_UNLOCK(ha);
				cmn_err(CE_WARN, "%s failed to restore "
				    "config regs", buf);
				break;
			}
		}

		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
			cmn_err(CE_WARN, "%s adapter initialization failed",
			    buf);
		}

		/* Wake up task_daemon. */
		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
		    TASK_DAEMON_SLEEPING_FLG, 0);

		/* Restart IP if it was running. */
		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
			(void) ql_initialize_ip(ha);
			ql_isp_rcvbuf(ha);
		}

		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
		    ha->instance, QL_NAME);

		rval = DDI_SUCCESS;
		break;

	case PM_LEVEL_D3:	/* power down to D3 state - off */

		QL_PM_LOCK(ha);

		/* Refuse to power down while busy or daemon is active. */
		if (ha->pm_busy || ((ha->task_daemon_flags &
		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
			QL_PM_UNLOCK(ha);
			break;
		}

		if (ha->power_level == PM_LEVEL_D3) {
			/* Already powered down. */
			rval = DDI_SUCCESS;
			QL_PM_UNLOCK(ha);
			break;
		}
		QL_PM_UNLOCK(ha);

		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
			    " config regs", QL_NAME, ha->instance, buf);
			break;
		}
		ha->config_saved = 1;

		/*
		 * Don't enable interrupts. Running mailbox commands with
		 * interrupts enabled could cause hangs since pm_run_scan()
		 * runs out of a callout thread and on single cpu systems
		 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
		 * would not get to run.
		 */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
		TASK_DAEMON_UNLOCK(ha);

		ql_halt(ha, PM_LEVEL_D3);

		/*
		 * Setup ql_intr to ignore interrupts from here on.
		 */
		QL_PM_LOCK(ha);
		ha->power_level = PM_LEVEL_D3;
		QL_PM_UNLOCK(ha);

		/*
		 * Wait for ISR to complete.
		 */
		INTR_LOCK(ha);
		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
		INTR_UNLOCK(ha);

		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
		    ha->instance, QL_NAME);

		rval = DDI_SUCCESS;
		break;
	}

	kmem_free(buf, MAXPATHLEN);
	kmem_free(path, MAXPATHLEN);

	QL_PRINT_10(ha, "done\n");

	return (rval);
}
2479 
2480 /*
2481  * ql_quiesce
2482  *	quiesce a device attached to the system.
2483  *
2484  * Input:
2485  *	dip = pointer to device information structure.
2486  *
2487  * Returns:
2488  *	DDI_SUCCESS
2489  *
2490  * Context:
2491  *	Kernel context.
2492  */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(NULL, "no adapter, instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * ISP82xx: send TOGGLE_INTERRUPT then STOP_FIRMWARE via
		 * the mailbox registers, polling risc2host (BIT_15 set =
		 * response ready) for up to 20000 * 100us = ~2 seconds
		 * per command.
		 */
		ql_8021_clr_hw_intr(ha);
		ql_8021_clr_fw_intr(ha);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_TOGGLE_INTERRUPT);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		for (timer = 0; timer < 20000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				ql_8021_clr_hw_intr(ha);
				/* Low byte < 0x12 terminates the wait. */
				if ((stat & 0xff) < 0x12) {
					ql_8021_clr_fw_intr(ha);
					break;
				}
				ql_8021_clr_fw_intr(ha);
			}
			drv_usecwait(100);
		}
		/* Mask the legacy interrupt target register. */
		ql_8021_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		for (timer = 0; timer < 20000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				ql_8021_clr_hw_intr(ha);
				if ((stat & 0xff) < 0x12) {
					ql_8021_clr_fw_intr(ha);
					break;
				}
				ql_8021_clr_fw_intr(ha);
			}
			drv_usecwait(100);
		}
	} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		/*
		 * 24xx-family firmware: issue MBC_STOP_FIRMWARE with the
		 * remaining mailbox inputs zeroed, poll risc2host for up
		 * to 30000 * 100us = ~3 seconds, then hard-reset the ISP.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT16_IO_REG(ha, mailbox_in[1], 0);
		WRT16_IO_REG(ha, mailbox_in[2], 0);
		WRT16_IO_REG(ha, mailbox_in[3], 0);
		WRT16_IO_REG(ha, mailbox_in[4], 0);
		WRT16_IO_REG(ha, mailbox_in[5], 0);
		WRT16_IO_REG(ha, mailbox_in[6], 0);
		WRT16_IO_REG(ha, mailbox_in[7], 0);
		WRT16_IO_REG(ha, mailbox_in[8], 0);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				/* Low byte < 0x12 terminates the wait. */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		if (CFG_IST(ha, CFG_MWB_4096_SUPPORT)) {
			WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
			    MWB_4096_BYTES);
		} else {
			WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN);
		}
		drv_usecwait(100);

	} else {
		/* Legacy ISP2xxx: quiesce by resetting the RISC directly. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
2594 
2595 /* ************************************************************************ */
2596 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2597 /* ************************************************************************ */
2598 
2599 /*
2600  * ql_bind_port
2601  *	Handling port binding. The FC Transport attempts to bind an FCA port
2602  *	when it is ready to start transactions on the port. The FC Transport
2603  *	will call the fca_bind_port() function specified in the fca_transport
2604  *	structure it receives. The FCA must fill in the port_info structure
2605  *	passed in the call and also stash the information for future calls.
2606  *
2607  * Input:
2608  *	dip = pointer to FCA information structure.
2609  *	port_info = pointer to port information structure.
2610  *	bind_info = pointer to bind information structure.
2611  *
2612  * Returns:
2613  *	NULL = failure
2614  *
2615  * Context:
2616  *	Kernel context.
2617  */
static opaque_t
ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
    fc_fca_bind_info_t *bind_info)
{
	ql_adapter_state_t	*ha, *vha;
	opaque_t		fca_handle = NULL;
	port_id_t		d_id;
	int			port_npiv = bind_info->port_npiv;
	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;

	/* get state info based on the dip */
	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(ha, "no adapter, instance=%d\n",
		    ddi_get_instance(dip));
		return (NULL);
	}
	QL_PRINT_10(ha, "started\n");

	/* Verify port number is supported. */
	if (port_npiv != 0) {
		/*
		 * NPIV bind: adapter, topology, and switch must all
		 * support virtual ports, and the requested port number
		 * must be within the adapter's virtual port limit.
		 */
		if (!(ha->flags & VP_ENABLED)) {
			QL_PRINT_2(ha, "FC_NPIV_NOT_SUPPORTED\n");
			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
			return (NULL);
		}
		if (!(ha->flags & POINT_TO_POINT)) {
			QL_PRINT_2(ha, "FC_NPIV_WRONG_TOPOLOGY\n");
			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
			return (NULL);
		}
		if (!(ha->flags & FDISC_ENABLED)) {
			QL_PRINT_2(ha, "switch does not support "
			    "FDISC\n");
			port_info->pi_error = FC_NPIV_FDISC_FAILED;
			return (NULL);
		}
		if (bind_info->port_num >= ha->max_vports) {
			QL_PRINT_2(ha, "port number=%d "
			    "FC_OUTOFBOUNDS\n", bind_info->port_num);
			port_info->pi_error = FC_OUTOFBOUNDS;
			return (NULL);
		}
	} else if (bind_info->port_num != 0) {
		/* Non-NPIV binds are only valid for physical port 0. */
		QL_PRINT_2(ha, "failed, port number=%d is not "
		    "supported\n", bind_info->port_num);
		port_info->pi_error = FC_OUTOFBOUNDS;
		return (NULL);
	}

	/* Locate port context. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		if (vha->vp_index == bind_info->port_num) {
			break;
		}
	}

	/* If virtual port does not exist. */
	if (vha == NULL) {
		/*
		 * NOTE(review): vha is dereferenced below without a
		 * NULL check — confirm ql_vport_create() cannot fail
		 * here (port_num was range-checked above).
		 */
		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
	}

	/* make sure this port isn't already bound */
	if (vha->flags & FCA_BOUND) {
		port_info->pi_error = FC_ALREADY;
	} else {
		if (vha->vp_index != 0) {
			/* Virtual port: adopt caller-supplied WWNs. */
			bcopy(port_nwwn,
			    vha->loginparams.node_ww_name.raw_wwn, 8);
			bcopy(port_pwwn,
			    vha->loginparams.nport_ww_name.raw_wwn, 8);
		}
		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
			/* Bring the virtual port online (FDISC). */
			if (ql_vport_enable(vha) != QL_SUCCESS) {
				QL_PRINT_2(ha, "failed to enable "
				    "virtual port=%d\n",
				    vha->vp_index);
				port_info->pi_error = FC_NPIV_FDISC_FAILED;
				return (NULL);
			}
			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    QL_NAME, ha->instance, vha->vp_index,
			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
			    port_pwwn[6], port_pwwn[7],
			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
			    port_nwwn[6], port_nwwn[7]);
		}

		/* stash the bind_info supplied by the FC Transport */
		vha->bind_info.port_handle = bind_info->port_handle;
		vha->bind_info.port_statec_cb = bind_info->port_statec_cb;
		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;

		/* Set port's source ID. */
		port_info->pi_s_id.port_id = vha->d_id.b24;

		/* copy out the default login parameters */
		bcopy((void *)&vha->loginparams,
		    (void *)&port_info->pi_login_params,
		    sizeof (la_els_logi_t));

		/* Set port's hard address if enabled. */
		port_info->pi_hard_addr.hard_addr = 0;
		if (bind_info->port_num == 0) {
			d_id.b24 = ha->d_id.b24;
			/*
			 * Hard address is enabled when BIT_0 of the
			 * firmware options is set in the init control
			 * block (24xx vs legacy layouts differ).
			 */
			if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
				if (ha->init_ctrl_blk.cb24.
				    firmware_options_1[0] & BIT_0) {
					d_id.b.al_pa = ql_index_to_alpa[ha->
					    init_ctrl_blk.cb24.
					    hard_address[0]];
					port_info->pi_hard_addr.hard_addr =
					    d_id.b24;
				}
			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
			    BIT_0) {
				d_id.b.al_pa = ql_index_to_alpa[ha->
				    init_ctrl_blk.cb.hard_address[0]];
				port_info->pi_hard_addr.hard_addr = d_id.b24;
			}

			/* Set the node id data */
			if (ql_get_rnid_params(ha,
			    sizeof (port_info->pi_rnid_params.params),
			    (caddr_t)&port_info->pi_rnid_params.params) ==
			    QL_SUCCESS) {
				port_info->pi_rnid_params.status = FC_SUCCESS;
			} else {
				port_info->pi_rnid_params.status = FC_FAILURE;
			}

			/* Populate T11 FC-HBA details */
			ql_populate_hba_fru_details(ha, port_info);
			/* Cache the attrs (KM_SLEEP never returns NULL). */
			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
			    KM_SLEEP);
			if (ha->pi_attrs != NULL) {
				bcopy(&port_info->pi_attrs, ha->pi_attrs,
				    sizeof (fca_port_attrs_t));
			}
		} else {
			/* Virtual port: reuse cached physical-port attrs. */
			port_info->pi_rnid_params.status = FC_FAILURE;
			if (ha->pi_attrs != NULL) {
				bcopy(ha->pi_attrs, &port_info->pi_attrs,
				    sizeof (fca_port_attrs_t));
			}
		}

		/* Generate handle for this FCA. */
		fca_handle = (opaque_t)vha;

		ADAPTER_STATE_LOCK(ha);
		vha->flags |= FCA_BOUND;
		ADAPTER_STATE_UNLOCK(ha);
		/* Set port's current state. */
		port_info->pi_port_state = vha->state;
	}

	QL_PRINT_10(ha, "done, pi_port_state=%xh, "
	    "pi_s_id.port_id=%xh\n",
	    port_info->pi_port_state, port_info->pi_s_id.port_id);

	return (fca_handle);
}
2786 
2787 /*
2788  * ql_unbind_port
2789  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2790  *
2791  * Input:
2792  *	fca_handle = handle setup by ql_bind_port().
2793  *
2794  * Context:
2795  *	Kernel context.
2796  */
2797 static void
ql_unbind_port(opaque_t fca_handle)2798 ql_unbind_port(opaque_t fca_handle)
2799 {
2800 	ql_adapter_state_t	*ha;
2801 	ql_tgt_t		*tq;
2802 	uint32_t		flgs;
2803 
2804 	ha = ql_fca_handle_to_state(fca_handle);
2805 	if (ha == NULL) {
2806 		/*EMPTY*/
2807 		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2808 		    (void *)fca_handle);
2809 	} else {
2810 		QL_PRINT_10(ha, "started\n");
2811 
2812 		if (!(ha->flags & FCA_BOUND)) {
2813 			/*EMPTY*/
2814 			QL_PRINT_2(ha, "port already unbound\n");
2815 		} else {
2816 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2817 				(void) ql_vport_control(ha, (uint8_t)
2818 				    (CFG_IST(ha, CFG_FC_TYPE) ?
2819 				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2820 				if ((tq = ql_loop_id_to_queue(ha,
2821 				    FL_PORT_24XX_HDL)) != NULL) {
2822 					(void) ql_logout_fabric_port(ha, tq);
2823 				}
2824 				flgs = FCA_BOUND | VP_ENABLED;
2825 			} else {
2826 				flgs = FCA_BOUND;
2827 			}
2828 			ADAPTER_STATE_LOCK(ha);
2829 			ha->flags &= ~flgs;
2830 			ADAPTER_STATE_UNLOCK(ha);
2831 		}
2832 
2833 		QL_PRINT_10(ha, "done\n");
2834 	}
2835 }
2836 
2837 /*
2838  * ql_init_pkt
2839  *	Initialize FCA portion of packet.
2840  *
2841  * Input:
2842  *	fca_handle = handle setup by ql_bind_port().
2843  *	pkt = pointer to fc_packet.
2844  *
2845  * Returns:
2846  *	FC_SUCCESS - the packet has successfully been initialized.
2847  *	FC_UNBOUND - the fca_handle specified is not bound.
2848  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2849  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2850  *
2851  * Context:
2852  *	Kernel context.
2853  */
/* ARGSUSED */
static int
ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started\n");

	/* The SRB lives in transport-allocated FCA private space. */
	sp = (ql_srb_t *)pkt->pkt_fca_private;
	sp->flags = 0;
	sp->handle = 0;

	/* init cmd links */
	sp->cmd.base_address = sp;
	sp->cmd.prev = NULL;
	sp->cmd.next = NULL;
	sp->cmd.head = NULL;

	/* init watchdog links */
	sp->wdg.base_address = sp;
	sp->wdg.prev = NULL;
	sp->wdg.next = NULL;
	sp->wdg.head = NULL;
	sp->pkt = pkt;
	sp->ha = ha;
	/* Brand the SRB so ql_un_init_pkt() can validate it later. */
	sp->magic_number = QL_FCA_BRAND;
	sp->sg_dma.dma_handle = NULL;
#ifndef __sparc
	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* Setup DMA for scatter gather list. */
		sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
		sp->sg_dma.type = LITTLE_ENDIAN_DMA;
		sp->sg_dma.max_cookie_count = 1;
		sp->sg_dma.alignment = 64;
		/*
		 * NOTE(review): the 'sleep' argument is ignored here and
		 * KM_SLEEP is always used -- confirm no caller invokes
		 * this in a context that forbids sleeping.
		 */
		if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
			rval = FC_NOMEM;
		}
	}
#endif	/* __sparc */

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2906 
2907 /*
2908  * ql_un_init_pkt
2909  *	Release all local resources bound to packet.
2910  *
2911  * Input:
2912  *	fca_handle = handle setup by ql_bind_port().
2913  *	pkt = pointer to fc_packet.
2914  *
2915  * Returns:
2916  *	FC_SUCCESS - the packet has successfully been invalidated.
2917  *	FC_UNBOUND - the fca_handle specified is not bound.
2918  *	FC_BADPACKET - the packet has not been initialized or has
2919  *			already been freed by this FCA.
2920  *
2921  * Context:
2922  *	Kernel context.
2923  */
2924 static int
ql_un_init_pkt(opaque_t fca_handle,fc_packet_t * pkt)2925 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2926 {
2927 	ql_adapter_state_t *ha;
2928 	int rval;
2929 	ql_srb_t *sp;
2930 
2931 	ha = ql_fca_handle_to_state(fca_handle);
2932 	if (ha == NULL) {
2933 		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
2934 		    (void *)fca_handle);
2935 		return (FC_UNBOUND);
2936 	}
2937 	QL_PRINT_3(ha, "started\n");
2938 
2939 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2940 
2941 	if (sp->magic_number != QL_FCA_BRAND) {
2942 		EL(ha, "failed, FC_BADPACKET\n");
2943 		rval = FC_BADPACKET;
2944 	} else {
2945 		sp->magic_number = 0;
2946 		ql_free_phys(ha, &sp->sg_dma);
2947 		rval = FC_SUCCESS;
2948 	}
2949 
2950 	QL_PRINT_3(ha, "done\n");
2951 
2952 	return (rval);
2953 }
2954 
2955 /*
2956  * ql_els_send
2957  *	Issue a extended link service request.
2958  *
2959  * Input:
2960  *	fca_handle = handle setup by ql_bind_port().
2961  *	pkt = pointer to fc_packet.
2962  *
2963  * Returns:
2964  *	FC_SUCCESS - the command was successful.
2965  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2966  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2967  *	FC_TRANSPORT_ERROR - a transport error occurred.
2968  *	FC_UNBOUND - the fca_handle specified is not bound.
2969  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2970  *
2971  * Context:
2972  *	Kernel context.
2973  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer = drv_usectohz(30000000);
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(ha, "started\n");

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (DRIVER_SUSPENDED(ha)) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/*
	 * Setup response header: start from a copy of the command header,
	 * then swap source/destination IDs below.
	 */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear stale packet-type flags before marking this an ELS pkt. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	QL_PRINT_10(ha, "els.ls_code=%xh, d_id=%xh\n", els.ls_code,
	    pkt->pkt_cmd_fhdr.d_id);

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch on the ELS command code extracted from the payload. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT response for an unsupported ELS. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_io_comp(sp);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "rval=%x, ls_code=%xh sent to d_id=%xh, sp=%ph\n",
		    rval, els.ls_code, pkt->pkt_cmd_fhdr.d_id, sp);
	} else {
		/*EMPTY*/
		QL_PRINT_10(ha, "done\n");
	}
	return (rval);
}
3142 
3143 /*
3144  * ql_get_cap
3145  *	Export FCA hardware and software capabilities.
3146  *
3147  * Input:
3148  *	fca_handle = handle setup by ql_bind_port().
3149  *	cap = pointer to the capabilities string.
3150  *	ptr = buffer pointer for return capability.
3151  *
3152  * Returns:
3153  *	FC_CAP_ERROR - no such capability
3154  *	FC_CAP_FOUND - the capability was returned and cannot be set
3155  *	FC_CAP_SETTABLE - the capability was returned and can be set
3156  *	FC_UNBOUND - the fca_handle specified is not bound.
3157  *
3158  * Context:
3159  *	Kernel context.
3160  */
static int
ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
{
	ql_adapter_state_t	*ha;
	int			rval;
	uint32_t		*rptr = (uint32_t *)ptr;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started\n");

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		/* 8-byte raw node world-wide name from login params. */
		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
		    ptr, 8);
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		bcopy((void *)&ha->loginparams, ptr,
		    sizeof (la_els_logi_t));
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		/* Maximum number of unsolicited buffers supported. */
		*rptr = (uint32_t)QL_UB_LIMIT;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {

		/* On non-SPARC psydip stays NULL, so streaming is allowed. */
		dev_info_t	*psydip = NULL;
#ifdef __sparc
		/*
		 * Disable streaming for certain 2 chip adapters
		 * below Psycho to handle Psycho byte hole issue.
		 */
		if (ha->flags & MULTI_CHIP_ADAPTER &&
		    !CFG_IST(ha, CFG_SBUS_CARD)) {
			/* Walk up the devinfo tree looking for pcipsy. */
			for (psydip = ddi_get_parent(ha->dip); psydip;
			    psydip = ddi_get_parent(psydip)) {
				if (strcmp(ddi_driver_name(psydip),
				    "pcipsy") == 0) {
					break;
				}
			}
		}
#endif	/* __sparc */

		if (psydip) {
			*rptr = (uint32_t)FC_NO_STREAMING;
			EL(ha, "No Streaming\n");
		} else {
			*rptr = (uint32_t)FC_ALLOW_STREAMING;
			EL(ha, "Allow Streaming\n");
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		*rptr = ha->loginparams.common_service.rx_bufsize;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		*rptr = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		*rptr = FC_NO_DVMA_SPACE;
		rval = FC_CAP_FOUND;
	} else {
		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
		rval = FC_CAP_ERROR;
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
3233 
3234 /*
3235  * ql_set_cap
3236  *	Allow the FC Transport to set FCA capabilities if possible.
3237  *
3238  * Input:
3239  *	fca_handle = handle setup by ql_bind_port().
3240  *	cap = pointer to the capabilities string.
3241  *	ptr = buffer pointer for capability.
3242  *
3243  * Returns:
3244  *	FC_CAP_ERROR - no such capability
3245  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
3246  *	FC_CAP_SETTABLE - the capability was successfully set.
3247  *	FC_UNBOUND - the fca_handle specified is not bound.
3248  *
3249  * Context:
3250  *	Kernel context.
3251  */
3252 /* ARGSUSED */
3253 static int
ql_set_cap(opaque_t fca_handle,char * cap,void * ptr)3254 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
3255 {
3256 	ql_adapter_state_t	*ha;
3257 	int			rval;
3258 
3259 	ha = ql_fca_handle_to_state(fca_handle);
3260 	if (ha == NULL) {
3261 		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
3262 		    (void *)fca_handle);
3263 		return (FC_UNBOUND);
3264 	}
3265 	QL_PRINT_3(ha, "started\n");
3266 
3267 	if (strcmp(cap, FC_NODE_WWN) == 0) {
3268 		rval = FC_CAP_FOUND;
3269 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3270 		rval = FC_CAP_FOUND;
3271 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3272 		rval = FC_CAP_FOUND;
3273 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3274 		rval = FC_CAP_FOUND;
3275 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3276 		rval = FC_CAP_FOUND;
3277 	} else {
3278 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3279 		rval = FC_CAP_ERROR;
3280 	}
3281 
3282 	QL_PRINT_3(ha, "done\n");
3283 
3284 	return (rval);
3285 }
3286 
3287 /*
3288  * ql_getmap
3289  *	Request of Arbitrated Loop (AL-PA) map.
3290  *
3291  * Input:
3292  *	fca_handle = handle setup by ql_bind_port().
3293  *	mapbuf= buffer pointer for map.
3294  *
3295  * Returns:
3296  *	FC_OLDPORT - the specified port is not operating in loop mode.
3297  *	FC_OFFLINE - the specified port is not online.
3298  *	FC_NOMAP - there is no loop map available for this port.
3299  *	FC_UNBOUND - the fca_handle specified is not bound.
3300  *	FC_SUCCESS - a valid map has been placed in mapbuf.
3301  *
3302  * Context:
3303  *	Kernel context.
3304  */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer = drv_usectohz(30000000);
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started\n");

	/* Stamp the map header before asking firmware for the contents. */
	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (DRIVER_SUSPENDED(ha)) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Since transport drivers consider this an offline
		 * condition, wait a couple of seconds for any loop
		 * transitions before the chip is reset and everything
		 * is restarted.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NO_MAP\n");
		rval = FC_NO_MAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n",
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(ha, "done\n");
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
3373 
3374 /*
3375  * ql_transport
3376  *	Issue an I/O request. Handles all regular requests.
3377  *
3378  * Input:
3379  *	fca_handle = handle setup by ql_bind_port().
3380  *	pkt = pointer to fc_packet.
3381  *
3382  * Returns:
3383  *	FC_SUCCESS - the packet was accepted for transport.
3384  *	FC_TRANSPORT_ERROR - a transport error occurred.
3385  *	FC_BADPACKET - the packet to be transported had not been
3386  *			initialized by this FCA.
3387  *	FC_UNBOUND - the fca_handle specified is not bound.
3388  *
3389  * Context:
3390  *	Kernel context.
3391  */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);

	/* Reset SRB flags. */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/* Pre-fill the response header from the command header. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Dispatch on the routing control of the command frame header. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		} else {
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			/* FC-IP traffic; only supported on the physical port. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				if (CFG_IST(ha, CFG_FCIP_SUPPORT) &&
				    ha->vp_index == 0) {
					sp->flags |= SRB_IP_PKT;
					rval = ql_fcp_ip_cmd(ha, pkt, sp);
				} else {
					cmn_err(CE_NOTE, "%s(%d) FC-IP is not "
					    "supported on this adapter\n",
					    QL_NAME, ha->instance);
					pkt->pkt_state = FC_PKT_LOCAL_RJT;
					pkt->pkt_reason = FC_REASON_UNSUPPORTED;
					rval = FC_TRANSPORT_ERROR;
				}
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			/* Generic (name/management server) services. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			} else {
				pkt->pkt_state = FC_PKT_LOCAL_RJT;
				pkt->pkt_reason = FC_REASON_UNSUPPORTED;
				rval = FC_TRANSPORT_ERROR;
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
3491 
3492 /*
3493  * ql_ub_alloc
3494  *	Allocate buffers for unsolicited exchanges.
3495  *
3496  * Input:
3497  *	fca_handle = handle setup by ql_bind_port().
3498  *	tokens = token array for each buffer.
3499  *	size = size of each buffer.
3500  *	count = pointer to number of buffers.
3501  *	type = the FC-4 type the buffers are reserved for.
3502  *		1 = Extended Link Services, 5 = LLC/SNAP
3503  *
3504  * Returns:
3505  *	FC_FAILURE - buffers could not be allocated.
3506  *	FC_TOOMANY - the FCA could not allocate the requested
3507  *			number of buffers.
3508  *	FC_SUCCESS - unsolicited buffers were allocated.
3509  *	FC_UNBOUND - the fca_handle specified is not bound.
3510  *
3511  * Context:
3512  *	Kernel context.
3513  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started, count = %xh\n", *count);

	/* Refuse allocation while the adapter is powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(ha, "down done\n");
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Check the count. */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/* Allocate all memory needed. */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		/* Defensive: KM_SLEEP allocations do not return NULL. */
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				if (type == FC_TYPE_IS8802_SNAP) {
					/*
					 * FC-IP buffers need DMA-able memory;
					 * endianness of the mapping differs
					 * per platform.
					 */
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP types use plain kernel memory. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/* Find next available slot. */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* IP buffer. */
	if (ub_updated) {
		if (type == FC_TYPE_IS8802_SNAP &&
		    CFG_IST(ha, CFG_FCIP_SUPPORT) &&
		    ha->vp_index == 0) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			/*
			 * First-time IP setup: program MTU, buffer size
			 * and container count into the IP init control
			 * block (24xx vs legacy layout), then initialize
			 * the firmware IP support.
			 */
			if (!(ha->flags & IP_INITIALIZED)) {
				if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Hand the new receive buffers to the ISP. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
3727 
3728 /*
3729  * ql_ub_free
3730  *	Free unsolicited buffers.
3731  *
3732  * Input:
3733  *	fca_handle = handle setup by ql_bind_port().
3734  *	count = number of buffers.
3735  *	tokens = token array for each buffer.
3736  *
3737  * Returns:
3738  *	FC_SUCCESS - the requested buffers have been freed.
3739  *	FC_UNBOUND - the fca_handle specified is not bound.
3740  *	FC_UB_BADTOKEN - an invalid token was encountered.
3741  *			 No buffers have been released.
3742  *
3743  * Context:
3744  *	Kernel context.
3745  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started\n");

	/* Check all returned tokens. */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll (100ms intervals, lock dropped while sleeping)
		 * until the buffer is back in the FCA and no callback
		 * or acquisition is in flight, then free it.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			delay(drv_usectohz(100000));
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			QL_UB_LOCK(ha);
			cv_broadcast(&ha->pha->cv_ub);
			QL_UB_UNLOCK(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
3821 
3822 /*
3823  * ql_ub_release
3824  *	Release unsolicited buffers from FC Transport
3825  *	to FCA for future use.
3826  *
3827  * Input:
3828  *	fca_handle = handle setup by ql_bind_port().
3829  *	count = number of buffers.
3830  *	tokens = token array for each buffer.
3831  *
3832  * Returns:
3833  *	FC_SUCCESS - the requested buffers have been released.
3834  *	FC_UNBOUND - the fca_handle specified is not bound.
3835  *	FC_UB_BADTOKEN - an invalid token was encountered.
3836  *		No buffers have been released.
3837  *
3838  * Context:
3839  *	Kernel context.
3840  */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(ha, "started\n");

	/* Acquire adapter state lock. */
	QL_UB_LOCK(ha);

	/*
	 * Phase 1: validate every token before touching anything, so
	 * that on any bad token no buffers are released at all.
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* Phase 2: all tokens checked out, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Check all returned tokens. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
3928 
3929 /*
3930  * ql_abort
3931  *	Abort a packet.
3932  *
3933  * Input:
3934  *	fca_handle = handle setup by ql_bind_port().
3935  *	pkt = pointer to fc_packet.
3936  *	flags = KM_SLEEP flag.
3937  *
3938  * Returns:
 *	FC_SUCCESS - the packet has successfully aborted.
 *	FC_ABORTED - the packet has successfully aborted.
 *	FC_ABORTING - the packet is being aborted.
 *	FC_ABORT_FAILED - the packet could not be aborted.
 *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
 *		to abort the packet.
 *	FC_BADEXCHANGE - no packet found.
 *	FC_FAILURE - the packet has already completed; too late to abort.
 *	FC_OFFLINE - the loop is down; the packet cannot be aborted.
 *	FC_UNBOUND - the fca_handle specified is not bound.
3947  *
3948  * Context:
3949  *	Kernel context.
3950  */
/*ARGSUSED*/
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(ha, "started\n");

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/*
	 * No abort is possible without both a target queue and a LUN
	 * queue, or while the loop is down; distinguish the two
	 * failures for the caller.  No locks are held yet, so a plain
	 * return is safe here.
	 */
	if ((tq == NULL) || (lq = sp->lun_queue) == NULL ||
	    (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL || lq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			/*
			 * The command never reached the ISP; complete
			 * it directly back to the caller.
			 */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd, B_TRUE);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Command already finished in the ISP; too late to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command is in flight with the ISP: invalidate its
		 * outstanding-command slot and request-ring entries,
		 * then ask the firmware to abort it.
		 */
		ql_request_q_t	*req_q;
		request_t	*pio;
		uint32_t	index;

		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		sp->flags |= SRB_ABORTING;
		if (sp->handle != 0) {
			index = sp->handle & OSC_INDEX_MASK;
			if (ha->outstanding_cmds[index] == sp) {
				ha->outstanding_cmds[index] =
				    QL_ABORTED_SRB(ha);
			}
			/* Pick the request queue the command was issued on. */
			if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
				req_q = ha->req_q[1];
			} else {
				req_q = ha->req_q[0];
			}
			pio = sp->request_ring_ptr;
			if (sp->handle ==
			    ddi_get32(req_q->req_ring.acc_handle,
			    &pio->handle)) {
				EL(ha, "inflight sp=%ph, handle=%xh, "
				    "invalidated\n", (void *)sp, sp->handle);
				/*
				 * Mark every ring entry of this request
				 * aborted, wrapping at the ring boundary.
				 */
				for (index = 0; index < sp->req_cnt; index++) {
					ddi_put8(req_q->req_ring.acc_handle,
					    &pio->entry_type,
					    ABORTED_ENTRY_TYPE);
					pio++;
					if (pio == (request_t *)
					    ((uintptr_t)req_q->req_ring.bp +
					    req_q->req_ring.size)) {
						pio = req_q->req_ring.bp;
					}
				}
			}
			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}
			if (sp->flags & SRB_FCP_CMD_PKT &&
			    lq->lun_outcnt != 0) {
				lq->lun_outcnt--;
			}
			/* Remove command from watchdog queue. */
			if (sp->flags & SRB_WATCHDOG_ENABLED) {
				ql_remove_link(&tq->wdg, &sp->wdg);
				sp->flags &= ~SRB_WATCHDOG_ENABLED;
			}
			/* Release device queue lock. */
			INTR_UNLOCK(ha);

			(void) ql_abort_command(ha, sp);
			sp->handle = 0;
		} else {
			/* Release device queue lock. */
			INTR_UNLOCK(ha);
		}

		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		sp->flags |= SRB_ISP_COMPLETED;
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
4133 
4134 /*
4135  * ql_reset
4136  *	Reset link or hardware.
4137  *
4138  * Input:
4139  *	fca_handle = handle setup by ql_bind_port().
4140  *	cmd = reset type command.
4141  *
4142  * Returns:
 *	FC_SUCCESS - reset has successfully finished.
 *	FC_UNBOUND - the fca_handle specified is not bound.
 *	FC_TRAN_BUSY - the driver is stalled or busy; retry later.
 *	FC_FAILURE - reset failed.
4146  *
4147  * Context:
4148  *	Kernel context.
4149  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(ha, "started, cmd=%d\n", cmd);

	/* Reject the request while the driver is stalled or resyncing. */
	if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
	    DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
		EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
		    ha->task_daemon_flags);
		return (FC_TRAN_BUSY);
	}

	switch (cmd) {
	case FC_FCA_CORE:
		/* Dump firmware core; only the physical port can do so. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Reset the loop only when it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* Dump the firmware core first if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				/*
				 * Virtual ports cannot dump the core;
				 * fall back to a loop reset if it is up.
				 */
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				/*
				 * Notify the transport so it returns the
				 * unsolicited buffers it is holding.
				 */
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits; state bits are rebuilt below. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon propagate the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
4267 
4268 /*
4269  * ql_port_manage
4270  *	Perform port management or diagnostics.
4271  *
4272  * Input:
4273  *	fca_handle = handle setup by ql_bind_port().
4274  *	cmd = pointer to command structure.
4275  *
4276  * Returns:
 *	FC_SUCCESS - the request completed successfully.
 *	FC_FAILURE - the request did not complete successfully.
 *	FC_UNBOUND - the fca_handle specified is not bound.
 *	FC_TRAN_BUSY - the driver is stalled or busy; retry later.
 *	FC_NOMEM - a supplied buffer is too small or memory could not
 *		be allocated.
 *	FC_TOOMANY - a supplied data length exceeds the supported maximum.
 *	FC_INVALID_REQUEST - a request parameter is invalid.
 *	FC_BADCMD - the command is unsupported or not applicable.
4280  *
4281  * Context:
4282  *	Kernel context.
4283  */
4284 static int
ql_port_manage(opaque_t fca_handle,fc_fca_pm_t * cmd)4285 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
4286 {
4287 	clock_t			timer;
4288 	uint16_t		index;
4289 	uint32_t		*bp;
4290 	port_id_t		d_id;
4291 	ql_link_t		*link;
4292 	ql_adapter_state_t	*ha, *pha;
4293 	ql_tgt_t		*tq;
4294 	dma_mem_t		buffer_xmt, buffer_rcv;
4295 	size_t			length;
4296 	uint32_t		cnt;
4297 	char			buf[80];
4298 	lbp_t			*lb;
4299 	ql_mbx_data_t		mr;
4300 	app_mbx_cmd_t		*mcp;
4301 	int			i0;
4302 	uint8_t			*bptr;
4303 	int			rval2, rval = FC_SUCCESS;
4304 	uint32_t		opcode;
4305 	uint32_t		set_flags = 0;
4306 	fc_fca_p2p_info_t	*p2p_info;
4307 
4308 	ha = ql_fca_handle_to_state(fca_handle);
4309 	if (ha == NULL) {
4310 		QL_PRINT_2(NULL, ": failed, no adapter=%ph\n",
4311 		    (void *)fca_handle);
4312 		return (FC_UNBOUND);
4313 	}
4314 	pha = ha->pha;
4315 
4316 #ifdef	QL_DEBUG_LEVEL_10
4317 	if (cmd->pm_cmd_code != FC_PORT_GET_FW_REV) {
4318 		QL_PRINT_10(ha, "started=%xh\n", cmd->pm_cmd_code);
4319 	}
4320 #endif
4321 
4322 	if (ha->task_daemon_flags & (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE |
4323 	    DRIVER_STALL | ISP_ABORT_NEEDED | LOOP_RESYNC_NEEDED)) {
4324 		EL(ha, "driver stalled, FC_TRAN_BUSY, dtf=%xh\n",
4325 		    ha->task_daemon_flags);
4326 		return (FC_TRAN_BUSY);
4327 	}
4328 
4329 	switch (cmd->pm_cmd_code) {
4330 	case FC_PORT_BYPASS:
4331 		d_id.b24 = *cmd->pm_cmd_buf;
4332 		tq = ql_d_id_to_queue(ha, d_id);
4333 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4334 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4335 			rval = FC_FAILURE;
4336 		}
4337 		break;
4338 	case FC_PORT_UNBYPASS:
4339 		d_id.b24 = *cmd->pm_cmd_buf;
4340 		tq = ql_d_id_to_queue(ha, d_id);
4341 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4342 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4343 			rval = FC_FAILURE;
4344 		}
4345 		break;
4346 	case FC_PORT_GET_FW_REV:
4347 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4348 		    pha->fw_minor_version, pha->fw_subminor_version);
4349 		length = strlen(buf) + 1;
4350 		if (cmd->pm_data_len < length) {
4351 			cmd->pm_data_len = length;
4352 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4353 			rval = FC_FAILURE;
4354 		} else {
4355 			(void) strcpy(cmd->pm_data_buf, buf);
4356 		}
4357 		break;
4358 
4359 	case FC_PORT_GET_FCODE_REV: {
4360 		caddr_t		fcode_ver_buf = NULL;
4361 
4362 		i0 = 0;
4363 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4364 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4365 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4366 		    (caddr_t)&fcode_ver_buf, &i0);
4367 		length = (uint_t)i0;
4368 
4369 		if (rval2 != DDI_PROP_SUCCESS) {
4370 			EL(ha, "failed, getting version = %xh\n", rval2);
4371 			length = 20;
4372 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4373 			if (fcode_ver_buf != NULL) {
4374 				(void) sprintf(fcode_ver_buf,
4375 				    "NO FCODE FOUND");
4376 			}
4377 		}
4378 
4379 		if (cmd->pm_data_len < length) {
4380 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4381 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4382 			cmd->pm_data_len = length;
4383 			rval = FC_FAILURE;
4384 		} else if (fcode_ver_buf != NULL) {
4385 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4386 			    length);
4387 		}
4388 
4389 		if (fcode_ver_buf != NULL) {
4390 			kmem_free(fcode_ver_buf, length);
4391 		}
4392 		break;
4393 	}
4394 
4395 	case FC_PORT_GET_DUMP:
4396 		QL_DUMP_LOCK(pha);
4397 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4398 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4399 			    "length=%lxh\n", cmd->pm_data_len);
4400 			cmd->pm_data_len = pha->risc_dump_size;
4401 			rval = FC_FAILURE;
4402 		} else if (pha->ql_dump_state & QL_DUMPING) {
4403 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4404 			rval = FC_TRAN_BUSY;
4405 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
4406 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4407 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
4408 		} else {
4409 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4410 			rval = FC_FAILURE;
4411 		}
4412 		QL_DUMP_UNLOCK(pha);
4413 		break;
4414 	case FC_PORT_FORCE_DUMP:
4415 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
4416 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4417 			rval = FC_FAILURE;
4418 		}
4419 		break;
4420 	case FC_PORT_GET_DUMP_SIZE:
4421 		bp = (uint32_t *)cmd->pm_data_buf;
4422 		*bp = pha->risc_dump_size;
4423 		break;
4424 	case FC_PORT_DIAG:
4425 		EL(ha, "diag cmd=%xh\n", cmd->pm_cmd_flags);
4426 
4427 		/* Wait for suspension to end. */
4428 		for (timer = 0; timer < 3000 &&
4429 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4430 			ql_delay(ha, 10000);
4431 		}
4432 
4433 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4434 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
4435 			rval = FC_TRAN_BUSY;
4436 			break;
4437 		}
4438 
4439 		if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4440 			EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4441 			    rval2);
4442 			ql_restart_driver(ha);
4443 			rval = FC_TRAN_BUSY;
4444 			break;
4445 		}
4446 
4447 		switch (cmd->pm_cmd_flags) {
4448 		case QL_DIAG_EXEFMW:
4449 			if (ql_start_firmware(ha) != QL_SUCCESS) {
4450 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4451 				rval = FC_FAILURE;
4452 			}
4453 			break;
4454 		case QL_DIAG_CHKCMDQUE:
4455 			for (i0 = 1, cnt = 0; i0 < pha->osc_max_cnt;
4456 			    i0++) {
4457 				cnt += (pha->outstanding_cmds[i0] != NULL);
4458 			}
4459 			if (cnt != 0) {
4460 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4461 				    "FC_FAILURE\n");
4462 				rval = FC_FAILURE;
4463 			}
4464 			break;
4465 		case QL_DIAG_FMWCHKSUM:
4466 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4467 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4468 				    "FC_FAILURE\n");
4469 				rval = FC_FAILURE;
4470 			}
4471 			break;
4472 		case QL_DIAG_SLFTST:
4473 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4474 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4475 				rval = FC_FAILURE;
4476 			}
4477 			ql_reset_chip(ha);
4478 			set_flags |= ISP_ABORT_NEEDED;
4479 			break;
4480 		case QL_DIAG_REVLVL:
4481 			if (cmd->pm_stat_len <
4482 			    sizeof (ql_adapter_revlvl_t)) {
4483 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4484 				    "slen=%lxh, rlvllen=%lxh\n",
4485 				    cmd->pm_stat_len,
4486 				    sizeof (ql_adapter_revlvl_t));
4487 				rval = FC_NOMEM;
4488 			} else {
4489 				bcopy((void *)&(pha->adapter_stats->revlvl),
4490 				    cmd->pm_stat_buf,
4491 				    (size_t)cmd->pm_stat_len);
4492 				cmd->pm_stat_len =
4493 				    sizeof (ql_adapter_revlvl_t);
4494 			}
4495 			break;
4496 		case QL_DIAG_LPBMBX:
4497 
4498 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4499 				EL(ha, "failed, QL_DIAG_LPBMBX "
4500 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4501 				    "reqd=%lxh\n", cmd->pm_data_len,
4502 				    sizeof (struct app_mbx_cmd));
4503 				rval = FC_INVALID_REQUEST;
4504 				break;
4505 			}
4506 			/*
4507 			 * Don't do the wrap test on a 2200 when the
4508 			 * firmware is running.
4509 			 */
4510 			if (!CFG_IST(ha, CFG_CTRL_22XX)) {
4511 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4512 				mr.mb[1] = mcp->mb[1];
4513 				mr.mb[2] = mcp->mb[2];
4514 				mr.mb[3] = mcp->mb[3];
4515 				mr.mb[4] = mcp->mb[4];
4516 				mr.mb[5] = mcp->mb[5];
4517 				mr.mb[6] = mcp->mb[6];
4518 				mr.mb[7] = mcp->mb[7];
4519 
4520 				bcopy(&mr.mb[0], &mr.mb[10],
4521 				    sizeof (uint16_t) * 8);
4522 
4523 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4524 					EL(ha, "failed, QL_DIAG_LPBMBX "
4525 					    "FC_FAILURE\n");
4526 					rval = FC_FAILURE;
4527 					break;
4528 				} else {
4529 					for (i0 = 1; i0 < 8; i0++) {
4530 						if (mr.mb[i0] !=
4531 						    mr.mb[i0 + 10]) {
4532 							EL(ha, "failed, "
4533 							    "QL_DIAG_LPBMBX "
4534 							    "FC_FAILURE-2\n");
4535 							rval = FC_FAILURE;
4536 							break;
4537 						}
4538 					}
4539 				}
4540 
4541 				if (rval == FC_FAILURE) {
4542 					(void) ql_flash_errlog(ha,
4543 					    FLASH_ERRLOG_ISP_ERR, 0,
4544 					    RD16_IO_REG(ha, hccr),
4545 					    RD16_IO_REG(ha, istatus));
4546 					set_flags |= ISP_ABORT_NEEDED;
4547 				}
4548 			}
4549 			break;
4550 		case QL_DIAG_LPBDTA:
4551 			/*
4552 			 * For loopback data, we receive the
4553 			 * data back in pm_stat_buf. This provides
4554 			 * the user an opportunity to compare the
4555 			 * transmitted and received data.
4556 			 *
4557 			 * NB: lb->options are:
4558 			 *	0 --> Ten bit loopback
4559 			 *	1 --> One bit loopback
4560 			 *	2 --> External loopback
4561 			 */
4562 			if (cmd->pm_data_len > 65536) {
4563 				rval = FC_TOOMANY;
4564 				EL(ha, "failed, QL_DIAG_LPBDTA "
4565 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4566 				break;
4567 			}
4568 			if (ql_get_dma_mem(ha, &buffer_xmt,
4569 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4570 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4571 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4572 				rval = FC_NOMEM;
4573 				break;
4574 			}
4575 			if (ql_get_dma_mem(ha, &buffer_rcv,
4576 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4577 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4578 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4579 				rval = FC_NOMEM;
4580 				break;
4581 			}
4582 			ddi_rep_put8(buffer_xmt.acc_handle,
4583 			    (uint8_t *)cmd->pm_data_buf,
4584 			    (uint8_t *)buffer_xmt.bp,
4585 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4586 
4587 			/* 22xx's adapter must be in loop mode for test. */
4588 			if (CFG_IST(ha, CFG_CTRL_22XX)) {
4589 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4590 				if (ha->flags & POINT_TO_POINT ||
4591 				    (ha->task_daemon_flags & LOOP_DOWN &&
4592 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4593 					cnt = *bptr;
4594 					*bptr = (uint8_t)
4595 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4596 					(void) ql_abort_isp(ha);
4597 					*bptr = (uint8_t)cnt;
4598 				}
4599 			}
4600 
4601 			/* Shutdown IP. */
4602 			if (pha->flags & IP_INITIALIZED) {
4603 				(void) ql_shutdown_ip(pha);
4604 			}
4605 
4606 			lb = (lbp_t *)cmd->pm_cmd_buf;
4607 			lb->transfer_count =
4608 			    (uint32_t)cmd->pm_data_len;
4609 			lb->transfer_segment_count = 0;
4610 			lb->receive_segment_count = 0;
4611 			lb->transfer_data_address =
4612 			    buffer_xmt.cookie.dmac_address;
4613 			lb->receive_data_address =
4614 			    buffer_rcv.cookie.dmac_address;
4615 
4616 			if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4617 				(void) ql_set_loop_point(ha, lb->options);
4618 			}
4619 
4620 			if (ql_loop_back(ha, 0, lb,
4621 			    buffer_xmt.cookie.dmac_notused,
4622 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4623 				bzero((void *)cmd->pm_stat_buf,
4624 				    cmd->pm_stat_len);
4625 				ddi_rep_get8(buffer_rcv.acc_handle,
4626 				    (uint8_t *)cmd->pm_stat_buf,
4627 				    (uint8_t *)buffer_rcv.bp,
4628 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4629 				rval = FC_SUCCESS;
4630 			} else {
4631 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4632 				rval = FC_FAILURE;
4633 			}
4634 
4635 			if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
4636 				(void) ql_set_loop_point(ha, 0);
4637 			}
4638 
4639 			ql_free_phys(ha, &buffer_xmt);
4640 			ql_free_phys(ha, &buffer_rcv);
4641 
4642 			/* Needed to recover the f/w */
4643 			set_flags |= ISP_ABORT_NEEDED;
4644 
4645 			/* Restart IP if it was shutdown. */
4646 			if (pha->flags & IP_ENABLED &&
4647 			    !(pha->flags & IP_INITIALIZED)) {
4648 				(void) ql_initialize_ip(pha);
4649 				ql_isp_rcvbuf(pha);
4650 			}
4651 
4652 			break;
4653 		case QL_DIAG_ECHO: {
4654 			/*
4655 			 * issue an echo command with a user supplied
4656 			 * data pattern and destination address
4657 			 */
4658 			echo_t		echo;		/* temp echo struct */
4659 
4660 			/* Setup echo cmd & adjust for platform */
4661 			opcode = QL_ECHO_CMD;
4662 			BIG_ENDIAN_32(&opcode);
4663 
4664 			/*
4665 			 * due to limitations in the ql
4666 			 * firmaware the echo data field is
4667 			 * limited to 220
4668 			 */
4669 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4670 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4671 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4672 				    "cmdl1=%lxh, statl2=%lxh\n",
4673 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4674 				rval = FC_TOOMANY;
4675 				break;
4676 			}
4677 
4678 			/*
4679 			 * the input data buffer has the user
4680 			 * supplied data pattern.  The "echoed"
4681 			 * data will be DMAed into the output
4682 			 * data buffer.  Therefore the length
4683 			 * of the output buffer must be equal
4684 			 * to or greater then the input buffer
4685 			 * length
4686 			 */
4687 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4688 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4689 				    " cmdl1=%lxh, statl2=%lxh\n",
4690 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4691 				rval = FC_TOOMANY;
4692 				break;
4693 			}
4694 			/* add four bytes for the opcode */
4695 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4696 
4697 			/*
4698 			 * are we 32 or 64 bit addressed???
4699 			 * We need to get the appropriate
4700 			 * DMA and set the command options;
4701 			 * 64 bit (bit 6) or 32 bit
4702 			 * (no bit 6) addressing.
4703 			 * while we are at it lets ask for
4704 			 * real echo (bit 15)
4705 			 */
4706 			echo.options = BIT_15;
4707 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4708 			    !(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
4709 				echo.options = (uint16_t)
4710 				    (echo.options | BIT_6);
4711 			}
4712 
4713 			/*
4714 			 * Set up the DMA mappings for the
4715 			 * output and input data buffers.
4716 			 * First the output buffer
4717 			 */
4718 			if (ql_get_dma_mem(ha, &buffer_xmt,
4719 			    (uint32_t)(cmd->pm_data_len + 4),
4720 			    LITTLE_ENDIAN_DMA,
4721 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4722 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4723 				rval = FC_NOMEM;
4724 				break;
4725 			}
4726 			echo.transfer_data_address = buffer_xmt.cookie;
4727 
4728 			/* Next the input buffer */
4729 			if (ql_get_dma_mem(ha, &buffer_rcv,
4730 			    (uint32_t)(cmd->pm_data_len + 4),
4731 			    LITTLE_ENDIAN_DMA,
4732 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4733 				/*
4734 				 * since we could not allocate
4735 				 * DMA space for the input
4736 				 * buffer we need to clean up
4737 				 * by freeing the DMA space
4738 				 * we allocated for the output
4739 				 * buffer
4740 				 */
4741 				ql_free_phys(ha, &buffer_xmt);
4742 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4743 				rval = FC_NOMEM;
4744 				break;
4745 			}
4746 			echo.receive_data_address = buffer_rcv.cookie;
4747 
4748 			/*
4749 			 * copy the 4 byte ECHO op code to the
4750 			 * allocated DMA space
4751 			 */
4752 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4753 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4754 
4755 			/*
4756 			 * copy the user supplied data to the
4757 			 * allocated DMA space
4758 			 */
4759 			ddi_rep_put8(buffer_xmt.acc_handle,
4760 			    (uint8_t *)cmd->pm_cmd_buf,
4761 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4762 			    DDI_DEV_AUTOINCR);
4763 
4764 			/* Shutdown IP. */
4765 			if (pha->flags & IP_INITIALIZED) {
4766 				(void) ql_shutdown_ip(pha);
4767 			}
4768 
4769 			/* send the echo */
4770 			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4771 				ddi_rep_put8(buffer_rcv.acc_handle,
4772 				    (uint8_t *)buffer_rcv.bp + 4,
4773 				    (uint8_t *)cmd->pm_stat_buf,
4774 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4775 			} else {
4776 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4777 				rval = FC_FAILURE;
4778 			}
4779 
4780 			/* Restart IP if it was shutdown. */
4781 			if (pha->flags & IP_ENABLED &&
4782 			    !(pha->flags & IP_INITIALIZED)) {
4783 				(void) ql_initialize_ip(pha);
4784 				ql_isp_rcvbuf(pha);
4785 			}
4786 			/* free up our DMA buffers */
4787 			ql_free_phys(ha, &buffer_xmt);
4788 			ql_free_phys(ha, &buffer_rcv);
4789 			break;
4790 		}
4791 		default:
4792 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4793 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4794 			rval = FC_INVALID_REQUEST;
4795 			break;
4796 		}
4797 		ql_restart_driver(ha);
4798 		break;
4799 	case FC_PORT_LINK_STATE:
4800 		/* Check for name equal to null. */
4801 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4802 		    index++) {
4803 			if (cmd->pm_cmd_buf[index] != 0) {
4804 				break;
4805 			}
4806 		}
4807 
4808 		/* If name not null. */
4809 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4810 			/* Locate device queue. */
4811 			tq = NULL;
4812 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4813 			    tq == NULL; index++) {
4814 				for (link = ha->dev[index].first; link != NULL;
4815 				    link = link->next) {
4816 					tq = link->base_address;
4817 
4818 					if (bcmp((void *)&tq->port_name[0],
4819 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4820 						break;
4821 					} else {
4822 						tq = NULL;
4823 					}
4824 				}
4825 			}
4826 
4827 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4828 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4829 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4830 			} else {
4831 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4832 				    FC_STATE_OFFLINE;
4833 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4834 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4835 			}
4836 		} else {
4837 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4838 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4839 		}
4840 		break;
4841 	case FC_PORT_INITIALIZE:
4842 		if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4843 			EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4844 			    rval2);
4845 			ql_restart_driver(ha);
4846 			rval = FC_TRAN_BUSY;
4847 			break;
4848 		}
4849 		if (cmd->pm_cmd_len >= 8) {
4850 			tq = NULL;
4851 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4852 			    tq == NULL; index++) {
4853 				for (link = ha->dev[index].first; link != NULL;
4854 				    link = link->next) {
4855 					tq = link->base_address;
4856 
4857 					if (bcmp((void *)&tq->port_name[0],
4858 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4859 						if (!VALID_DEVICE_ID(ha,
4860 						    tq->loop_id)) {
4861 							tq = NULL;
4862 						}
4863 						break;
4864 					} else {
4865 						tq = NULL;
4866 					}
4867 				}
4868 			}
4869 
4870 			if (tq == NULL || ql_target_reset(ha, tq,
4871 			    ha->loop_reset_delay) != QL_SUCCESS) {
4872 				EL(ha, "failed, FC_PORT_INITIALIZE "
4873 				    "FC_FAILURE\n");
4874 				rval = FC_FAILURE;
4875 			}
4876 		} else {
4877 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4878 			    "clen=%lxh\n", cmd->pm_cmd_len);
4879 
4880 			rval = FC_FAILURE;
4881 		}
4882 		ql_restart_driver(ha);
4883 		break;
4884 	case FC_PORT_RLS:
4885 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4886 			EL(ha, "failed, buffer size passed: %lxh, "
4887 			    "req: %lxh\n", cmd->pm_data_len,
4888 			    (sizeof (fc_rls_acc_t)));
4889 			rval = FC_FAILURE;
4890 		} else if (LOOP_NOT_READY(pha)) {
4891 			EL(ha, "loop NOT ready\n");
4892 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4893 		} else if (ql_get_link_status(ha, ha->loop_id,
4894 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4895 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4896 			rval = FC_FAILURE;
4897 #ifdef _BIG_ENDIAN
4898 		} else {
4899 			fc_rls_acc_t		*rls;
4900 
4901 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4902 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4903 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4904 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4905 			LITTLE_ENDIAN_32(&rls->rls_prim_seq_err);
4906 			LITTLE_ENDIAN_32(&rls->rls_invalid_word);
4907 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4908 #endif /* _BIG_ENDIAN */
4909 		}
4910 		break;
4911 	case FC_PORT_GET_NODE_ID:
4912 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4913 		    cmd->pm_data_buf) != QL_SUCCESS) {
4914 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4915 			rval = FC_FAILURE;
4916 		}
4917 		break;
4918 	case FC_PORT_SET_NODE_ID:
4919 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4920 		    cmd->pm_data_buf) != QL_SUCCESS) {
4921 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4922 			rval = FC_FAILURE;
4923 		}
4924 		break;
4925 	case FC_PORT_DOWNLOAD_FCODE:
4926 		if ((rval2 = ql_stall_driver(ha, 0)) != QL_SUCCESS) {
4927 			EL(ha, "stall_driver status=%xh, FC_TRAN_BUSY\n",
4928 			    rval2);
4929 			ql_restart_driver(ha);
4930 			rval = FC_TRAN_BUSY;
4931 			break;
4932 		}
4933 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
4934 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4935 			    (uint32_t)cmd->pm_data_len);
4936 		} else {
4937 			if (cmd->pm_data_buf[0] == 4 &&
4938 			    cmd->pm_data_buf[8] == 0 &&
4939 			    cmd->pm_data_buf[9] == 0x10 &&
4940 			    cmd->pm_data_buf[10] == 0 &&
4941 			    cmd->pm_data_buf[11] == 0) {
4942 				rval = ql_24xx_load_flash(ha,
4943 				    (uint8_t *)cmd->pm_data_buf,
4944 				    (uint32_t)cmd->pm_data_len,
4945 				    ha->flash_fw_addr << 2);
4946 			} else {
4947 				rval = ql_24xx_load_flash(ha,
4948 				    (uint8_t *)cmd->pm_data_buf,
4949 				    (uint32_t)cmd->pm_data_len, 0);
4950 			}
4951 		}
4952 
4953 		if (rval != QL_SUCCESS) {
4954 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4955 			rval = FC_FAILURE;
4956 		} else {
4957 			rval = FC_SUCCESS;
4958 		}
4959 		ql_reset_chip(ha);
4960 		set_flags |= ISP_ABORT_NEEDED;
4961 		ql_restart_driver(ha);
4962 		break;
4963 
4964 	case FC_PORT_GET_P2P_INFO:
4965 
4966 		bzero(cmd->pm_data_buf, cmd->pm_data_len);
4967 		if (cmd->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
4968 			EL(ha, "inadequate data length")
4969 			rval = FC_NOMEM;
4970 			break;
4971 		}
4972 
4973 		p2p_info = (fc_fca_p2p_info_t *)cmd->pm_data_buf;
4974 
4975 		if ((ha->topology & QL_N_PORT) &&
4976 		    (ha->flags & POINT_TO_POINT)) {
4977 			p2p_info->fca_d_id = ha->d_id.b24;
4978 			p2p_info->d_id = ha->n_port->d_id.b24;
4979 
4980 			bcopy((void *) &ha->n_port->port_name[0],
4981 			    (caddr_t)&p2p_info->pwwn, 8);
4982 			bcopy((void *) &ha->n_port->node_name[0],
4983 			    (caddr_t)&p2p_info->nwwn, 8);
4984 			rval = FC_SUCCESS;
4985 
4986 			EL(ha, "P2P HID=%xh, d_id=%xh, WWPN=%02x%02x%02x%02x"
4987 			    "%02x%02x%02x%02x : "
4988 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
4989 			    p2p_info->fca_d_id, p2p_info->d_id,
4990 			    ha->n_port->port_name[0],
4991 			    ha->n_port->port_name[1], ha->n_port->port_name[2],
4992 			    ha->n_port->port_name[3], ha->n_port->port_name[4],
4993 			    ha->n_port->port_name[5], ha->n_port->port_name[6],
4994 			    ha->n_port->port_name[7], ha->n_port->node_name[0],
4995 			    ha->n_port->node_name[1], ha->n_port->node_name[2],
4996 			    ha->n_port->node_name[3], ha->n_port->node_name[4],
4997 			    ha->n_port->node_name[5], ha->n_port->node_name[6],
4998 			    ha->n_port->node_name[7]);
4999 			break;
5000 		} else {
5001 			EL(ha, "No p2p info reported in non n2n topology\n");
5002 			rval = FC_BADCMD;
5003 		}
5004 		break;
5005 
5006 	case FC_PORT_DOWNLOAD_FW:
5007 		EL(ha, "unsupported=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
5008 		rval = FC_BADCMD;
5009 		break;
5010 	default:
5011 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
5012 		rval = FC_BADCMD;
5013 		break;
5014 	}
5015 
5016 	/* Wait for suspension to end. */
5017 	ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
5018 	timer = 0;
5019 
5020 	while (timer++ < 3000 &&
5021 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
5022 		ql_delay(ha, 10000);
5023 	}
5024 
5025 	if (rval != FC_SUCCESS) {
5026 		EL(ha, "failed, rval = %xh\n", rval);
5027 	} else {
5028 		/*EMPTY*/
5029 		QL_PRINT_3(ha, "done\n");
5030 	}
5031 
5032 	return (rval);
5033 }
5034 
5035 static opaque_t
ql_get_device(opaque_t fca_handle,fc_portid_t d_id)5036 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
5037 {
5038 	port_id_t		id;
5039 	ql_adapter_state_t	*ha;
5040 	ql_tgt_t		*tq;
5041 
5042 	id.r.rsvd_1 = 0;
5043 	id.b24 = d_id.port_id;
5044 
5045 	ha = ql_fca_handle_to_state(fca_handle);
5046 	if (ha == NULL) {
5047 		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
5048 		    (void *)fca_handle);
5049 		return (NULL);
5050 	}
5051 	QL_PRINT_3(ha, "started, d_id=%xh\n", id.b24);
5052 
5053 	tq = ql_d_id_to_queue(ha, id);
5054 
5055 	if (tq == NULL && id.b24 != 0 && id.b24 != FS_BROADCAST) {
5056 		EL(ha, "failed, no tq available for d_id: %xh\n", id.b24);
5057 	} else {
5058 		/*EMPTY*/
5059 		QL_PRINT_3(ha, "done\n");
5060 	}
5061 	return (tq);
5062 }
5063 
5064 /* ************************************************************************ */
5065 /*			FCA Driver Local Support Functions.		    */
5066 /* ************************************************************************ */
5067 
5068 /*
5069  * ql_cmd_setup
5070  *	Verifies proper command.
5071  *
5072  * Input:
5073  *	fca_handle = handle setup by ql_bind_port().
5074  *	pkt = pointer to fc_packet.
5075  *	rval = pointer for return value.
5076  *
5077  * Returns:
5078  *	Adapter state pointer, NULL = failure.
5079  *
5080  * Context:
5081  *	Kernel context.
5082  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(NULL, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(ha, "started\n");

	/*
	 * In panic/dump context skip all the state checks below so that
	 * crash dump I/O can proceed unconditionally.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Reject commands while the physical adapter is not online. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP commands, resolve (and cache in pkt_fca_device) the
	 * target queue from the destination ID, and hold off new I/O
	 * while the target has a pending RSCN or needs re-authentication.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/* Check for packet already running. */
	if (sp->handle != 0) {
		*rval = FC_DEVICE_BUSY;
		cmn_err(CE_WARN, "%s(%d) already running pkt=%p, sp=%p, "
		    "sp->pkt=%p, sp->hdl=%x, spf=%x, cq=%p\n", QL_NAME,
		    ha->instance, (void *)pkt, (void *)sp, (void *)sp->pkt,
		    sp->handle, sp->flags, (void *)sp->cmd.head);
		return (NULL);
	}

	/* Round-robin the SRB across the configured response queues. */
	if (ha->rsp_queues_cnt > 1) {
		ADAPTER_STATE_LOCK(ha);
		sp->rsp_q_number = ha->rsp_q_number++;
		if (ha->rsp_q_number == ha->rsp_queues_cnt) {
			ha->rsp_q_number = 0;
		}
		ADAPTER_STATE_UNLOCK(ha);
	} else {
		sp->rsp_q_number = 0;
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);

		*rval = qlc_fm_check_dma_handle(ha, pkt->pkt_cmd_dma);
		if (*rval == DDI_FM_OK) {
			*rval = qlc_fm_check_acc_handle(ha,
			    pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);

		*rval = qlc_fm_check_dma_handle(ha, pkt->pkt_resp_dma);
		if (*rval == DDI_FM_OK) {
			*rval = qlc_fm_check_acc_handle(ha,
			    pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * (The bitwise '&' of the three boolean sub-expressions is
	 * deliberate: it folds the test into a single conditional branch.)
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);

		*rval = qlc_fm_check_dma_handle(ha, pkt->pkt_data_dma);
		if (*rval == DDI_FM_OK) {
			*rval = qlc_fm_check_acc_handle(ha,
			    pkt->pkt_data_acc);
		}
	}

	/* Any FM fault on the DMA/access handles fails the packet. */
	if (*rval != DDI_FM_OK) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;
		pkt->pkt_expln = FC_EXPLN_NONE;
		pkt->pkt_action = FC_ACTION_RETRYABLE;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_io_comp(sp);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this FCA's brand or the packet is foreign. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(ha, "done\n");

	return (ha);
}
5239 
5240 /*
5241  * ql_els_plogi
5242  *	Issue a extended link service port login request.
5243  *
5244  * Input:
5245  *	ha = adapter state pointer.
5246  *	pkt = pointer to fc_packet.
5247  *
5248  * Returns:
5249  *	FC_SUCCESS - the packet was accepted for transport.
5250  *	FC_TRANSPORT_ERROR - a transport error occurred.
5251  *
5252  * Context:
5253  *	Kernel context.
5254  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);

	/* Fail fast when the adapter state machine is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(ha, "offline done\n");
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology it sends a PLOGI after determining
		 * it has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/*
	 * QL_CONSUMED means ql_p2p_plogi() queued the request as an
	 * ELS passthru IOCB; completion is reported asynchronously.
	 */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	/*
	 * Loop ID collision: retry the login once.  Presumably the
	 * first attempt released the stale ID -- NOTE(review): confirm
	 * ql_login_port() semantics for QL_LOOP_ID_USED.
	 */
	case QL_LOOP_ID_USED:
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database; PDF_ADISC also ADISCs it. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		acc.common_service.rx_bufsize =
		    ha->loginparams.common_service.rx_bufsize;
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Report the remote port's WWPN/WWNN in the accept. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Another PLOGI is in flight; reject as busy. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			/* Ask the task daemon to (re)negotiate iiDMA. */
			if (CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware status to a packet state/reason pair. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
5423 
5424 /*
5425  * ql_p2p_plogi
5426  *	Start an extended link service port login request using
5427  *	an ELS Passthru iocb.
5428  *
5429  * Input:
5430  *	ha = adapter state pointer.
5431  *	pkt = pointer to fc_packet.
5432  *
5433  * Returns:
5434  *	QL_CONSUMMED - the iocb was queued for transport.
5435  *
5436  * Context:
5437  *	Kernel context.
5438  */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;		/* scratch tq for port DB probing */
	ql_tgt_t	*tq = &tmp;
	int		rval;
	port_id_t	d_id;
	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;
	uint16_t	loop_id;

	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unplesant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh, id=%x\n", rval, id);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			/* A pending PLOGI entry is the one to adopt. */
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "loop_id=%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle loop_id=%xh, "
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Step the cached handle past this
					 * unusable entry.  The local
					 * indirection avoids a lint error;
					 * note the handle is advanced by
					 * two -- NOTE(review): confirm the
					 * double increment is intentional.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* A logged-in target (non-initiator) entry wins. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle loop_id=%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the PLOGI payload to the device before queuing the IOCB. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * In case fw does not have the loop id ready, driver assume 0 is
	 * used since this is p2p and there is only one remote port.
	 */
	if (id == LAST_LOCAL_LOOP_ID + 1) {
		EL(ha, "out of range loop id; rval=%xh, id=%xh, d_id=%xh\n",
		    rval, id, d_id.b24);
	} else {
		EL(ha, "remote port loop_id '%x' has been logged in, d_id=%x\n",
		    id, d_id.b24);
	}

	tq = ql_d_id_to_queue(ha, d_id);

	/*
	 * LV could use any d_id it likes.
	 * tq may not be available yet.
	 */
	if (tq == NULL) {
		if (id != LAST_LOCAL_LOOP_ID + 1) {
			loop_id = id;
		} else {
			loop_id = 0;
		}
		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, loop_id);

		ADAPTER_STATE_UNLOCK(ha);
	}

	/*
	 * Lun0 should always allocated since tq is
	 * derived from lun queue in ql_els_passthru_entry
	 * in the interrupt handler.
	 */
	sp->lun_queue = ql_lun_queue(ha, tq, 0);

	DEVICE_QUEUE_LOCK(tq);
	ql_timeout_insert(ha, tq, sp);
	DEVICE_QUEUE_UNLOCK(tq);

	/* Queue the ELS passthru IOCB; completion arrives via interrupt. */
	ql_start_iocb(ha, sp);

	return (QL_CONSUMED);
}
5563 
5564 
5565 /*
5566  * ql_els_flogi
5567  *	Issue a extended link service fabric login request.
5568  *
5569  * Input:
5570  *	ha = adapter state pointer.
5571  *	pkt = pointer to fc_packet.
5572  *
5573  * Returns:
5574  *	FC_SUCCESS - the packet was accepted for transport.
5575  *	FC_TRANSPORT_ERROR - a transport error occurred.
5576  *
5577  * Context:
5578  *	Kernel context.
5579  */
5580 static int
ql_els_flogi(ql_adapter_state_t * ha,fc_packet_t * pkt)5581 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5582 {
5583 	ql_tgt_t		*tq = NULL;
5584 	port_id_t		d_id;
5585 	la_els_logi_t		acc;
5586 	class_svc_param_t	*class3_param;
5587 	int			rval = FC_SUCCESS;
5588 	int			accept = 0;
5589 
5590 	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5591 
5592 	bzero(&acc, sizeof (acc));
5593 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5594 
5595 	if (CFG_IST(ha, CFG_N2N_SUPPORT) && ha->topology & QL_N_PORT) {
5596 		/*
5597 		 * d_id of zero in a FLOGI accept response in a point to point
5598 		 * topology triggers evaluation of N Port login initiative.
5599 		 */
5600 		pkt->pkt_resp_fhdr.d_id = 0;
5601 		/*
5602 		 * An N_Port already logged in with the firmware
5603 		 * will have the only database entry.
5604 		 */
5605 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5606 			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5607 		}
5608 
5609 		if (tq != NULL) {
5610 			/*
5611 			 * If the target port has initiative send
5612 			 * up a PLOGI about the new device.
5613 			 */
5614 			if (ql_wwn_cmp(ha, (la_wwn_t *)tq->port_name,
5615 			    (la_wwn_t *)ha->loginparams.nport_ww_name.raw_wwn)
5616 			    == 1) {
5617 				ha->send_plogi_timer = 3;
5618 			} else {
5619 				ha->send_plogi_timer = 0;
5620 			}
5621 			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5622 		} else {
5623 			/*
5624 			 * An N_Port not logged in with the firmware will not
5625 			 * have a database entry.  We accept anyway and rely
5626 			 * on a PLOGI from the upper layers to set the d_id
5627 			 * and s_id.
5628 			 */
5629 			accept = 1;
5630 		}
5631 	} else {
5632 		tq = ql_d_id_to_queue(ha, d_id);
5633 	}
5634 	if ((tq != NULL) || (accept != 0)) {
5635 		/* Build ACC. */
5636 		pkt->pkt_state = FC_PKT_SUCCESS;
5637 		class3_param = (class_svc_param_t *)&acc.class_3;
5638 
5639 		acc.ls_code.ls_code = LA_ELS_ACC;
5640 		acc.common_service.fcph_version = 0x2006;
5641 		if (ha->topology & QL_N_PORT) {
5642 			/* clear F_Port indicator */
5643 			acc.common_service.cmn_features = 0x0800;
5644 		} else {
5645 			acc.common_service.cmn_features = 0x1b00;
5646 		}
5647 		acc.common_service.rx_bufsize =
5648 		    ha->loginparams.common_service.rx_bufsize;
5649 		acc.common_service.conc_sequences = 0xff;
5650 		acc.common_service.relative_offset = 0x03;
5651 		acc.common_service.e_d_tov = 0x7d0;
5652 		if (accept) {
5653 			/* Use the saved N_Port WWNN and WWPN */
5654 			if (ha->n_port != NULL) {
5655 				bcopy((void *)&ha->n_port->port_name[0],
5656 				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5657 				bcopy((void *)&ha->n_port->node_name[0],
5658 				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5659 				/* mark service options invalid */
5660 				class3_param->class_valid_svc_opt = 0x0800;
5661 			} else {
5662 				EL(ha, "ha->n_port is NULL\n");
5663 				/* Build RJT. */
5664 				acc.ls_code.ls_code = LA_ELS_RJT;
5665 
5666 				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5667 				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5668 			}
5669 		} else {
5670 			bcopy((void *)&tq->port_name[0],
5671 			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5672 			bcopy((void *)&tq->node_name[0],
5673 			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5674 
5675 			class3_param = (class_svc_param_t *)&acc.class_3;
5676 			class3_param->class_valid_svc_opt = 0x8800;
5677 			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5678 			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5679 			class3_param->conc_sequences =
5680 			    tq->class3_conc_sequences;
5681 			class3_param->open_sequences_per_exch =
5682 			    tq->class3_open_sequences_per_exch;
5683 		}
5684 	} else {
5685 		/* Build RJT. */
5686 		acc.ls_code.ls_code = LA_ELS_RJT;
5687 
5688 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5689 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5690 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5691 	}
5692 
5693 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5694 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5695 
5696 	if (rval != FC_SUCCESS) {
5697 		EL(ha, "failed, rval = %xh\n", rval);
5698 	} else {
5699 		/*EMPTY*/
5700 		QL_PRINT_3(ha, "done\n");
5701 	}
5702 	return (rval);
5703 }
5704 
5705 /*
5706  * ql_els_logo
5707  *	Issue a extended link service logout request.
5708  *
5709  * Input:
5710  *	ha = adapter state pointer.
5711  *	pkt = pointer to fc_packet.
5712  *
5713  * Returns:
5714  *	FC_SUCCESS - the packet was accepted for transport.
5715  *	FC_TRANSPORT_ERROR - a transport error occurred.
5716  *
5717  * Context:
5718  *	Kernel context.
5719  */
5720 static int
ql_els_logo(ql_adapter_state_t * ha,fc_packet_t * pkt)5721 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5722 {
5723 	port_id_t	d_id;
5724 	ql_tgt_t	*tq;
5725 	la_els_logo_t	acc;
5726 
5727 	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5728 
5729 	bzero(&acc, sizeof (acc));
5730 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5731 
5732 	tq = ql_d_id_to_queue(ha, d_id);
5733 	if (tq) {
5734 		DEVICE_QUEUE_LOCK(tq);
5735 		if (tq->d_id.b24 == BROADCAST_ADDR) {
5736 			DEVICE_QUEUE_UNLOCK(tq);
5737 			return (FC_SUCCESS);
5738 		}
5739 
5740 		tq->flags |= TQF_NEED_AUTHENTICATION;
5741 
5742 		do {
5743 			DEVICE_QUEUE_UNLOCK(tq);
5744 			(void) ql_abort_device(ha, tq, 1);
5745 
5746 			/*
5747 			 * Wait for commands to drain in F/W (doesn't
5748 			 * take more than a few milliseconds)
5749 			 */
5750 			ql_delay(ha, 10000);
5751 
5752 			DEVICE_QUEUE_LOCK(tq);
5753 		} while (tq->outcnt);
5754 
5755 		DEVICE_QUEUE_UNLOCK(tq);
5756 	}
5757 
5758 	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5759 		/* Build ACC. */
5760 		acc.ls_code.ls_code = LA_ELS_ACC;
5761 
5762 		pkt->pkt_state = FC_PKT_SUCCESS;
5763 	} else {
5764 		/* Build RJT. */
5765 		acc.ls_code.ls_code = LA_ELS_RJT;
5766 
5767 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5768 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5769 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5770 	}
5771 
5772 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5773 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5774 
5775 	QL_PRINT_3(ha, "done\n");
5776 
5777 	return (FC_SUCCESS);
5778 }
5779 
5780 /*
5781  * ql_els_prli
5782  *	Issue a extended link service process login request.
5783  *
5784  * Input:
5785  *	ha = adapter state pointer.
5786  *	pkt = pointer to fc_packet.
5787  *
5788  * Returns:
5789  *	FC_SUCCESS - the packet was accepted for transport.
5790  *	FC_TRANSPORT_ERROR - a transport error occurred.
5791  *
5792  * Context:
5793  *	Kernel context.
5794  */
static int
ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq;
	port_id_t		d_id;
	la_els_prli_t		acc;
	prli_svc_param_t	*param;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		/*
		 * In N_Port (p2p) topology with the PLOGI complete, send
		 * the PRLI on the wire as an IOCB; completion is reported
		 * asynchronously (QL_CONSUMED).
		 */
		if ((ha->topology & QL_N_PORT) &&
		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {

			/* always set lun_queue */
			sp->lun_queue = ql_lun_queue(ha, tq, 0);

			DEVICE_QUEUE_LOCK(tq);
			ql_timeout_insert(ha, tq, sp);
			DEVICE_QUEUE_UNLOCK(tq);
			ql_start_iocb(ha, sp);
			rval = QL_CONSUMED;
		} else {
			/* Build ACC. */
			bzero(&acc, sizeof (acc));
			acc.ls_code = LA_ELS_ACC;
			acc.page_length = 0x10;
			acc.payload_length = tq->prli_payload_length;

			/* FCP service parameter page (type 0x08). */
			param = (prli_svc_param_t *)&acc.service_params[0];
			param->type = 0x08;
			param->rsvd = 0x00;
			param->process_assoc_flags = tq->prli_svc_param_word_0;
			param->process_flags = tq->prli_svc_param_word_3;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* in case of P2P, tq might not have been created yet */
		if (ha->topology & QL_N_PORT) {

			/* Acquire adapter state lock. */
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, ha->n_port->n_port_handle);
			ADAPTER_STATE_UNLOCK(ha);

			/*
			 * NOTE(review): tq from ql_dev_init() is used below
			 * without a NULL check -- presumably ql_dev_init()
			 * cannot fail here; confirm.
			 */
			/* always alloc lun #0 */
			sp->lun_queue = ql_lun_queue(ha, tq, 0);
			bcopy((void *)&ha->n_port->port_name[0],
			    (void *) &tq->port_name[0], 8);
			bcopy((void *)&ha->n_port->node_name[0],
			    (void *) &tq->node_name[0], 8);

			DEVICE_QUEUE_LOCK(tq);
			ql_timeout_insert(ha, tq, sp);
			DEVICE_QUEUE_UNLOCK(tq);

			ql_start_iocb(ha, sp);
			rval = QL_CONSUMED;

		} else {

			la_els_rjt_t rjt;

			/* Build RJT. */
			bzero(&rjt, sizeof (rjt));
			rjt.ls_code.ls_code = LA_ELS_RJT;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
			    (uint8_t *)pkt->pkt_resp, sizeof (rjt),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
		}
	}

	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
5892 
5893 /*
5894  * ql_els_prlo
5895  *	Issue a extended link service process logout request.
5896  *
5897  * Input:
5898  *	ha = adapter state pointer.
5899  *	pkt = pointer to fc_packet.
5900  *
5901  * Returns:
5902  *	FC_SUCCESS - the packet was accepted for transport.
5903  *	FC_TRANSPORT_ERROR - a transport error occurred.
5904  *
5905  * Context:
5906  *	Kernel context.
5907  */
5908 /* ARGSUSED */
5909 static int
ql_els_prlo(ql_adapter_state_t * ha,fc_packet_t * pkt)5910 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5911 {
5912 	la_els_prli_t	acc;
5913 
5914 	QL_PRINT_3(ha, "started, d_id=%xh\n", pkt->pkt_cmd_fhdr.d_id);
5915 
5916 	/* Build ACC. */
5917 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5918 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5919 
5920 	acc.ls_code = LA_ELS_ACC;
5921 	acc.service_params[2] = 1;
5922 
5923 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5924 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5925 
5926 	pkt->pkt_state = FC_PKT_SUCCESS;
5927 
5928 	QL_PRINT_3(ha, "done\n");
5929 
5930 	return (FC_SUCCESS);
5931 }
5932 
5933 /*
5934  * ql_els_adisc
5935  *	Issue a extended link service address discovery request.
5936  *
5937  * Input:
5938  *	ha = adapter state pointer.
5939  *	pkt = pointer to fc_packet.
5940  *
5941  * Returns:
5942  *	FC_SUCCESS - the packet was accepted for transport.
5943  *	FC_TRANSPORT_ERROR - a transport error occurred.
5944  *
5945  * Context:
5946  *	Kernel context.
5947  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;

	QL_PRINT_3(ha, "started\n");

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Find the target queue on the AL_PA hash chain for this d_id. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * If the target's loop ID is stale, rediscover it from the
	 * firmware's device ID list.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		/* KM_SLEEP cannot return NULL; the check below is defensive. */
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the entry count returned. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/*
			 * Kick any queued commands on each LUN.
			 * NOTE(review): ql_next() appears to drop the
			 * device queue lock, hence the re-acquire --
			 * confirm against ql_next()'s contract.
			 */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6060 
6061 /*
6062  * ql_els_linit
6063  *	Issue a extended link service loop initialize request.
6064  *
6065  * Input:
6066  *	ha = adapter state pointer.
6067  *	pkt = pointer to fc_packet.
6068  *
6069  * Returns:
6070  *	FC_SUCCESS - the packet was accepted for transport.
6071  *	FC_TRANSPORT_ERROR - a transport error occurred.
6072  *
6073  * Context:
6074  *	Kernel context.
6075  */
6076 static int
ql_els_linit(ql_adapter_state_t * ha,fc_packet_t * pkt)6077 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
6078 {
6079 	ddi_dma_cookie_t	*cp;
6080 	uint32_t		cnt;
6081 	conv_num_t		n;
6082 	port_id_t		d_id;
6083 
6084 	QL_PRINT_3(ha, "started\n");
6085 
6086 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6087 	if (ha->topology & QL_FABRIC_CONNECTION) {
6088 		fc_linit_req_t els;
6089 		lfa_cmd_t lfa;
6090 
6091 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6092 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6093 
6094 		/* Setup LFA mailbox command data. */
6095 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
6096 
6097 		lfa.resp_buffer_length[0] = 4;
6098 
6099 		cp = pkt->pkt_resp_cookie;
6100 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
6101 			n.size64 = (uint64_t)cp->dmac_laddress;
6102 			LITTLE_ENDIAN_64(&n.size64);
6103 		} else {
6104 			n.size32[0] = LSD(cp->dmac_laddress);
6105 			LITTLE_ENDIAN_32(&n.size32[0]);
6106 			n.size32[1] = MSD(cp->dmac_laddress);
6107 			LITTLE_ENDIAN_32(&n.size32[1]);
6108 		}
6109 
6110 		/* Set buffer address. */
6111 		for (cnt = 0; cnt < 8; cnt++) {
6112 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
6113 		}
6114 
6115 		lfa.subcommand_length[0] = 4;
6116 		n.size32[0] = d_id.b24;
6117 		LITTLE_ENDIAN_32(&n.size32[0]);
6118 		lfa.addr[0] = n.size8[0];
6119 		lfa.addr[1] = n.size8[1];
6120 		lfa.addr[2] = n.size8[2];
6121 		lfa.subcommand[1] = 0x70;
6122 		lfa.payload[2] = els.func;
6123 		lfa.payload[4] = els.lip_b3;
6124 		lfa.payload[5] = els.lip_b4;
6125 
6126 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
6127 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
6128 		} else {
6129 			pkt->pkt_state = FC_PKT_SUCCESS;
6130 		}
6131 	} else {
6132 		fc_linit_resp_t rjt;
6133 
6134 		/* Build RJT. */
6135 		bzero(&rjt, sizeof (rjt));
6136 		rjt.ls_code.ls_code = LA_ELS_RJT;
6137 
6138 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
6139 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
6140 
6141 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6142 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6143 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6144 	}
6145 
6146 	QL_PRINT_3(ha, "done\n");
6147 
6148 	return (FC_SUCCESS);
6149 }
6150 
6151 /*
6152  * ql_els_lpc
6153  *	Issue a extended link service loop control request.
6154  *
6155  * Input:
6156  *	ha = adapter state pointer.
6157  *	pkt = pointer to fc_packet.
6158  *
6159  * Returns:
6160  *	FC_SUCCESS - the packet was accepted for transport.
6161  *	FC_TRANSPORT_ERROR - a transport error occurred.
6162  *
6163  * Context:
6164  *	Kernel context.
6165  */
6166 static int
ql_els_lpc(ql_adapter_state_t * ha,fc_packet_t * pkt)6167 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
6168 {
6169 	ddi_dma_cookie_t	*cp;
6170 	uint32_t		cnt;
6171 	conv_num_t		n;
6172 	port_id_t		d_id;
6173 
6174 	QL_PRINT_3(ha, "started\n");
6175 
6176 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6177 	if (ha->topology & QL_FABRIC_CONNECTION) {
6178 		ql_lpc_t els;
6179 		lfa_cmd_t lfa;
6180 
6181 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6182 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6183 
6184 		/* Setup LFA mailbox command data. */
6185 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
6186 
6187 		lfa.resp_buffer_length[0] = 4;
6188 
6189 		cp = pkt->pkt_resp_cookie;
6190 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
6191 			n.size64 = (uint64_t)(cp->dmac_laddress);
6192 			LITTLE_ENDIAN_64(&n.size64);
6193 		} else {
6194 			n.size32[0] = cp->dmac_address;
6195 			LITTLE_ENDIAN_32(&n.size32[0]);
6196 			n.size32[1] = 0;
6197 		}
6198 
6199 		/* Set buffer address. */
6200 		for (cnt = 0; cnt < 8; cnt++) {
6201 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
6202 		}
6203 
6204 		lfa.subcommand_length[0] = 20;
6205 		n.size32[0] = d_id.b24;
6206 		LITTLE_ENDIAN_32(&n.size32[0]);
6207 		lfa.addr[0] = n.size8[0];
6208 		lfa.addr[1] = n.size8[1];
6209 		lfa.addr[2] = n.size8[2];
6210 		lfa.subcommand[1] = 0x71;
6211 		lfa.payload[4] = els.port_control;
6212 		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 16);
6213 
6214 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
6215 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
6216 		} else {
6217 			pkt->pkt_state = FC_PKT_SUCCESS;
6218 		}
6219 	} else {
6220 		ql_lpc_resp_t rjt;
6221 
6222 		/* Build RJT. */
6223 		bzero(&rjt, sizeof (rjt));
6224 		rjt.ls_code.ls_code = LA_ELS_RJT;
6225 
6226 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
6227 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
6228 
6229 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6230 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6231 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6232 	}
6233 
6234 	QL_PRINT_3(ha, "done\n");
6235 
6236 	return (FC_SUCCESS);
6237 }
6238 
6239 /*
6240  * ql_els_lsts
6241  *	Issue a extended link service loop status request.
6242  *
6243  * Input:
6244  *	ha = adapter state pointer.
6245  *	pkt = pointer to fc_packet.
6246  *
6247  * Returns:
6248  *	FC_SUCCESS - the packet was accepted for transport.
6249  *	FC_TRANSPORT_ERROR - a transport error occurred.
6250  *
6251  * Context:
6252  *	Kernel context.
6253  */
6254 static int
ql_els_lsts(ql_adapter_state_t * ha,fc_packet_t * pkt)6255 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
6256 {
6257 	ddi_dma_cookie_t	*cp;
6258 	uint32_t		cnt;
6259 	conv_num_t		n;
6260 	port_id_t		d_id;
6261 
6262 	QL_PRINT_3(ha, "started\n");
6263 
6264 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6265 	if (ha->topology & QL_FABRIC_CONNECTION) {
6266 		fc_lsts_req_t els;
6267 		lfa_cmd_t lfa;
6268 
6269 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6270 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6271 
6272 		/* Setup LFA mailbox command data. */
6273 		bzero((void *)&lfa, sizeof (lfa_cmd_t));
6274 
6275 		lfa.resp_buffer_length[0] = 84;
6276 
6277 		cp = pkt->pkt_resp_cookie;
6278 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
6279 			n.size64 = cp->dmac_laddress;
6280 			LITTLE_ENDIAN_64(&n.size64);
6281 		} else {
6282 			n.size32[0] = cp->dmac_address;
6283 			LITTLE_ENDIAN_32(&n.size32[0]);
6284 			n.size32[1] = 0;
6285 		}
6286 
6287 		/* Set buffer address. */
6288 		for (cnt = 0; cnt < 8; cnt++) {
6289 			lfa.resp_buffer_address[cnt] = n.size8[cnt];
6290 		}
6291 
6292 		lfa.subcommand_length[0] = 2;
6293 		n.size32[0] = d_id.b24;
6294 		LITTLE_ENDIAN_32(&n.size32[0]);
6295 		lfa.addr[0] = n.size8[0];
6296 		lfa.addr[1] = n.size8[1];
6297 		lfa.addr[2] = n.size8[2];
6298 		lfa.subcommand[1] = 0x72;
6299 
6300 		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
6301 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
6302 		} else {
6303 			pkt->pkt_state = FC_PKT_SUCCESS;
6304 		}
6305 	} else {
6306 		fc_lsts_resp_t rjt;
6307 
6308 		/* Build RJT. */
6309 		bzero(&rjt, sizeof (rjt));
6310 		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
6311 
6312 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
6313 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
6314 
6315 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6316 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6317 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6318 	}
6319 
6320 	QL_PRINT_3(ha, "done\n");
6321 
6322 	return (FC_SUCCESS);
6323 }
6324 
6325 /*
6326  * ql_els_scr
6327  *	Issue a extended link service state change registration request.
6328  *
6329  * Input:
6330  *	ha = adapter state pointer.
6331  *	pkt = pointer to fc_packet.
6332  *
6333  * Returns:
6334  *	FC_SUCCESS - the packet was accepted for transport.
6335  *	FC_TRANSPORT_ERROR - a transport error occurred.
6336  *
6337  * Context:
6338  *	Kernel context.
6339  */
6340 static int
ql_els_scr(ql_adapter_state_t * ha,fc_packet_t * pkt)6341 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
6342 {
6343 	fc_scr_resp_t	acc;
6344 
6345 	QL_PRINT_3(ha, "started\n");
6346 
6347 	bzero(&acc, sizeof (acc));
6348 	if (ha->topology & QL_FABRIC_CONNECTION) {
6349 		fc_scr_req_t els;
6350 
6351 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
6352 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
6353 
6354 		if (ql_send_change_request(ha, els.scr_func) ==
6355 		    QL_SUCCESS) {
6356 			/* Build ACC. */
6357 			acc.scr_acc = LA_ELS_ACC;
6358 
6359 			pkt->pkt_state = FC_PKT_SUCCESS;
6360 		} else {
6361 			/* Build RJT. */
6362 			acc.scr_acc = LA_ELS_RJT;
6363 
6364 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
6365 			pkt->pkt_reason = FC_REASON_HW_ERROR;
6366 			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
6367 		}
6368 	} else {
6369 		/* Build RJT. */
6370 		acc.scr_acc = LA_ELS_RJT;
6371 
6372 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6373 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6374 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6375 	}
6376 
6377 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6378 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6379 
6380 	QL_PRINT_3(ha, "done\n");
6381 
6382 	return (FC_SUCCESS);
6383 }
6384 
6385 /*
6386  * ql_els_rscn
6387  *	Issue a extended link service register state
6388  *	change notification request.
6389  *
6390  * Input:
6391  *	ha = adapter state pointer.
6392  *	pkt = pointer to fc_packet.
6393  *
6394  * Returns:
6395  *	FC_SUCCESS - the packet was accepted for transport.
6396  *	FC_TRANSPORT_ERROR - a transport error occurred.
6397  *
6398  * Context:
6399  *	Kernel context.
6400  */
6401 static int
ql_els_rscn(ql_adapter_state_t * ha,fc_packet_t * pkt)6402 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6403 {
6404 	ql_rscn_resp_t	acc;
6405 
6406 	QL_PRINT_3(ha, "started\n");
6407 
6408 	bzero(&acc, sizeof (acc));
6409 	if (ha->topology & QL_FABRIC_CONNECTION) {
6410 		/* Build ACC. */
6411 		acc.scr_acc = LA_ELS_ACC;
6412 
6413 		pkt->pkt_state = FC_PKT_SUCCESS;
6414 	} else {
6415 		/* Build RJT. */
6416 		acc.scr_acc = LA_ELS_RJT;
6417 
6418 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
6419 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6420 		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6421 	}
6422 
6423 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6424 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6425 
6426 	QL_PRINT_3(ha, "done\n");
6427 
6428 	return (FC_SUCCESS);
6429 }
6430 
6431 /*
6432  * ql_els_farp_req
6433  *	Issue FC Address Resolution Protocol (FARP)
6434  *	extended link service request.
6435  *
6436  *	Note: not supported.
6437  *
6438  * Input:
6439  *	ha = adapter state pointer.
6440  *	pkt = pointer to fc_packet.
6441  *
6442  * Returns:
6443  *	FC_SUCCESS - the packet was accepted for transport.
6444  *	FC_TRANSPORT_ERROR - a transport error occurred.
6445  *
6446  * Context:
6447  *	Kernel context.
6448  */
6449 /* ARGSUSED */
6450 static int
ql_els_farp_req(ql_adapter_state_t * ha,fc_packet_t * pkt)6451 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6452 {
6453 	ql_acc_rjt_t	acc;
6454 
6455 	QL_PRINT_3(ha, "started\n");
6456 
6457 	bzero(&acc, sizeof (acc));
6458 
6459 	/* Build ACC. */
6460 	acc.ls_code.ls_code = LA_ELS_ACC;
6461 
6462 	pkt->pkt_state = FC_PKT_SUCCESS;
6463 
6464 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6465 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6466 
6467 	QL_PRINT_3(ha, "done\n");
6468 
6469 	return (FC_SUCCESS);
6470 }
6471 
6472 /*
6473  * ql_els_farp_reply
6474  *	Issue FC Address Resolution Protocol (FARP)
6475  *	extended link service reply.
6476  *
6477  *	Note: not supported.
6478  *
6479  * Input:
6480  *	ha = adapter state pointer.
6481  *	pkt = pointer to fc_packet.
6482  *
6483  * Returns:
6484  *	FC_SUCCESS - the packet was accepted for transport.
6485  *	FC_TRANSPORT_ERROR - a transport error occurred.
6486  *
6487  * Context:
6488  *	Kernel context.
6489  */
6490 /* ARGSUSED */
6491 static int
ql_els_farp_reply(ql_adapter_state_t * ha,fc_packet_t * pkt)6492 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6493 {
6494 	ql_acc_rjt_t	acc;
6495 
6496 	QL_PRINT_3(ha, "started\n");
6497 
6498 	bzero(&acc, sizeof (acc));
6499 
6500 	/* Build ACC. */
6501 	acc.ls_code.ls_code = LA_ELS_ACC;
6502 
6503 	pkt->pkt_state = FC_PKT_SUCCESS;
6504 
6505 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6506 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6507 
6508 	QL_PRINT_3(ha, "done\n");
6509 
6510 	return (FC_SUCCESS);
6511 }
6512 
/*
 * ql_els_rnid
 *	Issue an extended link service request node identification
 *	data (RNID) request to the destination port.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the request completed and an ACC was built.
 *	FC_FAILURE - no target, invalid loop ID, or the RNID failed;
 *		     an RJT was built.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(ha, "started\n");

	req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Locate the target queue for the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	/*
	 * NOTE(review): the request is read straight from the command
	 * DMA buffer rather than through ddi_rep_get8() as the other
	 * ELS handlers do — confirm this is intentional.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		/* Build and deliver an RJT, then fail the request. */
		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Build the ACC around the RNID header returned by the device. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	bcopy(rnid_acc, &acc.hdr, sizeof (fc_rnid_hdr_t));
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6576 
/*
 * ql_els_rls
 *	Issue an extended link service read link error status (RLS)
 *	request to the destination port.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the request completed and an ACC was built.
 *	FC_FAILURE - no target, invalid loop ID, or the link status
 *		     request failed; an RJT was built.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(ha, "started\n");

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Locate the target queue for the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		/* Build and deliver an RJT, then fail the request. */
		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Counters arrive little-endian; convert in place. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build the ACC from the converted link error counters. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
6646 
/*
 * ql_busy_plogi
 *	Check whether a PLOGI to a target can proceed: wait for the
 *	target's outstanding commands to drain and verify no unsolicited
 *	callbacks for it remain queued to the task daemon.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - the PLOGI may proceed.
 *	FC_TRAN_BUSY - outstanding work remains; caller should retry.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t	*ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	QL_PRINT_3(ha, "started\n");

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying so I/O can drain. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				/* All retries exhausted; log and give up. */
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(ha, "done, busy or async\n");
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->unsol_callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/* Resolve the port ID the queued callback belongs to. */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			/* A callback for this target is still pending. */
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
6729 
6730 /*
6731  * ql_login_port
6732  *	Logs in a device if not already logged in.
6733  *
6734  * Input:
6735  *	ha = adapter state pointer.
6736  *	d_id = 24 bit port ID.
6737  *	DEVICE_QUEUE_LOCK must be released.
6738  *
6739  * Returns:
6740  *	QL local function return status code.
6741  *
6742  * Context:
6743  *	Kernel context.
6744  */
6745 static int
ql_login_port(ql_adapter_state_t * ha,port_id_t d_id)6746 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6747 {
6748 	ql_adapter_state_t	*vha;
6749 	ql_link_t		*link;
6750 	uint16_t		index;
6751 	ql_tgt_t		*tq, *tq2;
6752 	uint16_t		loop_id, first_loop_id, last_loop_id;
6753 	int			rval = QL_SUCCESS;
6754 
6755 	QL_PRINT_3(ha, "started, d_id=%xh\n", d_id.b24);
6756 
6757 	/* Do not login vports */
6758 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6759 		if (vha->d_id.b24 == d_id.b24) {
6760 			EL(ha, "failed=%xh, d_id=%xh vp_index=%xh\n",
6761 			    QL_FUNCTION_FAILED, d_id.b24, vha->vp_index);
6762 			return (QL_FUNCTION_FAILED);
6763 		}
6764 	}
6765 
6766 	/* Get head queue index. */
6767 	index = ql_alpa_to_index[d_id.b.al_pa];
6768 
6769 	/* Check for device already has a queue. */
6770 	tq = NULL;
6771 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6772 		tq = link->base_address;
6773 		if (tq->d_id.b24 == d_id.b24) {
6774 			loop_id = tq->loop_id;
6775 			break;
6776 		} else {
6777 			tq = NULL;
6778 		}
6779 	}
6780 
6781 	/* Let's stop issuing any IO and unsolicited logo */
6782 	if ((tq != NULL) && (!(ddi_in_panic()))) {
6783 		DEVICE_QUEUE_LOCK(tq);
6784 		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6785 		tq->flags &= ~TQF_RSCN_RCVD;
6786 		DEVICE_QUEUE_UNLOCK(tq);
6787 	}
6788 	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6789 	    !(tq->flags & TQF_FABRIC_DEVICE)) {
6790 		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6791 	}
6792 
6793 	/* Special case for Nameserver */
6794 	if (d_id.b24 == FS_NAME_SERVER) {
6795 		if (!(ha->topology & QL_FABRIC_CONNECTION)) {
6796 			EL(ha, "failed=%xh, d_id=%xh no fabric\n",
6797 			    QL_FUNCTION_FAILED, d_id.b24);
6798 			return (QL_FUNCTION_FAILED);
6799 		}
6800 
6801 		loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
6802 		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6803 		if (tq == NULL) {
6804 			ADAPTER_STATE_LOCK(ha);
6805 			tq = ql_dev_init(ha, d_id, loop_id);
6806 			ADAPTER_STATE_UNLOCK(ha);
6807 			if (tq == NULL) {
6808 				EL(ha, "failed=%xh, d_id=%xh\n",
6809 				    QL_FUNCTION_FAILED, d_id.b24);
6810 				return (QL_FUNCTION_FAILED);
6811 			}
6812 		}
6813 		if (!(CFG_IST(ha, CFG_CTRL_82XX))) {
6814 			rval = ql_login_fabric_port(ha, tq, loop_id);
6815 			if (rval == QL_SUCCESS) {
6816 				tq->loop_id = loop_id;
6817 				tq->flags |= TQF_FABRIC_DEVICE;
6818 				(void) ql_get_port_database(ha, tq, PDF_NONE);
6819 			}
6820 		}
6821 	/* Check for device already logged in. */
6822 	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6823 		if (tq->flags & TQF_FABRIC_DEVICE) {
6824 			rval = ql_login_fabric_port(ha, tq, loop_id);
6825 			if (rval == QL_PORT_ID_USED) {
6826 				rval = QL_SUCCESS;
6827 			}
6828 		} else if (LOCAL_LOOP_ID(loop_id)) {
6829 			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6830 			    (tq->flags & TQF_INITIATOR_DEVICE ?
6831 			    LLF_NONE : LLF_PLOGI));
6832 			if (rval == QL_SUCCESS) {
6833 				DEVICE_QUEUE_LOCK(tq);
6834 				tq->loop_id = loop_id;
6835 				DEVICE_QUEUE_UNLOCK(tq);
6836 			}
6837 		}
6838 	} else if (ha->topology & QL_FABRIC_CONNECTION) {
6839 		/* Locate unused loop ID. */
6840 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6841 			first_loop_id = 0;
6842 			last_loop_id = LAST_N_PORT_HDL;
6843 		} else if (ha->topology & QL_F_PORT) {
6844 			first_loop_id = 0;
6845 			last_loop_id = SNS_LAST_LOOP_ID;
6846 		} else {
6847 			first_loop_id = SNS_FIRST_LOOP_ID;
6848 			last_loop_id = SNS_LAST_LOOP_ID;
6849 		}
6850 
6851 		/* Acquire adapter state lock. */
6852 		ADAPTER_STATE_LOCK(ha);
6853 
6854 		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6855 		if (tq == NULL) {
6856 			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6857 			    d_id.b24);
6858 
6859 			ADAPTER_STATE_UNLOCK(ha);
6860 
6861 			return (QL_FUNCTION_FAILED);
6862 		}
6863 
6864 		rval = QL_FUNCTION_FAILED;
6865 		loop_id = ha->pha->free_loop_id++;
6866 		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6867 		    index--) {
6868 			if (loop_id < first_loop_id ||
6869 			    loop_id > last_loop_id) {
6870 				loop_id = first_loop_id;
6871 				ha->pha->free_loop_id = (uint16_t)
6872 				    (loop_id + 1);
6873 			}
6874 
6875 			/* Bypass if loop ID used. */
6876 			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6877 				tq2 = ql_loop_id_to_queue(vha, loop_id);
6878 				if (tq2 != NULL && tq2 != tq) {
6879 					break;
6880 				}
6881 			}
6882 			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6883 			    loop_id == ha->loop_id) {
6884 				loop_id = ha->pha->free_loop_id++;
6885 				continue;
6886 			}
6887 
6888 			ADAPTER_STATE_UNLOCK(ha);
6889 			rval = ql_login_fabric_port(ha, tq, loop_id);
6890 
6891 			/*
6892 			 * If PORT_ID_USED is returned
6893 			 * the login_fabric_port() updates
6894 			 * with the correct loop ID
6895 			 */
6896 			switch (rval) {
6897 			case QL_PORT_ID_USED:
6898 				/*
6899 				 * use f/w handle and try to
6900 				 * login again.
6901 				 */
6902 				ADAPTER_STATE_LOCK(ha);
6903 				ha->pha->free_loop_id--;
6904 				ADAPTER_STATE_UNLOCK(ha);
6905 				loop_id = tq->loop_id;
6906 				break;
6907 
6908 			case QL_SUCCESS:
6909 				tq->flags |= TQF_FABRIC_DEVICE;
6910 				(void) ql_get_port_database(ha,
6911 				    tq, PDF_NONE);
6912 				index = 1;
6913 				break;
6914 
6915 			case QL_LOOP_ID_USED:
6916 				tq->loop_id = PORT_NO_LOOP_ID;
6917 				ADAPTER_STATE_LOCK(ha);
6918 				loop_id = ha->pha->free_loop_id++;
6919 				ADAPTER_STATE_UNLOCK(ha);
6920 				break;
6921 
6922 			case QL_ALL_IDS_IN_USE:
6923 				tq->loop_id = PORT_NO_LOOP_ID;
6924 				index = 1;
6925 				break;
6926 
6927 			default:
6928 				tq->loop_id = PORT_NO_LOOP_ID;
6929 				index = 1;
6930 				break;
6931 			}
6932 
6933 			ADAPTER_STATE_LOCK(ha);
6934 		}
6935 
6936 		ADAPTER_STATE_UNLOCK(ha);
6937 	} else {
6938 		rval = QL_FUNCTION_FAILED;
6939 	}
6940 
6941 	if (rval != QL_SUCCESS) {
6942 		EL(ha, "failed, rval=%xh, d_id=%xh\n",
6943 		    rval, d_id.b24);
6944 	} else {
6945 		EL(ha, "d_id=%xh, loop_id=%xh, "
6946 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6947 		    tq->loop_id, tq->port_name[0], tq->port_name[1],
6948 		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
6949 		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6950 	}
6951 	return (rval);
6952 }
6953 
6954 /*
6955  * ql_login_fabric_port
6956  *	Issue login fabric port mailbox command.
6957  *
6958  * Input:
6959  *	ha:		adapter state pointer.
6960  *	tq:		target queue pointer.
6961  *	loop_id:	FC Loop ID.
6962  *
6963  * Returns:
6964  *	ql local function return status code.
6965  *
6966  * Context:
6967  *	Kernel context.
6968  */
6969 static int
ql_login_fabric_port(ql_adapter_state_t * ha,ql_tgt_t * tq,uint16_t loop_id)6970 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6971 {
6972 	int		rval;
6973 	int		index;
6974 	int		retry = 0;
6975 	port_id_t	d_id;
6976 	ql_tgt_t	*newq;
6977 	ql_mbx_data_t	mr;
6978 
6979 	QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
6980 
6981 	/*
6982 	 * QL_PARAMETER_ERROR also means the firmware is not able to allocate
6983 	 * PCB entry due to resource issues, or collision.
6984 	 */
6985 	do {
6986 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6987 		if ((rval == QL_PARAMETER_ERROR) ||
6988 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6989 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6990 			retry++;
6991 			drv_usecwait(ha->plogi_params->retry_dly_usec);
6992 		} else {
6993 			break;
6994 		}
6995 	} while (retry < ha->plogi_params->retry_cnt);
6996 
6997 	switch (rval) {
6998 	case QL_SUCCESS:
6999 		tq->loop_id = loop_id;
7000 		break;
7001 
7002 	case QL_PORT_ID_USED:
7003 		/*
7004 		 * This Loop ID should NOT be in use in drivers
7005 		 */
7006 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
7007 
7008 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
7009 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
7010 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
7011 			    newq->loop_id, newq->d_id.b24);
7012 			ql_send_logo(ha, newq, NULL);
7013 		}
7014 
7015 		tq->loop_id = mr.mb[1];
7016 		break;
7017 
7018 	case QL_LOOP_ID_USED:
7019 		d_id.b.al_pa = LSB(mr.mb[2]);
7020 		d_id.b.area = MSB(mr.mb[2]);
7021 		d_id.b.domain = LSB(mr.mb[1]);
7022 
7023 		newq = ql_d_id_to_queue(ha, d_id);
7024 		if (newq && (newq->loop_id != loop_id)) {
7025 			/*
7026 			 * This should NEVER ever happen; but this
7027 			 * code is needed to bail out when the worst
7028 			 * case happens - or as used to happen before
7029 			 */
7030 			QL_PRINT_2(ha, "Loop ID is now "
7031 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
7032 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
7033 			    tq->d_id.b24, loop_id,
7034 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
7035 			    newq->d_id.b24, loop_id);
7036 
7037 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
7038 				ADAPTER_STATE_LOCK(ha);
7039 
7040 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
7041 				ql_add_link_b(&ha->dev[index], &newq->device);
7042 
7043 				newq->d_id.b24 = d_id.b24;
7044 
7045 				index = ql_alpa_to_index[d_id.b.al_pa];
7046 				ql_add_link_b(&ha->dev[index], &newq->device);
7047 
7048 				ADAPTER_STATE_UNLOCK(ha);
7049 			}
7050 
7051 			(void) ql_get_port_database(ha, newq, PDF_NONE);
7052 
7053 		}
7054 
7055 		/*
7056 		 * Invalidate the loop ID for the
7057 		 * us to obtain a new one.
7058 		 */
7059 		tq->loop_id = PORT_NO_LOOP_ID;
7060 		break;
7061 
7062 	case QL_ALL_IDS_IN_USE:
7063 		rval = QL_FUNCTION_FAILED;
7064 		EL(ha, "no loop id's available\n");
7065 		break;
7066 
7067 	default:
7068 		if (rval == QL_COMMAND_ERROR) {
7069 			switch (mr.mb[1]) {
7070 			case 2:
7071 			case 3:
7072 				rval = QL_MEMORY_ALLOC_FAILED;
7073 				break;
7074 
7075 			case 0xd:
7076 			case 4:
7077 				rval = QL_FUNCTION_TIMEOUT;
7078 				break;
7079 			case 1:
7080 			case 5:
7081 			case 7:
7082 				rval = QL_FABRIC_NOT_INITIALIZED;
7083 				break;
7084 			default:
7085 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
7086 				break;
7087 			}
7088 		} else {
7089 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
7090 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
7091 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
7092 		}
7093 		break;
7094 	}
7095 
7096 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
7097 	    rval != QL_LOOP_ID_USED) {
7098 		EL(ha, "failed=%xh\n", rval);
7099 	} else {
7100 		/*EMPTY*/
7101 		QL_PRINT_3(ha, "done\n");
7102 	}
7103 	return (rval);
7104 }
7105 
7106 /*
7107  * ql_logout_port
7108  *	Logs out a device if possible.
7109  *
7110  * Input:
7111  *	ha:	adapter state pointer.
7112  *	d_id:	24 bit port ID.
7113  *
7114  * Returns:
7115  *	QL local function return status code.
7116  *
7117  * Context:
7118  *	Kernel context.
7119  */
7120 static int
ql_logout_port(ql_adapter_state_t * ha,port_id_t d_id)7121 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
7122 {
7123 	ql_link_t	*link;
7124 	ql_tgt_t	*tq;
7125 	uint16_t	index;
7126 
7127 	QL_PRINT_3(ha, "started\n");
7128 
7129 	/* Get head queue index. */
7130 	index = ql_alpa_to_index[d_id.b.al_pa];
7131 
7132 	/* Get device queue. */
7133 	tq = NULL;
7134 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
7135 		tq = link->base_address;
7136 		if (tq->d_id.b24 == d_id.b24) {
7137 			break;
7138 		} else {
7139 			tq = NULL;
7140 		}
7141 	}
7142 
7143 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
7144 		(void) ql_logout_fabric_port(ha, tq);
7145 		tq->loop_id = PORT_NO_LOOP_ID;
7146 	}
7147 
7148 	QL_PRINT_3(ha, "done\n");
7149 
7150 	return (QL_SUCCESS);
7151 }
7152 
7153 /*
7154  * ql_dev_init
7155  *	Initialize/allocate device queue.
7156  *
7157  * Input:
7158  *	ha:		adapter state pointer.
7159  *	d_id:		device destination ID
7160  *	loop_id:	device loop ID
7161  *	ADAPTER_STATE_LOCK must be already obtained.
7162  *
7163  * Returns:
7164  *	NULL = failure
7165  *
7166  * Context:
7167  *	Kernel context.
7168  */
7169 ql_tgt_t *
ql_dev_init(ql_adapter_state_t * ha,port_id_t d_id,uint16_t loop_id)7170 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
7171 {
7172 	ql_link_t	*link;
7173 	uint16_t	index;
7174 	ql_tgt_t	*tq;
7175 
7176 	QL_PRINT_3(ha, "started, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
7177 
7178 	index = ql_alpa_to_index[d_id.b.al_pa];
7179 
7180 	/* If device queue exists, set proper loop ID. */
7181 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
7182 		tq = link->base_address;
7183 		if (tq->d_id.b24 == d_id.b24) {
7184 			tq->loop_id = loop_id;
7185 
7186 			/* Reset port down retry count. */
7187 			tq->port_down_retry_count = ha->port_down_retry_count;
7188 			tq->qfull_retry_count = ha->qfull_retry_count;
7189 
7190 			break;
7191 		}
7192 	}
7193 
7194 	/* If device does not have queue. */
7195 	if (link == NULL) {
7196 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
7197 		if (tq != NULL) {
7198 			/*
7199 			 * mutex to protect the device queue,
7200 			 * does not block interrupts.
7201 			 */
7202 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
7203 			    ha->intr_pri);
7204 
7205 			tq->d_id.b24 = d_id.b24;
7206 			tq->loop_id = loop_id;
7207 			tq->device.base_address = tq;
7208 			tq->iidma_rate = IIDMA_RATE_INIT;
7209 
7210 			/* Reset port down retry count. */
7211 			tq->port_down_retry_count = ha->port_down_retry_count;
7212 			tq->qfull_retry_count = ha->qfull_retry_count;
7213 
7214 			/* Add device to device queue. */
7215 			ql_add_link_b(&ha->dev[index], &tq->device);
7216 		}
7217 	}
7218 
7219 	if (tq == NULL) {
7220 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
7221 	} else {
7222 		/*EMPTY*/
7223 		QL_PRINT_3(ha, "done\n");
7224 	}
7225 	return (tq);
7226 }
7227 
7228 /*
7229  * ql_dev_free
7230  *	Remove queue from device list and frees resources used by queue.
7231  *
7232  * Input:
7233  *	ha:	adapter state pointer.
7234  *	tq:	target queue pointer.
7235  *	ADAPTER_STATE_LOCK must be already obtained.
7236  *
7237  * Context:
7238  *	Kernel context.
7239  */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Refuse to free the target while any of its LUN queues still
	 * has a command waiting; freeing now would orphan those SRBs.
	 */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			EL(ha, "cmd %ph pending in lq=%ph, lun=%xh\n",
			    lq->cmd.first, lq, lq->lun_no);
			return;
		}
	}

	/* Only free when no commands are outstanding on the device. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				/* Unlink the target from the adapter list. */
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue. 'link' is reused
				 * here and advanced before each free; the
				 * break below exits the outer loop, so the
				 * stale outer iterator is never read.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
7285 
7286 /*
7287  * ql_lun_queue
 *	Allocate a LUN queue if one does not exist.
7289  *
7290  * Input:
7291  *	ha:	adapter state pointer.
7292  *	tq:		target queue.
7293  *	lun_addr:	LUN number.
7294  *
7295  * Returns:
7296  *	NULL = failure
7297  *
7298  * Context:
7299  *	Kernel context.
7300  */
static ql_lun_t *
ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint64_t lun_addr)
{
	ql_lun_t	*lq;
	ql_link_t	*link;
	uint16_t	lun_no, lun_no_tmp;
	fcp_ent_addr_t	*fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Fast path: most commands address the same LUN as the
	 * previous command for this target.
	 */
	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_addr ==
	    lun_addr) {
		QL_PRINT_3(ha, "fast done\n");
		return (tq->last_lun_queue);
	}

	/*
	 * Search the target's existing LUN queues for this address.
	 * NOTE(review): this traversal runs without DEVICE_QUEUE_LOCK
	 * while insertion below takes it — presumably safe because
	 * entries are only appended here; confirm against ql_dev_free.
	 */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->lun_addr == lun_addr) {
			QL_PRINT_3(ha, "found done\n");
			tq->last_lun_queue = lq;
			return (lq);
		}
	}

	/*
	 * Only first-level LUN addressing is supported; log (but do
	 * not reject) addresses that use the deeper levels.
	 */
	if (fcp_ent_addr->ent_addr_1 != 0 || fcp_ent_addr->ent_addr_2 != 0 ||
	    fcp_ent_addr->ent_addr_3 != 0) {
		EL(ha, "Unsupported LUN Addressing level=0x%llxh", lun_addr);
	}

	/* Swap the first-level entry's bytes into host order. */
	lun_no_tmp = CHAR_TO_SHORT(lobyte(fcp_ent_addr->ent_addr_0),
	    hibyte(fcp_ent_addr->ent_addr_0));

	/* Strip the addressing-method bits to get the plain LUN number. */
	lun_no = lun_no_tmp & ~(QL_LUN_AM_MASK << 8);

	/* Log (but still accept) the unsupported "LUN" addressing method. */
	if (lun_no_tmp & (QL_LUN_AM_LUN << 8)) {
		EL(ha, "Unsupported first level LUN Addressing method=%xh, "
		    "lun=%d(%xh)\n", lun_no_tmp & (QL_LUN_AM_MASK << 8),
		    lun_no, lun_no_tmp);
	}

	/* Create and initialize LUN queue. */
	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
	if (lq != NULL) {	/* KM_SLEEP allocations cannot return NULL */
		lq->link.base_address = lq;
		lq->target_queue = tq;
		lq->lun_addr = lun_addr;
		lq->lun_no = lun_no;

		/* Append to the target's LUN list under the queue lock. */
		DEVICE_QUEUE_LOCK(tq);
		ql_add_link_b(&tq->lun_queues, &lq->link);
		DEVICE_QUEUE_UNLOCK(tq);
		tq->last_lun_queue = lq;
	}

	QL_PRINT_3(ha, "done\n");

	return (lq);
}
7363 
7364 /*
7365  * ql_fcp_scsi_cmd
7366  *	Process fibre channel (FCP) SCSI protocol commands.
7367  *
7368  * Input:
7369  *	ha = adapter state pointer.
7370  *	pkt = pointer to fc_packet.
7371  *	sp = srb pointer.
7372  *
7373  * Returns:
7374  *	FC_SUCCESS - the packet was accepted for transport.
7375  *	FC_TRANSPORT_ERROR - a transport error occurred.
7376  *
7377  * Context:
7378  *	Kernel context.
7379  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint64_t	fcp_ent_addr = 0;

	QL_PRINT_3(ha, "started\n");

	/* Resolve the target queue; fall back to a D_ID lookup. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* The FCP command payload sits at the head of pkt_cmd. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	fcp_ent_addr = *(uint64_t *)(&sp->fcp->fcp_ent_addr);
	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, fcp_ent_addr)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			/* Task management request; completed inline. */
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Normal SCSI command; account for statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			sp->req_cnt = 1;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/*
				 * Setup IOCB count: one command IOCB plus
				 * enough continuation IOCBs for the DMA
				 * cookies beyond ha->cmd_segs.
				 */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
				    (!CFG_IST(ha, CFG_CTRL_82XX) ||
				    sp->sg_dma.dma_handle == NULL)) {
					uint32_t	cnt;

					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						/* Partial cont. + cmd IOCB. */
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						/* Account for the cmd IOCB. */
						sp->req_cnt++;
					}
				}
			}
			QL_PRINT_3(ha, "done\n");

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No usable device/LUN queue; reject the packet locally. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_io_comp(sp);
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
7470 
7471 /*
7472  * ql_task_mgmt
7473  *	Task management function processor.
7474  *
7475  * Input:
7476  *	ha:	adapter state pointer.
7477  *	tq:	target queue pointer.
7478  *	pkt:	pointer to fc_packet.
7479  *	sp:	SRB pointer.
7480  *
7481  * Context:
7482  *	Kernel context.
7483  */
7484 static void
ql_task_mgmt(ql_adapter_state_t * ha,ql_tgt_t * tq,fc_packet_t * pkt,ql_srb_t * sp)7485 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7486     ql_srb_t *sp)
7487 {
7488 	fcp_rsp_t		*fcpr;
7489 	struct fcp_rsp_info	*rsp;
7490 	ql_lun_t		*lq = sp->lun_queue;
7491 
7492 	QL_PRINT_3(ha, "started\n");
7493 
7494 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7495 	rsp = (struct fcp_rsp_info *)(pkt->pkt_resp + sizeof (fcp_rsp_t));
7496 
7497 	bzero(fcpr, pkt->pkt_rsplen);
7498 
7499 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7500 	fcpr->fcp_response_len = 8;
7501 
7502 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7503 		if (ql_clear_aca(ha, tq, lq) != QL_SUCCESS) {
7504 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7505 		}
7506 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7507 		if (ql_lun_reset(ha, tq, lq) != QL_SUCCESS) {
7508 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7509 		}
7510 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7511 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7512 		    QL_SUCCESS) {
7513 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7514 		}
7515 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7516 		if (ql_clear_task_set(ha, tq, lq) != QL_SUCCESS) {
7517 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7518 		}
7519 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7520 		if (ql_abort_task_set(ha, tq, lq) != QL_SUCCESS) {
7521 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7522 		}
7523 	} else {
7524 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7525 	}
7526 
7527 	pkt->pkt_state = FC_PKT_SUCCESS;
7528 
7529 	/* Do command callback. */
7530 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7531 		ql_io_comp(sp);
7532 	}
7533 
7534 	QL_PRINT_3(ha, "done\n");
7535 }
7536 
7537 /*
7538  * ql_fcp_ip_cmd
7539  *	Process fibre channel (FCP) Internet (IP) protocols commands.
7540  *
7541  * Input:
7542  *	ha:	adapter state pointer.
7543  *	pkt:	pointer to fc_packet.
7544  *	sp:	SRB pointer.
7545  *
7546  * Returns:
7547  *	FC_SUCCESS - the packet was accepted for transport.
7548  *	FC_TRANSPORT_ERROR - a transport error occurred.
7549  *
7550  * Context:
7551  *	Kernel context.
7552  */
7553 static int
ql_fcp_ip_cmd(ql_adapter_state_t * ha,fc_packet_t * pkt,ql_srb_t * sp)7554 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7555 {
7556 	port_id_t	d_id;
7557 	ql_tgt_t	*tq;
7558 
7559 	QL_PRINT_3(ha, "started\n");
7560 
7561 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
7562 	if (tq == NULL) {
7563 		d_id.r.rsvd_1 = 0;
7564 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7565 		tq = ql_d_id_to_queue(ha, d_id);
7566 	}
7567 
7568 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7569 		/*
7570 		 * IP data is bound to pkt_cmd_dma
7571 		 */
7572 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
7573 		    0, 0, DDI_DMA_SYNC_FORDEV);
7574 
7575 		/* Setup IOCB count. */
7576 		sp->iocb = ha->ip_cmd;
7577 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7578 			uint32_t	cnt;
7579 
7580 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7581 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7582 			if (cnt % ha->cmd_cont_segs) {
7583 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7584 			} else {
7585 				sp->req_cnt++;
7586 			}
7587 		} else {
7588 			sp->req_cnt = 1;
7589 		}
7590 		QL_PRINT_3(ha, "done\n");
7591 
7592 		return (ql_start_cmd(ha, tq, pkt, sp));
7593 	} else {
7594 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7595 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7596 
7597 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7598 			ql_io_comp(sp);
7599 	}
7600 
7601 	QL_PRINT_3(ha, "done\n");
7602 
7603 	return (FC_SUCCESS);
7604 }
7605 
7606 /*
7607  * ql_fc_services
7608  *	Process fibre channel services (name server).
7609  *
7610  * Input:
7611  *	ha:	adapter state pointer.
7612  *	pkt:	pointer to fc_packet.
7613  *
7614  * Returns:
7615  *	FC_SUCCESS - the packet was accepted for transport.
7616  *	FC_TRANSPORT_ERROR - a transport error occurred.
7617  *
7618  * Context:
7619  *	Kernel context.
7620  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(ha, "started\n");

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		/* Response buffer cannot hold the advertised AIU size. */
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:

		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;

		/* Without a device/LUN queue the request cannot be sent. */
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/* Perform any deferred fabric login before sending. */
		if (tq->flags & TQF_LOGIN_NEEDED) {
			DEVICE_QUEUE_LOCK(tq);
			tq->flags &= ~TQF_LOGIN_NEEDED;
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_login_fport(ha, tq, tq->loop_id, LFF_NONE,
			    NULL);
		}
		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/*
		 * Setup IOCB count: one MS IOCB plus continuation IOCBs
		 * for response cookies beyond MS_DATA_SEGMENTS.
		 */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / ha->cmd_cont_segs);
			if (cnt % ha->cmd_cont_segs) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(ha, "done, ql_start_cmd=%xh\n", rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	/* Local failure path: synthesize an LS_RJT response. */
	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_io_comp((ql_srb_t *)pkt->pkt_fca_private);
	}

	QL_PRINT_3(ha, "done\n");

	return (FC_SUCCESS);
}
7731 
7732 /*
7733  * ql_cthdr_endian
7734  *	Change endianess of ct passthrough header and payload.
7735  *
7736  * Input:
7737  *	acc_handle:	DMA buffer access handle.
7738  *	ct_hdr:		Pointer to header.
7739  *	restore:	Restore first flag.
7740  *
7741  * Context:
7742  *	Interrupt or Kernel context, no mailbox commands allowed.
7743  */
7744 void
ql_cthdr_endian(ddi_acc_handle_t acc_handle,caddr_t ct_hdr,boolean_t restore)7745 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7746     boolean_t restore)
7747 {
7748 	uint8_t		i, *bp;
7749 	fc_ct_header_t	hdr;
7750 	uint32_t	*hdrp = (uint32_t *)&hdr;
7751 
7752 	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7753 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7754 
7755 	if (restore) {
7756 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7757 			*hdrp = BE_32(*hdrp);
7758 			hdrp++;
7759 		}
7760 	}
7761 
7762 	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7763 		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7764 
7765 		switch (hdr.ct_cmdrsp) {
7766 		case NS_GA_NXT:
7767 		case NS_GPN_ID:
7768 		case NS_GNN_ID:
7769 		case NS_GCS_ID:
7770 		case NS_GFT_ID:
7771 		case NS_GSPN_ID:
7772 		case NS_GPT_ID:
7773 		case NS_GID_FT:
7774 		case NS_GID_PT:
7775 		case NS_RPN_ID:
7776 		case NS_RNN_ID:
7777 		case NS_RSPN_ID:
7778 		case NS_DA_ID:
7779 			BIG_ENDIAN_32(bp);
7780 			break;
7781 		case NS_RFT_ID:
7782 		case NS_RCS_ID:
7783 		case NS_RPT_ID:
7784 			BIG_ENDIAN_32(bp);
7785 			bp += 4;
7786 			BIG_ENDIAN_32(bp);
7787 			break;
7788 		case NS_GNN_IP:
7789 		case NS_GIPA_IP:
7790 			BIG_ENDIAN(bp, 16);
7791 			break;
7792 		case NS_RIP_NN:
7793 			bp += 8;
7794 			BIG_ENDIAN(bp, 16);
7795 			break;
7796 		case NS_RIPA_NN:
7797 			bp += 8;
7798 			BIG_ENDIAN_64(bp);
7799 			break;
7800 		default:
7801 			break;
7802 		}
7803 	}
7804 
7805 	if (restore == B_FALSE) {
7806 		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7807 			*hdrp = BE_32(*hdrp);
7808 			hdrp++;
7809 		}
7810 	}
7811 
7812 	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7813 	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7814 }
7815 
7816 /*
7817  * ql_start_cmd
7818  *	Finishes starting fibre channel protocol (FCP) command.
7819  *
7820  * Input:
7821  *	ha:	adapter state pointer.
7822  *	tq:	target queue pointer.
7823  *	pkt:	pointer to fc_packet.
7824  *	sp:	SRB pointer.
7825  *
7826  * Context:
7827  *	Kernel context.
7828  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(ha, "started\n");

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/* Wait long enough for the watchdog to act first. */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd, B_FALSE);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/* Only the first panic-time command resets the ISP. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass the device queue; issue directly. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			/* NOTE: ql_next() drops DEVICE_QUEUE_LOCK on exit. */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS &&
		    pkt->pkt_state == FC_PKT_SUCCESS) {
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		(void) qlc_fm_check_pkt_dma_handle(ha, sp);
		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
7982 
7983 /*
7984  * ql_poll_cmd
7985  *	Polls commands for completion.
7986  *
7987  * Input:
7988  *	ha = adapter state pointer.
7989  *	sp = SRB command pointer.
7990  *	poll_wait = poll wait time in seconds.
7991  *
7992  * Returns:
7993  *	QL local function return status code.
7994  *
7995  * Context:
7996  *	Kernel context.
7997  */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	uint32_t		index;
	int			rval = QL_SUCCESS;
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* SRB_POLL is cleared by the completion path when the I/O ends. */
	while (sp->flags & SRB_POLL) {

		/*
		 * Service the hardware by hand when interrupts cannot be
		 * relied on: disabled, apparently stalled (idle >= 15s),
		 * panicking, or already running in interrupt context.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic() ||
		    curthread->t_flag & T_INTR_THREAD) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Mark that we are polling the hardware directly. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= POLL_INTR;
			ADAPTER_STATE_UNLOCK(ha);

			if (INTERRUPT_PENDING(ha)) {
				(void) ql_isr_aif((caddr_t)ha, 0);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}
			/* Without handshaking, poll every response queue. */
			if (ha->flags & NO_INTR_HANDSHAKE) {
				for (index = 0; index < ha->rsp_queues_cnt;
				    index++) {
					(void) ql_isr_aif((caddr_t)ha,
					    (caddr_t)((uintptr_t)(index + 1)));
				}
			}

			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~POLL_INTR;
			ADAPTER_STATE_UNLOCK(ha);

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ql_task_thread(ha);
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		if (msecs_left == 0) {
			if (rval == QL_SUCCESS) {
				/* First expiry: try to abort the command. */
				EL(ha, "timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
				if (ql_abort_io(ha, sp) == QL_SUCCESS) {
					sp->pkt->pkt_reason = CS_ABORTED;
					sp->cmd.next = NULL;
					ql_done(&sp->cmd, B_FALSE);
					break;
				}
				/* Abort failed: request ISP reset, allow 30s. */
				sp->flags |= SRB_COMMAND_TIMEOUT;
				EL(ha, "abort failed, isp_abort_needed\n");
				ql_awaken_task_daemon(ha, NULL,
				    ISP_ABORT_NEEDED, 0);
				msecs_left = 30 * 100;
			} else {
				/* Second expiry: give up. */
				break;
			}
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
8098 
8099 /*
8100  * ql_next
8101  *	Retrieve and process next job in the device queue.
8102  *
8103  * Input:
8104  *	ha:	adapter state pointer.
8105  *	lq:	LUN queue pointer.
8106  *	DEVICE_QUEUE_LOCK must be already obtained.
8107  *
8108  * Output:
8109  *	Releases DEVICE_QUEUE_LOCK upon exit.
8110  *
8111  * Context:
8112  *	Interrupt or Kernel context, no mailbox commands allowed.
8113  */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* During panic, commands are issued directly, not via the queue. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(ha, "panic/active exit\n");
		return;
	}

	/* Drain the LUN command queue while the device can accept work. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh sp=%ph\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id, sp);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(ha, "break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n",
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		tq->outcnt++;

		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(ha, "done\n");
}
8188 
8189 /*
8190  * ql_done
8191  *	Process completed commands.
8192  *
8193  * Input:
8194  *	link:	first command link in chain.
8195  *	cmplt:	do command complete call back.
8196  *
8197  * Context:
8198  *	Interrupt or Kernel context, no mailbox commands allowed.
8199  */
8200 void
ql_done(ql_link_t * link,boolean_t cmplt)8201 ql_done(ql_link_t *link, boolean_t cmplt)
8202 {
8203 	ql_adapter_state_t	*ha;
8204 	ql_link_t		*next_link;
8205 	ql_srb_t		*sp;
8206 	ql_tgt_t		*tq;
8207 	ql_lun_t		*lq;
8208 	uint64_t		set_flags;
8209 
8210 	QL_PRINT_3(NULL, "started\n");
8211 
8212 	for (; link != NULL; link = next_link) {
8213 		next_link = link->next;
8214 		sp = link->base_address;
8215 		link->prev = link->next = NULL;
8216 		link->head = NULL;
8217 		ha = sp->ha;
8218 		set_flags = 0;
8219 
8220 		if (sp->flags & SRB_UB_CALLBACK) {
8221 			QL_UB_LOCK(ha);
8222 			if (sp->flags & SRB_UB_IN_ISP) {
8223 				if (ha->ub_outcnt != 0) {
8224 					ha->ub_outcnt--;
8225 				}
8226 				if (ha->flags & IP_ENABLED) {
8227 					set_flags |= NEED_UNSOLICITED_BUFFERS;
8228 				}
8229 			}
8230 			QL_UB_UNLOCK(ha);
8231 			ql_awaken_task_daemon(ha, sp, set_flags, 0);
8232 		} else {
8233 			/* Free outstanding command slot. */
8234 			INTR_LOCK(ha);
8235 			if (sp->handle != 0) {
8236 				EL(ha, "free sp=%ph, sp->hdl=%xh\n",
8237 				    (void *)sp, sp->handle);
8238 				ha->pha->outstanding_cmds[
8239 				    sp->handle & OSC_INDEX_MASK] = NULL;
8240 				sp->handle = 0;
8241 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
8242 			}
8243 			INTR_UNLOCK(ha);
8244 
8245 			/* Acquire device queue lock. */
8246 			lq = sp->lun_queue;
8247 			tq = lq->target_queue;
8248 			DEVICE_QUEUE_LOCK(tq);
8249 
8250 			/* Decrement outstanding commands on device. */
8251 			if (tq->outcnt != 0) {
8252 				tq->outcnt--;
8253 			}
8254 
8255 			if (sp->flags & SRB_FCP_CMD_PKT) {
8256 				if (sp->fcp->fcp_cntl.cntl_qtype ==
8257 				    FCP_QTYPE_UNTAGGED) {
8258 					/*
8259 					 * Clear the flag for this LUN so that
8260 					 * untagged commands can be submitted
8261 					 * for it.
8262 					 */
8263 					lq->flags &= ~LQF_UNTAGGED_PENDING;
8264 				}
8265 
8266 				if (lq->lun_outcnt != 0) {
8267 					lq->lun_outcnt--;
8268 				}
8269 			}
8270 
8271 			/* Reset port down retry count on good completion. */
8272 			if (sp->pkt->pkt_reason == CS_COMPLETE) {
8273 				tq->port_down_retry_count =
8274 				    ha->port_down_retry_count;
8275 				tq->qfull_retry_count = ha->qfull_retry_count;
8276 			}
8277 
8278 
8279 			/* Alter aborted status for fast timeout feature */
8280 			if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
8281 			    (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
8282 			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
8283 			    sp->flags & SRB_RETRY &&
8284 			    (sp->flags & SRB_WATCHDOG_ENABLED &&
8285 			    sp->wdg_q_time > 1)) {
8286 				EL(ha, "fast abort modify change\n");
8287 				sp->flags &= ~(SRB_RETRY);
8288 				sp->pkt->pkt_reason = CS_TIMEOUT;
8289 			}
8290 
8291 			/* Place request back on top of target command queue */
8292 			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
8293 			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
8294 			    sp->flags & SRB_RETRY &&
8295 			    (sp->flags & SRB_WATCHDOG_ENABLED &&
8296 			    sp->wdg_q_time > 1)) {
8297 				sp->flags &= ~(SRB_ISP_STARTED |
8298 				    SRB_ISP_COMPLETED | SRB_RETRY);
8299 
8300 				/* Reset watchdog timer */
8301 				sp->wdg_q_time = sp->init_wdg_q_time;
8302 
8303 				/* Issue marker command on reset status. */
8304 				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
8305 				    (sp->pkt->pkt_reason == CS_RESET ||
8306 				    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
8307 				    sp->pkt->pkt_reason == CS_ABORTED))) {
8308 					(void) ql_marker(ha, tq->loop_id, 0,
8309 					    MK_SYNC_ID);
8310 				}
8311 
8312 				ql_add_link_t(&lq->cmd, &sp->cmd);
8313 				sp->flags |= SRB_IN_DEVICE_QUEUE;
8314 				ql_next(ha, lq);
8315 			} else {
8316 				/* Remove command from watchdog queue. */
8317 				if (sp->flags & SRB_WATCHDOG_ENABLED) {
8318 					ql_remove_link(&tq->wdg, &sp->wdg);
8319 					sp->flags &= ~SRB_WATCHDOG_ENABLED;
8320 				}
8321 
8322 				if (lq->cmd.first != NULL) {
8323 					ql_next(ha, lq);
8324 				} else {
8325 					/* Release LU queue specific lock. */
8326 					DEVICE_QUEUE_UNLOCK(tq);
8327 					if (ha->pha->pending_cmds.first !=
8328 					    NULL) {
8329 						ql_start_iocb(ha, NULL);
8330 					}
8331 				}
8332 
8333 				/* Sync buffers if required.  */
8334 				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
8335 					(void) ddi_dma_sync(
8336 					    sp->pkt->pkt_resp_dma,
8337 					    0, 0, DDI_DMA_SYNC_FORCPU);
8338 				}
8339 
8340 				/* Map ISP completion codes. */
8341 				sp->pkt->pkt_expln = FC_EXPLN_NONE;
8342 				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
8343 				switch (sp->pkt->pkt_reason) {
8344 				case CS_COMPLETE:
8345 					sp->pkt->pkt_state = FC_PKT_SUCCESS;
8346 					break;
8347 				case CS_RESET:
8348 					sp->pkt->pkt_state =
8349 					    FC_PKT_PORT_OFFLINE;
8350 					sp->pkt->pkt_reason =
8351 					    FC_REASON_ABORTED;
8352 					break;
8353 				case CS_RESOUCE_UNAVAILABLE:
8354 					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
8355 					sp->pkt->pkt_reason =
8356 					    FC_REASON_PKT_BUSY;
8357 					break;
8358 
8359 				case CS_TIMEOUT:
8360 					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
8361 					sp->pkt->pkt_reason =
8362 					    FC_REASON_HW_ERROR;
8363 					break;
8364 				case CS_DATA_OVERRUN:
8365 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8366 					sp->pkt->pkt_reason =
8367 					    FC_REASON_OVERRUN;
8368 					break;
8369 				case CS_PORT_UNAVAILABLE:
8370 				case CS_PORT_LOGGED_OUT:
8371 					sp->pkt->pkt_state =
8372 					    FC_PKT_PORT_OFFLINE;
8373 					sp->pkt->pkt_reason =
8374 					    FC_REASON_LOGIN_REQUIRED;
8375 					ql_send_logo(ha, tq, NULL);
8376 					break;
8377 				case CS_PORT_CONFIG_CHG:
8378 					sp->pkt->pkt_state =
8379 					    FC_PKT_PORT_OFFLINE;
8380 					sp->pkt->pkt_reason =
8381 					    FC_REASON_OFFLINE;
8382 					break;
8383 				case CS_QUEUE_FULL:
8384 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8385 					sp->pkt->pkt_reason = FC_REASON_QFULL;
8386 					break;
8387 
8388 				case CS_ABORTED:
8389 					DEVICE_QUEUE_LOCK(tq);
8390 					if (tq->flags & (TQF_RSCN_RCVD |
8391 					    TQF_NEED_AUTHENTICATION)) {
8392 						sp->pkt->pkt_state =
8393 						    FC_PKT_PORT_OFFLINE;
8394 						sp->pkt->pkt_reason =
8395 						    FC_REASON_LOGIN_REQUIRED;
8396 					} else {
8397 						sp->pkt->pkt_state =
8398 						    FC_PKT_LOCAL_RJT;
8399 						sp->pkt->pkt_reason =
8400 						    FC_REASON_ABORTED;
8401 					}
8402 					DEVICE_QUEUE_UNLOCK(tq);
8403 					break;
8404 
8405 				case CS_TRANSPORT:
8406 				case CS_DEV_NOT_READY:
8407 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8408 					sp->pkt->pkt_reason =
8409 					    FC_PKT_TRAN_ERROR;
8410 					break;
8411 
8412 				case CS_DATA_UNDERRUN:
8413 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8414 					sp->pkt->pkt_reason =
8415 					    FC_REASON_UNDERRUN;
8416 					break;
8417 				case CS_DMA_ERROR:
8418 				case CS_BAD_PAYLOAD:
8419 				case CS_UNKNOWN:
8420 				case CS_CMD_FAILED:
8421 				default:
8422 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
8423 					sp->pkt->pkt_reason =
8424 					    FC_REASON_HW_ERROR;
8425 					break;
8426 				}
8427 
8428 				(void) qlc_fm_check_pkt_dma_handle(ha, sp);
8429 
8430 				/* Now call the pkt completion callback */
8431 				if (sp->flags & SRB_POLL) {
8432 					sp->flags &= ~SRB_POLL;
8433 				} else if (cmplt == B_TRUE &&
8434 				    sp->pkt->pkt_comp) {
8435 					(sp->pkt->pkt_comp)(sp->pkt);
8436 				} else {
8437 					ql_io_comp(sp);
8438 				}
8439 			}
8440 		}
8441 	}
8442 
8443 	QL_PRINT_3(ha, "done\n");
8444 }
8445 
8446 /*
8447  * ql_awaken_task_daemon
8448  *	Adds command completion callback to callback queue and/or
8449  *	awakens task daemon thread.
8450  *
8451  * Input:
8452  *	ha:		adapter state pointer.
8453  *	sp:		srb pointer.
8454  *	set_flags:	task daemon flags to set.
8455  *	reset_flags:	task daemon flags to reset.
8456  *
8457  * Context:
8458  *	Interrupt or Kernel context, no mailbox commands allowed.
8459  */
8460 void
ql_awaken_task_daemon(ql_adapter_state_t * vha,ql_srb_t * sp,uint64_t set_flags,uint64_t reset_flags)8461 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8462     uint64_t set_flags, uint64_t reset_flags)
8463 {
8464 	ql_adapter_state_t	*ha = vha->pha;
8465 
8466 	QL_PRINT_3(ha, "started, sp=%p set_flags=%llx reset_flags=%llx\n",
8467 	    sp, set_flags, reset_flags);
8468 
8469 	/* Acquire task daemon lock. */
8470 	TASK_DAEMON_LOCK(ha);
8471 
8472 	if (set_flags) {
8473 		ha->task_daemon_flags |= set_flags;
8474 	}
8475 	if (reset_flags) {
8476 		ha->task_daemon_flags &= ~reset_flags;
8477 	}
8478 
8479 	if (!(ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG)) {
8480 		EL(ha, "done, not alive dtf=%xh\n", ha->task_daemon_flags);
8481 		TASK_DAEMON_UNLOCK(ha);
8482 		return;
8483 	}
8484 
8485 	if (sp != NULL) {
8486 		if (sp->flags & SRB_UB_CALLBACK) {
8487 			ql_add_link_b(&ha->unsol_callback_queue, &sp->cmd);
8488 		} else {
8489 			EL(ha, "sp=%p, spf=%xh is not SRB_UB_CALLBACK",
8490 			    sp->flags);
8491 		}
8492 	}
8493 
8494 	if (!ha->driver_thread_awake) {
8495 		QL_PRINT_3(ha, "driver_thread_awake\n");
8496 		cv_broadcast(&ha->cv_task_daemon);
8497 	}
8498 
8499 	TASK_DAEMON_UNLOCK(ha);
8500 
8501 	QL_PRINT_3(ha, "done\n");
8502 }
8503 
8504 /*
8505  * ql_task_daemon
 *	Thread that is awakened by the driver when background
 *	work needs to be done.
8508  *
8509  * Input:
8510  *	arg = adapter state pointer.
8511  *
8512  * Context:
8513  *	Kernel context.
8514  */
8515 static void
ql_task_daemon(void * arg)8516 ql_task_daemon(void *arg)
8517 {
8518 	ql_adapter_state_t	*ha = (void *)arg;
8519 
8520 	QL_PRINT_3(ha, "started\n");
8521 
8522 	/* Acquire task daemon lock. */
8523 	TASK_DAEMON_LOCK(ha);
8524 
8525 	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8526 		ql_task_thread(ha);
8527 
8528 		/*
8529 		 * Before we wait on the conditional variable, we
8530 		 * need to check if STOP_FLG is set for us to terminate
8531 		 */
8532 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8533 			break;
8534 		}
8535 
8536 		QL_PRINT_3(ha, "Going to sleep\n");
8537 		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8538 
8539 		/* If killed, stop task daemon */
8540 		if (cv_wait_sig(&ha->cv_task_daemon,
8541 		    &ha->task_daemon_mutex) == 0) {
8542 			QL_PRINT_10(ha, "killed\n");
8543 			break;
8544 		}
8545 
8546 		QL_PRINT_3(ha, "Awakened\n");
8547 		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8548 	}
8549 
8550 	ha->task_daemon_flags &= ~(TASK_DAEMON_SLEEPING_FLG |
8551 	    TASK_DAEMON_ALIVE_FLG);
8552 
8553 	TASK_DAEMON_UNLOCK(ha);
8554 
8555 	QL_PRINT_3(ha, "done\n");
8556 }
8557 
8558 /*
8559  * ql_task_thread
8560  *	Thread run by daemon.
8561  *
8562  * Input:
8563  *	ha = adapter state pointer.
8564  *	TASK_DAEMON_LOCK must be acquired prior to call.
8565  *
8566  * Context:
8567  *	Kernel context.
8568  */
static void
ql_task_thread(ql_adapter_state_t *ha)
{
	boolean_t		loop_again;
	ql_srb_t		*sp;
	ql_link_t		*link;
	caddr_t			msg;
	ql_adapter_state_t	*vha;

	/*
	 * Mark this thread busy; ql_awaken_task_daemon() skips its
	 * cv_broadcast() while driver_thread_awake is non-zero.
	 */
	ha->driver_thread_awake++;

	/*
	 * Each handler below follows the same pattern: drop the task
	 * daemon lock around work that may block, reacquire it, and
	 * set loop_again to force one more full pass over all the
	 * handlers before returning to the caller.
	 */
	do {
		loop_again = B_FALSE;

		/* Record flag transitions in the extended log. */
		if (ha->sf != ha->flags ||
		    (ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS) != ha->df ||
		    ha->cf != ha->cfg_flags) {
			ha->sf = ha->flags;
			ha->df = ha->task_daemon_flags & ~DTF_EL_MSG_SKIP_FLGS;
			ha->cf = ha->cfg_flags;
			EL(ha, "df=%xh, sf=%xh, cf=%xh\n",
			    ha->df, ha->sf, ha->cf);
		}

		/* Stall while the adapter is not at full power. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);
			ha->task_daemon_flags |= DRIVER_STALL |
			    TASK_DAEMON_STALLED_FLG;
			break;
		}
		QL_PM_UNLOCK(ha);

		if (ha->flags & ADAPTER_SUSPENDED) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}

		/* Handle FW IDC events. */
		while (ha->flags & (IDC_STALL_NEEDED | IDC_RESTART_NEEDED |
		    IDC_ACK_NEEDED)) {
			TASK_DAEMON_UNLOCK(ha);
			ql_idc(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags &
		    (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
		    !(ha->flags & ONLINE)) {
			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
			break;
		}
		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;

		/* Store error log. */
		if (ha->errlog[0] != 0 &&
		    !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_flash_errlog(ha, ha->errlog[0],
			    ha->errlog[1], ha->errlog[2], ha->errlog[3]);
			ha->errlog[0] = 0;
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* Idle Check. */
		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
			if (!DRIVER_SUSPENDED(ha)) {
				TASK_DAEMON_UNLOCK(ha);
				ql_idle_check(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = B_TRUE;
			}
		}

		/* Crystal+ port#0 bypass transition */
		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_initiate_lip(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* Abort queues needed. */
		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
			if (ha->flags & ABORT_CMDS_LOOP_DOWN_TMO) {
				TASK_DAEMON_UNLOCK(ha);
				ql_abort_queues(ha);
				TASK_DAEMON_LOCK(ha);
				loop_again = B_TRUE;
			}
		}

		/* Not suspended, awaken waiting routines. */
		if (!DRIVER_SUSPENDED(ha) &&
		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
			cv_broadcast(&ha->cv_dr_suspended);
			loop_again = B_TRUE;
		}

		/* Handle RSCN changes. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
				(void) ql_handle_rscn_update(vha);
				TASK_DAEMON_LOCK(ha);
				loop_again = B_TRUE;
			}
		}

		/* Handle state changes, for each vport on this adapter. */
		for (vha = ha; vha != NULL; vha = vha->vp_next) {
			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
			    !(ha->task_daemon_flags &
			    TASK_DAEMON_POWERING_DOWN)) {
				/* Report state change. */
				EL(vha, "state change = %xh\n", vha->state);
				vha->task_daemon_flags &= ~FC_STATE_CHANGE;

				if (vha->task_daemon_flags &
				    COMMAND_WAIT_NEEDED) {
					vha->task_daemon_flags &=
					    ~COMMAND_WAIT_NEEDED;
					/*
					 * COMMAND_WAIT_ACTIVE guards
					 * against re-entering ql_cmd_wait()
					 * from a nested pass.
					 */
					if (!(ha->task_daemon_flags &
					    COMMAND_WAIT_ACTIVE)) {
						ha->task_daemon_flags |=
						    COMMAND_WAIT_ACTIVE;
						TASK_DAEMON_UNLOCK(ha);
						ql_cmd_wait(ha);
						TASK_DAEMON_LOCK(ha);
						ha->task_daemon_flags &=
						    ~COMMAND_WAIT_ACTIVE;
						loop_again = B_TRUE;
					}
				}

				/* Compose the console message, if any. */
				msg = NULL;
				if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_OFFLINE) {
					if (vha->task_daemon_flags &
					    STATE_ONLINE) {
						if (ha->topology &
						    QL_LOOP_CONNECTION) {
							msg = "Loop OFFLINE";
						} else {
							msg = "Link OFFLINE";
						}
					}
					vha->task_daemon_flags &=
					    ~STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_LOOP) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Loop ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else if (FC_PORT_STATE_MASK(vha->state) ==
				    FC_STATE_ONLINE) {
					if (!(vha->task_daemon_flags &
					    STATE_ONLINE)) {
						msg = "Link ONLINE";
					}
					vha->task_daemon_flags |= STATE_ONLINE;
				} else {
					msg = "Unknown Link state";
				}

				if (msg != NULL) {
					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
					    "%s", QL_NAME, ha->instance,
					    vha->vp_index, msg);
				}

				/* Notify the bound transport, if any. */
				if (vha->flags & FCA_BOUND) {
					QL_PRINT_10(vha, "statec_"
					    "cb state=%xh\n",
					    vha->state);
					TASK_DAEMON_UNLOCK(ha);
					(vha->bind_info.port_statec_cb)
					    (vha->bind_info.port_handle,
					    vha->state);
					TASK_DAEMON_LOCK(ha);
					loop_again = B_TRUE;
				}
			}
		}

		if (ha->task_daemon_flags & NEED_UNSOLICITED_BUFFERS &&
		    ha->task_daemon_flags & FIRMWARE_UP) {
			/*
			 * The firmware needs more unsolicited
			 * buffers. We cannot allocate any new
			 * buffers unless the ULP module requests
			 * for new buffers. All we can do here is
			 * to give received buffers from the pool
			 * that is already allocated
			 */
			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
			TASK_DAEMON_UNLOCK(ha);
			ql_isp_rcvbuf(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & WATCHDOG_NEEDED) {
			ha->task_daemon_flags &= ~WATCHDOG_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_watchdog(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_abort_isp(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/*
		 * Marker and loop-resync work runs only when firmware is
		 * up and no abort/wait/loop-down handling is pending.
		 */
		if (!(ha->task_daemon_flags & (COMMAND_WAIT_NEEDED |
		    ABORT_QUEUES_NEEDED | ISP_ABORT_NEEDED | LOOP_DOWN)) &&
		    ha->task_daemon_flags & FIRMWARE_UP) {
			if (ha->task_daemon_flags & MARKER_NEEDED) {
				if (!(ha->task_daemon_flags & MARKER_ACTIVE)) {
					ha->task_daemon_flags |= MARKER_ACTIVE;
					ha->task_daemon_flags &= ~MARKER_NEEDED;
					TASK_DAEMON_UNLOCK(ha);
					/* Sync every vport with the ISP. */
					for (vha = ha; vha != NULL;
					    vha = vha->vp_next) {
						(void) ql_marker(vha, 0, 0,
						    MK_SYNC_ALL);
					}
					TASK_DAEMON_LOCK(ha);
					ha->task_daemon_flags &= ~MARKER_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					ql_restart_queues(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = B_TRUE;
				} else {
					ha->task_daemon_flags &= ~MARKER_NEEDED;
				}
			}

			if (ha->task_daemon_flags & LOOP_RESYNC_NEEDED) {
				if (!(ha->task_daemon_flags &
				    LOOP_RESYNC_ACTIVE)) {
					ha->task_daemon_flags |=
					    LOOP_RESYNC_ACTIVE;
					TASK_DAEMON_UNLOCK(ha);
					ql_loop_resync(ha);
					TASK_DAEMON_LOCK(ha);
					loop_again = B_TRUE;
				}
			}
		}

		/* Port retry needed. */
		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
			ADAPTER_STATE_LOCK(ha);
			ha->port_retry_timer = 0;
			ADAPTER_STATE_UNLOCK(ha);

			TASK_DAEMON_UNLOCK(ha);
			ql_restart_queues(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* iiDMA setting needed? */
		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_iidma(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & SEND_PLOGI) {
			ha->task_daemon_flags &= ~SEND_PLOGI;
			TASK_DAEMON_UNLOCK(ha);
			(void) ql_n_port_plogi(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		/* Drain one unsolicited buffer callback per pass. */
		if (ha->unsol_callback_queue.first != NULL) {
			sp = (ha->unsol_callback_queue.first)->base_address;
			link = &sp->cmd;
			ql_remove_link(&ha->unsol_callback_queue, link);
			TASK_DAEMON_UNLOCK(ha);
			ql_unsol_callback(sp);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & IDC_POLL_NEEDED) {
			ha->task_daemon_flags &= ~IDC_POLL_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			ql_8021_idc_poll(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

		if (ha->task_daemon_flags & LED_BLINK) {
			ha->task_daemon_flags &= ~LED_BLINK;
			TASK_DAEMON_UNLOCK(ha);
			ql_blink_led(ha);
			TASK_DAEMON_LOCK(ha);
			loop_again = B_TRUE;
		}

	} while (loop_again == B_TRUE);

	if (ha->driver_thread_awake) {
		ha->driver_thread_awake--;
	}
	QL_PRINT_3(ha, "done\n");
}
8894 
8895 /*
8896  * ql_idle_check
8897  *	Test for adapter is alive and well.
8898  *
8899  * Input:
8900  *	ha:	adapter state pointer.
8901  *
8902  * Context:
8903  *	Kernel context.
8904  */
8905 static void
ql_idle_check(ql_adapter_state_t * ha)8906 ql_idle_check(ql_adapter_state_t *ha)
8907 {
8908 	int		rval;
8909 	ql_mbx_data_t	mr;
8910 
8911 	QL_PRINT_3(ha, "started\n");
8912 
8913 	/* Firmware Ready Test. */
8914 	rval = ql_get_firmware_state(ha, &mr);
8915 	if (!DRIVER_SUSPENDED(ha) &&
8916 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8917 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8918 		TASK_DAEMON_LOCK(ha);
8919 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8920 			EL(ha, "fstate_ready, isp_abort_needed\n");
8921 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8922 		}
8923 		TASK_DAEMON_UNLOCK(ha);
8924 	}
8925 
8926 	QL_PRINT_3(ha, "done\n");
8927 }
8928 
8929 /*
8930  * ql_unsol_callback
8931  *	Handle unsolicited buffer callbacks.
8932  *
8933  * Input:
8934  *	ha = adapter state pointer.
8935  *	sp = srb pointer.
8936  *
8937  * Context:
8938  *	Kernel context.
8939  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(ha, "started\n");

	/* sp->handle indexes the adapter's unsolicited buffer array. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		/*
		 * The buffer is being freed or the adapter is powering
		 * down: return it to the FCA without notifying the
		 * transport.
		 */
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		/* Affected-ID list begins 4 bytes into the payload. */
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Requeue this callback for a later retry. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(ha, "done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n",
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Device still has work pending; retry the callback later. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->unsol_callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(ha, "logo_sendup=0, d_id=%xh"
			    "\n", tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_PLOGI)) {
		EL(ha, "sending unsol plogi for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Make device-written data visible to the CPU first. */
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the buffer up to the bound transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(ha, "done\n");
}
9046 
9047 /*
9048  * ql_send_logo
9049  *
9050  * Input:
9051  *	ha:	adapter state pointer.
9052  *	tq:	target queue pointer.
9053  *	done_q:	done queue pointer.
9054  *
9055  * Context:
9056  *	Interrupt or Kernel context, no mailbox commands allowed.
9057  */
9058 void
ql_send_logo(ql_adapter_state_t * vha,ql_tgt_t * tq,ql_head_t * done_q)9059 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
9060 {
9061 	fc_unsol_buf_t		*ubp;
9062 	ql_srb_t		*sp;
9063 	la_els_logo_t		*payload;
9064 	ql_adapter_state_t	*ha = vha->pha;
9065 
9066 	QL_PRINT_3(ha, "started, d_id=%xh\n", tq->d_id.b24);
9067 
9068 	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == FS_BROADCAST)) {
9069 		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
9070 		return;
9071 	}
9072 
9073 	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
9074 	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
9075 
9076 		/* Locate a buffer to use. */
9077 		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
9078 		if (ubp == NULL) {
9079 			EL(vha, "Failed, get_unsolicited_buffer\n");
9080 			return;
9081 		}
9082 
9083 		DEVICE_QUEUE_LOCK(tq);
9084 		tq->flags |= TQF_NEED_AUTHENTICATION;
9085 		tq->logout_sent++;
9086 		DEVICE_QUEUE_UNLOCK(tq);
9087 
9088 		sp = ubp->ub_fca_private;
9089 
9090 		/* Set header. */
9091 		ubp->ub_frame.d_id = vha->d_id.b24;
9092 		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9093 		ubp->ub_frame.s_id = tq->d_id.b24;
9094 		ubp->ub_frame.rsvd = 0;
9095 		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9096 		    F_CTL_SEQ_INITIATIVE;
9097 		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9098 		ubp->ub_frame.seq_cnt = 0;
9099 		ubp->ub_frame.df_ctl = 0;
9100 		ubp->ub_frame.seq_id = 0;
9101 		ubp->ub_frame.rx_id = 0xffff;
9102 		ubp->ub_frame.ox_id = 0xffff;
9103 
9104 		/* set payload. */
9105 		payload = (la_els_logo_t *)ubp->ub_buffer;
9106 		bzero(payload, sizeof (la_els_logo_t));
9107 		/* Make sure ls_code in payload is always big endian */
9108 		ubp->ub_buffer[0] = LA_ELS_LOGO;
9109 		ubp->ub_buffer[1] = 0;
9110 		ubp->ub_buffer[2] = 0;
9111 		ubp->ub_buffer[3] = 0;
9112 		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
9113 		    &payload->nport_ww_name.raw_wwn[0], 8);
9114 		payload->nport_id.port_id = tq->d_id.b24;
9115 
9116 		QL_UB_LOCK(ha);
9117 		sp->flags |= SRB_UB_CALLBACK;
9118 		QL_UB_UNLOCK(ha);
9119 		if (tq->lun_queues.first != NULL) {
9120 			sp->lun_queue = (tq->lun_queues.first)->base_address;
9121 		} else {
9122 			sp->lun_queue = ql_lun_queue(vha, tq, 0);
9123 		}
9124 		if (done_q) {
9125 			ql_add_link_b(done_q, &sp->cmd);
9126 		} else {
9127 			ql_awaken_task_daemon(ha, sp, 0, 0);
9128 		}
9129 	}
9130 
9131 	QL_PRINT_3(ha, "done\n");
9132 }
9133 
9134 static int
ql_process_logo_for_device(ql_adapter_state_t * ha,ql_tgt_t * tq)9135 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
9136 {
9137 	port_id_t	d_id;
9138 	ql_srb_t	*sp;
9139 	ql_link_t	*link;
9140 	int		sendup = 1;
9141 
9142 	QL_PRINT_3(ha, "started\n");
9143 
9144 	DEVICE_QUEUE_LOCK(tq);
9145 	if (tq->outcnt) {
9146 		DEVICE_QUEUE_UNLOCK(tq);
9147 		sendup = 0;
9148 		(void) ql_abort_device(ha, tq, 1);
9149 		ql_delay(ha, 10000);
9150 	} else {
9151 		DEVICE_QUEUE_UNLOCK(tq);
9152 		TASK_DAEMON_LOCK(ha);
9153 
9154 		for (link = ha->pha->unsol_callback_queue.first; link != NULL;
9155 		    link = link->next) {
9156 			sp = link->base_address;
9157 			if (sp->flags & SRB_UB_CALLBACK) {
9158 				continue;
9159 			}
9160 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
9161 
9162 			if (tq->d_id.b24 == d_id.b24) {
9163 				sendup = 0;
9164 				break;
9165 			}
9166 		}
9167 
9168 		TASK_DAEMON_UNLOCK(ha);
9169 	}
9170 
9171 	QL_PRINT_3(ha, "done\n");
9172 
9173 	return (sendup);
9174 }
9175 
9176 static int
ql_send_plogi(ql_adapter_state_t * ha,ql_tgt_t * tq,ql_head_t * done_q)9177 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
9178 {
9179 	fc_unsol_buf_t		*ubp;
9180 	ql_srb_t		*sp;
9181 	la_els_logi_t		*payload;
9182 	class_svc_param_t	*class3_param;
9183 
9184 	QL_PRINT_3(ha, "started\n");
9185 
9186 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
9187 	    LOOP_DOWN)) {
9188 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
9189 		return (QL_FUNCTION_FAILED);
9190 	}
9191 
9192 	/* Locate a buffer to use. */
9193 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
9194 	if (ubp == NULL) {
9195 		EL(ha, "Failed\n");
9196 		return (QL_FUNCTION_FAILED);
9197 	}
9198 
9199 	QL_PRINT_3(ha, "Received LOGO from = %xh\n", tq->d_id.b24);
9200 
9201 	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
9202 
9203 	sp = ubp->ub_fca_private;
9204 
9205 	/* Set header. */
9206 	ubp->ub_frame.d_id = ha->d_id.b24;
9207 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
9208 	ubp->ub_frame.s_id = tq->d_id.b24;
9209 	ubp->ub_frame.rsvd = 0;
9210 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
9211 	    F_CTL_SEQ_INITIATIVE;
9212 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
9213 	ubp->ub_frame.seq_cnt = 0;
9214 	ubp->ub_frame.df_ctl = 0;
9215 	ubp->ub_frame.seq_id = 0;
9216 	ubp->ub_frame.rx_id = 0xffff;
9217 	ubp->ub_frame.ox_id = 0xffff;
9218 
9219 	/* set payload. */
9220 	payload = (la_els_logi_t *)ubp->ub_buffer;
9221 	bzero(payload, sizeof (la_els_logi_t));
9222 
9223 	payload->ls_code.ls_code = LA_ELS_PLOGI;
9224 	payload->common_service.fcph_version = 0x2006;
9225 	payload->common_service.cmn_features =
9226 	    ha->topology & QL_N_PORT ? 0x8000 : 0x8800;
9227 	payload->common_service.rx_bufsize =
9228 	    ha->loginparams.common_service.rx_bufsize;
9229 	payload->common_service.conc_sequences = 0xff;
9230 	payload->common_service.relative_offset = 0x03;
9231 	payload->common_service.e_d_tov = 0x7d0;
9232 
9233 	bcopy((void *)&tq->port_name[0],
9234 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
9235 
9236 	bcopy((void *)&tq->node_name[0],
9237 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
9238 
9239 	class3_param = (class_svc_param_t *)&payload->class_3;
9240 	class3_param->class_valid_svc_opt = 0x8000;
9241 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
9242 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
9243 	class3_param->conc_sequences = tq->class3_conc_sequences;
9244 	class3_param->open_sequences_per_exch =
9245 	    tq->class3_open_sequences_per_exch;
9246 
9247 	QL_UB_LOCK(ha);
9248 	sp->flags |= SRB_UB_CALLBACK;
9249 	QL_UB_UNLOCK(ha);
9250 
9251 	if (done_q) {
9252 		ql_add_link_b(done_q, &sp->cmd);
9253 	} else {
9254 		ql_awaken_task_daemon(ha, sp, 0, 0);
9255 	}
9256 
9257 	QL_PRINT_3(ha, "done\n");
9258 
9259 	return (QL_SUCCESS);
9260 }
9261 
9262 /*
9263  * Abort outstanding commands in the Firmware, clear internally
9264  * queued commands in the driver, Synchronize the target with
9265  * the Firmware
9266  */
9267 int
ql_abort_device(ql_adapter_state_t * ha,ql_tgt_t * tq,int drain)9268 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
9269 {
9270 	ql_link_t	*link, *link2;
9271 	ql_lun_t	*lq;
9272 	int		rval = QL_SUCCESS;
9273 	ql_srb_t	*sp;
9274 	ql_head_t	done_q = { NULL, NULL };
9275 
9276 	QL_PRINT_10(ha, "started\n");
9277 
9278 	/*
9279 	 * First clear, internally queued commands
9280 	 */
9281 	DEVICE_QUEUE_LOCK(tq);
9282 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
9283 		lq = link->base_address;
9284 
9285 		link2 = lq->cmd.first;
9286 		while (link2 != NULL) {
9287 			sp = link2->base_address;
9288 			link2 = link2->next;
9289 
9290 			/* Remove srb from device command queue. */
9291 			ql_remove_link(&lq->cmd, &sp->cmd);
9292 			sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9293 
9294 			/* Set ending status. */
9295 			sp->pkt->pkt_reason = CS_ABORTED;
9296 
9297 			/* Call done routine to handle completions. */
9298 			ql_add_link_b(&done_q, &sp->cmd);
9299 		}
9300 	}
9301 	DEVICE_QUEUE_UNLOCK(tq);
9302 
9303 	if (done_q.first != NULL) {
9304 		ql_done(done_q.first, B_FALSE);
9305 	}
9306 
9307 	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
9308 		rval = ql_abort_target(ha, tq, 0);
9309 	}
9310 
9311 	if (rval != QL_SUCCESS) {
9312 		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
9313 	} else {
9314 		/*EMPTY*/
9315 		QL_PRINT_10(ha, "done\n");
9316 	}
9317 
9318 	return (rval);
9319 }
9320 
9321 /*
9322  * ql_rcv_rscn_els
9323  *	Processes received RSCN extended link service.
9324  *
9325  * Input:
9326  *	ha:	adapter state pointer.
9327  *	mb:	array containing input mailbox registers.
9328  *	done_q:	done queue pointer.
9329  *
9330  * Context:
9331  *	Interrupt or Kernel context, no mailbox commands allowed.
9332  */
void
ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	fc_rscn_t		*rn;
	fc_affected_id_t	*af;
	port_id_t		d_id;

	QL_PRINT_3(ha, "started\n");

	/* Locate a buffer to use. */
	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
	if (ubp != NULL) {
		sp = ubp->ub_fca_private;

		/*
		 * Set header.  The frame is synthesized as if it came from
		 * the fabric controller so the transport can route it.
		 */
		ubp->ub_frame.d_id = ha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		rn = (fc_rscn_t *)ubp->ub_buffer;
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		rn->rscn_code = LA_ELS_RSCN;
		rn->rscn_len = 4;
		rn->rscn_payload_len = 8;
		/* Affected port id arrives packed in mailbox regs 1-2. */
		d_id.b.al_pa = LSB(mb[2]);
		d_id.b.area = MSB(mb[2]);
		d_id.b.domain =	LSB(mb[1]);
		af->aff_d_id = d_id.b24;
		af->aff_format = MSB(mb[1]);

		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
		    af->aff_d_id);

		/* Mark affected device queues before passing the ELS up. */
		ql_update_rscn(ha, af);

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
		QL_UB_UNLOCK(ha);
		/* Queue the buffer for completion-side callback. */
		ql_add_link_b(done_q, &sp->cmd);
	}

	if (ubp == NULL) {
		EL(ha, "Failed, get_unsolicited_buffer\n");
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
}
9394 
9395 /*
9396  * ql_update_rscn
9397  *	Update devices from received RSCN.
9398  *
9399  * Input:
9400  *	ha:	adapter state pointer.
9401  *	af:	pointer to RSCN data.
9402  *
9403  * Context:
9404  *	Interrupt or Kernel context, no mailbox commands allowed.
9405  */
9406 static void
ql_update_rscn(ql_adapter_state_t * ha,fc_affected_id_t * af)9407 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9408 {
9409 	ql_link_t	*link;
9410 	uint16_t	index;
9411 	ql_tgt_t	*tq;
9412 
9413 	QL_PRINT_3(ha, "started\n");
9414 
9415 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9416 		port_id_t d_id;
9417 
9418 		d_id.r.rsvd_1 = 0;
9419 		d_id.b24 = af->aff_d_id;
9420 
9421 		tq = ql_d_id_to_queue(ha, d_id);
9422 		if (tq) {
9423 			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
9424 			DEVICE_QUEUE_LOCK(tq);
9425 			tq->flags |= TQF_RSCN_RCVD;
9426 			ql_requeue_pending_cmds(ha, tq);
9427 			DEVICE_QUEUE_UNLOCK(tq);
9428 		}
9429 		QL_PRINT_3(ha, "FC_RSCN_PORT_ADDRESS done\n");
9430 
9431 		return;
9432 	}
9433 
9434 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9435 		for (link = ha->dev[index].first; link != NULL;
9436 		    link = link->next) {
9437 			tq = link->base_address;
9438 
9439 			switch (af->aff_format) {
9440 			case FC_RSCN_FABRIC_ADDRESS:
9441 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9442 					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9443 					    tq->d_id.b24);
9444 					DEVICE_QUEUE_LOCK(tq);
9445 					tq->flags |= TQF_RSCN_RCVD;
9446 					ql_requeue_pending_cmds(ha, tq);
9447 					DEVICE_QUEUE_UNLOCK(tq);
9448 				}
9449 				break;
9450 
9451 			case FC_RSCN_AREA_ADDRESS:
9452 				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9453 					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9454 					    tq->d_id.b24);
9455 					DEVICE_QUEUE_LOCK(tq);
9456 					tq->flags |= TQF_RSCN_RCVD;
9457 					ql_requeue_pending_cmds(ha, tq);
9458 					DEVICE_QUEUE_UNLOCK(tq);
9459 				}
9460 				break;
9461 
9462 			case FC_RSCN_DOMAIN_ADDRESS:
9463 				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9464 					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9465 					    tq->d_id.b24);
9466 					DEVICE_QUEUE_LOCK(tq);
9467 					tq->flags |= TQF_RSCN_RCVD;
9468 					ql_requeue_pending_cmds(ha, tq);
9469 					DEVICE_QUEUE_UNLOCK(tq);
9470 				}
9471 				break;
9472 
9473 			default:
9474 				break;
9475 			}
9476 		}
9477 	}
9478 	QL_PRINT_3(ha, "done\n");
9479 }
9480 
9481 /*
9482  * ql_requeue_pending_cmds
9483  *	Requeue target commands from pending queue to LUN queue
9484  *
9485  * Input:
9486  *	ha:	adapter state pointer.
9487  *	tq:	target queue pointer.
9488  *	DEVICE_QUEUE_LOCK must be already obtained.
9489  *
9490  * Context:
9491  *	Interrupt or Kernel context, no mailbox commands allowed.
9492  */
9493 void
ql_requeue_pending_cmds(ql_adapter_state_t * vha,ql_tgt_t * tq)9494 ql_requeue_pending_cmds(ql_adapter_state_t *vha, ql_tgt_t *tq)
9495 {
9496 	ql_link_t		*link;
9497 	ql_srb_t		*sp;
9498 	ql_lun_t		*lq;
9499 	ql_adapter_state_t	*ha = vha->pha;
9500 
9501 	QL_PRINT_3(ha, "started\n");
9502 
9503 	REQUEST_RING_LOCK(ha);
9504 	for (link = ha->pending_cmds.first; link != NULL; link = link->next) {
9505 		sp = link->base_address;
9506 		if ((lq = sp->lun_queue) == NULL || lq->target_queue != tq) {
9507 			continue;
9508 		}
9509 		ql_remove_link(&ha->pending_cmds, &sp->cmd);
9510 
9511 		if (tq->outcnt) {
9512 			tq->outcnt--;
9513 		}
9514 		if (sp->flags & SRB_FCP_CMD_PKT) {
9515 			if (sp->fcp->fcp_cntl.cntl_qtype ==
9516 			    FCP_QTYPE_UNTAGGED) {
9517 				lq->flags &= ~LQF_UNTAGGED_PENDING;
9518 			}
9519 			if (lq->lun_outcnt != 0) {
9520 				lq->lun_outcnt--;
9521 			}
9522 		}
9523 		ql_add_link_t(&lq->cmd, &sp->cmd);
9524 		sp->flags |= SRB_IN_DEVICE_QUEUE;
9525 	}
9526 	REQUEST_RING_UNLOCK(ha);
9527 
9528 	QL_PRINT_3(ha, "done\n");
9529 }
9530 
9531 /*
9532  * ql_process_rscn
9533  *
9534  * Input:
9535  *	ha:	adapter state pointer.
9536  *	af:	RSCN payload pointer.
9537  *
9538  * Context:
9539  *	Kernel context.
9540  */
9541 static int
ql_process_rscn(ql_adapter_state_t * ha,fc_affected_id_t * af)9542 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
9543 {
9544 	int		sendit;
9545 	int		sendup = 1;
9546 	ql_link_t	*link;
9547 	uint16_t	index;
9548 	ql_tgt_t	*tq;
9549 
9550 	QL_PRINT_3(ha, "started\n");
9551 
9552 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9553 		port_id_t d_id;
9554 
9555 		d_id.r.rsvd_1 = 0;
9556 		d_id.b24 = af->aff_d_id;
9557 
9558 		tq = ql_d_id_to_queue(ha, d_id);
9559 		if (tq) {
9560 			sendup = ql_process_rscn_for_device(ha, tq);
9561 		}
9562 
9563 		QL_PRINT_3(ha, "done\n");
9564 
9565 		return (sendup);
9566 	}
9567 
9568 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9569 		for (link = ha->dev[index].first; link != NULL;
9570 		    link = link->next) {
9571 
9572 			tq = link->base_address;
9573 			if (tq == NULL) {
9574 				continue;
9575 			}
9576 
9577 			switch (af->aff_format) {
9578 			case FC_RSCN_FABRIC_ADDRESS:
9579 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9580 					sendit = ql_process_rscn_for_device(
9581 					    ha, tq);
9582 					if (sendup) {
9583 						sendup = sendit;
9584 					}
9585 				}
9586 				break;
9587 
9588 			case FC_RSCN_AREA_ADDRESS:
9589 				if ((tq->d_id.b24 & 0xffff00) ==
9590 				    af->aff_d_id) {
9591 					sendit = ql_process_rscn_for_device(
9592 					    ha, tq);
9593 
9594 					if (sendup) {
9595 						sendup = sendit;
9596 					}
9597 				}
9598 				break;
9599 
9600 			case FC_RSCN_DOMAIN_ADDRESS:
9601 				if ((tq->d_id.b24 & 0xff0000) ==
9602 				    af->aff_d_id) {
9603 					sendit = ql_process_rscn_for_device(
9604 					    ha, tq);
9605 
9606 					if (sendup) {
9607 						sendup = sendit;
9608 					}
9609 				}
9610 				break;
9611 
9612 			default:
9613 				break;
9614 			}
9615 		}
9616 	}
9617 
9618 	QL_PRINT_3(ha, "done\n");
9619 
9620 	return (sendup);
9621 }
9622 
9623 /*
9624  * ql_process_rscn_for_device
9625  *
9626  * Input:
9627  *	ha:	adapter state pointer.
9628  *	tq:	target queue pointer.
9629  *
9630  * Context:
9631  *	Kernel context.
9632  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int sendup = 1;	/* 1 = OK to forward the RSCN to the transport */

	QL_PRINT_3(ha, "started\n");

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		/* Must drop the queue lock around the mailbox-type call. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		/* Logged-in non-FCP-2 device: force re-authentication. */
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		/* ql_abort_device() takes the queue lock internally. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Hold off the RSCN while commands are still outstanding. */
		if (tq->outcnt) {
			EL(ha, "busy tq->outcnt=%d\n", tq->outcnt);
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* No login to tear down; just clear the RSCN flag. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(ha, "done\n");

	return (sendup);
}
9691 
/*
 * ql_handle_rscn_update
 *	Gets the firmware's current d_id list and creates device queues
 *	(plus an unsolicited PLOGI up-call) for any device not already known.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(ha, "started\n");

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL; defensive check only. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices; mr.mb[1] is the entry count. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known? Nothing to do for this entry. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (ha->topology & QL_F_PORT ||
			    d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Drop state lock around the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (done_q.first != NULL) {
		ql_done(done_q.first, B_FALSE);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
9782 
9783 /*
9784  * ql_free_unsolicited_buffer
9785  *	Frees allocated buffer.
9786  *
9787  * Input:
9788  *	ha = adapter state pointer.
 *	ubp = unsolicited buffer pointer.
9790  *	ADAPTER_STATE_LOCK must be already obtained.
9791  *
9792  * Context:
9793  *	Kernel context.
9794  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(ha, "started\n");

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			status = ql_shutdown_ip(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/*
				 * NOTE(review): early return leaves the
				 * srb/ubp allocated and ub_allocated
				 * undecremented — presumably intentional
				 * since the buffer may still be in use by
				 * the firmware; verify against callers.
				 */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory; others are kmem. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Account for the released buffer. */
	QL_UB_LOCK(ha);
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
9834 
9835 /*
9836  * ql_get_unsolicited_buffer
9837  *	Locates a free unsolicited buffer.
9838  *
9839  * Input:
9840  *	ha = adapter state pointer.
9841  *	type = buffer type.
9842  *
9843  * Returns:
9844  *	Unsolicited buffer pointer.
9845  *
9846  * Context:
9847  *	Interrupt or Kernel context, no mailbox commands allowed.
9848  */
9849 fc_unsol_buf_t *
ql_get_unsolicited_buffer(ql_adapter_state_t * ha,uint32_t type)9850 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9851 {
9852 	fc_unsol_buf_t	*ubp;
9853 	ql_srb_t	*sp;
9854 	uint16_t	index;
9855 
9856 	QL_PRINT_3(ha, "started\n");
9857 
9858 	/* Locate a buffer to use. */
9859 	ubp = NULL;
9860 
9861 	QL_UB_LOCK(ha);
9862 	for (index = 0; index < QL_UB_LIMIT; index++) {
9863 		ubp = ha->ub_array[index];
9864 		if (ubp != NULL) {
9865 			sp = ubp->ub_fca_private;
9866 			if ((sp->ub_type == type) &&
9867 			    (sp->flags & SRB_UB_IN_FCA) &&
9868 			    (!(sp->flags & (SRB_UB_CALLBACK |
9869 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9870 				sp->flags |= SRB_UB_ACQUIRED;
9871 				ubp->ub_resp_flags = 0;
9872 				break;
9873 			}
9874 			ubp = NULL;
9875 		}
9876 	}
9877 	QL_UB_UNLOCK(ha);
9878 
9879 	if (ubp) {
9880 		ubp->ub_resp_token = NULL;
9881 		ubp->ub_class = FC_TRAN_CLASS3;
9882 	}
9883 
9884 	QL_PRINT_3(ha, "done\n");
9885 
9886 	return (ubp);
9887 }
9888 
9889 /*
9890  * ql_ub_frame_hdr
9891  *	Processes received unsolicited buffers from ISP.
9892  *
9893  * Input:
9894  *	ha:	adapter state pointer.
9895  *	tq:	target queue pointer.
9896  *	index:	unsolicited buffer array index.
9897  *	done_q:	done queue pointer.
9898  *
9899  * Returns:
9900  *	ql local function return status code.
9901  *
9902  * Context:
9903  *	Interrupt or Kernel context, no mailbox commands allowed.
9904  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(ha, "started\n");

	QL_UB_LOCK(ha);
	/* Validate the buffer index handed back by the ISP. */
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		/* Buffer teardown in progress; hand it back to the FCA. */
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only accept an unclaimed IP buffer that the ISP owns. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = FS_BROADCAST;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Synthesize an FC frame header for the upper layer. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp this frame to what remains of the sequence. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance per-target sequence bookkeeping. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Set F_CTL bits per the frame's position in the sequence. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				/* Single-frame sequence: first and last. */
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				/* Final frame of a multi-frame sequence. */
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			/* First frame of a multi-frame sequence. */
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(ha, "ub_frame.d_id=%xh\n", ubp->ub_frame.d_id);
		QL_PRINT_3(ha, "ub_frame.s_id=%xh\n", ubp->ub_frame.s_id);
		QL_PRINT_3(ha, "ub_frame.seq_cnt=%xh\n", ubp->ub_frame.seq_cnt);
		QL_PRINT_3(ha, "ub_frame.seq_id=%xh\n", ubp->ub_frame.seq_id);
		QL_PRINT_3(ha, "ub_frame.ro=%xh\n", ubp->ub_frame.ro);
		QL_PRINT_3(ha, "ub_frame.f_ctl=%xh\n", ubp->ub_frame.f_ctl);
		QL_PRINT_3(ha, "ub_bufsize=%xh\n", ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Queue for completion-side callback. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Diagnose exactly which acceptance check failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
10021 
10022 /*
10023  * ql_timer
10024  *	One second timer function.
10025  *
10026  * Input:
10027  *	ql_hba.first = first link in adapter list.
10028  *
10029  * Context:
10030  *	Interrupt context, no mailbox commands allowed.
10031  */
10032 static void
ql_timer(void * arg)10033 ql_timer(void *arg)
10034 {
10035 	ql_link_t		*link;
10036 	uint64_t		set_flags;
10037 	ql_adapter_state_t	*ha;
10038 	static uint32_t		sec_cnt = 0;
10039 
10040 	QL_PRINT_6(NULL, "started\n");
10041 
10042 	/* Acquire global state lock. */
10043 	GLOBAL_TIMER_LOCK();
10044 	if (ql_timer_timeout_id == NULL) {
10045 		/* Release global state lock. */
10046 		GLOBAL_TIMER_UNLOCK();
10047 		return;
10048 	}
10049 
10050 	sec_cnt++;
10051 	for (link = ql_hba.first; link != NULL; link = link->next) {
10052 		ha = link->base_address;
10053 
10054 		/* Skip adapter if suspended or stalled. */
10055 		if (ha->flags & ADAPTER_SUSPENDED ||
10056 		    ha->task_daemon_flags & DRIVER_STALL ||
10057 		    !(ha->task_daemon_flags & FIRMWARE_UP)) {
10058 			continue;
10059 		}
10060 
10061 		QL_PM_LOCK(ha);
10062 		if (ha->power_level != PM_LEVEL_D0) {
10063 			QL_PM_UNLOCK(ha);
10064 			continue;
10065 		}
10066 		ha->pm_busy++;
10067 		QL_PM_UNLOCK(ha);
10068 
10069 		set_flags = 0;
10070 
10071 		/* All completion treads busy, wake up a helper thread. */
10072 		if (ha->comp_thds_awake == ha->comp_thds_active &&
10073 		    ha->comp_q.first != NULL) {
10074 			QL_PRINT_10(ha, "comp queue helper thrd started\n");
10075 			(void) timeout(ql_process_comp_queue, (void *)ha, 1);
10076 		}
10077 
10078 		/* Port retry timer handler. */
10079 		if (LOOP_READY(ha)) {
10080 			ADAPTER_STATE_LOCK(ha);
10081 			if (ha->port_retry_timer != 0) {
10082 				ha->port_retry_timer--;
10083 				if (ha->port_retry_timer == 0) {
10084 					set_flags |= PORT_RETRY_NEEDED;
10085 				}
10086 			}
10087 			ADAPTER_STATE_UNLOCK(ha);
10088 		}
10089 
10090 		/* Loop down timer handler. */
10091 		if (LOOP_RECONFIGURE(ha) == 0) {
10092 			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
10093 				ha->loop_down_timer--;
10094 				/*
10095 				 * give the firmware loop down dump flag
10096 				 * a chance to work.
10097 				 */
10098 				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
10099 					if (CFG_IST(ha,
10100 					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
10101 						ADAPTER_STATE_LOCK(ha);
10102 						ha->flags |= FW_DUMP_NEEDED;
10103 						ADAPTER_STATE_UNLOCK(ha);
10104 					}
10105 					EL(ha, "loop_down_reset, "
10106 					    "isp_abort_needed\n");
10107 					set_flags |= ISP_ABORT_NEEDED;
10108 				}
10109 			}
10110 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
10111 				/* Command abort time handler. */
10112 				if (ha->loop_down_timer ==
10113 				    ha->loop_down_abort_time) {
10114 					ADAPTER_STATE_LOCK(ha);
10115 					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
10116 					ADAPTER_STATE_UNLOCK(ha);
10117 					set_flags |= ABORT_QUEUES_NEEDED;
10118 					EL(ha, "loop_down_abort_time, "
10119 					    "abort_queues_needed\n");
10120 				}
10121 
10122 				/* Watchdog timer handler. */
10123 				if (ha->watchdog_timer == 0) {
10124 					ha->watchdog_timer = WATCHDOG_TIME;
10125 				} else if (LOOP_READY(ha)) {
10126 					ha->watchdog_timer--;
10127 					if (ha->watchdog_timer == 0) {
10128 						set_flags |= WATCHDOG_NEEDED;
10129 					}
10130 				}
10131 			}
10132 		}
10133 
10134 		/* Idle timer handler. */
10135 		if (!DRIVER_SUSPENDED(ha)) {
10136 			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
10137 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
10138 				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
10139 #endif
10140 				ha->idle_timer = 0;
10141 			}
10142 			if (ha->send_plogi_timer != 0) {
10143 				ha->send_plogi_timer--;
10144 				if (ha->send_plogi_timer == 0) {
10145 					set_flags |= SEND_PLOGI;
10146 				}
10147 			}
10148 		}
10149 
10150 		if (CFG_IST(ha, CFG_CTRL_82XX) && ha->flags & ONLINE &&
10151 		    !(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
10152 		    ABORT_ISP_ACTIVE)) &&
10153 		    !(sec_cnt % 2)) {
10154 			set_flags |= IDC_POLL_NEEDED;
10155 		}
10156 
10157 		if (ha->ledstate.BeaconState == BEACON_ON) {
10158 			set_flags |= LED_BLINK;
10159 		}
10160 
10161 		if (set_flags != 0) {
10162 			ql_awaken_task_daemon(ha, NULL, set_flags, 0);
10163 		}
10164 
10165 		/* Update the IO stats */
10166 		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
10167 			ha->xioctl->IOInputMByteCnt +=
10168 			    (ha->xioctl->IOInputByteCnt / 0x100000);
10169 			ha->xioctl->IOInputByteCnt %= 0x100000;
10170 		}
10171 
10172 		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
10173 			ha->xioctl->IOOutputMByteCnt +=
10174 			    (ha->xioctl->IOOutputByteCnt / 0x100000);
10175 			ha->xioctl->IOOutputByteCnt %= 0x100000;
10176 		}
10177 
10178 		QL_PM_LOCK(ha);
10179 		if (ha->pm_busy) {
10180 			ha->pm_busy--;
10181 		}
10182 		QL_PM_UNLOCK(ha);
10183 	}
10184 
10185 	/* Restart timer, if not being stopped. */
10186 	if (ql_timer_timeout_id != NULL) {
10187 		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
10188 	}
10189 
10190 	/* Release global state lock. */
10191 	GLOBAL_TIMER_UNLOCK();
10192 
10193 	QL_PRINT_6(ha, "done\n");
10194 }
10195 
10196 /*
10197  * ql_timeout_insert
10198  *	Function used to insert a command block onto the
10199  *	watchdog timer queue.
10200  *
10201  *	Note: Must insure that pkt_time is not zero
10202  *			before calling ql_timeout_insert.
10203  *
10204  * Input:
10205  *	ha:	adapter state pointer.
10206  *	tq:	target queue pointer.
10207  *	sp:	SRB pointer.
10208  *	DEVICE_QUEUE_LOCK must be already obtained.
10209  *
10210  * Context:
10211  *	Kernel context.
10212  */
10213 /* ARGSUSED */
10214 static void
ql_timeout_insert(ql_adapter_state_t * ha,ql_tgt_t * tq,ql_srb_t * sp)10215 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
10216 {
10217 	QL_PRINT_3(ha, "started\n");
10218 
10219 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
10220 		sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
10221 		/*
10222 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
10223 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
10224 		 * will expire in the next watchdog call, which could be in
10225 		 * 1 microsecond.
10226 		 *
10227 		 */
10228 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
10229 		    WATCHDOG_TIME;
10230 		/*
10231 		 * Added an additional 10 to account for the
10232 		 * firmware timer drift which can occur with
10233 		 * very long timeout values.
10234 		 */
10235 		sp->wdg_q_time += 10;
10236 
10237 		/*
10238 		 * Add 6 more to insure watchdog does not timeout at the same
10239 		 * time as ISP RISC code timeout.
10240 		 */
10241 		sp->wdg_q_time += 6;
10242 
10243 		/* Save initial time for resetting watchdog time. */
10244 		sp->init_wdg_q_time = sp->wdg_q_time;
10245 
10246 		/* Insert command onto watchdog queue. */
10247 		ql_add_link_b(&tq->wdg, &sp->wdg);
10248 
10249 		sp->flags |= SRB_WATCHDOG_ENABLED;
10250 	} else {
10251 		sp->isp_timeout = 0;
10252 		sp->wdg_q_time = 0;
10253 		sp->init_wdg_q_time = 0;
10254 	}
10255 
10256 	QL_PRINT_3(ha, "done\n");
10257 }
10258 
10259 /*
10260  * ql_watchdog
10261  *	Timeout handler that runs in interrupt context. The
10262  *	ql_adapter_state_t * argument is the parameter set up when the
10263  *	timeout was initialized (state structure pointer).
10264  *	Function used to update timeout values and if timeout
10265  *	has occurred command will be aborted.
10266  *
10267  * Input:
10268  *	ha:	adapter state pointer.
10269  *
10270  * Context:
10271  *	Kernel context.
10272  */
10273 static void
ql_watchdog(ql_adapter_state_t * ha)10274 ql_watchdog(ql_adapter_state_t *ha)
10275 {
10276 	ql_link_t		*link;
10277 	ql_tgt_t		*tq;
10278 	uint16_t		index;
10279 	ql_adapter_state_t	*vha;
10280 
10281 	QL_PRINT_6(ha, "started\n");
10282 
10283 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
10284 		/* Loop through all targets. */
10285 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10286 			for (link = vha->dev[index].first; link != NULL;
10287 			    link = link->next) {
10288 				tq = link->base_address;
10289 
10290 				/* Try to acquire device queue lock. */
10291 				if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
10292 					break;
10293 				}
10294 
10295 				if (!(CFG_IST(ha,
10296 				    CFG_ENABLE_LINK_DOWN_REPORTING)) &&
10297 				    (tq->port_down_retry_count == 0)) {
10298 					/* Release device queue lock. */
10299 					DEVICE_QUEUE_UNLOCK(tq);
10300 					continue;
10301 				}
10302 				ql_wdg_tq_list(vha, tq);
10303 			}
10304 		}
10305 	}
10306 	ha->watchdog_timer = WATCHDOG_TIME;
10307 
10308 	QL_PRINT_6(ha, "done\n");
10309 }
10310 
10311 /*
10312  * ql_wdg_tq_list
10313  *	Timeout handler that runs in interrupt context. The
10314  *	ql_adapter_state_t * argument is the parameter set up when the
10315  *	timeout was initialized (state structure pointer).
10316  *	Function used to update timeout values and if timeout
10317  *	has occurred command will be aborted.
10318  *
10319  * Input:
10320  *	ha:	adapter state pointer.
10321  *	tq:	target queue pointer.
10322  *	DEVICE_QUEUE_LOCK must be already obtained.
10323  *
10324  * Output:
10325  *	Releases DEVICE_QUEUE_LOCK upon exit.
10326  *
10327  * Context:
10328  *	Kernel context.
10329  */
static void
ql_wdg_tq_list(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_srb_t	*sp;
	ql_link_t	*link, *next_cmd;
	ql_lun_t	*lq;
	boolean_t	q_sane, timeout = B_FALSE;

	QL_PRINT_6(ha, "started\n");

	/* Find out if this device is in a sane state */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
	    TQF_QUEUE_SUSPENDED)) {
		q_sane = B_FALSE;
	} else {
		q_sane = B_TRUE;
	}
	/* Pass 1: decrement timers and flag commands that have expired. */
	for (link = tq->wdg.first; link != NULL; link = next_cmd) {
		next_cmd = link->next;
		sp = link->base_address;
		lq = sp->lun_queue;

		/*
		 * For SCSI commands: if everything seems to be going
		 * fine and this packet is merely held back by LUN- or
		 * target-level throttling (not yet handed to the ISP),
		 * do not decrement sp->wdg_q_time.
		 */
		if (ha->task_daemon_flags & STATE_ONLINE &&
		    !(sp->flags & SRB_ISP_STARTED) &&
		    q_sane == B_TRUE &&
		    sp->flags & SRB_FCP_CMD_PKT &&
		    lq->lun_outcnt >= ha->execution_throttle) {
			continue;
		}

		if (sp->wdg_q_time != 0) {
			sp->wdg_q_time--;

			/* Timeout? */
			if (sp->wdg_q_time != 0) {
				continue;
			}

			sp->flags |= SRB_COMMAND_TIMEOUT;
			timeout = B_TRUE;
		}
	}

	/*
	 * Pass 2: abort the commands flagged above.
	 */
	if (timeout == B_TRUE) {
		for (link = tq->wdg.first; link != NULL; link = next_cmd) {
			sp = link->base_address;
			next_cmd = link->next;

			if (sp->flags & SRB_COMMAND_TIMEOUT) {
				ql_remove_link(&tq->wdg, &sp->wdg);
				sp->flags &= ~(SRB_WATCHDOG_ENABLED |
				    SRB_COMMAND_TIMEOUT);
				ql_cmd_timeout(ha, tq, sp);
				/*
				 * ql_cmd_timeout() drops and reacquires
				 * the device queue lock, so the list may
				 * have changed; restart from the head.
				 */
				next_cmd = tq->wdg.first;
			}
		}
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_6(ha, "done\n");
}
10406 
10407 /*
10408  * ql_cmd_timeout
10409  *	Command timeout handler.
10410  *
10411  * Input:
10412  *	ha:		adapter state pointer.
10413  *	tq:		target queue pointer.
10414  *	sp:		SRB pointer.
10415  *
10416  * Context:
10417  *	Kernel context.
10418  */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
{
	int	rval = 0;

	QL_PRINT_3(ha, "started\n");

	REQUEST_RING_LOCK(ha);
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * The command never made it to the ISP; it timed out
		 * while still queued inside the driver, so it can be
		 * completed directly with a timeout status.
		 */
		EL(ha, "command timed out in driver, sp=%ph spf=%xh\n",
		    (void *)sp, sp->flags);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd, B_FALSE);
	} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		/*
		 * 82xx path: try aborting just this I/O first; escalate
		 * to a full ISP abort when too many timeouts have
		 * accumulated or the single-command abort fails.
		 */
		if (ha->pha->timeout_cnt++ > TIMEOUT_THRESHOLD ||
		    (rval = ql_abort_io(ha, sp)) != QL_SUCCESS) {
			sp->flags |= SRB_COMMAND_TIMEOUT;
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
			TASK_DAEMON_UNLOCK(ha);
			EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
			    "needed\n", rval, ha->pha->timeout_cnt);
		}
	} else {
		/* Command is in the ISP; force a full adapter reset. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh, isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally capture a firmware dump for diagnosis. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= FW_DUMP_NEEDED;
			ADAPTER_STATE_UNLOCK(ha);
		}

		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= ISP_ABORT_NEEDED;
		TASK_DAEMON_UNLOCK(ha);
	}
	/*
	 * Caller entered with the device queue lock held; it was
	 * dropped in every branch above, so reacquire before return.
	 */
	DEVICE_QUEUE_LOCK(tq);

	QL_PRINT_3(ha, "done\n");
}
10501 
10502 /*
10503  * ql_cmd_wait
10504  *	Stall driver until all outstanding commands are returned.
10505  *
10506  * Input:
10507  *	ha = adapter state pointer.
10508  *
10509  * Context:
10510  *	Kernel context.
10511  */
10512 void
ql_cmd_wait(ql_adapter_state_t * ha)10513 ql_cmd_wait(ql_adapter_state_t *ha)
10514 {
10515 	uint16_t		index;
10516 	ql_link_t		*link;
10517 	ql_tgt_t		*tq;
10518 	ql_adapter_state_t	*vha;
10519 
10520 	QL_PRINT_3(ha, "started\n");
10521 
10522 	/* Wait for all outstanding commands to be returned. */
10523 	(void) ql_wait_outstanding(ha);
10524 
10525 	/*
10526 	 * clear out internally queued commands
10527 	 */
10528 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
10529 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10530 			for (link = vha->dev[index].first; link != NULL;
10531 			    link = link->next) {
10532 				tq = link->base_address;
10533 				if (tq &&
10534 				    (!(tq->prli_svc_param_word_3 &
10535 				    PRLI_W3_RETRY) ||
10536 				    ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
10537 					(void) ql_abort_device(vha, tq, 0);
10538 				}
10539 			}
10540 		}
10541 	}
10542 
10543 	QL_PRINT_3(ha, "done\n");
10544 }
10545 
10546 /*
10547  * ql_wait_outstanding
10548  *	Wait for all outstanding commands to complete.
10549  *
10550  * Input:
10551  *	ha = adapter state pointer.
10552  *
10553  * Returns:
10554  *	index - the index for ql_srb into outstanding_cmds.
10555  *
10556  * Context:
10557  *	Kernel context.
10558  */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(ha, "started\n");

	count = ql_osc_wait_count;
	for (index = 1; index < ha->pha->osc_max_cnt; index++) {
		/*
		 * Keep feeding queued commands to the ISP; each new
		 * submission restarts the scan of the outstanding
		 * command array.
		 */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/*
		 * Aborted-placeholder entries and commands already
		 * marked timed out do not count as outstanding work.
		 */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    sp != QL_ABORTED_SRB(ha) &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				/*
				 * Restart the scan; the for-increment
				 * brings index back to 1.
				 */
				index = 0;
			} else {
				/* Wait budget exhausted; give up. */
				EL(ha, "still in OSC,sp=%ph,oci=%d,sph=%xh,"
				    "spf=%xh\n", (void *) sp, index, sp->handle,
				    sp->flags);
				break;
			}
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (index);
}
10592 
10593 /*
10594  * ql_restart_queues
10595  *	Restart device queues.
10596  *
10597  * Input:
10598  *	ha = adapter state pointer.
10599  *	DEVICE_QUEUE_LOCK must be released.
10600  *
10601  * Context:
10602  *	Interrupt or Kernel context, no mailbox commands allowed.
10603  */
void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(ha, "started\n");

	/* Walk every target on every virtual port of the physical HBA. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				/* Restart any LUN with queued commands. */
				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						/*
						 * NOTE(review): ql_next()
						 * appears to return with the
						 * device queue lock dropped
						 * (it is retaken here);
						 * confirm against ql_next().
						 */
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
10644 
10645 /*
10646  * ql_iidma
10647  *	Setup iiDMA parameters to firmware
10648  *
10649  * Input:
10650  *	ha = adapter state pointer.
10651  *	DEVICE_QUEUE_LOCK must be released.
10652  *
10653  * Context:
10654  *	Interrupt or Kernel context, no mailbox commands allowed.
10655  */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(ha, "started\n");

	/* Nothing to do on adapters without iiDMA support. */
	if (!CFG_IST(ha, CFG_IIDMA_SUPPORT)) {
		QL_PRINT_3(ha, "done\n");
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Only touch targets flagged for an update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				continue;
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);

			/*
			 * Skip fabric service ports, initiator devices,
			 * and targets with no defined rate.
			 */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->d_id.b24 == FS_MANAGEMENT_SERVER) ||
			    (tq->flags & TQF_INITIATOR_DEVICE) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				continue;
			}

			/* Get the iiDMA persistent data */
			/* The property name is keyed by the target WWPN. */
			(void) snprintf(buf, sizeof (buf),
			    "iidma-rate-%02x%02x%02x%02x%02x"
			    "%02x%02x%02x", tq->port_name[0],
			    tq->port_name[1], tq->port_name[2],
			    tq->port_name[3], tq->port_name[4],
			    tq->port_name[5], tq->port_name[6],
			    tq->port_name[7]);

			/* 0xffffffff means the property was not found. */
			if ((data = ql_get_prop(ha, buf)) ==
			    0xffffffff) {
				tq->iidma_rate = IIDMA_RATE_NDEF;
			} else {
				/* Accept only known rate encodings. */
				switch (data) {
				case IIDMA_RATE_4GB:
				case IIDMA_RATE_8GB:
				case IIDMA_RATE_10GB:
				case IIDMA_RATE_16GB:
				case IIDMA_RATE_32GB:
					tq->iidma_rate = data;
					break;
				default:
					EL(ha, "invalid data for "
					    "parameter: %s: %xh\n",
					    buf, data);
					tq->iidma_rate =
					    IIDMA_RATE_NDEF;
					break;
				}
			}

			EL(ha, "d_id = %xh iidma_rate = %xh\n",
			    tq->d_id.b24, tq->iidma_rate);

			/* Set the firmware's iiDMA rate */
			if (!CFG_IST(ha, CFG_FCOE_SUPPORT)) {
				if (tq->iidma_rate <= IIDMA_RATE_MAX) {
					data = ql_iidma_rate(ha, tq->loop_id,
					    &tq->iidma_rate,
					    EXT_IIDMA_MODE_SET);
					if (data != QL_SUCCESS) {
						EL(ha, "mbx failed: %xh\n",
						    data);
					}
				}
			}
		}
	}

	QL_PRINT_3(ha, "done\n");
}
10747 
10748 /*
10749  * ql_abort_queues
10750  *	Abort all commands on device queues.
10751  *
10752  * Input:
10753  *	ha = adapter state pointer.
10754  *
10755  * Context:
10756  *	Interrupt or Kernel context, no mailbox commands allowed.
10757  */
void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(ha, "started\n");

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < ha->osc_max_cnt; index++) {
		/*
		 * Drain the pending queue first; a new submission
		 * restarts the scan of the outstanding array.  The
		 * interrupt lock is dropped around the submission and
		 * delay.
		 */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* Skip aborted placeholders and other instances' SRBs. */
		if (sp && (sp == QL_ABORTED_SRB(ha) || sp->ha != ha)) {
			continue;
		}

		/* skip devices capable of FCP2 retrys */
		if (sp != NULL &&
		    (sp->lun_queue == NULL ||
		    (tq = sp->lun_queue->target_queue) == NULL ||
		    !(tq->prli_svc_param_word_3 & PRLI_W3_RETRY) ||
		    ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
			/* Reclaim this SRB's outstanding slot. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd, B_FALSE);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush commands queued internally on each target. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(vha, "abort instance\n");
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY) ||
				    ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(ha, "done\n");
}
10835 
10836 /*
10837  * ql_abort_device_queues
10838  *	Abort all commands on device queues.
10839  *
10840  * Input:
10841  *	ha = adapter state pointer.
10842  *
10843  * Context:
10844  *	Interrupt or Kernel context, no mailbox commands allowed.
10845  */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(ha, "started\n");

	DEVICE_QUEUE_LOCK(tq);
	/* Push pending commands back onto the device queues first. */
	ql_requeue_pending_cmds(ha, tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		/*
		 * Always restart from the queue head: the device queue
		 * lock is dropped around ql_done(), so the list may
		 * change while it is released.
		 */
		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Drop the lock before completing the command. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd, B_FALSE);

			/* Delay for system */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(ha, "done\n");
}
10890 
10891 /*
10892  * ql_loop_resync
10893  *	Resync with fibre channel devices.
10894  *
10895  * Input:
10896  *	ha = adapter state pointer.
10897  *	DEVICE_QUEUE_LOCK must be released.
10898  *
10899  * Context:
10900  *	Kernel context.
10901  */
10902 static void
ql_loop_resync(ql_adapter_state_t * ha)10903 ql_loop_resync(ql_adapter_state_t *ha)
10904 {
10905 	int rval;
10906 
10907 	QL_PRINT_3(ha, "started\n");
10908 
10909 	if (ha->flags & IP_INITIALIZED) {
10910 		(void) ql_shutdown_ip(ha);
10911 	}
10912 
10913 	rval = ql_fw_ready(ha, 10);
10914 
10915 	TASK_DAEMON_LOCK(ha);
10916 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10917 	TASK_DAEMON_UNLOCK(ha);
10918 
10919 	/* Set loop online, if it really is. */
10920 	if (rval == QL_SUCCESS) {
10921 		ql_loop_online(ha);
10922 		QL_PRINT_3(ha, "done\n");
10923 	} else {
10924 		EL(ha, "failed, rval = %xh\n", rval);
10925 	}
10926 }
10927 
10928 /*
10929  * ql_loop_online
10930  *	Set loop online status if it really is online.
10931  *
10932  * Input:
10933  *	ha = adapter state pointer.
10934  *	DEVICE_QUEUE_LOCK must be released.
10935  *
10936  * Context:
10937  *	Kernel context.
10938  */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(ha, "started\n");

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		/* Skip ports still resyncing or down. */
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			/* (IP only runs on the physical port, index 0.) */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * Move the port state to LOOP or ONLINE while
			 * preserving the speed bits, and flag a state
			 * change for the task daemon.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(ha, "done\n");
}
10980 
10981 /*
10982  * ql_fca_handle_to_state
10983  *	Verifies handle to be correct.
10984  *
10985  * Input:
10986  *	fca_handle = pointer to state structure.
10987  *
10988  * Returns:
10989  *	NULL = failure
10990  *
10991  * Context:
10992  *	Kernel context.
10993  */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef	QL_DEBUG_ROUTINES
	/*
	 * Debug builds verify that the opaque handle really is one of
	 * our adapter (or virtual port) state structures before it is
	 * blindly cast below.
	 */
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		/* Also match any virtual port under this adapter. */
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		/* NOTE(review): ha is NULL here; QL_PRINT_2 must
		 * tolerate a NULL state pointer — verify the macro. */
		QL_PRINT_2(ha, "failed\n");
	}

#endif /* QL_DEBUG_ROUTINES */

	/* Non-debug builds trust the caller's handle unconditionally. */
	return ((ql_adapter_state_t *)fca_handle);
}
11026 
11027 /*
11028  * ql_d_id_to_queue
11029  *	Locate device queue that matches destination ID.
11030  *
11031  * Input:
11032  *	ha = adapter state pointer.
11033  *	d_id = destination ID
11034  *
11035  * Returns:
11036  *	NULL = failure
11037  *
11038  * Context:
11039  *	Interrupt or Kernel context, no mailbox commands allowed.
11040  */
11041 ql_tgt_t *
ql_d_id_to_queue(ql_adapter_state_t * ha,port_id_t d_id)11042 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
11043 {
11044 	uint16_t	index;
11045 	ql_tgt_t	*tq;
11046 	ql_link_t	*link;
11047 
11048 	/* Get head queue index. */
11049 	index = ql_alpa_to_index[d_id.b.al_pa];
11050 
11051 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
11052 		tq = link->base_address;
11053 		if (tq->d_id.b24 == d_id.b24 &&
11054 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
11055 			return (tq);
11056 		}
11057 	}
11058 
11059 	return (NULL);
11060 }
11061 
11062 /*
11063  * ql_loop_id_to_queue
11064  *	Locate device queue that matches loop ID.
11065  *
11066  * Input:
11067  *	ha:		adapter state pointer.
11068  *	loop_id:	destination ID
11069  *
11070  * Returns:
11071  *	NULL = failure
11072  *
11073  * Context:
11074  *	Interrupt or Kernel context, no mailbox commands allowed.
11075  */
11076 ql_tgt_t *
ql_loop_id_to_queue(ql_adapter_state_t * ha,uint16_t loop_id)11077 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
11078 {
11079 	uint16_t	index;
11080 	ql_tgt_t	*tq;
11081 	ql_link_t	*link;
11082 
11083 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
11084 		for (link = ha->dev[index].first; link != NULL;
11085 		    link = link->next) {
11086 			tq = link->base_address;
11087 			if (tq->loop_id == loop_id) {
11088 				return (tq);
11089 			}
11090 		}
11091 	}
11092 
11093 	return (NULL);
11094 }
11095 
11096 /*
11097  * ql_kstat_update
11098  *	Updates kernel statistics.
11099  *
11100  * Input:
11101  *	ksp - driver kernel statistics structure pointer.
11102  *	rw - function to perform
11103  *
11104  * Returns:
11105  *	0 or EACCES
11106  *
11107  * Context:
11108  *	Kernel context.
11109  */
11110 /* ARGSUSED */
11111 static int
ql_kstat_update(kstat_t * ksp,int rw)11112 ql_kstat_update(kstat_t *ksp, int rw)
11113 {
11114 	int	rval;
11115 
11116 	QL_PRINT_3(ksp->ks_private, "started\n");
11117 
11118 	if (rw == KSTAT_WRITE) {
11119 		rval = EACCES;
11120 	} else {
11121 		rval = 0;
11122 	}
11123 
11124 	if (rval != 0) {
11125 		/*EMPTY*/
11126 		QL_PRINT_2(ksp->ks_private, "failed, rval = %xh\n", rval);
11127 	} else {
11128 		/*EMPTY*/
11129 		QL_PRINT_3(ksp->ks_private, "done\n");
11130 	}
11131 	return (rval);
11132 }
11133 
11134 /*
11135  * ql_load_flash
11136  *	Loads flash.
11137  *
11138  * Input:
11139  *	ha:	adapter state pointer.
11140  *	dp:	data pointer.
11141  *	size:	data length.
11142  *
11143  * Returns:
11144  *	ql local function return status code.
11145  *
11146  * Context:
11147  *	Kernel context.
11148  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* Type-2 firmware parts use the 24xx flash programming path. */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(ha, "started\n");

	/* Default: 128KB image limit, programmed from offset 0. */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			/* FPGA half vs fcode half of the SBus flash. */
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				size_to_offset = 0x40000;
			}
		}
	}
	/* Reject images larger than the selected region. */
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always take the part back out of programming mode. */
	ql_flash_disable(ha);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
11216 
11217 /*
11218  * ql_program_flash_address
11219  *	Program flash address.
11220  *
11221  * Input:
11222  *	ha = adapter state pointer.
11223  *	addr = flash byte address.
11224  *	data = data to be written to flash.
11225  *
11226  * Returns:
11227  *	ql local function return status code.
11228  *
11229  * Context:
11230  *	Kernel context.
11231  */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
{
	int rval;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBus parts take the program command byte directly. */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/* Write Program Command Sequence */
		/* (JEDEC-style 0xaa/0x55 unlock, then 0xa0 program.) */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
11261 
11262 /*
11263  * ql_erase_flash
11264  *	Erases entire flash.
11265  *
11266  * Input:
11267  *	ha = adapter state pointer.
11268  *
11269  * Returns:
11270  *	ql local function return status code.
11271  *
11272  * Context:
11273  *	Kernel context.
11274  */
int
ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
{
	int		rval;
	uint32_t	erase_delay = 2000000;
	uint32_t	sStartAddr;
	uint32_t	ssize;
	uint32_t	cnt;
	uint8_t		*bfp;
	uint8_t		*tmp;

	QL_PRINT_3(ha, "started\n");

	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
		/*
		 * SBus partial update: only a whole-chip erase command
		 * is issued, so save the half that is not being
		 * updated and restore it after the erase.
		 */
		if (ql_flash_sbus_fpga == 1) {
			ssize = QL_SBUS_FCODE_SIZE;
			sStartAddr = QL_FCODE_OFFSET;
		} else {
			ssize = QL_FPGA_SIZE;
			sStartAddr = QL_FPGA_OFFSET;
		}

		/* This path uses a 10x longer post-erase delay. */
		erase_delay = 20000000;

		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);

		/* Save the section of flash we're not updating to buffer */
		tmp = bfp;
		for (cnt = sStartAddr; cnt < ssize + sStartAddr; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
		}

		/* Chip Erase Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x80);
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x10);

		ql_delay(ha, erase_delay);

		/* Wait for erase to complete. */
		rval = ql_poll_flash(ha, 0, 0x80);

		if (rval == QL_SUCCESS) {
			/* Restore the section we saved off */
			tmp = bfp;
			for (cnt = sStartAddr; cnt < ssize + sStartAddr;
			    cnt++) {
				/* Allow other system activity. */
				if (cnt % 0x1000 == 0) {
					ql_delay(ha, 10000);
				}
				rval = ql_program_flash_address(ha, cnt,
				    *tmp++);
				if (rval != QL_SUCCESS) {
					break;
				}
			}
		}
		kmem_free(bfp, ssize);
	} else {
		/* Chip Erase Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x80);
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x10);

		ql_delay(ha, erase_delay);

		/* Wait for erase to complete. */
		rval = ql_poll_flash(ha, 0, 0x80);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
11364 
11365 /*
11366  * ql_poll_flash
11367  *	Polls flash for completion.
11368  *
11369  * Input:
11370  *	ha = adapter state pointer.
11371  *	addr = flash byte address.
11372  *	data = data to be polled.
11373  *
11374  * Returns:
11375  *	ql local function return status code.
11376  *
11377  * Context:
11378  *	Kernel context.
11379  */
11380 int
ql_poll_flash(ql_adapter_state_t * ha,uint32_t addr,uint8_t poll_data)11381 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
11382 {
11383 	uint8_t		flash_data;
11384 	uint32_t	cnt;
11385 	int		rval = QL_FUNCTION_FAILED;
11386 
11387 	QL_PRINT_3(ha, "started\n");
11388 
11389 	poll_data = (uint8_t)(poll_data & BIT_7);
11390 
11391 	/* Wait for 30 seconds for command to finish. */
11392 	for (cnt = 30000000; cnt; cnt--) {
11393 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11394 
11395 		if ((flash_data & BIT_7) == poll_data) {
11396 			rval = QL_SUCCESS;
11397 			break;
11398 		}
11399 		if (flash_data & BIT_5 && cnt > 2) {
11400 			cnt = 2;
11401 		}
11402 		drv_usecwait(1);
11403 	}
11404 
11405 	if (rval != QL_SUCCESS) {
11406 		EL(ha, "failed=%xh\n", rval);
11407 	} else {
11408 		/*EMPTY*/
11409 		QL_PRINT_3(ha, "done\n");
11410 	}
11411 	return (rval);
11412 }
11413 
11414 /*
11415  * ql_flash_enable
11416  *	Setup flash for reading/writing.
11417  *
11418  * Input:
11419  *	ha = adapter state pointer.
11420  *
11421  * Context:
11422  *	Kernel context.
11423  */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(ha, "started\n");

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the write-enable bit in the FPGA config register. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Set the flash-enable bit in the ISP ctrl/status reg. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read; the result is intentionally discarded. */
	(void) ql_read_flash_byte(ha, 0);

	QL_PRINT_3(ha, "done\n");
}
11457 
11458 /*
11459  * ql_flash_disable
11460  *	Disable flash and allow RISC to run.
11461  *
11462  * Input:
11463  *	ha = adapter state pointer.
11464  *
11465  * Context:
11466  *	Kernel context.
11467  */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the write-enable bit in the FPGA config reg. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the flash-enable bit so the RISC can run. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}

	QL_PRINT_3(ha, "done\n");
}
11495 
11496 /*
11497  * ql_write_flash_byte
11498  *	Write byte to flash.
11499  *
11500  * Input:
11501  *	ha = adapter state pointer.
11502  *	addr = flash byte address.
11503  *	data = data to be written.
11504  *
11505  * Context:
11506  *	Kernel context.
11507  */
11508 void
ql_write_flash_byte(ql_adapter_state_t * ha,uint32_t addr,uint8_t data)11509 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11510 {
11511 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
11512 		ddi_put16(ha->sbus_fpga_dev_handle,
11513 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11514 		    LSW(addr));
11515 		ddi_put16(ha->sbus_fpga_dev_handle,
11516 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11517 		    MSW(addr));
11518 		ddi_put16(ha->sbus_fpga_dev_handle,
11519 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11520 		    (uint16_t)data);
11521 	} else {
11522 		uint16_t bank_select;
11523 
11524 		/* Setup bit 16 of flash address. */
11525 		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11526 
11527 		if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
11528 			bank_select = (uint16_t)(bank_select & ~0xf0);
11529 			bank_select = (uint16_t)(bank_select |
11530 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11531 			WRT16_IO_REG(ha, ctrl_status, bank_select);
11532 		} else {
11533 			if (addr & BIT_16 && !(bank_select &
11534 			    ISP_FLASH_64K_BANK)) {
11535 				bank_select = (uint16_t)(bank_select |
11536 				    ISP_FLASH_64K_BANK);
11537 				WRT16_IO_REG(ha, ctrl_status, bank_select);
11538 			} else if (!(addr & BIT_16) && bank_select &
11539 			    ISP_FLASH_64K_BANK) {
11540 				bank_select = (uint16_t)(bank_select &
11541 				    ~ISP_FLASH_64K_BANK);
11542 				WRT16_IO_REG(ha, ctrl_status, bank_select);
11543 			}
11544 		}
11545 
11546 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
11547 			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
11548 			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
11549 		} else {
11550 			WRT16_IOMAP_REG(ha, flash_address, addr);
11551 			WRT16_IOMAP_REG(ha, flash_data, data);
11552 		}
11553 	}
11554 }
11555 
11556 /*
11557  * ql_read_flash_byte
11558  *	Reads byte from flash, but must read a word from chip.
11559  *
11560  * Input:
11561  *	ha = adapter state pointer.
11562  *	addr = flash byte address.
11563  *
11564  * Returns:
11565  *	byte from flash.
11566  *
11567  * Context:
11568  *	Kernel context.
11569  */
11570 uint8_t
ql_read_flash_byte(ql_adapter_state_t * ha,uint32_t addr)11571 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11572 {
11573 	uint8_t	data;
11574 
11575 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
11576 		ddi_put16(ha->sbus_fpga_dev_handle,
11577 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11578 		    LSW(addr));
11579 		ddi_put16(ha->sbus_fpga_dev_handle,
11580 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11581 		    MSW(addr));
11582 		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11583 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11584 	} else {
11585 		uint16_t	bank_select;
11586 
11587 		/* Setup bit 16 of flash address. */
11588 		bank_select = RD16_IO_REG(ha, ctrl_status);
11589 		if (ha->device_id == 0x2322 || ha->device_id == 0x6322) {
11590 			bank_select = (uint16_t)(bank_select & ~0xf0);
11591 			bank_select = (uint16_t)(bank_select |
11592 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11593 			WRT16_IO_REG(ha, ctrl_status, bank_select);
11594 		} else {
11595 			if (addr & BIT_16 &&
11596 			    !(bank_select & ISP_FLASH_64K_BANK)) {
11597 				bank_select = (uint16_t)(bank_select |
11598 				    ISP_FLASH_64K_BANK);
11599 				WRT16_IO_REG(ha, ctrl_status, bank_select);
11600 			} else if (!(addr & BIT_16) &&
11601 			    bank_select & ISP_FLASH_64K_BANK) {
11602 				bank_select = (uint16_t)(bank_select &
11603 				    ~ISP_FLASH_64K_BANK);
11604 				WRT16_IO_REG(ha, ctrl_status, bank_select);
11605 			}
11606 		}
11607 
11608 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
11609 			WRT16_IO_REG(ha, flash_address, addr);
11610 			data = (uint8_t)RD16_IO_REG(ha, flash_data);
11611 		} else {
11612 			WRT16_IOMAP_REG(ha, flash_address, addr);
11613 			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11614 		}
11615 	}
11616 
11617 	return (data);
11618 }
11619 
11620 /*
11621  * ql_24xx_flash_id
11622  *	Get flash IDs.
11623  *
11624  * Input:
11625  *	ha:		adapter state pointer.
11626  *
11627  * Returns:
11628  *	ql local function return status code.
11629  *
11630  * Context:
11631  *	Kernel context.
11632  */
11633 int
ql_24xx_flash_id(ql_adapter_state_t * vha)11634 ql_24xx_flash_id(ql_adapter_state_t *vha)
11635 {
11636 	int			rval;
11637 	uint32_t		fdata = 0;
11638 	ql_adapter_state_t	*ha = vha->pha;
11639 	ql_xioctl_t		*xp = ha->xioctl;
11640 
11641 	QL_PRINT_3(ha, "started\n");
11642 
11643 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11644 	if (CFG_IST(ha, CFG_CTRL_24XX)) {
11645 		if (rval != QL_SUCCESS || fdata == 0) {
11646 			fdata = 0;
11647 			rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x39F,
11648 			    &fdata);
11649 		}
11650 	} else {
11651 		fdata = 0;
11652 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11653 		    (CFG_IST(ha, CFG_CTRL_25XX) ? 0x49F : 0x39F), &fdata);
11654 	}
11655 
11656 	if (rval != QL_SUCCESS) {
11657 		EL(ha, "24xx read_flash failed=%xh\n", rval);
11658 	} else if (fdata != 0) {
11659 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11660 		xp->fdesc.flash_id = MSB(LSW(fdata));
11661 		xp->fdesc.flash_len = LSB(MSW(fdata));
11662 	} else {
11663 		xp->fdesc.flash_manuf = ATMEL_FLASH;
11664 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11665 		xp->fdesc.flash_len = 0;
11666 	}
11667 
11668 	QL_PRINT_3(ha, "done\n");
11669 
11670 	return (rval);
11671 }
11672 
11673 /*
11674  * ql_24xx_load_flash
11675  *	Loads flash.
11676  *
11677  * Input:
11678  *	ha = adapter state pointer.
11679  *	dp = data pointer.
11680  *	size = data length in bytes.
11681  *	faddr = 32bit word flash byte address.
11682  *
11683  * Returns:
11684  *	ql local function return status code.
11685  *
11686  * Context:
11687  *	Kernel context.
11688  */
11689 int
ql_24xx_load_flash(ql_adapter_state_t * vha,uint8_t * dp,uint32_t size,uint32_t faddr)11690 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11691     uint32_t faddr)
11692 {
11693 	int			rval;
11694 	uint32_t		cnt, rest_addr, fdata, wc;
11695 	dma_mem_t		dmabuf = {0};
11696 	ql_adapter_state_t	*ha = vha->pha;
11697 	ql_xioctl_t		*xp = ha->xioctl;
11698 
11699 	QL_PRINT_3(ha, "started, faddr=%xh, size=%xh\n",
11700 	    ha->instance, faddr, size);
11701 
11702 	/* start address must be 32 bit word aligned */
11703 	if ((faddr & 0x3) != 0) {
11704 		EL(ha, "incorrect buffer size alignment\n");
11705 		return (QL_FUNCTION_PARAMETER_ERROR);
11706 	}
11707 
11708 	/* Allocate DMA buffer */
11709 	if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11710 		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11711 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11712 		    QL_SUCCESS) {
11713 			EL(ha, "dma alloc failed, rval=%xh\n", rval);
11714 			return (rval);
11715 		}
11716 	}
11717 
11718 	/* Enable flash write */
11719 	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11720 		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11721 		ql_free_phys(ha, &dmabuf);
11722 		return (rval);
11723 	}
11724 
11725 	/* setup mask of address range within a sector */
11726 	rest_addr = (xp->fdesc.block_size - 1) >> 2;
11727 
11728 	faddr = faddr >> 2;	/* flash gets 32 bit words */
11729 
11730 	/*
11731 	 * Write data to flash.
11732 	 */
11733 	cnt = 0;
11734 	size = (size + 3) >> 2;	/* Round up & convert to dwords */
11735 
11736 	while (cnt < size) {
11737 		/* Beginning of a sector? */
11738 		if ((faddr & rest_addr) == 0) {
11739 			if (CFG_IST(ha, CFG_CTRL_82XX)) {
11740 				fdata = ha->flash_data_addr | faddr;
11741 				rval = ql_8021_rom_erase(ha, fdata);
11742 				if (rval != QL_SUCCESS) {
11743 					EL(ha, "8021 erase sector status="
11744 					    "%xh, start=%xh, end=%xh"
11745 					    "\n", rval, fdata,
11746 					    fdata + rest_addr);
11747 					break;
11748 				}
11749 			} else if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
11750 				fdata = ha->flash_data_addr | faddr;
11751 				rval = ql_flash_access(ha,
11752 				    FAC_ERASE_SECTOR, fdata, fdata +
11753 				    rest_addr, 0);
11754 				if (rval != QL_SUCCESS) {
11755 					EL(ha, "erase sector status="
11756 					    "%xh, start=%xh, end=%xh"
11757 					    "\n", rval, fdata,
11758 					    fdata + rest_addr);
11759 					break;
11760 				}
11761 			} else {
11762 				fdata = (faddr & ~rest_addr) << 2;
11763 				fdata = (fdata & 0xff00) |
11764 				    (fdata << 16 & 0xff0000) |
11765 				    (fdata >> 16 & 0xff);
11766 
11767 				if (rest_addr == 0x1fff) {
11768 					/* 32kb sector block erase */
11769 					rval = ql_24xx_write_flash(ha,
11770 					    FLASH_CONF_ADDR | 0x0352,
11771 					    fdata);
11772 				} else {
11773 					/* 64kb sector block erase */
11774 					rval = ql_24xx_write_flash(ha,
11775 					    FLASH_CONF_ADDR | 0x03d8,
11776 					    fdata);
11777 				}
11778 				if (rval != QL_SUCCESS) {
11779 					EL(ha, "Unable to flash sector"
11780 					    ": address=%xh\n", faddr);
11781 					break;
11782 				}
11783 			}
11784 		}
11785 
11786 		/* Write data */
11787 		if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT) &&
11788 		    ((faddr & 0x3f) == 0)) {
11789 			/*
11790 			 * Limit write up to sector boundary.
11791 			 */
11792 			wc = ((~faddr & (rest_addr>>1)) + 1);
11793 
11794 			if (size - cnt < wc) {
11795 				wc = size - cnt;
11796 			}
11797 
11798 			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11799 			    (uint8_t *)dmabuf.bp, wc<<2,
11800 			    DDI_DEV_AUTOINCR);
11801 
11802 			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11803 			    faddr, dmabuf.cookie.dmac_laddress, wc);
11804 			if (rval != QL_SUCCESS) {
11805 				EL(ha, "unable to dma to flash "
11806 				    "address=%xh\n", faddr << 2);
11807 				break;
11808 			}
11809 
11810 			cnt += wc;
11811 			faddr += wc;
11812 			dp += wc << 2;
11813 		} else {
11814 			fdata = *dp++;
11815 			fdata |= *dp++ << 8;
11816 			fdata |= *dp++ << 16;
11817 			fdata |= *dp++ << 24;
11818 			rval = ql_24xx_write_flash(ha,
11819 			    ha->flash_data_addr | faddr, fdata);
11820 			if (rval != QL_SUCCESS) {
11821 				EL(ha, "Unable to program flash "
11822 				    "address=%xh data=%xh\n", faddr,
11823 				    *dp);
11824 				break;
11825 			}
11826 			cnt++;
11827 			faddr++;
11828 
11829 			/* Allow other system activity. */
11830 			if (cnt % 0x1000 == 0) {
11831 				ql_delay(ha, 10000);
11832 			}
11833 		}
11834 	}
11835 
11836 	ql_24xx_protect_flash(ha);
11837 
11838 	if (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT)) {
11839 		ql_free_phys(ha, &dmabuf);
11840 	}
11841 
11842 	if (rval != QL_SUCCESS) {
11843 		EL(ha, "failed=%xh\n", rval);
11844 	} else {
11845 		/*EMPTY*/
11846 		QL_PRINT_3(ha, "done\n");
11847 	}
11848 	return (rval);
11849 }
11850 
11851 /*
11852  * ql_24xx_read_flash
11853  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11854  *
11855  * Input:
11856  *	ha:	adapter state pointer.
11857  *	faddr:	NVRAM/FLASH address.
11858  *	bp:	data pointer.
11859  *
11860  * Returns:
11861  *	ql local function return status code.
11862  *
11863  * Context:
11864  *	Kernel context.
11865  */
11866 int
ql_24xx_read_flash(ql_adapter_state_t * vha,uint32_t faddr,uint32_t * bp)11867 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11868 {
11869 	uint32_t		timer;
11870 	int			rval = QL_SUCCESS;
11871 	ql_adapter_state_t	*ha = vha->pha;
11872 
11873 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
11874 		if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11875 			EL(ha, "8021 access error\n");
11876 		}
11877 		return (rval);
11878 	}
11879 
11880 	/* Clear access error flag */
11881 	WRT32_IO_REG(ha, ctrl_status,
11882 	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11883 
11884 	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11885 
11886 	/* Wait for READ cycle to complete. */
11887 	for (timer = 300000; timer; timer--) {
11888 		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11889 			break;
11890 		}
11891 		drv_usecwait(10);
11892 	}
11893 
11894 	if (timer == 0) {
11895 		EL(ha, "failed, timeout\n");
11896 		rval = QL_FUNCTION_TIMEOUT;
11897 	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11898 		EL(ha, "failed, access error\n");
11899 		rval = QL_FUNCTION_FAILED;
11900 	}
11901 
11902 	*bp = RD32_IO_REG(ha, flash_data);
11903 
11904 	return (rval);
11905 }
11906 
11907 /*
11908  * ql_24xx_write_flash
11909  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11910  *
11911  * Input:
11912  *	ha:	adapter state pointer.
11913  *	addr:	NVRAM/FLASH address.
11914  *	value:	data.
11915  *
11916  * Returns:
11917  *	ql local function return status code.
11918  *
11919  * Context:
11920  *	Kernel context.
11921  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* ISP8021 uses its own indirect ROM access routine. */
	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Load data first, then kick off the cycle via the address reg. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/*
				 * NOTE(review): 0x105 appears to be a flash
				 * status-register read and BIT_0 its
				 * write-in-progress bit; the interface cycle
				 * can finish while the part is still busy
				 * internally -- confirm against the flash
				 * part datasheet.
				 */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x105, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11969 /*
11970  * ql_24xx_unprotect_flash
11971  *	Enable writes
11972  *
11973  * Input:
11974  *	ha:	adapter state pointer.
11975  *
11976  * Returns:
11977  *	ql local function return status code.
11978  *
11979  * Context:
11980  *	Kernel context.
11981  */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata, timer;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * NOTE(review): the status register is written twice with
		 * the same enable bits; the first (discarded) write looks
		 * deliberate, mirroring the two-write sequence in
		 * ql_24xx_protect_flash -- confirm against the ISP8021
		 * programming reference.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			/*
			 * Poll (with delay(1) between tries) for the flash
			 * access semaphore, bailing out if an ISP abort is
			 * requested while we wait.
			 */
			for (timer = 3000; timer; timer--) {
				if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
					EL(ha, "ISP_ABORT_NEEDED done\n");
					return (QL_ABORTED);
				}
				rval = ql_flash_access(ha, FAC_SEMA_LOCK,
				    0, 0, NULL);
				if (rval == QL_SUCCESS ||
				    rval == QL_FUNCTION_TIMEOUT) {
					EL(ha, "lock status=%xh\n", rval);
					break;
				}
				delay(1);
			}

			/* Enable writes; release semaphore on failure. */
			if (rval == QL_SUCCESS &&
			    (rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0,
			    0, NULL)) != QL_SUCCESS) {
				EL(ha, "WRT_ENABLE status=%xh\n", rval);
				(void) ql_flash_access(ha, FAC_SEMA_UNLOCK,
				    0, 0, NULL);
			}
		} else {
			/* Firmware down: nothing to do through FAC. */
			rval = QL_SUCCESS;
		}
		QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
		return (rval);
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/* Sector/Block Protection Register Lock (SST, ST, ATMEL). */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	/*
	 * Remove block write protection (SST and ST)
	 * Global unprotect sectors (ATMEL).
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Unprotect the first 16 sector addresses individually. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		/*
		 * NOTE(review): additional fixed sector addresses; these
		 * appear to be part-specific protected regions -- confirm
		 * against the flash part datasheet.
		 */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(ha, "done\n");

	return (QL_SUCCESS);
}
12065 
12066 /*
12067  * ql_24xx_protect_flash
12068  *	Disable writes
12069  *
12070  * Input:
12071  *	ha:	adapter state pointer.
12072  *
12073  * Context:
12074  *	Kernel context.
12075  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata, timer;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * Two-step status register write: first the enable bits,
		 * then the disable (protect) bits.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return;
	}
	if (CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			/*
			 * Poll (with delay(1) between tries) for the flash
			 * access semaphore, bailing out on ISP abort.
			 */
			for (timer = 3000; timer; timer--) {
				if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
					EL(ha, "ISP_ABORT_NEEDED done\n");
					return;
				}
				rval = ql_flash_access(ha, FAC_SEMA_LOCK,
				    0, 0, NULL);
				if (rval == QL_SUCCESS ||
				    rval == QL_FUNCTION_TIMEOUT) {
					if (rval != QL_SUCCESS) {
						EL(ha, "lock status=%xh\n",
						    rval);
					}
					break;
				}
				delay(1);
			}

			/* Re-protect; release the semaphore on failure. */
			if (rval == QL_SUCCESS &&
			    (rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0,
			    0, NULL)) != QL_SUCCESS) {
				EL(ha, "protect status=%xh\n", rval);
				(void) ql_flash_access(ha, FAC_SEMA_UNLOCK, 0,
				    0, NULL);
			}
			QL_PRINT_3(ha, "CFG_FLASH_ACC_SUPPORT done\n");
			return;
		}
		/*
		 * NOTE(review): with FAC support but firmware down, control
		 * falls through to the register-based sequence below without
		 * the ISP_FLASH_ENABLE step taken in the else branch --
		 * confirm this is intentional.
		 */
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Re-protect the first 16 sector addresses individually. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.protect_sector_cmd, fdata);
		}
		/* Mirror of the fixed addresses in ql_24xx_unprotect_flash. */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);
	}

	/* Remove Sector Protection Registers Locked (SPRL) bit. */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_disable_bits);

	/* Disable flash write. */
	if (!CFG_IST(ha, CFG_FLASH_ACC_SUPPORT)) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(ha, "done\n");
}
12165 
12166 /*
12167  * ql_dump_firmware
12168  *	Save RISC code state information.
12169  *
12170  * Input:
12171  *	ha = adapter state pointer.
12172  *
12173  * Returns:
12174  *	QL local function return status code.
12175  *
12176  * Context:
12177  *	Kernel context.
12178  */
int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer = drv_usectohz(30000000);	/* 30 sec */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	QL_DUMP_LOCK(ha);

	/*
	 * Nothing to do if a dump is in progress or a previous valid dump
	 * has not been uploaded yet (don't overwrite it).
	 */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(ha, "done\n");
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Quiesce driver activity before touching the chip. */
	(void) ql_stall_driver(ha, 0);

	/* Dump firmware. */
	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/*
		 * NOTE(review): 8021 passes FALSE (no mailbox lock needed);
		 * presumably its dump path does not use the mailbox
		 * registers -- confirm.
		 */
		rval = ql_binary_fw_dump(ha, FALSE);
	} else {
		rval = ql_binary_fw_dump(ha, TRUE);
	}

	/* Do abort to force restart. */
	ql_restart_driver(ha);
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (DRIVER_SUSPENDED(ha)) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a dump was already present; also success. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
12244 
12245 /*
12246  * ql_binary_fw_dump
12247  *	Dumps binary data from firmware.
12248  *
12249  * Input:
12250  *	ha = adapter state pointer.
12251  *	lock_needed = mailbox lock needed.
12252  *
12253  * Returns:
12254  *	ql local function return status code.
12255  *
12256  * Context:
12257  *	Interrupt or Kernel context, no mailbox commands allowed.
12258  */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	uint32_t		cnt, index;
	clock_t			timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* A dump is being taken now; clear the outstanding request flag. */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~FW_DUMP_NEEDED;
	ADAPTER_STATE_UNLOCK(ha);

	/* ISP8021 dump requires a firmware-provided minidump capture size. */
	if (CFG_IST(ha, CFG_CTRL_82XX) && ha->md_capture_size == 0) {
		EL(ha, "8021 not supported\n");
		return (QL_NOT_SUPPORTED);
	}

	QL_DUMP_LOCK(ha);

	/* Don't clobber an in-progress or not-yet-uploaded dump. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Mark dump in progress. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP, NULL);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);
		timer = ((MAILBOX_TOV + 6) * drv_usectohz(1000000));

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer, TR_CLOCK_TICK) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/*
		 * Set busy flag.
		 * NOTE(review): MBX_BUSY_FLG is not cleared anywhere in
		 * this function; presumably the ql_reset_chip()/abort
		 * recovery path releases it -- confirm.
		 */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/*
	 * Size the dump buffer for the chip family.  For 25xx/81xx/83xx:
	 * cnt accumulates request ring bytes (up to two queues), index the
	 * response ring bytes across all response queues.
	 */
	if (CFG_IST(ha, CFG_CTRL_24XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
		    ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
		index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    cnt + index + ha->fw_ext_memory_size +
		    (ha->rsp_queues_cnt * 16));

	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
		    ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
		index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
		    cnt + index + ha->fw_ext_memory_size +
		    (ha->rsp_queues_cnt * 16));

	} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
		cnt = ha->rsp_queues_cnt > 1 ? ha->req_q[0]->req_ring.size +
		    ha->req_q[1]->req_ring.size : ha->req_q[0]->req_ring.size;
		index = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

		ha->ql_dump_size = (uint32_t)(sizeof (ql_83xx_fw_dump_t) +
		    cnt + index + ha->fw_ext_memory_size +
		    (ha->rsp_queues_cnt * 16));
	} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
		ha->ql_dump_size = ha->md_capture_size;
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	if (CFG_IST(ha, CFG_CTRL_27XX)) {
		/*
		 * NOTE(review): the 27xx path does not allocate ql_dump_ptr
		 * here; presumably ql_27xx_binary_fw_dump allocates its own
		 * buffer -- confirm.
		 */
		rval = ql_27xx_binary_fw_dump(ha);
	} else {
		if ((ha->ql_dump_ptr =
		    kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) == NULL) {
			rval = QL_MEMORY_ALLOC_FAILED;
		} else {
			/* Dispatch to the chip-specific dump routine. */
			if (CFG_IST(ha, CFG_CTRL_2363)) {
				rval = ql_2300_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
				rval = ql_81xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
				rval = ql_83xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
				rval = ql_25xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
				rval = ql_24xx_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
				/*
				 * NOTE(review): 8021 only resets firmware
				 * here; the minidump itself is presumably
				 * captured elsewhere -- confirm.
				 */
				(void) ql_8021_reset_fw(ha);
				rval = QL_SUCCESS;
			} else {
				rval = ql_2200_binary_fw_dump(ha,
				    ha->ql_dump_ptr);
			}
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	/* Update dump state: free partial buffer on failure. */
	if (rval != QL_SUCCESS) {
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
12435 
12436 /*
12437  * ql_ascii_fw_dump
12438  *	Converts firmware binary dump to ascii.
12439  *
12440  * Input:
12441  *	ha = adapter state pointer.
12442  *	bptr = buffer pointer.
12443  *
12444  * Returns:
12445  *	Amount of data buffer used.
12446  *
12447  * Context:
12448  *	Kernel context.
12449  */
12450 size_t
ql_ascii_fw_dump(ql_adapter_state_t * vha,caddr_t bufp)12451 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
12452 {
12453 	uint32_t		cnt;
12454 	caddr_t			bp;
12455 	int			mbox_cnt;
12456 	ql_adapter_state_t	*ha = vha->pha;
12457 	ql_fw_dump_t		*fw = ha->ql_dump_ptr;
12458 
12459 	if (CFG_IST(ha, CFG_CTRL_24XX)) {
12460 		return (ql_24xx_ascii_fw_dump(ha, bufp));
12461 	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
12462 		return (ql_25xx_ascii_fw_dump(ha, bufp));
12463 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
12464 		return (ql_81xx_ascii_fw_dump(ha, bufp));
12465 	} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
12466 		return (ql_8021_ascii_fw_dump(ha, bufp));
12467 	} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
12468 		return (ql_83xx_ascii_fw_dump(ha, bufp));
12469 	} else if (CFG_IST(ha, CFG_CTRL_27XX)) {
12470 		return (ql_27xx_ascii_fw_dump(ha, bufp));
12471 	}
12472 
12473 	QL_PRINT_3(ha, "started\n");
12474 
12475 	if (CFG_IST(ha, CFG_CTRL_23XX)) {
12476 		(void) sprintf(bufp, "\nISP 2300IP ");
12477 	} else if (CFG_IST(ha, CFG_CTRL_63XX)) {
12478 		(void) sprintf(bufp, "\nISP 2322/6322FLX ");
12479 	} else {
12480 		(void) sprintf(bufp, "\nISP 2200IP ");
12481 	}
12482 
12483 	bp = bufp + strlen(bufp);
12484 	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12485 	    ha->fw_major_version, ha->fw_minor_version,
12486 	    ha->fw_subminor_version);
12487 
12488 	(void) strcat(bufp, "\nPBIU Registers:");
12489 	bp = bufp + strlen(bufp);
12490 	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12491 		if (cnt % 8 == 0) {
12492 			*bp++ = '\n';
12493 		}
12494 		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
12495 		bp = bp + 6;
12496 	}
12497 
12498 	if (CFG_IST(ha, CFG_CTRL_2363)) {
12499 		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12500 		    "registers:");
12501 		bp = bufp + strlen(bufp);
12502 		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12503 			if (cnt % 8 == 0) {
12504 				*bp++ = '\n';
12505 			}
12506 			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
12507 			bp = bp + 6;
12508 		}
12509 	}
12510 
12511 	(void) strcat(bp, "\n\nMailbox Registers:");
12512 	bp = bufp + strlen(bufp);
12513 	mbox_cnt = CFG_IST(ha, CFG_CTRL_2363) ? 16 : 8;
12514 	for (cnt = 0; cnt < mbox_cnt; cnt++) {
12515 		if (cnt % 8 == 0) {
12516 			*bp++ = '\n';
12517 		}
12518 		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
12519 		bp = bp + 6;
12520 	}
12521 
12522 	if (CFG_IST(ha, CFG_CTRL_2363)) {
12523 		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12524 		bp = bufp + strlen(bufp);
12525 		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12526 			if (cnt % 8 == 0) {
12527 				*bp++ = '\n';
12528 			}
12529 			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
12530 			bp = bp + 6;
12531 		}
12532 	}
12533 
12534 	(void) strcat(bp, "\n\nDMA Registers:");
12535 	bp = bufp + strlen(bufp);
12536 	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12537 		if (cnt % 8 == 0) {
12538 			*bp++ = '\n';
12539 		}
12540 		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
12541 		bp = bp + 6;
12542 	}
12543 
12544 	(void) strcat(bp, "\n\nRISC Hardware Registers:");
12545 	bp = bufp + strlen(bufp);
12546 	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12547 		if (cnt % 8 == 0) {
12548 			*bp++ = '\n';
12549 		}
12550 		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
12551 		bp = bp + 6;
12552 	}
12553 
12554 	(void) strcat(bp, "\n\nRISC GP0 Registers:");
12555 	bp = bufp + strlen(bufp);
12556 	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12557 		if (cnt % 8 == 0) {
12558 			*bp++ = '\n';
12559 		}
12560 		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
12561 		bp = bp + 6;
12562 	}
12563 
12564 	(void) strcat(bp, "\n\nRISC GP1 Registers:");
12565 	bp = bufp + strlen(bufp);
12566 	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12567 		if (cnt % 8 == 0) {
12568 			*bp++ = '\n';
12569 		}
12570 		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
12571 		bp = bp + 6;
12572 	}
12573 
12574 	(void) strcat(bp, "\n\nRISC GP2 Registers:");
12575 	bp = bufp + strlen(bufp);
12576 	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12577 		if (cnt % 8 == 0) {
12578 			*bp++ = '\n';
12579 		}
12580 		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
12581 		bp = bp + 6;
12582 	}
12583 
12584 	(void) strcat(bp, "\n\nRISC GP3 Registers:");
12585 	bp = bufp + strlen(bufp);
12586 	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12587 		if (cnt % 8 == 0) {
12588 			*bp++ = '\n';
12589 		}
12590 		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
12591 		bp = bp + 6;
12592 	}
12593 
12594 	(void) strcat(bp, "\n\nRISC GP4 Registers:");
12595 	bp = bufp + strlen(bufp);
12596 	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12597 		if (cnt % 8 == 0) {
12598 			*bp++ = '\n';
12599 		}
12600 		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
12601 		bp = bp + 6;
12602 	}
12603 
12604 	(void) strcat(bp, "\n\nRISC GP5 Registers:");
12605 	bp = bufp + strlen(bufp);
12606 	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12607 		if (cnt % 8 == 0) {
12608 			*bp++ = '\n';
12609 		}
12610 		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
12611 		bp = bp + 6;
12612 	}
12613 
12614 	(void) strcat(bp, "\n\nRISC GP6 Registers:");
12615 	bp = bufp + strlen(bufp);
12616 	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12617 		if (cnt % 8 == 0) {
12618 			*bp++ = '\n';
12619 		}
12620 		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
12621 		bp = bp + 6;
12622 	}
12623 
12624 	(void) strcat(bp, "\n\nRISC GP7 Registers:");
12625 	bp = bufp + strlen(bufp);
12626 	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12627 		if (cnt % 8 == 0) {
12628 			*bp++ = '\n';
12629 		}
12630 		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
12631 		bp = bp + 6;
12632 	}
12633 
12634 	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12635 	bp = bufp + strlen(bufp);
12636 	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12637 		if (cnt == 16 && !CFG_IST(ha, CFG_CTRL_2363)) {
12638 			break;
12639 		}
12640 		if (cnt % 8 == 0) {
12641 			*bp++ = '\n';
12642 		}
12643 		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
12644 		bp = bp + 6;
12645 	}
12646 
12647 	(void) strcat(bp, "\n\nFPM B0 Registers:");
12648 	bp = bufp + strlen(bufp);
12649 	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12650 		if (cnt % 8 == 0) {
12651 			*bp++ = '\n';
12652 		}
12653 		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
12654 		bp = bp + 6;
12655 	}
12656 
12657 	(void) strcat(bp, "\n\nFPM B1 Registers:");
12658 	bp = bufp + strlen(bufp);
12659 	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12660 		if (cnt % 8 == 0) {
12661 			*bp++ = '\n';
12662 		}
12663 		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
12664 		bp = bp + 6;
12665 	}
12666 
12667 	if (CFG_IST(ha, CFG_CTRL_2363)) {
12668 		(void) strcat(bp, "\n\nCode RAM Dump:");
12669 		bp = bufp + strlen(bufp);
12670 		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12671 			if (cnt % 8 == 0) {
12672 				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12673 				bp = bp + 8;
12674 			}
12675 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12676 			bp = bp + 6;
12677 		}
12678 
12679 		(void) strcat(bp, "\n\nStack RAM Dump:");
12680 		bp = bufp + strlen(bufp);
12681 		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12682 			if (cnt % 8 == 0) {
12683 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12684 				bp = bp + 8;
12685 			}
12686 			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
12687 			bp = bp + 6;
12688 		}
12689 
12690 		(void) strcat(bp, "\n\nData RAM Dump:");
12691 		bp = bufp + strlen(bufp);
12692 		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12693 			if (cnt % 8 == 0) {
12694 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12695 				bp = bp + 8;
12696 			}
12697 			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
12698 			bp = bp + 6;
12699 		}
12700 	} else {
12701 		(void) strcat(bp, "\n\nRISC SRAM:");
12702 		bp = bufp + strlen(bufp);
12703 		for (cnt = 0; cnt < 0xf000; cnt++) {
12704 			if (cnt % 8 == 0) {
12705 				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12706 				bp = bp + 7;
12707 			}
12708 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
12709 			bp = bp + 6;
12710 		}
12711 	}
12712 
12713 	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12714 	bp += strlen(bp);
12715 
12716 	(void) sprintf(bp, "\n\nRequest Queue");
12717 	bp += strlen(bp);
12718 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12719 		if (cnt % 8 == 0) {
12720 			(void) sprintf(bp, "\n%08x: ", cnt);
12721 			bp += strlen(bp);
12722 		}
12723 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12724 		bp += strlen(bp);
12725 	}
12726 
12727 	(void) sprintf(bp, "\n\nResponse Queue");
12728 	bp += strlen(bp);
12729 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12730 		if (cnt % 8 == 0) {
12731 			(void) sprintf(bp, "\n%08x: ", cnt);
12732 			bp += strlen(bp);
12733 		}
12734 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12735 		bp += strlen(bp);
12736 	}
12737 
12738 	(void) sprintf(bp, "\n");
12739 
12740 	QL_PRINT_10(ha, "done, size=0x%x\n", strlen(bufp));
12741 
12742 	return (strlen(bufp));
12743 }
12744 
12745 /*
12746  * ql_24xx_ascii_fw_dump
12747  *	Converts ISP24xx firmware binary dump to ascii.
12748  *
12749  * Input:
12750  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12752  *
12753  * Returns:
12754  *	Amount of data buffer used.
12755  *
12756  * Context:
12757  *	Kernel context.
12758  */
static size_t
ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp = bufp;	/* running end-of-string cursor */
	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;

	QL_PRINT_3(ha, "started\n");

	/*
	 * The ASCII dump is built by appending fixed-width fields at bp.
	 * The hard-coded cursor increments below are tied to the sprintf
	 * format widths: "%08x " emits 9 characters, "%04x " emits 5,
	 * and "\n%08x: " emits 11.
	 *
	 * NOTE(review): no bounds checking is performed against the end
	 * of bufp anywhere in this routine; the caller is assumed to
	 * have sized the buffer for a complete ASCII dump -- confirm
	 * against the allocation site.
	 */
	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version, ha->fw_attributes);
	bp += strlen(bp);

	/*
	 * bp is intentionally not advanced after this sprintf: the
	 * strcat below appends the next section header to the same
	 * string, and the following bp += strlen(bp) accounts for both
	 * the HCCR text and the header at once.
	 */
	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);

	(void) strcat(bp, "\nHost Interface Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			/*
			 * Start a new output line every 8 values; this
			 * writes '\n' plus a NUL, and the NUL is
			 * overwritten by the next sprintf.
			 */
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
		bp += 9;
	}

	/* Mailbox registers are 16-bit values, 16 per output line. */
	(void) sprintf(bp, "\n\nMailbox Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
		if (cnt % 16 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
		bp += 5;
	}

	(void) sprintf(bp, "\n\nXSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nCommand DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRISC GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
		bp += 9;
	}

	/*
	 * bufp + strlen(bufp) is the same location bp already points at
	 * (bp always sits on the current terminating NUL); this line is
	 * merely stylistically inconsistent with its neighbors.
	 */
	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nLMC Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFPM Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFB Hardware Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}

		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
		bp += 9;
	}

	/* RAM sections prefix each 8-value line with the word address. */
	(void) sprintf(bp, "\n\nCode RAM");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
			bp += 11;
		}

		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nExternal Memory");
	bp += strlen(bp);
	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
			bp += 11;
		}
		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
	bp += strlen(bp);

	(void) sprintf(bp, "\n\nRequest Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
		bp += strlen(bp);
	}

	(void) sprintf(bp, "\n\nResponse Queue");
	bp += strlen(bp);
	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt);
			bp += strlen(bp);
		}
		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
		bp += strlen(bp);
	}

	/* Optional firmware extended trace buffer, if it was enabled. */
	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
	    (ha->fwexttracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;

		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
			bp += 9;
		}
	}

	/* Optional FC event trace buffer, if it was enabled. */
	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
	    (ha->fwfcetracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;

		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
			bp += 9;
		}
	}

	(void) sprintf(bp, "\n\n");
	bp += strlen(bp);

	/* Total bytes written is the distance the cursor traveled. */
	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);

	QL_PRINT_10(ha, "done=%xh\n", cnt);

	return (cnt);
}
13158 
13159 /*
13160  * ql_25xx_ascii_fw_dump
13161  *	Converts ISP25xx firmware binary dump to ascii.
13162  *
13163  * Input:
13164  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
13166  *
13167  * Returns:
13168  *	Amount of data buffer used.
13169  *
13170  * Context:
13171  *	Kernel context.
13172  */
static size_t
ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
{
	uint32_t		cnt, cnt1, *dp, *dp2;
	caddr_t			bp = bufp;	/* running end-of-string cursor */
	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;

	QL_PRINT_3(ha, "started\n");

	/*
	 * The ASCII dump is built by appending fixed-width fields at bp.
	 * The hard-coded cursor increments below are tied to the sprintf
	 * format widths: "%08x " emits 9 characters, "%04x " emits 5,
	 * and "\n%08x: " emits 11.
	 *
	 * NOTE(review): no bounds checking is performed against the end
	 * of bufp anywhere in this routine; the caller is assumed to
	 * have sized the buffer for a complete ASCII dump -- confirm
	 * against the allocation site.
	 */
	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version, ha->fw_attributes);
	bp += strlen(bp);

	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
	bp += strlen(bp);

	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
	bp += strlen(bp);

	(void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
	    fw->aer_ues);
	bp += strlen(bp);

	(void) sprintf(bp, "\nHostRisc Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			/*
			 * Start a new output line every 8 values; this
			 * writes '\n' plus a NUL, and the NUL is
			 * overwritten by the next sprintf.
			 */
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nPCIe Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
		bp += 9;
	}

	(void) strcat(bp, "\n\nHost Interface Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
		bp += 9;
	}

	/*
	 * bufp + strlen(bufp) is the same location bp already points at
	 * (bp always sits on the current terminating NUL); these two
	 * lines are merely stylistically inconsistent with the rest.
	 */
	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
	    fw->risc_io);
	bp += strlen(bp);

	/* Mailbox registers are 16-bit values, 16 per output line. */
	(void) sprintf(bp, "\n\nMailbox Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
		if (cnt % 16 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
		bp += 5;
	}

	(void) sprintf(bp, "\n\nXSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nASEQ GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nASEQ-0 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nASEQ-1 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nASEQ-2 Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nCommand DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nRISC GP Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nLMC Registers");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFPM Hardware Registers");
	bp += strlen(bp);
	cnt1 = sizeof (fw->fpm_hdw_reg);
	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
		bp += 9;
	}

	(void) sprintf(bp, "\n\nFB Hardware Registers");
	bp += strlen(bp);
	cnt1 = sizeof (fw->fb_hdw_reg);
	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp++, "\n");
		}
		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
		bp += 9;
	}

	/* RAM sections prefix each 8-value line with the word address. */
	(void) sprintf(bp, "\n\nCode RAM");
	bp += strlen(bp);
	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
			bp += 11;
		}
		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
		bp += 9;
	}

	/*
	 * NOTE(review): external memory is read from req_rsp_ext_mem
	 * past the request/response queue images and what appears to be
	 * a per-response-queue 16-byte header area -- the exact layout
	 * is defined by the dump capture code; confirm against the
	 * routine that fills ql_25xx_fw_dump_t.
	 */
	(void) sprintf(bp, "\n\nExternal Memory");
	bp += strlen(bp);
	dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
	    fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
		if (cnt % 8 == 0) {
			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
			bp += 11;
		}
		(void) sprintf(bp, "%08x ", *dp++);
		bp += 9;
	}

	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
	bp += strlen(bp);

	/*
	 * Dump the (up to two) request queue images.  dp is uint32_t *,
	 * so "+ (ha->rsp_queues_cnt * 4)" skips 16 bytes per response
	 * queue -- presumably the same per-queue header area skipped
	 * above; verify against the dump capture code.  A queue whose
	 * image is entirely zero is scanned and skipped without output.
	 */
	dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
	for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
		dp2 = dp;
		for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
			if (*dp2++) {
				break;
			}
		}
		if (cnt1 == fw->req_q_size[cnt] / 4) {
			/* all-zero queue: advance past it and move on */
			dp = dp2;
			continue;
		}
		(void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
		bp += strlen(bp);
		for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
			if (cnt1 % 8 == 0) {
				(void) sprintf(bp, "\n%08x: ", cnt1);
				bp += strlen(bp);
			}
			(void) sprintf(bp, "%08x ", *dp++);
			bp += strlen(bp);
		}
	}

	/*
	 * Dump each response queue image (capped at 16 queues), again
	 * skipping queues whose contents are entirely zero.
	 */
	for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
		dp2 = dp;
		for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
		    cnt1++) {
			if (*dp2++) {
				break;
			}
		}
		if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
			dp = dp2;
			continue;
		}
		(void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
		bp += strlen(bp);
		for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
		    cnt1++) {
			if (cnt1 % 8 == 0) {
				(void) sprintf(bp, "\n%08x: ", cnt1);
				bp += strlen(bp);
			}
			(void) sprintf(bp, "%08x ", *dp++);
			bp += strlen(bp);
		}
	}

	/* Optional firmware extended trace buffer, if it was enabled. */
	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
	    (ha->fwexttracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;

		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
			bp += 9;
		}
	}

	/* Optional FC event trace buffer, if it was enabled. */
	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
	    (ha->fwfcetracebuf.bp != NULL)) {
		uint32_t cnt_b = 0;
		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;

		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
		bp += strlen(bp);
		/* show data address as a byte address, data as long words */
		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
			cnt_b = cnt * 4;
			if (cnt_b % 32 == 0) {
				(void) sprintf(bp, "\n%08x: ",
				    (int)(w64 + cnt_b));
				bp += 11;
			}
			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
			bp += 9;
		}
	}

	(void) sprintf(bp, "\n\n");
	bp += strlen(bp);

	/* Total bytes written is the distance the cursor traveled. */
	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);

	QL_PRINT_10(ha, "done=%xh\n", cnt);

	return (cnt);
}
13648 
13649 /*
13650  * ql_81xx_ascii_fw_dump
13651  *	Converts ISP81xx firmware binary dump to ascii.
13652  *
13653  * Input:
13654  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
13656  *
13657  * Returns:
13658  *	Amount of data buffer used.
13659  *
13660  * Context:
13661  *	Kernel context.
13662  */
13663 static size_t
ql_81xx_ascii_fw_dump(ql_adapter_state_t * ha,caddr_t bufp)13664 ql_81xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
13665 {
13666 	uint32_t		cnt, cnt1, *dp, *dp2;
13667 	caddr_t			bp = bufp;
13668 	ql_81xx_fw_dump_t	*fw = ha->ql_dump_ptr;
13669 
13670 	QL_PRINT_3(ha, "started\n");
13671 
13672 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
13673 	    ha->fw_major_version, ha->fw_minor_version,
13674 	    ha->fw_subminor_version, ha->fw_attributes);
13675 	bp += strlen(bp);
13676 
13677 	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
13678 	bp += strlen(bp);
13679 
13680 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
13681 	bp += strlen(bp);
13682 
13683 	(void) sprintf(bp, "\nAER Uncorrectable Error Status Register\n%08x\n",
13684 	    fw->aer_ues);
13685 	bp += strlen(bp);
13686 
13687 	(void) sprintf(bp, "\nHostRisc Registers");
13688 	bp += strlen(bp);
13689 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
13690 		if (cnt % 8 == 0) {
13691 			(void) sprintf(bp++, "\n");
13692 		}
13693 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
13694 		bp += 9;
13695 	}
13696 
13697 	(void) sprintf(bp, "\n\nPCIe Registers");
13698 	bp += strlen(bp);
13699 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
13700 		if (cnt % 8 == 0) {
13701 			(void) sprintf(bp++, "\n");
13702 		}
13703 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
13704 		bp += 9;
13705 	}
13706 
13707 	(void) strcat(bp, "\n\nHost Interface Registers");
13708 	bp += strlen(bp);
13709 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
13710 		if (cnt % 8 == 0) {
13711 			(void) sprintf(bp++, "\n");
13712 		}
13713 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
13714 		bp += 9;
13715 	}
13716 
13717 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
13718 	bp += strlen(bp);
13719 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
13720 		if (cnt % 8 == 0) {
13721 			(void) sprintf(bp++, "\n");
13722 		}
13723 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
13724 		bp += 9;
13725 	}
13726 
13727 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
13728 	    fw->risc_io);
13729 	bp += strlen(bp);
13730 
13731 	(void) sprintf(bp, "\n\nMailbox Registers");
13732 	bp += strlen(bp);
13733 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
13734 		if (cnt % 16 == 0) {
13735 			(void) sprintf(bp++, "\n");
13736 		}
13737 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
13738 		bp += 5;
13739 	}
13740 
13741 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
13742 	bp += strlen(bp);
13743 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
13744 		if (cnt % 8 == 0) {
13745 			(void) sprintf(bp++, "\n");
13746 		}
13747 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
13748 		bp += 9;
13749 	}
13750 
13751 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
13752 	bp += strlen(bp);
13753 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
13754 		if (cnt % 8 == 0) {
13755 			(void) sprintf(bp++, "\n");
13756 		}
13757 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
13758 		bp += 9;
13759 	}
13760 
13761 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
13762 	bp += strlen(bp);
13763 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
13764 		if (cnt % 8 == 0) {
13765 			(void) sprintf(bp++, "\n");
13766 		}
13767 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
13768 		bp += 9;
13769 	}
13770 
13771 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
13772 	bp += strlen(bp);
13773 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
13774 		if (cnt % 8 == 0) {
13775 			(void) sprintf(bp++, "\n");
13776 		}
13777 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
13778 		bp += 9;
13779 	}
13780 
13781 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
13782 	bp += strlen(bp);
13783 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
13784 		if (cnt % 8 == 0) {
13785 			(void) sprintf(bp++, "\n");
13786 		}
13787 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
13788 		bp += 9;
13789 	}
13790 
13791 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
13792 	bp += strlen(bp);
13793 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
13794 		if (cnt % 8 == 0) {
13795 			(void) sprintf(bp++, "\n");
13796 		}
13797 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
13798 		bp += 9;
13799 	}
13800 
13801 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
13802 	bp += strlen(bp);
13803 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
13804 		if (cnt % 8 == 0) {
13805 			(void) sprintf(bp++, "\n");
13806 		}
13807 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
13808 		bp += 9;
13809 	}
13810 
13811 	(void) sprintf(bp, "\n\nASEQ GP Registers");
13812 	bp += strlen(bp);
13813 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
13814 		if (cnt % 8 == 0) {
13815 			(void) sprintf(bp++, "\n");
13816 		}
13817 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
13818 		bp += 9;
13819 	}
13820 
13821 	(void) sprintf(bp, "\n\nASEQ-0 Registers");
13822 	bp += strlen(bp);
13823 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
13824 		if (cnt % 8 == 0) {
13825 			(void) sprintf(bp++, "\n");
13826 		}
13827 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
13828 		bp += 9;
13829 	}
13830 
13831 	(void) sprintf(bp, "\n\nASEQ-1 Registers");
13832 	bp += strlen(bp);
13833 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
13834 		if (cnt % 8 == 0) {
13835 			(void) sprintf(bp++, "\n");
13836 		}
13837 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
13838 		bp += 9;
13839 	}
13840 
13841 	(void) sprintf(bp, "\n\nASEQ-2 Registers");
13842 	bp += strlen(bp);
13843 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
13844 		if (cnt % 8 == 0) {
13845 			(void) sprintf(bp++, "\n");
13846 		}
13847 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
13848 		bp += 9;
13849 	}
13850 
13851 	(void) sprintf(bp, "\n\nCommand DMA Registers");
13852 	bp += strlen(bp);
13853 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
13854 		if (cnt % 8 == 0) {
13855 			(void) sprintf(bp++, "\n");
13856 		}
13857 		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
13858 		bp += 9;
13859 	}
13860 
13861 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
13862 	bp += strlen(bp);
13863 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
13864 		if (cnt % 8 == 0) {
13865 			(void) sprintf(bp++, "\n");
13866 		}
13867 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
13868 		bp += 9;
13869 	}
13870 
13871 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
13872 	bp += strlen(bp);
13873 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
13874 		if (cnt % 8 == 0) {
13875 			(void) sprintf(bp++, "\n");
13876 		}
13877 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
13878 		bp += 9;
13879 	}
13880 
13881 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
13882 	bp += strlen(bp);
13883 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
13884 		if (cnt % 8 == 0) {
13885 			(void) sprintf(bp++, "\n");
13886 		}
13887 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
13888 		bp += 9;
13889 	}
13890 
13891 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
13892 	bp += strlen(bp);
13893 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
13894 		if (cnt % 8 == 0) {
13895 			(void) sprintf(bp++, "\n");
13896 		}
13897 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
13898 		bp += 9;
13899 	}
13900 
13901 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
13902 	bp += strlen(bp);
13903 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
13904 		if (cnt % 8 == 0) {
13905 			(void) sprintf(bp++, "\n");
13906 		}
13907 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
13908 		bp += 9;
13909 	}
13910 
13911 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
13912 	bp += strlen(bp);
13913 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
13914 		if (cnt % 8 == 0) {
13915 			(void) sprintf(bp++, "\n");
13916 		}
13917 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
13918 		bp += 9;
13919 	}
13920 
13921 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
13922 	bp += strlen(bp);
13923 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
13924 		if (cnt % 8 == 0) {
13925 			(void) sprintf(bp++, "\n");
13926 		}
13927 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
13928 		bp += 9;
13929 	}
13930 
13931 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
13932 	bp += strlen(bp);
13933 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
13934 		if (cnt % 8 == 0) {
13935 			(void) sprintf(bp++, "\n");
13936 		}
13937 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
13938 		bp += 9;
13939 	}
13940 
13941 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
13942 	bp += strlen(bp);
13943 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
13944 		if (cnt % 8 == 0) {
13945 			(void) sprintf(bp++, "\n");
13946 		}
13947 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
13948 		bp += 9;
13949 	}
13950 
13951 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
13952 	bp += strlen(bp);
13953 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
13954 		if (cnt % 8 == 0) {
13955 			(void) sprintf(bp++, "\n");
13956 		}
13957 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
13958 		bp += 9;
13959 	}
13960 
13961 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
13962 	bp += strlen(bp);
13963 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
13964 		if (cnt % 8 == 0) {
13965 			(void) sprintf(bp++, "\n");
13966 		}
13967 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
13968 		bp += 9;
13969 	}
13970 
13971 	(void) sprintf(bp, "\n\nRISC GP Registers");
13972 	bp += strlen(bp);
13973 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13974 		if (cnt % 8 == 0) {
13975 			(void) sprintf(bp++, "\n");
13976 		}
13977 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13978 		bp += 9;
13979 	}
13980 
13981 	(void) sprintf(bp, "\n\nLMC Registers");
13982 	bp += strlen(bp);
13983 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13984 		if (cnt % 8 == 0) {
13985 			(void) sprintf(bp++, "\n");
13986 		}
13987 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13988 		bp += 9;
13989 	}
13990 
13991 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
13992 	bp += strlen(bp);
13993 	cnt1 = sizeof (fw->fpm_hdw_reg);
13994 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13995 		if (cnt % 8 == 0) {
13996 			(void) sprintf(bp++, "\n");
13997 		}
13998 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13999 		bp += 9;
14000 	}
14001 
14002 	(void) sprintf(bp, "\n\nFB Hardware Registers");
14003 	bp += strlen(bp);
14004 	cnt1 = sizeof (fw->fb_hdw_reg);
14005 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
14006 		if (cnt % 8 == 0) {
14007 			(void) sprintf(bp++, "\n");
14008 		}
14009 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
14010 		bp += 9;
14011 	}
14012 
14013 	(void) sprintf(bp, "\n\nCode RAM");
14014 	bp += strlen(bp);
14015 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
14016 		if (cnt % 8 == 0) {
14017 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
14018 			bp += 11;
14019 		}
14020 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
14021 		bp += 9;
14022 	}
14023 
14024 	(void) sprintf(bp, "\n\nExternal Memory");
14025 	bp += strlen(bp);
14026 	dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
14027 	    fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
14028 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
14029 		if (cnt % 8 == 0) {
14030 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
14031 			bp += 11;
14032 		}
14033 		(void) sprintf(bp, "%08x ", *dp++);
14034 		bp += 9;
14035 	}
14036 
14037 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
14038 	bp += strlen(bp);
14039 
14040 	dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
14041 	for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
14042 		dp2 = dp;
14043 		for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14044 			if (*dp2++) {
14045 				break;
14046 			}
14047 		}
14048 		if (cnt1 == fw->req_q_size[cnt] / 4) {
14049 			dp = dp2;
14050 			continue;
14051 		}
14052 		(void) sprintf(bp, "\n\nRequest Queue\nQueue %d:", cnt);
14053 		bp += strlen(bp);
14054 		for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
14055 			if (cnt1 % 8 == 0) {
14056 				(void) sprintf(bp, "\n%08x: ", cnt1);
14057 				bp += strlen(bp);
14058 			}
14059 			(void) sprintf(bp, "%08x ", *dp++);
14060 			bp += strlen(bp);
14061 		}
14062 	}
14063 
14064 	for (cnt = 0; cnt < ha->rsp_queues_cnt && cnt < 16; cnt++) {
14065 		dp2 = dp;
14066 		for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14067 		    cnt1++) {
14068 			if (*dp2++) {
14069 				break;
14070 			}
14071 		}
14072 		if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
14073 			dp = dp2;
14074 			continue;
14075 		}
14076 		(void) sprintf(bp, "\n\nResponse Queue\nQueue %d:", cnt);
14077 		bp += strlen(bp);
14078 		for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
14079 		    cnt1++) {
14080 			if (cnt1 % 8 == 0) {
14081 				(void) sprintf(bp, "\n%08x: ", cnt1);
14082 				bp += strlen(bp);
14083 			}
14084 			(void) sprintf(bp, "%08x ", *dp++);
14085 			bp += strlen(bp);
14086 		}
14087 	}
14088 
14089 	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14090 	    (ha->fwexttracebuf.bp != NULL)) {
14091 		uint32_t cnt_b = 0;
14092 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
14093 
14094 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
14095 		bp += strlen(bp);
14096 		/* show data address as a byte address, data as long words */
14097 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14098 			cnt_b = cnt * 4;
14099 			if (cnt_b % 32 == 0) {
14100 				(void) sprintf(bp, "\n%08x: ",
14101 				    (int)(w64 + cnt_b));
14102 				bp += 11;
14103 			}
14104 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
14105 			bp += 9;
14106 		}
14107 	}
14108 
14109 	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14110 	    (ha->fwfcetracebuf.bp != NULL)) {
14111 		uint32_t cnt_b = 0;
14112 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
14113 
14114 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
14115 		bp += strlen(bp);
14116 		/* show data address as a byte address, data as long words */
14117 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14118 			cnt_b = cnt * 4;
14119 			if (cnt_b % 32 == 0) {
14120 				(void) sprintf(bp, "\n%08x: ",
14121 				    (int)(w64 + cnt_b));
14122 				bp += 11;
14123 			}
14124 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
14125 			bp += 9;
14126 		}
14127 	}
14128 
14129 	(void) sprintf(bp, "\n\n");
14130 	bp += strlen(bp);
14131 
14132 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
14133 
14134 	QL_PRINT_10(ha, "done=%xh\n", cnt);
14135 
14136 	return (cnt);
14137 }
14138 
14139 /*
14140  * ql_8021_ascii_fw_dump
14141  *	Converts ISP8021 firmware binary dump to ascii.
14142  *
14143  * Input:
14144  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
14146  *
14147  * Returns:
14148  *	Amount of data buffer used.
14149  *
14150  * Context:
14151  *	Kernel context.
14152  */
14153 static size_t
ql_8021_ascii_fw_dump(ql_adapter_state_t * ha,caddr_t bufp)14154 ql_8021_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
14155 {
14156 	uint32_t	cnt;
14157 	caddr_t		bp = bufp;
14158 	uint8_t		*fw = ha->ql_dump_ptr;
14159 
14160 	/*
14161 	 * 2 ascii bytes per binary byte + a space and
14162 	 * a newline every 16 binary bytes
14163 	 */
14164 	cnt = 0;
14165 	while (cnt < ha->ql_dump_size) {
14166 		(void) sprintf(bp, "%02x ", *fw++);
14167 		bp += strlen(bp);
14168 		if (++cnt % 16 == 0) {
14169 			(void) sprintf(bp, "\n");
14170 			bp += strlen(bp);
14171 		}
14172 	}
14173 	if (cnt % 16 != 0) {
14174 		(void) sprintf(bp, "\n");
14175 		bp += strlen(bp);
14176 	}
14177 	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
14178 	QL_PRINT_10(ha, "done=%xh\n", cnt);
14179 	return (cnt);
14180 }
14181 
14182 /*
14183  * ql_2200_binary_fw_dump
14184  *
14185  * Input:
14186  *	ha:	adapter state pointer.
14187  *	fw:	firmware dump context pointer.
14188  *
14189  * Returns:
14190  *	ql local function return status code.
14191  *
14192  * Context:
14193  *	Interrupt or Kernel context, no mailbox commands allowed.
14194  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;	/* holds mailbox status from SRAM reads */
	int		rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/* Disable ISP interrupts. */
	ql_disable_intr(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 times (MILLISEC wait each) for pause to latch. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/* Capture the host-visible register banks. */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/*
		 * Each RISC GP register bank is selected via pcr and then
		 * read through the shared window at iobase + 0x80.
		 */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Get RISC SRAM. */
		/*
		 * Read 0xf000 words starting at 0x1000 one word at a time
		 * with the READ_RAM_WORD mailbox command, polling for the
		 * semaphore/interrupt that signals each completion.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (INTERRUPT_PENDING(ha)) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox_out[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			/* Mailbox status tells whether the word read worked. */
			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
14389 
14390 /*
14391  * ql_2300_binary_fw_dump
14392  *
14393  * Input:
14394  *	ha:	adapter state pointer.
14395  *	fw:	firmware dump context pointer.
14396  *
14397  * Returns:
14398  *	ql local function return status code.
14399  *
14400  * Context:
14401  *	Interrupt or Kernel context, no mailbox commands allowed.
14402  */
static int
ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	clock_t	timer;
	int	rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/* Disable ISP interrupts. */
	ql_disable_intr(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll up to 30000 times (MILLISEC wait each) for pause to latch. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/* Capture the host-visible register banks. */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
		    sizeof (fw->risc_host_reg) / 2, 16);

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
		    sizeof (fw->mailbox_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x40);
		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
		    sizeof (fw->resp_dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x50);
		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/*
		 * Each RISC GP register bank is selected via pcr (banks
		 * step by 0x200 on the 2300) and read through the shared
		 * window at iobase + 0x80.
		 */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2800);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2A00);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2C00);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2E00);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_ROM_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	/* Get RISC SRAM. */
	/* SRAM regions are fetched via ql_read_risc_ram(addr, words, buf). */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
	}
	/* Get STACK SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
	}
	/* Get DATA SRAM. */
	if (rval == QL_SUCCESS) {
		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
14551 
14552 /*
14553  * ql_24xx_binary_fw_dump
14554  *
14555  * Input:
14556  *	ha:	adapter state pointer.
14557  *	fw:	firmware dump context pointer.
14558  *
14559  * Returns:
14560  *	ql local function return status code.
14561  *
14562  * Context:
14563  *	Interrupt or Kernel context, no mailbox commands allowed.
14564  */
14565 static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/* Capture HCCR before the pause request modifies it. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		ql_disable_intr(ha);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/*
		 * Poll for the pause acknowledgment; 30000 iterations of
		 * 100us gives a 3 second timeout.
		 */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	/*
	 * Register capture below relies on the banked-window scheme:
	 * writing io_base_addr selects which internal register block is
	 * visible in the PCI window (offsets 0xC0-0xFF), and ql_read_regs()
	 * returns a pointer just past the data it stored, so consecutive
	 * 16-dword banks can be appended into one dump array.
	 */
	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		ql_disable_intr(ha);

		/* Shadow registers. */

		/*
		 * Each shadow register is read by writing a select code
		 * (0xB0n00000) at window offset 0xF0 and reading the value
		 * back at offset 0xFC.
		 */
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers (16-bit, at PCI offset 0x80). */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0: 8 dwords at 0xC0 plus 7 more at 0xE4. */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/*
	 * Get the request queue.  Dwords are byte-swapped into the
	 * little-endian on-disk dump format.
	 */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->req_q[0]->req_ring.bp;

		/* Sync DMA buffer so the CPU sees the device's data. */
		(void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle,
		    0, sizeof (fw->req_q), DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 =
		    (uint32_t *)ha->rsp_queues[0]->rsp_ring.bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->rsp_queues[0]->rsp_ring.dma_handle,
		    0, sizeof (fw->rsp_q), DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/*
	 * Reset RISC.  Done unconditionally; RISC RAM below can only be
	 * read after the chip has been reset.
	 */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM (RISC address 0x20000). */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory (RISC address 0x100000). */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/*
	 * Get the extended trace buffer, if the feature is enabled and the
	 * buffer was allocated.  Trace data is copied as-is (no byte swap).
	 */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer, same conditions as above. */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
14983 
14984 /*
14985  * ql_25xx_binary_fw_dump
14986  *
14987  * Input:
14988  *	ha:	adapter state pointer.
14989  *	fw:	firmware dump context pointer.
14990  *
14991  * Returns:
14992  *	ql local function return status code.
14993  *
14994  * Context:
14995  *	Interrupt or Kernel context, no mailbox commands allowed.
14996  */
14997 static int
ql_25xx_binary_fw_dump(ql_adapter_state_t * ha,ql_25xx_fw_dump_t * fw)14998 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
14999 {
15000 	uint32_t	*reg32,	cnt, *w32ptr, index, *dp;
15001 	void		*bp;
15002 	clock_t		timer;
15003 	int		rval = QL_SUCCESS;
15004 
15005 	QL_PRINT_3(ha, "started\n");
15006 
15007 	fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
15008 	if (ha->req_q[1] != NULL) {
15009 		fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
15010 	}
15011 	fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
15012 
15013 	fw->hccr = RD32_IO_REG(ha, hccr);
15014 	fw->r2h_status = RD32_IO_REG(ha, risc2host);
15015 	fw->aer_ues = ql_pci_config_get32(ha, 0x104);
15016 
15017 	/* Pause RISC. */
15018 	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
15019 		/* Disable ISP interrupts. */
15020 		ql_disable_intr(ha);
15021 
15022 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
15023 		for (timer = 30000;
15024 		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
15025 		    rval == QL_SUCCESS; timer--) {
15026 			if (timer) {
15027 				drv_usecwait(100);
15028 				if (timer % 10000 == 0) {
15029 					EL(ha, "risc pause %d\n", timer);
15030 				}
15031 			} else {
15032 				EL(ha, "risc pause timeout\n");
15033 				rval = QL_FUNCTION_TIMEOUT;
15034 			}
15035 		}
15036 	}
15037 
15038 	if (rval == QL_SUCCESS) {
15039 
15040 		/* Host Interface registers */
15041 
15042 		/* HostRisc registers. */
15043 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
15044 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
15045 		    16, 32);
15046 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
15047 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15048 
15049 		/* PCIe registers. */
15050 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
15051 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
15052 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
15053 		    3, 32);
15054 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
15055 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
15056 
15057 		/* Host interface registers. */
15058 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
15059 		    sizeof (fw->host_reg) / 4, 32);
15060 
15061 		/* Disable ISP interrupts. */
15062 		ql_disable_intr(ha);
15063 
15064 		/* Shadow registers. */
15065 
15066 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
15067 		RD32_IO_REG(ha, io_base_addr);
15068 
15069 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15070 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
15071 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15072 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
15073 
15074 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15075 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
15076 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15077 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
15078 
15079 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15080 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
15081 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15082 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
15083 
15084 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15085 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
15086 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15087 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
15088 
15089 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15090 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
15091 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15092 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
15093 
15094 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15095 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
15096 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15097 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
15098 
15099 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15100 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
15101 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15102 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
15103 
15104 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15105 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
15106 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15107 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
15108 
15109 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15110 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
15111 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15112 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
15113 
15114 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15115 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
15116 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15117 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
15118 
15119 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
15120 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
15121 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
15122 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
15123 
15124 		/* RISC I/O register. */
15125 
15126 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
15127 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
15128 		    1, 32);
15129 
15130 		/* Mailbox registers. */
15131 
15132 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
15133 		    sizeof (fw->mailbox_reg) / 2, 16);
15134 
15135 		/* Transfer sequence registers. */
15136 
15137 		/* XSEQ GP */
15138 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
15139 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
15140 		    16, 32);
15141 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
15142 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15143 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
15144 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15145 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
15146 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15147 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
15148 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15149 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
15150 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15151 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
15152 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15153 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
15154 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15155 
15156 		/* XSEQ-0 */
15157 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
15158 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
15159 		    16, 32);
15160 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
15161 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15162 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
15163 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15164 
15165 		/* XSEQ-1 */
15166 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
15167 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
15168 		    16, 32);
15169 
15170 		/* Receive sequence registers. */
15171 
15172 		/* RSEQ GP */
15173 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
15174 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
15175 		    16, 32);
15176 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
15177 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15178 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
15179 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15180 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
15181 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15182 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
15183 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15184 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
15185 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15186 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
15187 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15188 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
15189 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15190 
15191 		/* RSEQ-0 */
15192 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
15193 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
15194 		    16, 32);
15195 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
15196 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15197 
15198 		/* RSEQ-1 */
15199 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
15200 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
15201 		    sizeof (fw->rseq_1_reg) / 4, 32);
15202 
15203 		/* RSEQ-2 */
15204 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
15205 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
15206 		    sizeof (fw->rseq_2_reg) / 4, 32);
15207 
15208 		/* Auxiliary sequencer registers. */
15209 
15210 		/* ASEQ GP */
15211 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
15212 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
15213 		    16, 32);
15214 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
15215 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15216 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
15217 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15218 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
15219 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15220 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
15221 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15222 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
15223 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15224 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
15225 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15226 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
15227 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15228 
15229 		/* ASEQ-0 */
15230 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
15231 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
15232 		    16, 32);
15233 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
15234 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15235 
15236 		/* ASEQ-1 */
15237 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
15238 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
15239 		    16, 32);
15240 
15241 		/* ASEQ-2 */
15242 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
15243 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
15244 		    16, 32);
15245 
15246 		/* Command DMA registers. */
15247 
15248 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
15249 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
15250 		    sizeof (fw->cmd_dma_reg) / 4, 32);
15251 
15252 		/* Queues. */
15253 
15254 		/* RequestQ0 */
15255 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
15256 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
15257 		    8, 32);
15258 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
15259 
15260 		/* ResponseQ0 */
15261 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
15262 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
15263 		    8, 32);
15264 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
15265 
15266 		/* RequestQ1 */
15267 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
15268 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
15269 		    8, 32);
15270 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
15271 
15272 		/* Transmit DMA registers. */
15273 
15274 		/* XMT0 */
15275 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
15276 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
15277 		    16, 32);
15278 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
15279 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15280 
15281 		/* XMT1 */
15282 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
15283 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
15284 		    16, 32);
15285 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
15286 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15287 
15288 		/* XMT2 */
15289 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
15290 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
15291 		    16, 32);
15292 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
15293 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15294 
15295 		/* XMT3 */
15296 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
15297 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
15298 		    16, 32);
15299 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
15300 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15301 
15302 		/* XMT4 */
15303 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
15304 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
15305 		    16, 32);
15306 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
15307 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15308 
15309 		/* XMT Common */
15310 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
15311 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
15312 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
15313 
15314 		/* Receive DMA registers. */
15315 
15316 		/* RCVThread0 */
15317 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
15318 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
15319 		    ha->iobase + 0xC0, 16, 32);
15320 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
15321 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15322 
15323 		/* RCVThread1 */
15324 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
15325 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
15326 		    ha->iobase + 0xC0, 16, 32);
15327 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
15328 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15329 
15330 		/* RISC registers. */
15331 
15332 		/* RISC GP */
15333 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
15334 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
15335 		    16, 32);
15336 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
15337 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15338 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
15339 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15340 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
15341 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15342 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
15343 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15344 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
15345 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15346 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
15347 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15348 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
15349 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15350 
15351 		/* Local memory controller (LMC) registers. */
15352 
15353 		/* LMC */
15354 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
15355 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
15356 		    16, 32);
15357 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
15358 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15359 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
15360 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15361 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
15362 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15363 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
15364 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15365 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
15366 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15367 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
15368 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15369 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
15370 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15371 
15372 		/* Fibre Protocol Module registers. */
15373 
15374 		/* FPM hardware */
15375 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
15376 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
15377 		    16, 32);
15378 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
15379 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15380 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
15381 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15382 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
15383 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15384 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
15385 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15386 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
15387 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15388 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
15389 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15390 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
15391 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15392 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
15393 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15394 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
15395 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15396 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
15397 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15398 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
15399 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15400 
15401 		/* Frame Buffer registers. */
15402 
15403 		/* FB hardware */
15404 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
15405 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
15406 		    16, 32);
15407 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
15408 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15409 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
15410 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15411 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
15412 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15413 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
15414 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15415 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
15416 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15417 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
15418 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15419 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
15420 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15421 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
15422 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15423 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
15424 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15425 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
15426 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15427 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
15428 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
15429 	}
15430 
15431 	if (rval == QL_SUCCESS) {
15432 		/* Get the Queue Pointers */
15433 		dp = fw->req_rsp_ext_mem;
15434 		for (index = 0; index < ha->rsp_queues_cnt; index++) {
15435 			if (index == 0 && ha->flags & MULTI_QUEUE) {
15436 				*dp = RD32_MBAR_REG(ha,
15437 				    ha->req_q[0]->mbar_req_in);
15438 				LITTLE_ENDIAN_32(dp);
15439 				dp++;
15440 				*dp = RD32_MBAR_REG(ha,
15441 				    ha->req_q[0]->mbar_req_out);
15442 				LITTLE_ENDIAN_32(dp);
15443 				dp++;
15444 			} else if (index == 1 && ha->flags & MULTI_QUEUE) {
15445 				*dp = RD32_MBAR_REG(ha,
15446 				    ha->req_q[1]->mbar_req_in);
15447 				LITTLE_ENDIAN_32(dp);
15448 				dp++;
15449 				*dp = RD32_MBAR_REG(ha,
15450 				    ha->req_q[1]->mbar_req_out);
15451 				LITTLE_ENDIAN_32(dp);
15452 				dp++;
15453 			} else {
15454 				*dp++ = 0;
15455 				*dp++ = 0;
15456 			}
15457 			if (ha->flags & MULTI_QUEUE) {
15458 				*dp = RD32_MBAR_REG(ha,
15459 				    ha->rsp_queues[index]->mbar_rsp_in);
15460 				LITTLE_ENDIAN_32(dp);
15461 				dp++;
15462 				*dp = RD32_MBAR_REG(ha,
15463 				    ha->rsp_queues[index]->mbar_rsp_out);
15464 				LITTLE_ENDIAN_32(dp);
15465 				dp++;
15466 			} else {
15467 				*dp++ = 0;
15468 				*dp++ = 0;
15469 			}
15470 		}
15471 		/* Get the request queue */
15472 		(void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
15473 		    DDI_DMA_SYNC_FORCPU);
15474 		w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
15475 		for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
15476 			*dp = *w32ptr++;
15477 			LITTLE_ENDIAN_32(dp);
15478 			dp++;
15479 		}
15480 		if (ha->req_q[1] != NULL) {
15481 			(void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
15482 			    0, 0, DDI_DMA_SYNC_FORCPU);
15483 			w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
15484 			for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
15485 				*dp = *w32ptr++;
15486 				LITTLE_ENDIAN_32(dp);
15487 				dp++;
15488 			}
15489 		}
15490 
15491 		/* Get the response queues */
15492 		for (index = 0; index < ha->rsp_queues_cnt; index++) {
15493 			(void) ddi_dma_sync(
15494 			    ha->rsp_queues[index]->rsp_ring.dma_handle,
15495 			    0, 0, DDI_DMA_SYNC_FORCPU);
15496 			w32ptr = (uint32_t *)
15497 			    ha->rsp_queues[index]->rsp_ring.bp;
15498 			for (cnt = 0;
15499 			    cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
15500 			    cnt++) {
15501 				*dp = *w32ptr++;
15502 				LITTLE_ENDIAN_32(dp);
15503 				dp++;
15504 			}
15505 		}
15506 	}
15507 
15508 	/* Reset RISC. */
15509 	ql_reset_chip(ha);
15510 
15511 	/* Memory. */
15512 	if (rval == QL_SUCCESS) {
15513 		/* Code RAM. */
15514 		rval = ql_read_risc_ram(ha, 0x20000,
15515 		    sizeof (fw->code_ram) / 4, fw->code_ram);
15516 	}
15517 	if (rval == QL_SUCCESS) {
15518 		/* External Memory. */
15519 		rval = ql_read_risc_ram(ha, 0x100000,
15520 		    ha->fw_ext_memory_size / 4, dp);
15521 	}
15522 
15523 	/* Get the FC event trace buffer */
15524 	if (rval == QL_SUCCESS) {
15525 		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
15526 		    (ha->fwfcetracebuf.bp != NULL)) {
15527 			uint32_t	cnt;
15528 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
15529 
15530 			/* Sync DMA buffer. */
15531 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
15532 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
15533 
15534 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
15535 				fw->fce_trace_buf[cnt] = *w32++;
15536 			}
15537 		}
15538 	}
15539 
15540 	/* Get the extended trace buffer */
15541 	if (rval == QL_SUCCESS) {
15542 		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
15543 		    (ha->fwexttracebuf.bp != NULL)) {
15544 			uint32_t	cnt;
15545 			uint32_t	*w32 = ha->fwexttracebuf.bp;
15546 
15547 			/* Sync DMA buffer. */
15548 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
15549 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
15550 
15551 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
15552 				fw->ext_trace_buf[cnt] = *w32++;
15553 			}
15554 		}
15555 	}
15556 
15557 	if (rval != QL_SUCCESS) {
15558 		EL(ha, "failed=%xh\n", rval);
15559 	} else {
15560 		/*EMPTY*/
15561 		QL_PRINT_3(ha, "done\n");
15562 	}
15563 
15564 	return (rval);
15565 }
15566 
15567 /*
15568  * ql_81xx_binary_fw_dump
15569  *
15570  * Input:
15571  *	ha:	adapter state pointer.
15572  *	fw:	firmware dump context pointer.
15573  *
15574  * Returns:
15575  *	ql local function return status code.
15576  *
15577  * Context:
15578  *	Interrupt or Kernel context, no mailbox commands allowed.
15579  */
static int
ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
{
	uint32_t	*reg32, cnt, *w32ptr, index, *dp;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Record the ring sizes in the dump header so the dump parser
	 * knows how much queue data follows in req_rsp_ext_mem.
	 */
	fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
	if (ha->req_q[1] != NULL) {
		fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
	}
	fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;

	/* Snapshot interrupt state and AER status before pausing the RISC. */
	fw->hccr = RD32_IO_REG(ha, hccr);
	fw->r2h_status = RD32_IO_REG(ha, risc2host);
	/* AER uncorrectable error status (PCIe extended config offset 0x104). */
	fw->aer_ues = ql_pci_config_get32(ha, 0x104);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		ql_disable_intr(ha);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll for pause: 30000 * 100us = ~3s before timing out. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	/*
	 * Register capture.  Most blocks below follow the same pattern:
	 * write a window-select value to io_base_addr, then read the
	 * 16-dword window that appears at iobase + 0xC0.  ql_read_regs()
	 * returns a pointer just past the data it stored, so successive
	 * reads chain through bp into one dump buffer.
	 */
	if (rval == QL_SUCCESS) {

		/* Host Interface registers */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		ql_disable_intr(ha);

		/*
		 * Shadow registers: each is captured by writing a select
		 * value (0xB0n00000) to iobase + 0xF0 and reading the
		 * result back from iobase + 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers. */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/*
	 * Capture queue pointers and ring contents into the dump's
	 * extended memory area (req_rsp_ext_mem); dp tracks the fill
	 * position and is consumed again by the RISC RAM reads below.
	 */
	if (rval == QL_SUCCESS) {
		/* Get the Queue Pointers */
		dp = fw->req_rsp_ext_mem;
		for (index = 0; index < ha->rsp_queues_cnt; index++) {
			/*
			 * Request-queue in/out pointers exist only for the
			 * first two queues; zero-fill the slots otherwise
			 * so the dump layout stays fixed.
			 */
			if (index == 0 && ha->flags & MULTI_QUEUE) {
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[0]->mbar_req_in);
				LITTLE_ENDIAN_32(dp);
				dp++;
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[0]->mbar_req_out);
				LITTLE_ENDIAN_32(dp);
				dp++;
			} else if (index == 1 && ha->flags & MULTI_QUEUE) {
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[1]->mbar_req_in);
				LITTLE_ENDIAN_32(dp);
				dp++;
				*dp = RD32_MBAR_REG(ha,
				    ha->req_q[1]->mbar_req_out);
				LITTLE_ENDIAN_32(dp);
				dp++;
			} else {
				*dp++ = 0;
				*dp++ = 0;
			}
			if (ha->flags & MULTI_QUEUE) {
				*dp = RD32_MBAR_REG(ha,
				    ha->rsp_queues[index]->mbar_rsp_in);
				LITTLE_ENDIAN_32(dp);
				dp++;
				*dp = RD32_MBAR_REG(ha,
				    ha->rsp_queues[index]->mbar_rsp_out);
				LITTLE_ENDIAN_32(dp);
				dp++;
			} else {
				*dp++ = 0;
				*dp++ = 0;
			}
		}
		/* Get the request queue */
		(void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
		for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
			*dp = *w32ptr++;
			/* Dump data is stored little-endian. */
			LITTLE_ENDIAN_32(dp);
			dp++;
		}
		if (ha->req_q[1] != NULL) {
			(void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle,
			    0, 0, DDI_DMA_SYNC_FORCPU);
			w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
			for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
				*dp = *w32ptr++;
				LITTLE_ENDIAN_32(dp);
				dp++;
			}
		}

		/* Get the response queues */
		for (index = 0; index < ha->rsp_queues_cnt; index++) {
			(void) ddi_dma_sync(
			    ha->rsp_queues[index]->rsp_ring.dma_handle,
			    0, 0, DDI_DMA_SYNC_FORCPU);
			w32ptr = (uint32_t *)
			    ha->rsp_queues[index]->rsp_ring.bp;
			for (cnt = 0;
			    cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
			    cnt++) {
				*dp = *w32ptr++;
				LITTLE_ENDIAN_32(dp);
				dp++;
			}
		}
	}

	/* Reset RISC. */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/*
		 * External Memory: appended at dp, just past the queue
		 * data copied above.
		 */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, dp);
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}

	return (rval);
}
16155 
16156 /*
16157  * ql_read_risc_ram
16158  *	Reads RISC RAM one word at a time.
16159  *	Risc interrupts must be disabled when this routine is called.
16160  *
16161  * Input:
16162  *	ha:	adapter state pointer.
16163  *	risc_address:	RISC code start address.
16164  *	len:		Number of words.
16165  *	buf:		buffer pointer.
16166  *
16167  * Returns:
16168  *	ql local function return status code.
16169  *
16170  * Context:
16171  *	Interrupt or Kernel context, no mailbox commands allowed.
16172  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	/* Word width depends on firmware type: 16-bit legacy, 32-bit type-2. */
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	/* One READ_RAM_EXTENDED mailbox command per word. */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		/*
		 * Build the command directly in the mailbox-in registers;
		 * the address is split low/high across mailboxes 1 and 8.
		 */
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
		/* Ring the doorbell appropriate to the chip family. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		}
		/* Poll for completion: 6000000 * 5us = ~30s timeout. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (INTERRUPT_PENDING(ha)) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, risc2host) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					/* Command complete: fetch the word. */
					if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox_out[2]),
						    RD16_IO_REG(ha,
						    mailbox_out[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command error: mailbox 0 has the code. */
					rval = RD16_IO_REG(ha, mailbox_out[0]);
					break;
				}
				/* Unrelated interrupt; clear and keep waiting. */
				if (CFG_IST(ha, CFG_CTRL_82XX)) {
					ql_8021_clr_hw_intr(ha);
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, semaphore, 0);
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
					RD16_IO_REG(ha, hccr);
				}
			}
			drv_usecwait(5);
		}
		/* Always clear any pending RISC interrupt before the next word. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_clr_hw_intr(ha);
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, semaphore, 0);
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			RD16_IO_REG(ha, hccr);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
16252 
16253 /*
16254  * ql_read_regs
16255  *	Reads adapter registers to buffer.
16256  *
16257  * Input:
16258  *	ha:	adapter state pointer.
16259  *	buf:	buffer pointer.
16260  *	reg:	start address.
16261  *	count:	number of registers.
16262  *	wds:	register size.
16263  *
16264  * Context:
16265  *	Interrupt or Kernel context, no mailbox commands allowed.
16266  */
16267 static void *
ql_read_regs(ql_adapter_state_t * ha,void * buf,void * reg,uint32_t count,uint8_t wds)16268 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
16269     uint8_t wds)
16270 {
16271 	uint32_t	*bp32, *reg32;
16272 	uint16_t	*bp16, *reg16;
16273 	uint8_t		*bp8, *reg8;
16274 
16275 	switch (wds) {
16276 	case 32:
16277 		bp32 = buf;
16278 		reg32 = reg;
16279 		while (count--) {
16280 			*bp32++ = RD_REG_DWORD(ha, reg32++);
16281 		}
16282 		return (bp32);
16283 	case 16:
16284 		bp16 = buf;
16285 		reg16 = reg;
16286 		while (count--) {
16287 			*bp16++ = RD_REG_WORD(ha, reg16++);
16288 		}
16289 		return (bp16);
16290 	case 8:
16291 		bp8 = buf;
16292 		reg8 = reg;
16293 		while (count--) {
16294 			*bp8++ = RD_REG_BYTE(ha, reg8++);
16295 		}
16296 		return (bp8);
16297 	default:
16298 		EL(ha, "Unknown word size=%d\n", wds);
16299 		return (buf);
16300 	}
16301 }
16302 
/*
 * ql_save_config_regs
 *	Saves a snapshot of selected PCI configuration registers into
 *	the "ql-config-space" devinfo property so that
 *	ql_restore_config_regs() can put them back later.
 *
 * Input:
 *	dip:	device info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_save_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	int			ret;
	ql_config_space_t	chs;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(NULL, "no adapter instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(ha, "started\n");

	/*
	 * If the snapshot property already exists, a previous save has
	 * not yet been consumed; keep it and report success.
	 * NOTE(review): the "no prop" debug text is misleading here --
	 * this path is taken when the property DOES exist.
	 */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
	    1) {
		QL_PRINT_2(ha, "no prop exit\n");
		return (DDI_SUCCESS);
	}

	/* Capture command register and header type. */
	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_HEADER);
	/* Bridge control exists only in type-1 (PCI-to-PCI bridge) headers. */
	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_BCNTRL);
	}

	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_CACHE_LINESZ);

	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_LATENCY_TIMER);

	/* Secondary latency timer also exists only in type-1 headers. */
	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_LATENCY_TIMER);
	}

	/* Save all six base address registers. */
	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);

	/* Persist the snapshot as a byte array property on the node. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
	    (uchar_t *)&chs, sizeof (ql_config_space_t));

	if (ret != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
		return (DDI_FAILURE);
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
16367 
/*
 * ql_restore_config_regs
 *	Restores PCI configuration registers from the "ql-config-space"
 *	property saved by ql_save_config_regs(), then removes the
 *	property so a stale snapshot cannot be replayed.
 *
 * Input:
 *	dip:	device info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(NULL, "no adapter instance=%d\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(ha, "started\n");

	/* Fetch the saved snapshot; fail if no save was made. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(ha, "no prop exit\n");
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	/* Bridge control exists only in type-1 (PCI-to-PCI bridge) headers. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	/* Secondary latency timer also exists only in type-1 headers. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	/* Restore all six base address registers. */
	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* Snapshot consumed; removal failure is logged but non-fatal. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
16430 
16431 uint8_t
ql_pci_config_get8(ql_adapter_state_t * ha,off_t off)16432 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
16433 {
16434 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
16435 		return (ddi_get8(ha->sbus_config_handle,
16436 		    (uint8_t *)(ha->sbus_config_base + off)));
16437 	}
16438 
16439 	return (pci_config_get8(ha->pci_handle, off));
16440 }
16441 
16442 uint16_t
ql_pci_config_get16(ql_adapter_state_t * ha,off_t off)16443 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
16444 {
16445 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
16446 		return (ddi_get16(ha->sbus_config_handle,
16447 		    (uint16_t *)(ha->sbus_config_base + off)));
16448 	}
16449 
16450 	return (pci_config_get16(ha->pci_handle, off));
16451 }
16452 
16453 uint32_t
ql_pci_config_get32(ql_adapter_state_t * ha,off_t off)16454 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
16455 {
16456 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
16457 		return (ddi_get32(ha->sbus_config_handle,
16458 		    (uint32_t *)(ha->sbus_config_base + off)));
16459 	}
16460 
16461 	return (pci_config_get32(ha->pci_handle, off));
16462 }
16463 
16464 void
ql_pci_config_put8(ql_adapter_state_t * ha,off_t off,uint8_t val)16465 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
16466 {
16467 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
16468 		ddi_put8(ha->sbus_config_handle,
16469 		    (uint8_t *)(ha->sbus_config_base + off), val);
16470 	} else {
16471 		pci_config_put8(ha->pci_handle, off, val);
16472 	}
16473 }
16474 
16475 void
ql_pci_config_put16(ql_adapter_state_t * ha,off_t off,uint16_t val)16476 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
16477 {
16478 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
16479 		ddi_put16(ha->sbus_config_handle,
16480 		    (uint16_t *)(ha->sbus_config_base + off), val);
16481 	} else {
16482 		pci_config_put16(ha->pci_handle, off, val);
16483 	}
16484 }
16485 
16486 void
ql_pci_config_put32(ql_adapter_state_t * ha,off_t off,uint32_t val)16487 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
16488 {
16489 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
16490 		ddi_put32(ha->sbus_config_handle,
16491 		    (uint32_t *)(ha->sbus_config_base + off), val);
16492 	} else {
16493 		pci_config_put32(ha->pci_handle, off, val);
16494 	}
16495 }
16496 
16497 /*
16498  * ql_halt
16499  *	Waits for commands that are running to finish and
16500  *	if they do not, commands are aborted.
16501  *	Finally the adapter is reset.
16502  *
16503  * Input:
16504  *	ha:	adapter state pointer.
16505  *	pwr:	power state.
16506  *
16507  * Context:
16508  *	Kernel context.
16509  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	ql_link_t	*link;
	ql_response_q_t	*rsp_q;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint32_t	cnt, i;
	uint16_t	index;

	QL_PRINT_3(ha, "started\n");

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/*
			 * Wait for 30 seconds for commands to finish
			 * (3000 polls x 10ms delay).
			 */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			for (i = 0; i < ha->rsp_queues_cnt; i++) {
				if ((rsp_q = ha->rsp_queues[i]) != NULL &&
				    (sp = rsp_q->status_srb) != NULL) {
					rsp_q->status_srb = NULL;
					sp->cmd.next = NULL;
					ql_done(&sp->cmd, B_FALSE);
				}
			}

			/*
			 * Abort commands that did not finish (cnt == 0
			 * means the 30 second wait above expired).
			 * Outstanding command slots are scanned from 1.
			 */
			if (cnt == 0) {
				for (cnt = 1; cnt < ha->osc_max_cnt;
				    cnt++) {
					/*
					 * Issuing queued I/O can occupy
					 * new slots, so restart the scan
					 * after draining the pending queue.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					if (sp != NULL &&
					    sp != QL_ABORTED_SRB(ha) &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort_io(ha, sp);
						sp->pkt->pkt_reason =
						    CS_ABORTED;
						sp->cmd.next = NULL;
						ql_done(&sp->cmd, B_FALSE);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Only mark offline and reset the chip when powering fully down. */
	if (pwr == PM_LEVEL_D3 && ha->flags & ONLINE) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* 82xx: clear the driver-active indication before reset. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_clr_drv_active(ha);
		}

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(ha, "done\n");
}
16605 
16606 /*
16607  * ql_get_dma_mem
16608  *	Function used to allocate dma memory.
16609  *
16610  * Input:
16611  *	ha:			adapter state pointer.
16612  *	mem:			pointer to dma memory object.
16613  *	size:			size of the request in bytes
16614  *
16615  * Returns:
 *	ql local function return status code.
16617  *
16618  * Context:
16619  *	Kernel context.
16620  */
16621 int
ql_get_dma_mem(ql_adapter_state_t * ha,dma_mem_t * mem,uint32_t size,mem_alloc_type_t allocation_type,mem_alignment_t alignment)16622 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
16623     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
16624 {
16625 	int	rval;
16626 
16627 	QL_PRINT_3(ha, "started\n");
16628 
16629 	mem->size = size;
16630 	mem->type = allocation_type;
16631 	mem->max_cookie_count = 1;
16632 
16633 	switch (alignment) {
16634 	case QL_DMA_DATA_ALIGN:
16635 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
16636 		break;
16637 	case QL_DMA_RING_ALIGN:
16638 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
16639 		break;
16640 	default:
16641 		EL(ha, "failed, unknown alignment type %x\n", alignment);
16642 		break;
16643 	}
16644 
16645 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
16646 		ql_free_phys(ha, mem);
16647 		EL(ha, "failed, alloc_phys=%xh\n", rval);
16648 	}
16649 
16650 	QL_PRINT_3(ha, "done\n");
16651 
16652 	return (rval);
16653 }
16654 
16655 /*
16656  * ql_free_dma_resource
16657  *	Function used to free dma memory.
16658  *
16659  * Input:
16660  *	ha:		adapter state pointer.
16661  *	mem:		pointer to dma memory object.
16662  *	mem->dma_handle	DMA memory handle.
16663  *
16664  * Context:
16665  *	Kernel context.
16666  */
void
ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
{
	QL_PRINT_3(ha, "started\n");

	/* ql_free_phys() unbinds (if bound) and releases all DMA resources. */
	ql_free_phys(ha, mem);

	QL_PRINT_3(ha, "done\n");
}
16676 
16677 /*
16678  * ql_alloc_phys
16679  *	Function used to allocate memory and zero it.
16680  *	Memory is below 4 GB.
16681  *
16682  * Input:
16683  *	ha:			adapter state pointer.
16684  *	mem:			pointer to dma memory object.
16685  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
16686  *	mem->cookie_count	number of segments allowed.
16687  *	mem->type		memory allocation type.
16688  *	mem->size		memory size.
16689  *	mem->alignment		memory alignment.
16690  *
16691  * Returns:
16692  *	ql local function return status code.
16693  *
16694  * Context:
16695  *	Kernel context.
16696  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	/* Start from the adapter's I/O attributes and specialize below. */
	ddi_dma_attr_t		dma_attr = ha->io_dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(ha, "started\n");

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->max_cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel heap; no access handle involved. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Select device access endianness; default is little. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			if (dma_attr.dma_attr_addr_hi == 0) {
				/*
				 * Nonzero dmac_notused is taken here as a
				 * sign of an address above 4GB.
				 * NOTE(review): confirm cookie layout for
				 * this check.
				 */
				if (mem->cookie.dmac_notused != 0) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	/*
	 * Fault-management check of the DMA handle before binding.
	 * NOTE(review): the EL message text is stale -- this is an FM
	 * handle check, not a ddi_dma_addr_bind_handle failure.
	 */
	if (qlc_fm_check_dma_handle(ha, mem->dma_handle)
	    != DDI_FM_OK) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		ql_free_phys(ha, mem);
		qlc_fm_report_err_impact(ha,
		    QL_FM_EREPORT_DMA_HANDLE_CHECK);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(ha, "done\n");

	return (QL_SUCCESS);
}
16798 
16799 /*
16800  * ql_free_phys
16801  *	Function used to free physical memory.
16802  *
16803  * Input:
16804  *	ha:	adapter state pointer.
16805  *	mem:	pointer to dma memory object.
16806  *
16807  * Context:
16808  *	Kernel context.
16809  */
void
ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
{
	QL_PRINT_3(ha, "started\n");

	if (mem != NULL) {
		/* Undo the DMA binding before releasing the memory. */
		if (mem->memflags == DDI_DMA_MAPPED) {
			ql_unbind_dma_buffer(ha, mem);
		}

		/* Release the backing memory per its allocation type. */
		switch (mem->type) {
		case KERNEL_MEM:
			if (mem->bp != NULL) {
				kmem_free(mem->bp, mem->size);
				mem->bp = NULL;
			}
			break;
		case LITTLE_ENDIAN_DMA:
		case BIG_ENDIAN_DMA:
		case NO_SWAP_DMA:
			/* DDI-allocated memory; freeing clears bp too. */
			if (mem->acc_handle != NULL) {
				ddi_dma_mem_free(&mem->acc_handle);
				mem->acc_handle = NULL;
				mem->bp = NULL;
			}
			break;
		default:
			break;
		}
		/* Finally drop the DMA handle itself. */
		if (mem->dma_handle != NULL) {
			ddi_dma_free_handle(&mem->dma_handle);
			mem->dma_handle = NULL;
		}
	}

	QL_PRINT_3(ha, "done\n");
}
16847 
16848 /*
16849  * ql_bind_dma_buffer
16850  *	Binds DMA buffer.
16851  *
16852  * Input:
16853  *	ha:			adapter state pointer.
16854  *	mem:			pointer to dma memory object.
16855  *	kmflags:		KM_SLEEP or KM_NOSLEEP.
16856  *	mem->dma_handle		DMA memory handle.
16857  *	mem->max_cookie_count	number of segments allowed.
16858  *	mem->type		memory allocation type.
16859  *	mem->size		memory size.
16860  *	mem->bp			pointer to memory or struct buf
16861  *
16862  * Returns:
16863  *	mem->cookies		pointer to list of cookies.
16864  *	mem->cookie_count	number of cookies.
16865  *	status			success = DDI_DMA_MAPPED
16866  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
16867  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
16868  *				DDI_DMA_TOOBIG
16869  *
16870  * Context:
16871  *	Kernel context.
16872  */
16873 static int
ql_bind_dma_buffer(ql_adapter_state_t * ha,dma_mem_t * mem,int kmflags)16874 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int kmflags)
16875 {
16876 	ddi_dma_cookie_t	*cookiep;
16877 	uint32_t		cnt;
16878 
16879 	QL_PRINT_3(ha, "started\n");
16880 
16881 	mem->memflags = ddi_dma_addr_bind_handle(mem->dma_handle, NULL,
16882 	    mem->bp, mem->size, mem->flags, (kmflags == KM_SLEEP) ?
16883 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
16884 	    &mem->cookie_count);
16885 
16886 	if (mem->memflags == DDI_DMA_MAPPED) {
16887 		if (mem->cookie_count > mem->max_cookie_count) {
16888 			(void) ddi_dma_unbind_handle(mem->dma_handle);
16889 			EL(ha, "failed, cookie_count %d > %d\n",
16890 			    mem->cookie_count, mem->max_cookie_count);
16891 			mem->memflags = (uint32_t)DDI_DMA_TOOBIG;
16892 		} else {
16893 			if (mem->cookie_count > 1) {
16894 				if (mem->cookies = kmem_zalloc(
16895 				    sizeof (ddi_dma_cookie_t) *
16896 				    mem->cookie_count, kmflags)) {
16897 					*mem->cookies = mem->cookie;
16898 					cookiep = mem->cookies;
16899 					for (cnt = 1; cnt < mem->cookie_count;
16900 					    cnt++) {
16901 						ddi_dma_nextcookie(
16902 						    mem->dma_handle,
16903 						    ++cookiep);
16904 					}
16905 				} else {
16906 					(void) ddi_dma_unbind_handle(
16907 					    mem->dma_handle);
16908 					EL(ha, "failed, kmem_zalloc\n");
16909 					mem->memflags = (uint32_t)
16910 					    DDI_DMA_NORESOURCES;
16911 				}
16912 			} else {
16913 				/*
16914 				 * It has been reported that dmac_size at times
16915 				 * may be incorrect on sparc machines so for
16916 				 * sparc machines that only have one segment
16917 				 * use the buffer size instead.
16918 				 */
16919 				mem->cookies = &mem->cookie;
16920 				mem->cookies->dmac_size = mem->size;
16921 			}
16922 		}
16923 	}
16924 
16925 	if (mem->memflags != DDI_DMA_MAPPED) {
16926 		EL(ha, "failed=%xh\n", mem->memflags);
16927 	} else {
16928 		/*EMPTY*/
16929 		QL_PRINT_3(ha, "done\n");
16930 	}
16931 
16932 	return (mem->memflags);
16933 }
16934 
16935 /*
16936  * ql_unbind_dma_buffer
16937  *	Unbinds DMA buffer.
16938  *
16939  * Input:
16940  *	ha:			adapter state pointer.
16941  *	mem:			pointer to dma memory object.
16942  *	mem->dma_handle		DMA memory handle.
16943  *	mem->cookies		pointer to cookie list.
16944  *	mem->cookie_count	number of cookies.
16945  *
16946  * Context:
16947  *	Kernel context.
16948  */
16949 /* ARGSUSED */
16950 static void
ql_unbind_dma_buffer(ql_adapter_state_t * ha,dma_mem_t * mem)16951 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
16952 {
16953 	QL_PRINT_3(ha, "started\n");
16954 
16955 	if (mem->dma_handle != NULL && mem->memflags == DDI_DMA_MAPPED) {
16956 		(void) ddi_dma_unbind_handle(mem->dma_handle);
16957 	}
16958 	if (mem->cookie_count > 1) {
16959 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
16960 		    mem->cookie_count);
16961 		mem->cookies = NULL;
16962 	}
16963 	mem->cookie_count = 0;
16964 	mem->memflags = (uint32_t)DDI_DMA_NORESOURCES;
16965 
16966 	QL_PRINT_3(ha, "done\n");
16967 }
16968 
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend: drains and aborts outstanding
 *	commands, then claims mailbox ownership so interrupts can be
 *	disabled without hanging a mailbox user.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* Mailbox-wait timeout: 32 seconds expressed in clock ticks. */
	clock_t timer = (clock_t)(32 * drv_usectohz(1000000));

	QL_PRINT_3(ha, "started\n");

	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 32 seconds from now */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {
			/* Timed out without gaining mailbox ownership. */

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* With the mailbox claimed, it is safe to disable interrupts. */
	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		ql_disable_intr(ha);
	}

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");

	return (QL_SUCCESS);
}
17034 
17035 /*
17036  * ql_add_link_b
17037  *	Add link to the end of the chain.
17038  *
17039  * Input:
17040  *	head = Head of link list.
17041  *	link = link to be added.
17042  *	LOCK must be already obtained.
17043  *
17044  * Context:
17045  *	Interrupt or Kernel context, no mailbox commands allowed.
17046  */
17047 void
ql_add_link_b(ql_head_t * head,ql_link_t * link)17048 ql_add_link_b(ql_head_t *head, ql_link_t *link)
17049 {
17050 	if (link->head != NULL) {
17051 		EL(NULL, "link in use by list=%ph\n", link->head);
17052 	}
17053 
17054 	/* at the end there isn't a next */
17055 	link->next = NULL;
17056 
17057 	if ((link->prev = head->last) == NULL) {
17058 		head->first = link;
17059 	} else {
17060 		head->last->next = link;
17061 	}
17062 
17063 	head->last = link;
17064 	link->head = head;	/* the queue we're on */
17065 }
17066 
17067 /*
17068  * ql_add_link_t
17069  *	Add link to the beginning of the chain.
17070  *
17071  * Input:
17072  *	head = Head of link list.
17073  *	link = link to be added.
17074  *	LOCK must be already obtained.
17075  *
17076  * Context:
17077  *	Interrupt or Kernel context, no mailbox commands allowed.
17078  */
17079 void
ql_add_link_t(ql_head_t * head,ql_link_t * link)17080 ql_add_link_t(ql_head_t *head, ql_link_t *link)
17081 {
17082 	if (link->head != NULL) {
17083 		EL(NULL, "link in use by list=%ph\n", link->head);
17084 	}
17085 	link->prev = NULL;
17086 
17087 	if ((link->next = head->first) == NULL)	{
17088 		head->last = link;
17089 	} else {
17090 		head->first->prev = link;
17091 	}
17092 
17093 	head->first = link;
17094 	link->head = head;	/* the queue we're on */
17095 }
17096 
17097 /*
17098  * ql_remove_link
17099  *	Remove a link from the chain.
17100  *
17101  * Input:
17102  *	head = Head of link list.
17103  *	link = link to be removed.
17104  *	associated proper LOCK must be already obtained.
17105  *
17106  * Context:
17107  *	Interrupt or Kernel context, no mailbox commands allowed.
17108  */
17109 void
ql_remove_link(ql_head_t * head,ql_link_t * link)17110 ql_remove_link(ql_head_t *head, ql_link_t *link)
17111 {
17112 	if (head != NULL) {
17113 		if (link->prev != NULL) {
17114 			if ((link->prev->next = link->next) == NULL) {
17115 				head->last = link->prev;
17116 			} else {
17117 				link->next->prev = link->prev;
17118 			}
17119 		} else if ((head->first = link->next) == NULL) {
17120 			head->last = NULL;
17121 		} else {
17122 			head->first->prev = NULL;
17123 		}
17124 
17125 		/* not on a queue any more */
17126 		link->prev = link->next = NULL;
17127 		link->head = NULL;
17128 	}
17129 }
17130 
17131 /*
17132  * ql_chg_endian
17133  *	Change endianess of byte array.
17134  *
17135  * Input:
17136  *	buf = array pointer.
17137  *	size = size of array in bytes.
17138  *
17139  * Context:
17140  *	Interrupt or Kernel context, no mailbox commands allowed.
17141  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo, hi;

	/* Nothing to swap for zero- or one-byte arrays. */
	if (size < 2) {
		return;
	}

	/* Swap symmetric pairs, working inward from both ends. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		uint8_t	tmp = buf[lo];

		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
17157 
17158 /*
17159  * ql_bstr_to_dec
17160  *	Convert decimal byte string to number.
17161  *
17162  * Input:
17163  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
17165  *	size:	number of ascii bytes.
17166  *
17167  * Returns:
17168  *	success = number of ascii bytes processed.
17169  *
17170  * Context:
17171  *	Kernel/Interrupt context.
17172  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	char		*p;
	uint32_t	remaining, weight, place;
	int		digits = 0;

	/* When no length is given, measure the run of leading digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	remaining = size;
	while (*s != '\0' && remaining != 0) {
		if (*s < '0' || *s > '9') {
			break;
		}

		/* Weight of this digit: 10^(digits still to consume - 1). */
		weight = 1;
		for (place = 1; place < remaining; place++) {
			weight *= 10;
		}
		*ans += (uint32_t)(*s++ - '0') * weight;

		remaining--;
		digits++;
	}

	return (digits);
}
17202 
17203 /*
17204  * ql_delay
17205  *	Calls delay routine if threads are not suspended, otherwise, busy waits
17206  *	Minimum = 1 tick = 10ms
17207  *
17208  * Input:
17209  *	dly = delay time in microseconds.
17210  *
17211  * Context:
17212  *	Kernel or Interrupt context, no mailbox commands allowed.
17213  */
17214 void
ql_delay(ql_adapter_state_t * ha,clock_t usecs)17215 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
17216 {
17217 	if (ha->flags & ADAPTER_SUSPENDED || ddi_in_panic() ||
17218 	    curthread->t_flag & T_INTR_THREAD) {
17219 		drv_usecwait(usecs);
17220 	} else {
17221 		delay(drv_usectohz(usecs));
17222 	}
17223 }
17224 
17225 /*
 * ql_stall_driver
17227  *	Stalls one or all driver instances, waits for 30 seconds.
17228  *
17229  * Input:
17230  *	ha:		adapter state pointer or NULL for all.
17231  *	options:	BIT_0 --> leave driver stalled on exit if
17232  *				  failed.
17233  *
17234  * Returns:
17235  *	ql local function return status code.
17236  *
17237  * Context:
17238  *	Kernel context.
17239  */
int
ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2 = NULL;
	uint32_t		timer;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Tell all daemons to stall.
	 * ha == NULL means iterate every adapter on ql_hba;
	 * otherwise operate on just this instance.
	 */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);

		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for daemons stall. (3000 polls x 10ms) */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * An adapter counts as stalled when its daemon is gone
		 * or stopping, firmware is down, or it reports stalled
		 * with no outstanding commands; skip to the next one.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & FIRMWARE_UP) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
		    ql_wait_outstanding(ha2) == ha2->pha->osc_max_cnt)) {
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(ha2, "status, dtf=%xh, stf=%xh\n",
		    ha2->task_daemon_flags, ha2->flags);

		/* Not stalled yet: delay and restart the scan. */
		ql_delay(ha2, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	/* Timed out: optionally leave the driver stalled (BIT_0). */
	if (ha2 != NULL && timer == 0) {
		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
		    "unstalled"));
		if (options & BIT_0) {
			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
		}
		return (QL_FUNCTION_TIMEOUT);
	}

	QL_PRINT_3(ha, "done\n");

	return (QL_SUCCESS);
}
17296 
17297 /*
17298  * ql_restart_driver
17299  *	Restarts one or all driver instances.
17300  *
17301  * Input:
17302  *	ha:	adapter state pointer or NULL for all.
17303  *
17304  * Context:
17305  *	Kernel context.
17306  */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Tell all daemons to unstall.
	 * ha == NULL means iterate every adapter on ql_hba;
	 * otherwise operate on just this instance.
	 */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. (3000 x 10ms) */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * An adapter counts as restarted when its daemon is
		 * gone, stopping, or no longer flagged stalled; restart
		 * its queues and move on.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(ha2, "restarted\n");
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(ha2, "status, tdf=%xh\n", ha2->task_daemon_flags);

		/* Still stalled: delay and restart the scan. */
		ql_delay(ha2, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(ha, "done\n");
}
17350 
17351 /*
17352  * ql_setup_interrupts
17353  *	Sets up interrupts based on the HBA's and platform's
17354  *	capabilities (e.g., legacy / MSI / FIXED).
17355  *
17356  * Input:
17357  *	ha = adapter state pointer.
17358  *
17359  * Returns:
17360  *	DDI_SUCCESS or DDI_FAILURE.
17361  *
17362  * Context:
17363  *	Kernel context.
17364  */
17365 static int
ql_setup_interrupts(ql_adapter_state_t * ha)17366 ql_setup_interrupts(ql_adapter_state_t *ha)
17367 {
17368 	int32_t		rval = DDI_FAILURE;
17369 	int32_t		i;
17370 	int32_t		itypes = 0;
17371 
17372 	QL_PRINT_3(ha, "started\n");
17373 
17374 	/*
17375 	 * The Solaris Advanced Interrupt Functions (aif) are only
17376 	 * supported on s10U1 or greater.
17377 	 */
17378 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
17379 		EL(ha, "interrupt framework is not supported or is "
17380 		    "disabled, using legacy\n");
17381 		return (ql_legacy_intr(ha));
17382 	} else if (ql_os_release_level == 10) {
17383 		/*
17384 		 * See if the advanced interrupt functions (aif) are
17385 		 * in the kernel
17386 		 */
17387 		void	*fptr = (void *)&ddi_intr_get_supported_types;
17388 
17389 		if (fptr == NULL) {
17390 			EL(ha, "aif is not supported, using legacy "
17391 			    "interrupts (rev)\n");
17392 			return (ql_legacy_intr(ha));
17393 		}
17394 	}
17395 
17396 	/* See what types of interrupts this HBA and platform support */
17397 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
17398 	    DDI_SUCCESS) {
17399 		EL(ha, "get supported types failed, rval=%xh, "
17400 		    "assuming FIXED\n", i);
17401 		itypes = DDI_INTR_TYPE_FIXED;
17402 	}
17403 
17404 	EL(ha, "supported types are: %xh\n", itypes);
17405 
17406 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
17407 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
17408 		EL(ha, "successful MSI-X setup\n");
17409 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
17410 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
17411 		EL(ha, "successful MSI setup\n");
17412 	} else {
17413 		rval = ql_setup_fixed(ha);
17414 	}
17415 
17416 	if (rval != DDI_SUCCESS) {
17417 		EL(ha, "failed, aif, rval=%xh\n", rval);
17418 	} else {
17419 		/* Setup mutexes */
17420 		if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
17421 			EL(ha, "failed, mutex init ret=%xh\n", rval);
17422 			ql_release_intr(ha);
17423 		}
17424 		QL_PRINT_3(ha, "done\n");
17425 	}
17426 
17427 	return (rval);
17428 }
17429 
17430 /*
17431  * ql_setup_msi
17432  *	Set up aif MSI interrupts
17433  *
17434  * Input:
17435  *	ha = adapter state pointer.
17436  *
17437  * Returns:
17438  *	DDI_SUCCESS or DDI_FAILURE.
17439  *
17440  * Context:
17441  *	Kernel context.
17442  */
17443 static int
ql_setup_msi(ql_adapter_state_t * ha)17444 ql_setup_msi(ql_adapter_state_t *ha)
17445 {
17446 	uint_t		i;
17447 	int32_t		count = 0;
17448 	int32_t		avail = 0;
17449 	int32_t		actual = 0;
17450 	int32_t		msitype = DDI_INTR_TYPE_MSI;
17451 	int32_t		ret;
17452 
17453 	QL_PRINT_3(ha, "started\n");
17454 
17455 	if (ql_disable_msi != 0) {
17456 		EL(ha, "MSI is disabled by user\n");
17457 		return (DDI_FAILURE);
17458 	}
17459 
17460 	/* MSI support is only suported on 24xx HBA's. */
17461 	if (!CFG_IST(ha, CFG_MSI_SUPPORT)) {
17462 		EL(ha, "HBA does not support MSI\n");
17463 		return (DDI_FAILURE);
17464 	}
17465 
17466 	/* Get number of MSI interrupts the system supports */
17467 	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
17468 	    DDI_SUCCESS) || count == 0) {
17469 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
17470 		return (DDI_FAILURE);
17471 	}
17472 
17473 	/* Get number of available MSI interrupts */
17474 	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
17475 	    DDI_SUCCESS) || avail == 0) {
17476 		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
17477 		return (DDI_FAILURE);
17478 	}
17479 
17480 	/* MSI requires only 1.  */
17481 	count = 1;
17482 
17483 	/* Allocate space for interrupt handles */
17484 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
17485 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
17486 
17487 	ha->iflags |= IFLG_INTR_MSI;
17488 
17489 	/* Allocate the interrupts */
17490 	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
17491 	    &actual, 0)) != DDI_SUCCESS || actual < count) {
17492 		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
17493 		    "actual=%xh\n", ret, count, actual);
17494 		ql_release_intr(ha);
17495 		return (DDI_FAILURE);
17496 	}
17497 	ha->intr_cnt = actual;
17498 
17499 	/* Get interrupt priority */
17500 	if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
17501 		EL(ha, "failed, get_pri ret=%xh\n", ret);
17502 		ql_release_intr(ha);
17503 		return (ret);
17504 	}
17505 	ha->intr_pri = DDI_INTR_PRI(i);
17506 
17507 	/* Add the interrupt handler */
17508 	if ((ret = ddi_intr_add_handler(ha->htable[0], ql_isr_aif,
17509 	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
17510 		EL(ha, "failed, intr_add ret=%xh\n", ret);
17511 		ql_release_intr(ha);
17512 		return (ret);
17513 	}
17514 
17515 	/* Get the capabilities */
17516 	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);
17517 
17518 	/* Enable interrupts */
17519 	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
17520 		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
17521 		    DDI_SUCCESS) {
17522 			EL(ha, "failed, block enable, ret=%xh\n", ret);
17523 			ql_release_intr(ha);
17524 			return (ret);
17525 		}
17526 	} else {
17527 		for (i = 0; i < actual; i++) {
17528 			if ((ret = ddi_intr_enable(ha->htable[i])) !=
17529 			    DDI_SUCCESS) {
17530 				EL(ha, "failed, intr enable, ret=%xh\n", ret);
17531 				ql_release_intr(ha);
17532 				return (ret);
17533 			}
17534 		}
17535 	}
17536 
17537 	QL_PRINT_3(ha, "done\n");
17538 
17539 	return (DDI_SUCCESS);
17540 }
17541 
17542 /*
17543  * ql_setup_msix
17544  *	Set up aif MSI-X interrupts
17545  *
17546  * Input:
17547  *	ha = adapter state pointer.
17548  *
17549  * Returns:
17550  *	DDI_SUCCESS or DDI_FAILURE.
17551  *
17552  * Context:
17553  *	Kernel context.
17554  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	int		hwvect;		/* h/w supported vector count */
	int32_t		count = 0;	/* vectors requested */
	int32_t		avail = 0;
	int32_t		actual = 0;	/* vectors actually allocated */
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint_t		i;

	QL_PRINT_3(ha, "started\n");

	/* Honor the user override that disables MSI-X. */
	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) ||
	    (CFG_IST(ha, CFG_CTRL_24XX) && ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &hwvect)) !=
	    DDI_SUCCESS) || hwvect == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, hwvect);
		return (DDI_FAILURE);
	}
	QL_PRINT_10(ha, "ddi_intr_get_nintrs, hwvect=%d\n", hwvect);

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}
	QL_PRINT_10(ha, "ddi_intr_get_navail, avail=%d\n", avail);

	/*
	 * Fill out the intr table: request the larger of the base
	 * interrupt count and the multi-queue vector count, capped at
	 * what the hardware supports.
	 */
	count = ha->interrupt_count;
	if (ha->flags & MULTI_QUEUE && count < ha->mq_msix_vectors) {
		count = ha->mq_msix_vectors;
		/* don't exceed the h/w capability */
		if (count > hwvect) {
			count = hwvect;
		}
	}

	/*
	 * Allocate space for interrupt handles; sized by hwvect (not
	 * count) so the sparc dup-handler loop below has room to fill
	 * the remaining slots.
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSIX;

	/*
	 * Allocate the interrupts; fewer than requested is tolerated,
	 * but anything below the base interrupt_count is a failure.
	 */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < ha->interrupt_count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}
	ha->intr_cnt = actual;
	EL(ha, "min=%d, multi-q=%d, req=%d, rcv=%d\n",
	    ha->interrupt_count, ha->mq_msix_vectors, count,
	    ha->intr_cnt);

	/* Get interrupt priority (used to initialize driver mutexes). */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}
	ha->intr_pri = DDI_INTR_PRI(i);

	/* Add the interrupt handlers; the vector index is the ISR arg2. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts, block-style when supported. */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, actual)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
		QL_PRINT_10(ha, "intr_block_enable %d\n", actual);
	} else {
		for (i = 0; i < actual; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_release_intr(ha);
				return (ret);
			}
			QL_PRINT_10(ha, "intr_enable %d\n", i);
		}
	}

	QL_PRINT_3(ha, "done\n");

	return (DDI_SUCCESS);
}
17704 
17705 /*
17706  * ql_setup_fixed
17707  *	Sets up aif FIXED interrupts
17708  *
17709  * Input:
17710  *	ha = adapter state pointer.
17711  *
17712  * Returns:
17713  *	DDI_SUCCESS or DDI_FAILURE.
17714  *
17715  * Context:
17716  *	Kernel context.
17717  */
17718 static int
ql_setup_fixed(ql_adapter_state_t * ha)17719 ql_setup_fixed(ql_adapter_state_t *ha)
17720 {
17721 	int32_t		count = 0;
17722 	int32_t		actual = 0;
17723 	int32_t		ret;
17724 	uint_t		i;
17725 
17726 	QL_PRINT_3(ha, "started\n");
17727 
17728 	if (ql_disable_intx != 0) {
17729 		EL(ha, "INT-X is disabled by user\n");
17730 		return (DDI_FAILURE);
17731 	}
17732 
17733 	/* Get number of fixed interrupts the system supports */
17734 	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
17735 	    &count)) != DDI_SUCCESS) || count == 0) {
17736 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
17737 		return (DDI_FAILURE);
17738 	}
17739 
17740 	/* Allocate space for interrupt handles */
17741 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
17742 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
17743 
17744 	ha->iflags |= IFLG_INTR_FIXED;
17745 
17746 	/* Allocate the interrupts */
17747 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
17748 	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
17749 	    actual < count) {
17750 		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
17751 		    "actual=%xh\n", ret, count, actual);
17752 		ql_release_intr(ha);
17753 		return (DDI_FAILURE);
17754 	}
17755 	ha->intr_cnt = actual;
17756 
17757 	/* Get interrupt priority */
17758 	if ((ret = ddi_intr_get_pri(ha->htable[0], &i)) != DDI_SUCCESS) {
17759 		EL(ha, "failed, get_pri ret=%xh\n", ret);
17760 		ql_release_intr(ha);
17761 		return (ret);
17762 	}
17763 	ha->intr_pri = DDI_INTR_PRI(i);
17764 
17765 	/* Add the interrupt handlers */
17766 	for (i = 0; i < actual; i++) {
17767 		if ((ret = ddi_intr_add_handler(ha->htable[i], ql_isr_aif,
17768 		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
17769 			EL(ha, "failed, intr_add ret=%xh\n", ret);
17770 			ql_release_intr(ha);
17771 			return (ret);
17772 		}
17773 	}
17774 
17775 	/* Enable interrupts */
17776 	for (i = 0; i < actual; i++) {
17777 		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
17778 			EL(ha, "failed, intr enable, ret=%xh\n", ret);
17779 			ql_release_intr(ha);
17780 			return (ret);
17781 		}
17782 	}
17783 
17784 	EL(ha, "using FIXED interupts\n");
17785 
17786 	QL_PRINT_3(ha, "done\n");
17787 
17788 	return (DDI_SUCCESS);
17789 }
17790 
17791 /*
17792  * ql_release_intr
17793  *	Releases aif legacy interrupt resources
17794  *
17795  * Input:
17796  *	ha = adapter state pointer.
17797  *
17798  * Returns:
17799  *
17800  * Context:
17801  *	Kernel context.
17802  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t	i, x;

	QL_PRINT_3(ha, "started\n");

	if (!(ha->iflags & IFLG_INTR_AIF)) {
		/* Legacy path: undo the earlier ddi_add_intr(). */
		ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
	} else {
		ha->iflags &= ~(IFLG_INTR_AIF);
		if (ha->htable != NULL && ha->hsize > 0) {
			/*
			 * x = number of handle slots allocated in htable.
			 * This may exceed intr_cnt (e.g. sparc MSI-X,
			 * where handles are duplicated up to the h/w
			 * vector count), so iterate over all slots.
			 */
			i = x = (int32_t)ha->hsize /
			    (int32_t)sizeof (ddi_intr_handle_t);
			if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
				(void) ddi_intr_block_disable(ha->htable,
				    ha->intr_cnt);
			} else {
				while (i-- > 0) {
					if (ha->htable[i] == 0) {
						EL(ha, "htable[%x]=0h\n", i);
						continue;
					}

					(void) ddi_intr_disable(ha->htable[i]);
				}
			}

			/*
			 * Remove handlers only for slots that had one
			 * added (index < intr_cnt); free every handle.
			 */
			i = x;
			while (i-- > 0) {
				if (i < ha->intr_cnt) {
					(void) ddi_intr_remove_handler(
					    ha->htable[i]);
				}
				(void) ddi_intr_free(ha->htable[i]);
			}

			ha->intr_cnt = 0;
			ha->intr_cap = 0;

			kmem_free(ha->htable, ha->hsize);
			ha->htable = NULL;
			ha->hsize = 0;
		}
	}

	/* Priority is stale once interrupts are gone. */
	ha->intr_pri = NULL;

	QL_PRINT_3(ha, "done\n");
}
17853 
17854 /*
17855  * ql_legacy_intr
17856  *	Sets up legacy interrupts.
17857  *
17858  *	NB: Only to be used if AIF (Advanced Interupt Framework)
17859  *	    if NOT in the kernel.
17860  *
17861  * Input:
17862  *	ha = adapter state pointer.
17863  *
17864  * Returns:
17865  *	DDI_SUCCESS or DDI_FAILURE.
17866  *
17867  * Context:
17868  *	Kernel context.
17869  */
17870 static int
ql_legacy_intr(ql_adapter_state_t * ha)17871 ql_legacy_intr(ql_adapter_state_t *ha)
17872 {
17873 	int	rval;
17874 
17875 	QL_PRINT_3(ha, "started\n");
17876 
17877 	/* Get iblock cookies to initialize mutexes */
17878 	if ((rval = ddi_get_iblock_cookie(ha->dip, 0, &ha->iblock_cookie)) !=
17879 	    DDI_SUCCESS) {
17880 		EL(ha, "failed, get_iblock: %xh\n", rval);
17881 		return (rval);
17882 	}
17883 	ha->intr_pri = (void *)ha->iblock_cookie;
17884 
17885 	/* Setup standard/legacy interrupt handler */
17886 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
17887 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
17888 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
17889 		    QL_NAME, ha->instance);
17890 		return (rval);
17891 	}
17892 	ha->iflags |= IFLG_INTR_LEGACY;
17893 
17894 	/* Setup mutexes */
17895 	if ((rval = ql_init_mutex(ha)) != DDI_SUCCESS) {
17896 		EL(ha, "failed, mutex init ret=%xh\n", rval);
17897 		ql_release_intr(ha);
17898 	} else {
17899 		EL(ha, "using legacy interrupts\n");
17900 	}
17901 	return (rval);
17902 }
17903 
17904 /*
17905  * ql_init_mutex
17906  *	Initializes mutex's
17907  *
17908  * Input:
17909  *	ha = adapter state pointer.
17910  *
17911  * Returns:
17912  *	DDI_SUCCESS or DDI_FAILURE.
17913  *
17914  * Context:
17915  *	Kernel context.
17916  */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(ha, "started\n");

	/*
	 * NOTE(review): ha->intr_pri must already be set by interrupt
	 * setup (aif get_pri or legacy iblock cookie) — every mutex
	 * below is initialized at that interrupt priority.
	 */

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* I/O completion queue protection. */
	mutex_init(&ha->comp_q_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
	cv_init(&ha->cv_comp_thread, NULL, CV_DRIVER, NULL);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	/* Unsolicited buffer conditional variable. */
	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);

	QL_PRINT_3(ha, "done\n");

	/* Always succeeds; return type kept for caller symmetry. */
	return (DDI_SUCCESS);
}
17960 
17961 /*
17962  * ql_destroy_mutex
17963  *	Destroys mutex's
17964  *
17965  * Input:
17966  *	ha = adapter state pointer.
17967  *
17968  * Returns:
17969  *
17970  * Context:
17971  *	Kernel context.
17972  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(ha, "started\n");

	/* Tear everything down in the reverse order of ql_init_mutex(). */
	mutex_destroy(&ha->dump_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->ub_mutex);
	mutex_destroy(&ha->pm_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->mbx_mutex);
	cv_destroy(&ha->cv_comp_thread);
	mutex_destroy(&ha->comp_q_mutex);
	mutex_destroy(&ha->req_ring_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(ha, "done\n");
}
17995 
17996 /*
17997  * ql_fwmodule_resolve
17998  *	Loads and resolves external firmware module and symbols
17999  *
18000  * Input:
18001  *	ha:		adapter state pointer.
18002  *
18003  * Returns:
18004  *	ql local function return status code:
18005  *		QL_SUCCESS - external f/w module module and symbols resolved
18006  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
18007  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
18008  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
18009  * Context:
18010  *	Kernel context.
18011  *
18012  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
18013  * could switch to a tighter scope around acutal download (and add an extra
18014  * ddi_modopen for module opens that occur before root is mounted).
18015  *
18016  */
18017 uint32_t
ql_fwmodule_resolve(ql_adapter_state_t * ha)18018 ql_fwmodule_resolve(ql_adapter_state_t *ha)
18019 {
18020 	int8_t			module[128];
18021 	int8_t			fw_version[128];
18022 	uint32_t		rval = QL_SUCCESS;
18023 	caddr_t			code, code02, code03;
18024 	uint8_t			*p_ucfw;
18025 	uint16_t		*p_usaddr, *p_uslen;
18026 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
18027 	uint32_t		*p_uiaddr02, *p_uilen02, *p_uilen03;
18028 	struct fw_table		*fwt;
18029 	extern struct fw_table	fw_table[];
18030 
18031 	QL_PRINT_3(ha, "started\n");
18032 
18033 	if (ha->fw_module != NULL) {
18034 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
18035 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
18036 		    ha->fw_subminor_version);
18037 		return (rval);
18038 	}
18039 
18040 	/* make sure the fw_class is in the fw_table of supported classes */
18041 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
18042 		if (fwt->fw_class == ha->fw_class)
18043 			break;			/* match */
18044 	}
18045 	if (fwt->fw_version == NULL) {
18046 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
18047 		    "in driver's fw_table", QL_NAME, ha->instance,
18048 		    ha->fw_class);
18049 		return (QL_FW_NOT_SUPPORTED);
18050 	}
18051 
18052 	/*
18053 	 * open the module related to the fw_class
18054 	 */
18055 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
18056 	    ha->fw_class);
18057 
18058 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
18059 	if (ha->fw_module == NULL) {
18060 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
18061 		    QL_NAME, ha->instance, module);
18062 		return (QL_FWMODLOAD_FAILED);
18063 	}
18064 
18065 	/*
18066 	 * resolve the fw module symbols, data types depend on fw_class
18067 	 */
18068 
18069 	switch (ha->fw_class) {
18070 	case 0x2200:
18071 	case 0x2300:
18072 	case 0x6322:
18073 
18074 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
18075 		    NULL)) == NULL) {
18076 			rval = QL_FWSYM_NOT_FOUND;
18077 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
18078 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
18079 		    "risc_code_addr01", NULL)) == NULL) {
18080 			rval = QL_FWSYM_NOT_FOUND;
18081 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
18082 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
18083 		    "risc_code_length01", NULL)) == NULL) {
18084 			rval = QL_FWSYM_NOT_FOUND;
18085 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
18086 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
18087 		    "firmware_version", NULL)) == NULL) {
18088 			rval = QL_FWSYM_NOT_FOUND;
18089 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
18090 		}
18091 
18092 		if (rval == QL_SUCCESS) {
18093 			ha->risc_fw[0].code = code;
18094 			ha->risc_fw[0].addr = *p_usaddr;
18095 			ha->risc_fw[0].length = *p_uslen;
18096 
18097 			(void) snprintf(fw_version, sizeof (fw_version),
18098 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
18099 		}
18100 		break;
18101 
18102 	case 0x2400:
18103 	case 0x2500:
18104 	case 0x2700:
18105 	case 0x8100:
18106 	case 0x8301fc:
18107 
18108 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
18109 		    NULL)) == NULL) {
18110 			rval = QL_FWSYM_NOT_FOUND;
18111 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
18112 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
18113 		    "risc_code_addr01", NULL)) == NULL) {
18114 			rval = QL_FWSYM_NOT_FOUND;
18115 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
18116 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
18117 		    "risc_code_length01", NULL)) == NULL) {
18118 			rval = QL_FWSYM_NOT_FOUND;
18119 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
18120 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
18121 		    "firmware_version", NULL)) == NULL) {
18122 			rval = QL_FWSYM_NOT_FOUND;
18123 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
18124 		}
18125 
18126 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
18127 		    NULL)) == NULL) {
18128 			rval = QL_FWSYM_NOT_FOUND;
18129 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
18130 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
18131 		    "risc_code_addr02", NULL)) == NULL) {
18132 			rval = QL_FWSYM_NOT_FOUND;
18133 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
18134 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
18135 		    "risc_code_length02", NULL)) == NULL) {
18136 			rval = QL_FWSYM_NOT_FOUND;
18137 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
18138 		}
18139 
18140 		if (rval == QL_SUCCESS) {
18141 			if (ha->fw_class == 0x2700) {
18142 				if ((code03 = ddi_modsym(ha->fw_module,
18143 				    "tmplt_code01", NULL)) == NULL) {
18144 					EL(ha, "failed, f/w module %d "
18145 					    "tmplt_code01 symbol\n", module);
18146 				} else if ((p_uilen03 = ddi_modsym(
18147 				    ha->fw_module, "tmplt_code_length01",
18148 				    NULL)) == NULL) {
18149 					code03 = NULL;
18150 					EL(ha, "failed, f/w module %d "
18151 					    "tmplt_code_length01 symbol\n",
18152 					    module);
18153 				}
18154 				ha->risc_fw[2].code = code03;
18155 				if ((ha->risc_fw[2].code = code03) != NULL) {
18156 					ha->risc_fw[2].length = *p_uilen03;
18157 				}
18158 			}
18159 			ha->risc_fw[0].code = code;
18160 			ha->risc_fw[0].addr = *p_uiaddr;
18161 			ha->risc_fw[0].length = *p_uilen;
18162 			ha->risc_fw[1].code = code02;
18163 			ha->risc_fw[1].addr = *p_uiaddr02;
18164 			ha->risc_fw[1].length = *p_uilen02;
18165 
18166 			(void) snprintf(fw_version, sizeof (fw_version),
18167 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
18168 		}
18169 		break;
18170 
18171 	default:
18172 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
18173 		rval = QL_FW_NOT_SUPPORTED;
18174 	}
18175 
18176 	if (rval != QL_SUCCESS) {
18177 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
18178 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
18179 		if (ha->fw_module != NULL) {
18180 			(void) ddi_modclose(ha->fw_module);
18181 			ha->fw_module = NULL;
18182 		}
18183 	} else {
18184 		/*
18185 		 * check for firmware version mismatch between module and
18186 		 * compiled in fw_table version.
18187 		 */
18188 
18189 		if (strcmp(fwt->fw_version, fw_version) != 0) {
18190 
18191 			/*
18192 			 * If f/w / driver version mismatches then
18193 			 * return a successful status -- however warn
18194 			 * the user that this is NOT recommended.
18195 			 */
18196 
18197 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
18198 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
18199 			    ha->instance, ha->fw_class, fwt->fw_version,
18200 			    fw_version);
18201 		}
18202 	}
18203 
18204 	QL_PRINT_3(ha, "done\n");
18205 
18206 	return (rval);
18207 }
18208 
18209 /*
18210  * ql_port_state
18211  *	Set the state on all adapter ports.
18212  *
18213  * Input:
18214  *	ha:	parent adapter state pointer.
18215  *	state:	port state.
18216  *	flags:	task daemon flags to set.
18217  *
18218  * Context:
18219  *	Interrupt or Kernel context, no mailbox commands allowed.
18220  */
18221 void
ql_port_state(ql_adapter_state_t * ha,uint32_t state,uint32_t flags)18222 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
18223 {
18224 	ql_adapter_state_t	*vha;
18225 
18226 	QL_PRINT_3(ha, "started\n");
18227 
18228 	TASK_DAEMON_LOCK(ha);
18229 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
18230 		if (FC_PORT_STATE_MASK(vha->state) != state) {
18231 			vha->state = state != FC_STATE_OFFLINE ?
18232 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
18233 			vha->task_daemon_flags |= flags;
18234 		}
18235 	}
18236 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
18237 	TASK_DAEMON_UNLOCK(ha);
18238 
18239 	QL_PRINT_3(ha, "done\n");
18240 }
18241 
18242 /*
18243  * ql_el_trace_alloc - Construct an extended logging trace descriptor.
18244  *
18245  * Input:	Pointer to the adapter state structure.
18246  * Context:	Kernel context.
18247  */
18248 void
ql_el_trace_alloc(ql_adapter_state_t * ha)18249 ql_el_trace_alloc(ql_adapter_state_t *ha)
18250 {
18251 	ql_trace_entry_t	*entry;
18252 	size_t			maxsize;
18253 
18254 	ha->ql_trace_desc =
18255 	    (ql_trace_desc_t *)kmem_zalloc(
18256 	    sizeof (ql_trace_desc_t), KM_SLEEP);
18257 
18258 	/* ql_log_entries could be adjusted in /etc/system */
18259 	maxsize = ql_log_entries * sizeof (ql_trace_entry_t);
18260 	entry = kmem_zalloc(maxsize, KM_SLEEP);
18261 
18262 	mutex_init(&ha->ql_trace_desc->mutex, NULL,
18263 	    MUTEX_DRIVER, NULL);
18264 
18265 	ha->ql_trace_desc->trace_buffer = entry;
18266 	ha->ql_trace_desc->trace_buffer_size = maxsize;
18267 	ha->ql_trace_desc->nindex = 0;
18268 
18269 	ha->ql_trace_desc->nentries = ql_log_entries;
18270 	ha->ql_trace_desc->start = ha->ql_trace_desc->end = 0;
18271 	ha->ql_trace_desc->csize = 0;
18272 	ha->ql_trace_desc->count = 0;
18273 }
18274 
18275 /*
18276  * ql_el_trace_dealloc - Destroy an extended logging trace descriptor.
18277  *
18278  * Input:	Pointer to the adapter state structure.
18279  * Context:	Kernel context.
18280  */
18281 void
ql_el_trace_dealloc(ql_adapter_state_t * ha)18282 ql_el_trace_dealloc(ql_adapter_state_t *ha)
18283 {
18284 	if (ha->ql_trace_desc != NULL) {
18285 		if (ha->ql_trace_desc->trace_buffer != NULL) {
18286 			kmem_free(ha->ql_trace_desc->trace_buffer,
18287 			    ha->ql_trace_desc->trace_buffer_size);
18288 		}
18289 		mutex_destroy(&ha->ql_trace_desc->mutex);
18290 		kmem_free(ha->ql_trace_desc,
18291 		    sizeof (ql_trace_desc_t));
18292 	}
18293 }
18294 
18295 /*
18296  * els_cmd_text	- Return a pointer to a string describing the command
18297  *
18298  * Input:	els_cmd = the els command opcode.
18299  * Returns:	pointer to a string.
18300  * Context:	Kernel context.
18301  */
18302 char *
els_cmd_text(int els_cmd)18303 els_cmd_text(int els_cmd)
18304 {
18305 	cmd_table_t *entry = &els_cmd_tbl[0];
18306 
18307 	return (cmd_text(entry, els_cmd));
18308 }
18309 
18310 /*
18311  * mbx_cmd_text - Return a pointer to a string describing the command
18312  *
18313  * Input:	mbx_cmd = the mailbox command opcode.
18314  * Returns:	pointer to a string.
18315  * Context:	Kernel context.
18316  */
18317 char *
mbx_cmd_text(int mbx_cmd)18318 mbx_cmd_text(int mbx_cmd)
18319 {
18320 	cmd_table_t *entry = &mbox_cmd_tbl[0];
18321 
18322 	return (cmd_text(entry, mbx_cmd));
18323 }
18324 
18325 /*
18326  * cmd_text	Return a pointer to a string describing the command
18327  *
18328  * Input:	entry = the command table
18329  *		cmd = the command.
18330  * Returns:	pointer to a string.
18331  * Context:	Kernel context.
18332  */
18333 char *
cmd_text(cmd_table_t * entry,int cmd)18334 cmd_text(cmd_table_t *entry, int cmd)
18335 {
18336 	for (; entry->cmd != 0; entry++) {
18337 		if (entry->cmd == cmd) {
18338 			break;
18339 		}
18340 	}
18341 	return (entry->string);
18342 }
18343 
18344 /*
18345  * ql_els_24xx_iocb
18346  * 	els request indication.
18347  *
18348  * Input:
18349  *	ha:	adapter state pointer.
18350  *	req_q:	request queue structure pointer.
18351  *	srb:	scsi request block pointer.
18352  *      arg:	els passthru entry iocb pointer.
18353  *
18354  * Returns:
18355  *
18356  * Context:	Kernel context.
18357  */
18358 void
ql_els_24xx_iocb(ql_adapter_state_t * ha,ql_request_q_t * req_q,ql_srb_t * srb,void * arg)18359 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *srb,
18360     void *arg)
18361 {
18362 	els_descriptor_t	els_desc;
18363 
18364 	/* Extract the ELS information */
18365 	ql_fca_isp_els_request(ha, req_q, (fc_packet_t *)srb->pkt,
18366 	    &els_desc);
18367 
18368 	/* Construct the passthru entry */
18369 	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
18370 
18371 	/* Ensure correct endianness */
18372 	ql_isp_els_handle_cmd_endian(ha, srb);
18373 }
18374 
18375 /*
18376  * ql_fca_isp_els_request
18377  *	Extract into an els descriptor the info required
18378  *	to build an els_passthru iocb from an fc packet.
18379  *
18380  * Input:
18381  *	ha:		adapter state pointer.
18382  *	req_q:		request queue structure pointer.
18383  *	pkt:		fc packet pointer
18384  *	els_desc:	els descriptor pointer
18385  *
18386  * Context:
18387  *	Kernel context.
18388  */
18389 static void
ql_fca_isp_els_request(ql_adapter_state_t * ha,ql_request_q_t * req_q,fc_packet_t * pkt,els_descriptor_t * els_desc)18390 ql_fca_isp_els_request(ql_adapter_state_t *ha, ql_request_q_t *req_q,
18391     fc_packet_t *pkt, els_descriptor_t *els_desc)
18392 {
18393 	ls_code_t	els;
18394 
18395 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
18396 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
18397 
18398 	els_desc->els = els.ls_code;
18399 
18400 	els_desc->els_handle = req_q->req_ring.acc_handle;
18401 	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
18402 	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
18403 	/* if n_port_handle is not < 0x7d use 0 */
18404 	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
18405 		els_desc->n_port_handle = ha->n_port->n_port_handle;
18406 	} else {
18407 		els_desc->n_port_handle = 0;
18408 	}
18409 	els_desc->control_flags = 0;
18410 	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
18411 	/*
18412 	 * Transmit DSD. This field defines the Fibre Channel Frame payload
18413 	 * (without the frame header) in system memory.
18414 	 */
18415 	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
18416 	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
18417 	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
18418 
18419 	els_desc->rsp_byte_count = pkt->pkt_rsplen;
18420 	/*
18421 	 * Receive DSD. This field defines the ELS response payload buffer
18422 	 * for the ISP24xx firmware transferring the received ELS
18423 	 * response frame to a location in host memory.
18424 	 */
18425 	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
18426 	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
18427 	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
18428 }
18429 
18430 /*
18431  * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
18432  * using the els descriptor.
18433  *
18434  * Input:	ha = adapter state pointer.
18435  *		els_desc = els descriptor pointer.
18436  *		els_entry = els passthru entry iocb pointer.
18437  * Returns:
18438  * Context:	Kernel context.
18439  */
18440 static void
ql_isp_els_request_ctor(els_descriptor_t * els_desc,els_passthru_entry_t * els_entry)18441 ql_isp_els_request_ctor(els_descriptor_t *els_desc,
18442     els_passthru_entry_t *els_entry)
18443 {
18444 	uint32_t	*ptr32;
18445 
18446 	/*
18447 	 * Construct command packet.
18448 	 */
18449 	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
18450 	    (uint8_t)ELS_PASSTHRU_TYPE);
18451 	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
18452 	    els_desc->n_port_handle);
18453 	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
18454 	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
18455 	    (uint32_t)0);
18456 	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
18457 	    els_desc->els);
18458 	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
18459 	    els_desc->d_id.b.al_pa);
18460 	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
18461 	    els_desc->d_id.b.area);
18462 	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
18463 	    els_desc->d_id.b.domain);
18464 	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
18465 	    els_desc->s_id.b.al_pa);
18466 	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
18467 	    els_desc->s_id.b.area);
18468 	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
18469 	    els_desc->s_id.b.domain);
18470 	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
18471 	    els_desc->control_flags);
18472 	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
18473 	    els_desc->rsp_byte_count);
18474 	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
18475 	    els_desc->cmd_byte_count);
18476 	/* Load transmit data segments and count. */
18477 	ptr32 = (uint32_t *)&els_entry->dseg;
18478 	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
18479 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
18480 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
18481 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
18482 	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
18483 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
18484 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
18485 	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
18486 }
18487 
18488 /*
18489  * ql_isp_els_handle_cmd_endian - els requests must be in big endian
18490  *				  in host memory.
18491  *
18492  * Input:	ha = adapter state pointer.
18493  *		srb = scsi request block
18494  * Returns:
18495  * Context:	Kernel context.
18496  */
18497 void
ql_isp_els_handle_cmd_endian(ql_adapter_state_t * ha,ql_srb_t * srb)18498 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
18499 {
18500 	ls_code_t	els;
18501 	fc_packet_t	*pkt;
18502 	uint8_t		*ptr;
18503 
18504 	pkt = srb->pkt;
18505 
18506 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
18507 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
18508 
18509 	ptr = (uint8_t *)pkt->pkt_cmd;
18510 
18511 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
18512 }
18513 
18514 /*
18515  * ql_isp_els_handle_rsp_endian - els responses must be in big endian
18516  *				  in host memory.
18517  * Input:	ha = adapter state pointer.
18518  *		srb = scsi request block
18519  * Returns:
18520  * Context:	Kernel context.
18521  */
18522 void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t * ha,ql_srb_t * srb)18523 ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
18524 {
18525 	ls_code_t	els;
18526 	fc_packet_t	*pkt;
18527 	uint8_t		*ptr;
18528 
18529 	pkt = srb->pkt;
18530 
18531 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
18532 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
18533 
18534 	ptr = (uint8_t *)pkt->pkt_resp;
18535 	BIG_ENDIAN_32(&els);
18536 	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
18537 }
18538 
18539 /*
18540  * ql_isp_els_handle_endian - els requests/responses must be in big endian
18541  *			      in host memory.
18542  * Input:	ha = adapter state pointer.
18543  *		ptr = els request/response buffer pointer.
18544  *		ls_code = els command code.
18545  * Returns:
18546  * Context:	Kernel context.
18547  */
18548 void
ql_isp_els_handle_endian(ql_adapter_state_t * ha,uint8_t * ptr,uint8_t ls_code)18549 ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
18550 {
18551 	switch (ls_code) {
18552 	case LA_ELS_PLOGI: {
18553 		BIG_ENDIAN_32(ptr);	/* Command Code */
18554 		ptr += 4;
18555 		BIG_ENDIAN_16(ptr);	/* FC-PH version */
18556 		ptr += 2;
18557 		BIG_ENDIAN_16(ptr);	/* b2b credit */
18558 		ptr += 2;
18559 		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
18560 		ptr += 2;
18561 		BIG_ENDIAN_16(ptr);	/* Rcv data size */
18562 		ptr += 2;
18563 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
18564 		ptr += 2;
18565 		BIG_ENDIAN_16(ptr);	/* Rel offset */
18566 		ptr += 2;
18567 		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
18568 		ptr += 4;		/* Port Name */
18569 		ptr += 8;		/* Node Name */
18570 		ptr += 8;		/* Class 1 */
18571 		ptr += 16;		/* Class 2 */
18572 		ptr += 16;		/* Class 3 */
18573 		BIG_ENDIAN_16(ptr);	/* Service options */
18574 		ptr += 2;
18575 		BIG_ENDIAN_16(ptr);	/* Initiator control */
18576 		ptr += 2;
18577 		BIG_ENDIAN_16(ptr);	/* Recipient Control */
18578 		ptr += 2;
18579 		BIG_ENDIAN_16(ptr);	/* Rcv size */
18580 		ptr += 2;
18581 		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
18582 		ptr += 2;
18583 		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
18584 		ptr += 2;
18585 		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
18586 		break;
18587 	}
18588 	case LA_ELS_PRLI: {
18589 		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
18590 		ptr += 4;		/* Type */
18591 		ptr += 2;
18592 		BIG_ENDIAN_16(ptr);	/* Flags */
18593 		ptr += 2;
18594 		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
18595 		ptr += 4;
18596 		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
18597 		ptr += 4;
18598 		BIG_ENDIAN_32(ptr);	/* Flags */
18599 		break;
18600 	}
18601 	default:
18602 		EL(ha, "can't handle els code %x\n", ls_code);
18603 		break;
18604 	}
18605 }
18606 
18607 /*
18608  * ql_n_port_plogi
18609  *	In N port 2 N port topology where an N Port has logged in with the
18610  *	firmware because it has the N_Port login initiative, we send up
18611  *	a plogi by proxy which stimulates the login procedure to continue.
18612  *
18613  * Input:
18614  *	ha = adapter state pointer.
18615  * Returns:
18616  *
18617  * Context:
18618  *	Kernel context.
18619  */
18620 static int
ql_n_port_plogi(ql_adapter_state_t * ha)18621 ql_n_port_plogi(ql_adapter_state_t *ha)
18622 {
18623 	int		rval;
18624 	ql_tgt_t	*tq = NULL;
18625 	ql_head_t done_q = { NULL, NULL };
18626 
18627 	rval = QL_SUCCESS;
18628 
18629 	if (ha->topology & QL_N_PORT) {
18630 		/* if we're doing this the n_port_handle must be good */
18631 		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
18632 			tq = ql_loop_id_to_queue(ha,
18633 			    ha->n_port->n_port_handle);
18634 			if (tq != NULL) {
18635 				(void) ql_send_plogi(ha, tq, &done_q);
18636 			} else {
18637 				EL(ha, "n_port_handle = %x, tq = %x\n",
18638 				    ha->n_port->n_port_handle, tq);
18639 			}
18640 		} else {
18641 			EL(ha, "n_port_handle = %x, tq = %x\n",
18642 			    ha->n_port->n_port_handle, tq);
18643 		}
18644 		if (done_q.first != NULL) {
18645 			ql_done(done_q.first, B_FALSE);
18646 		}
18647 	}
18648 	return (rval);
18649 }
18650 
18651 /*
18652  * Compare two WWNs. The NAA is omitted for comparison.
18653  *
18654  * Note particularly that the indentation used in this
18655  * function  isn't according to Sun recommendations. It
18656  * is indented to make reading a bit easy.
18657  *
18658  * Return Values:
18659  *   if first == second return  0
18660  *   if first > second  return  1
18661  *   if first < second  return -1
18662  */
18663 /* ARGSUSED */
18664 int
ql_wwn_cmp(ql_adapter_state_t * ha,la_wwn_t * first,la_wwn_t * second)18665 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
18666 {
18667 	la_wwn_t t1, t2;
18668 	int rval;
18669 
18670 	/*
18671 	 * Fibre Channel protocol is big endian, so compare
18672 	 * as big endian values
18673 	 */
18674 	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
18675 	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
18676 
18677 	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
18678 	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
18679 
18680 	if (t1.i_wwn[0] == t2.i_wwn[0]) {
18681 		if (t1.i_wwn[1] == t2.i_wwn[1]) {
18682 			rval = 0;
18683 		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
18684 			rval = 1;
18685 		} else {
18686 			rval = -1;
18687 		}
18688 	} else {
18689 		if (t1.i_wwn[0] > t2.i_wwn[0]) {
18690 			rval = 1;
18691 		} else {
18692 			rval = -1;
18693 		}
18694 	}
18695 	return (rval);
18696 }
18697 
18698 /*
18699  * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
18700  *
18701  * Input:	Pointer to the adapter state structure.
18702  * Returns:	Success or Failure.
18703  * Context:	Kernel context.
18704  */
18705 int
ql_nvram_cache_desc_ctor(ql_adapter_state_t * ha)18706 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
18707 {
18708 	int	rval = DDI_SUCCESS;
18709 
18710 	QL_PRINT_3(ha, "started\n");
18711 
18712 	ha->nvram_cache =
18713 	    (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
18714 	    KM_SLEEP);
18715 
18716 	if (ha->nvram_cache == NULL) {
18717 		cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
18718 		    " descriptor", QL_NAME, ha->instance);
18719 		rval = DDI_FAILURE;
18720 	} else {
18721 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
18722 			ha->nvram_cache->size = sizeof (nvram_24xx_t);
18723 		} else {
18724 			ha->nvram_cache->size = sizeof (nvram_t);
18725 		}
18726 		ha->nvram_cache->cache =
18727 		    (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
18728 		if (ha->nvram_cache->cache == NULL) {
18729 			cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
18730 			    QL_NAME, ha->instance);
18731 			kmem_free(ha->nvram_cache,
18732 			    sizeof (nvram_cache_desc_t));
18733 			ha->nvram_cache = 0;
18734 			rval = DDI_FAILURE;
18735 		} else {
18736 			ha->nvram_cache->valid = 0;
18737 		}
18738 	}
18739 
18740 	QL_PRINT_3(ha, "done\n");
18741 
18742 	return (rval);
18743 }
18744 
18745 /*
18746  * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
18747  *
18748  * Input:	Pointer to the adapter state structure.
18749  * Returns:	Success or Failure.
18750  * Context:	Kernel context.
18751  */
18752 int
ql_nvram_cache_desc_dtor(ql_adapter_state_t * ha)18753 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
18754 {
18755 	int	rval = DDI_SUCCESS;
18756 
18757 	QL_PRINT_3(ha, "started\n");
18758 
18759 	if (ha->nvram_cache == NULL) {
18760 		cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
18761 		    QL_NAME, ha->instance);
18762 		rval = DDI_FAILURE;
18763 	} else {
18764 		if (ha->nvram_cache->cache != NULL) {
18765 			kmem_free(ha->nvram_cache->cache,
18766 			    ha->nvram_cache->size);
18767 		}
18768 		kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
18769 	}
18770 
18771 	QL_PRINT_3(ha, "done\n");
18772 
18773 	return (rval);
18774 }
18775 
18776 /*
18777  * ql_plogi_params_desc_ctor - Construct an plogi retry params descriptor.
18778  *
18779  * Input:	Pointer to the adapter state structure.
18780  * Returns:	Success or Failure.
18781  * Context:	Kernel context.
18782  */
18783 int
ql_plogi_params_desc_ctor(ql_adapter_state_t * ha)18784 ql_plogi_params_desc_ctor(ql_adapter_state_t *ha)
18785 {
18786 	int	rval = DDI_SUCCESS;
18787 
18788 	QL_PRINT_3(ha, "started\n");
18789 
18790 	ha->plogi_params =
18791 	    (plogi_params_desc_t *)kmem_zalloc(sizeof (plogi_params_desc_t),
18792 	    KM_SLEEP);
18793 
18794 	if (ha->plogi_params == NULL) {
18795 		cmn_err(CE_WARN, "%s(%d): can't construct plogi params"
18796 		    " descriptor", QL_NAME, ha->instance);
18797 		rval = DDI_FAILURE;
18798 	} else {
18799 		/* default initializers. */
18800 		ha->plogi_params->retry_cnt = QL_PLOGI_RETRY_CNT;
18801 		ha->plogi_params->retry_dly_usec = QL_PLOGI_RETRY_DLY_USEC;
18802 	}
18803 
18804 	QL_PRINT_3(ha, "done\n");
18805 
18806 	return (rval);
18807 }
18808 
18809 /*
18810  * ql_plogi_params_desc_dtor - Destroy an plogi retry params descriptor.
18811  *
18812  * Input:	Pointer to the adapter state structure.
18813  * Returns:	Success or Failure.
18814  * Context:	Kernel context.
18815  */
18816 int
ql_plogi_params_desc_dtor(ql_adapter_state_t * ha)18817 ql_plogi_params_desc_dtor(ql_adapter_state_t *ha)
18818 {
18819 	int	rval = DDI_SUCCESS;
18820 
18821 	QL_PRINT_3(ha, "started\n");
18822 
18823 	if (ha->plogi_params == NULL) {
18824 		cmn_err(CE_WARN, "%s(%d): can't destroy plogi params"
18825 		    " descriptor", QL_NAME, ha->instance);
18826 		rval = DDI_FAILURE;
18827 	} else {
18828 		kmem_free(ha->plogi_params, sizeof (plogi_params_desc_t));
18829 	}
18830 
18831 	QL_PRINT_3(ha, "done\n");
18832 
18833 	return (rval);
18834 }
18835 
18836 /*
18837  * ql_toggle_loop_state
18838  *	Changes looop state to offline and then online.
18839  *
18840  * Input:
18841  *	ha:	adapter state pointer.
18842  *
18843  * Context:
18844  *	Kernel context.
18845  */
18846 void
ql_toggle_loop_state(ql_adapter_state_t * ha)18847 ql_toggle_loop_state(ql_adapter_state_t *ha)
18848 {
18849 	uint32_t	timer;
18850 
18851 	if (LOOP_READY(ha)) {
18852 		ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
18853 		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
18854 		for (timer = 30; timer; timer--) {
18855 			if (!(ha->task_daemon_flags & FC_STATE_CHANGE)) {
18856 				break;
18857 			}
18858 			delay(100);
18859 		}
18860 		ql_loop_online(ha);
18861 	}
18862 }
18863 
18864 /*
18865  * ql_create_queues
18866  *	Allocate request/response queues.
18867  *
18868  * Input:
18869  *	ha:	adapter state pointer.
18870  *
18871  * Returns:
18872  *	ql driver local function return status codes
18873  *
18874  * Context:
18875  *	Kernel context.
18876  */
18877 static int
ql_create_queues(ql_adapter_state_t * ha)18878 ql_create_queues(ql_adapter_state_t *ha)
18879 {
18880 	int		rval;
18881 	uint16_t	cnt;
18882 
18883 	QL_PRINT_10(ha, "started\n");
18884 
18885 	if (ha->req_q[0] != NULL) {
18886 		QL_PRINT_10(ha, "done, queues already exist\n");
18887 		return (QL_SUCCESS);
18888 	}
18889 	if (ha->vp_index != 0) {
18890 		QL_PRINT_10(ha, "done, no multi-req-q \n");
18891 		ha->req_q[0] = ha->pha->req_q[0];
18892 		ha->req_q[1] = ha->pha->req_q[1];
18893 		ha->rsp_queues = ha->pha->rsp_queues;
18894 		return (QL_SUCCESS);
18895 	}
18896 
18897 	/* Setup request queue buffer pointers. */
18898 	ha->req_q[0] = kmem_zalloc(sizeof (ql_request_q_t), KM_SLEEP);
18899 
18900 	/* Allocate request queue. */
18901 	ha->req_q[0]->req_entry_cnt = REQUEST_ENTRY_CNT;
18902 	ha->req_q[0]->req_ring.size = ha->req_q[0]->req_entry_cnt *
18903 	    REQUEST_ENTRY_SIZE;
18904 	if (ha->flags & QUEUE_SHADOW_PTRS) {
18905 		ha->req_q[0]->req_ring.size += SHADOW_ENTRY_SIZE;
18906 	}
18907 	ha->req_q[0]->req_ring.type = LITTLE_ENDIAN_DMA;
18908 	ha->req_q[0]->req_ring.max_cookie_count = 1;
18909 	ha->req_q[0]->req_ring.alignment = 64;
18910 	if ((rval = ql_alloc_phys(ha, &ha->req_q[0]->req_ring, KM_SLEEP)) !=
18911 	    QL_SUCCESS) {
18912 		EL(ha, "request queue status=%xh", rval);
18913 		ql_delete_queues(ha);
18914 		return (rval);
18915 	}
18916 	if (ha->flags & QUEUE_SHADOW_PTRS) {
18917 		ha->req_q[0]->req_out_shadow_ofst =
18918 		    ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18919 		ha->req_q[0]->req_out_shadow_ptr = (uint32_t *)
18920 		    ((caddr_t)ha->req_q[0]->req_ring.bp +
18921 		    ha->req_q[0]->req_out_shadow_ofst);
18922 	}
18923 	ha->fw_transfer_size = ha->req_q[0]->req_ring.size;
18924 	if (ha->flags & MULTI_QUEUE) {
18925 		ha->req_q[0]->mbar_req_in = MBAR2_REQ_IN;
18926 		ha->req_q[0]->mbar_req_out = MBAR2_REQ_OUT;
18927 		if (ha->req_q[0]->mbar_req_in >= ha->mbar_size) {
18928 			EL(ha, "req_q index=0 exceeds mbar size=%xh",
18929 			    ha->mbar_size);
18930 			ql_delete_queues(ha);
18931 			return (QL_FUNCTION_PARAMETER_ERROR);
18932 		}
18933 	}
18934 
18935 	/* Allocate response queues. */
18936 	if (ha->rsp_queues == NULL) {
18937 		if (ha->intr_cnt > 1) {
18938 			ha->rsp_queues_cnt = (uint8_t)(ha->intr_cnt - 1);
18939 		} else {
18940 			ha->rsp_queues_cnt = 1;
18941 		}
18942 		ha->io_min_rsp_q_number = 0;
18943 		if (ha->rsp_queues_cnt > 1) {
18944 			/* Setup request queue buffer pointers. */
18945 			ha->req_q[1] = kmem_zalloc(sizeof (ql_request_q_t),
18946 			    KM_SLEEP);
18947 
18948 			/* Allocate request queue. */
18949 			ha->req_q[1]->req_entry_cnt = REQUEST_ENTRY_CNT;
18950 			ha->req_q[1]->req_ring.size =
18951 			    ha->req_q[1]->req_entry_cnt * REQUEST_ENTRY_SIZE;
18952 			if (ha->flags & QUEUE_SHADOW_PTRS) {
18953 				ha->req_q[1]->req_ring.size +=
18954 				    SHADOW_ENTRY_SIZE;
18955 			}
18956 			ha->req_q[1]->req_ring.type = LITTLE_ENDIAN_DMA;
18957 			ha->req_q[1]->req_ring.max_cookie_count = 1;
18958 			ha->req_q[1]->req_ring.alignment = 64;
18959 			if ((rval = ql_alloc_phys(ha, &ha->req_q[1]->req_ring,
18960 			    KM_SLEEP)) != QL_SUCCESS) {
18961 				EL(ha, "ha request queue status=%xh", rval);
18962 				ql_delete_queues(ha);
18963 				return (rval);
18964 			}
18965 			if (ha->flags & QUEUE_SHADOW_PTRS) {
18966 				ha->req_q[1]->req_out_shadow_ofst =
18967 				    ha->req_q[1]->req_entry_cnt *
18968 				    REQUEST_ENTRY_SIZE;
18969 				ha->req_q[1]->req_out_shadow_ptr = (uint32_t *)
18970 				    ((caddr_t)ha->req_q[1]->req_ring.bp +
18971 				    ha->req_q[1]->req_out_shadow_ofst);
18972 			}
18973 			ha->req_q[1]->req_q_number = 1;
18974 			if (ha->flags & MULTI_QUEUE) {
18975 				ha->req_q[1]->mbar_req_in =
18976 				    ha->mbar_queue_offset + MBAR2_REQ_IN;
18977 				ha->req_q[1]->mbar_req_out =
18978 				    ha->mbar_queue_offset + MBAR2_REQ_OUT;
18979 				if (ha->req_q[1]->mbar_req_in >=
18980 				    ha->mbar_size) {
18981 					EL(ha, "ha req_q index=1 exceeds mbar "
18982 					    "size=%xh", ha->mbar_size);
18983 					ql_delete_queues(ha);
18984 					return (QL_FUNCTION_PARAMETER_ERROR);
18985 				}
18986 			}
18987 		}
18988 
18989 		/* Allocate enough rsp_queue descriptors for IRM */
18990 		ha->rsp_queues_size = (ha->hsize / sizeof (ddi_intr_handle_t)) *
18991 		    sizeof (ql_response_q_t *);
18992 		ha->rsp_queues = kmem_zalloc(ha->rsp_queues_size, KM_SLEEP);
18993 
18994 		/* Create rsp_queues for the current rsp_queue_cnt */
18995 		for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
18996 			rval = ql_create_rsp_queue(ha, cnt);
18997 			if (rval != QL_SUCCESS) {
18998 				ql_delete_queues(ha);
18999 				return (rval);
19000 			}
19001 		}
19002 	}
19003 
19004 	if (CFG_IST(ha, CFG_FCIP_TYPE_1)) {
19005 		/* Allocate IP receive queue. */
19006 		ha->rcv_ring.size = RCVBUF_QUEUE_SIZE;
19007 		ha->rcv_ring.type = LITTLE_ENDIAN_DMA;
19008 		ha->rcv_ring.max_cookie_count = 1;
19009 		ha->rcv_ring.alignment = 64;
19010 		if ((rval = ql_alloc_phys(ha, &ha->rcv_ring, KM_SLEEP)) !=
19011 		    QL_SUCCESS) {
19012 			EL(ha, "receive queue status=%xh", rval);
19013 			ql_delete_queues(ha);
19014 			return (rval);
19015 		}
19016 	}
19017 
19018 	QL_PRINT_10(ha, "done\n");
19019 
19020 	return (rval);
19021 }
19022 
19023 /*
19024  * ql_create_rsp_queue
19025  *	Allocate a response queues.
19026  *
19027  * Input:
19028  *	ha:	adapter state pointer.
19029  *
19030  * Returns:
19031  *	ql driver local function return status codes
19032  *
19033  * Context:
19034  *	Kernel context.
19035  */
19036 static int
ql_create_rsp_queue(ql_adapter_state_t * ha,uint16_t rsp_q_indx)19037 ql_create_rsp_queue(ql_adapter_state_t *ha, uint16_t rsp_q_indx)
19038 {
19039 	ql_response_q_t	*rsp_q;
19040 	int		rval = QL_SUCCESS;
19041 
19042 	QL_PRINT_3(ha, "started\n");
19043 
19044 	ha->rsp_queues[rsp_q_indx] = rsp_q =
19045 	    kmem_zalloc(sizeof (ql_response_q_t), KM_SLEEP);
19046 	/* ISP response ring and interrupt protection. */
19047 	mutex_init(&rsp_q->intr_mutex, NULL, MUTEX_DRIVER, ha->intr_pri);
19048 	rsp_q->rsp_q_number = rsp_q_indx;
19049 	rsp_q->msi_x_vector = (uint16_t)(rsp_q_indx + 1);
19050 	if (ha->flags & MULTI_QUEUE) {
19051 		rsp_q->mbar_rsp_in = rsp_q->rsp_q_number *
19052 		    ha->mbar_queue_offset + MBAR2_RESP_IN;
19053 		rsp_q->mbar_rsp_out = rsp_q->rsp_q_number *
19054 		    ha->mbar_queue_offset + MBAR2_RESP_OUT;
19055 		if (rsp_q->mbar_rsp_in >= ha->mbar_size) {
19056 			EL(ha, "rsp_q index=%xh exceeds mbar size=%xh",
19057 			    rsp_q_indx, ha->mbar_size);
19058 			return (QL_FUNCTION_PARAMETER_ERROR);
19059 		}
19060 	}
19061 
19062 	rsp_q->rsp_entry_cnt = RESPONSE_ENTRY_CNT;
19063 	rsp_q->rsp_ring.size = rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19064 	if (ha->flags & QUEUE_SHADOW_PTRS) {
19065 		rsp_q->rsp_ring.size += SHADOW_ENTRY_SIZE;
19066 	}
19067 	rsp_q->rsp_ring.type = LITTLE_ENDIAN_DMA;
19068 	rsp_q->rsp_ring.max_cookie_count = 1;
19069 	rsp_q->rsp_ring.alignment = 64;
19070 	rval = ql_alloc_phys(ha, &rsp_q->rsp_ring, KM_SLEEP);
19071 	if (rval != QL_SUCCESS) {
19072 		EL(ha, "response queue status=%xh", rval);
19073 	}
19074 	if (ha->flags & QUEUE_SHADOW_PTRS) {
19075 		rsp_q->rsp_in_shadow_ofst =
19076 		    rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
19077 		rsp_q->rsp_in_shadow_ptr = (uint32_t *)
19078 		    ((caddr_t)rsp_q->rsp_ring.bp +
19079 		    rsp_q->rsp_in_shadow_ofst);
19080 	}
19081 
19082 	QL_PRINT_3(ha, "done\n");
19083 	return (rval);
19084 }
19085 
19086 /*
19087  * ql_delete_queues
19088  *	Deletes request/response queues.
19089  *
19090  * Input:
19091  *	ha = adapter state pointer.
19092  *
19093  * Context:
19094  *	Kernel context.
19095  */
19096 static void
ql_delete_queues(ql_adapter_state_t * ha)19097 ql_delete_queues(ql_adapter_state_t *ha)
19098 {
19099 	uint32_t	cnt;
19100 
19101 	QL_PRINT_10(ha, "started\n");
19102 
19103 	if (ha->vp_index != 0) {
19104 		QL_PRINT_10(ha, "done, no multi-req-q \n");
19105 		ha->req_q[0] = ha->req_q[1] = NULL;
19106 		return;
19107 	}
19108 	if (ha->req_q[0] != NULL) {
19109 		ql_free_phys(ha, &ha->req_q[0]->req_ring);
19110 		kmem_free(ha->req_q[0], sizeof (ql_request_q_t));
19111 		ha->req_q[0] = NULL;
19112 	}
19113 	if (ha->req_q[1] != NULL) {
19114 		ql_free_phys(ha, &ha->req_q[1]->req_ring);
19115 		kmem_free(ha->req_q[1], sizeof (ql_request_q_t));
19116 		ha->req_q[1] = NULL;
19117 	}
19118 
19119 	if (ha->rsp_queues != NULL) {
19120 		ql_response_q_t	*rsp_q;
19121 
19122 		for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19123 			if ((rsp_q = ha->rsp_queues[cnt]) == NULL) {
19124 				continue;
19125 			}
19126 
19127 			mutex_destroy(&rsp_q->intr_mutex);
19128 			ql_free_phys(ha, &rsp_q->rsp_ring);
19129 			kmem_free(rsp_q, sizeof (ql_response_q_t));
19130 			ha->rsp_queues[cnt] = NULL;
19131 		}
19132 		kmem_free(ha->rsp_queues, ha->rsp_queues_size);
19133 		ha->rsp_queues = NULL;
19134 	}
19135 
19136 	QL_PRINT_10(ha, "done\n");
19137 }
19138 
19139 /*
19140  * ql_multi_queue_support
19141  *      Test 2500 or 8100 adapters for support of multi-queue
19142  *
19143  * Input:
19144  *	ha:	adapter state pointer.
19145  *
19146  * Returns:
19147  *      ql local function return status code.
19148  *
19149  * Context:
19150  *	Kernel context.
19151  */
19152 static int
ql_multi_queue_support(ql_adapter_state_t * ha)19153 ql_multi_queue_support(ql_adapter_state_t *ha)
19154 {
19155 	uint32_t	data;
19156 	int		rval;
19157 
19158 	data = ql_get_cap_ofst(ha, PCI_CAP_ID_MSI_X);
19159 	if ((ql_pci_config_get16(ha, data + PCI_MSIX_CTRL) &
19160 	    PCI_MSIX_TBL_SIZE_MASK) > 2) {
19161 		ha->mbar_size = MBAR2_MULTI_Q_MAX * MBAR2_REG_OFFSET;
19162 
19163 		if (ql_map_mem_bar(ha, &ha->mbar_dev_handle, &ha->mbar,
19164 		    PCI_CONF_BASE3, ha->mbar_size) != DDI_SUCCESS) {
19165 			return (QL_FUNCTION_FAILED);
19166 		}
19167 		if ((rval = qlc_fm_check_acc_handle(ha,
19168 		    ha->mbar_dev_handle)) != DDI_FM_OK) {
19169 			qlc_fm_report_err_impact(ha,
19170 			    QL_FM_EREPORT_ACC_HANDLE_CHECK);
19171 			EL(ha, "fm_check_acc_handle mbar_dev_handle "
19172 			    "status=%xh\n", rval);
19173 			return (QL_FUNCTION_FAILED);
19174 		}
19175 		return (QL_SUCCESS);
19176 	}
19177 	return (QL_FUNCTION_FAILED);
19178 }
19179 
19180 /*
19181  * ql_get_cap_ofst
19182  *	Locates PCI configuration space capability pointer
19183  *
19184  * Input:
19185  *	ha:	adapter state pointer.
19186  *	cap_id:	Capability ID.
19187  *
19188  * Returns:
19189  *	capability offset
19190  *
19191  * Context:
19192  *	Kernel context.
19193  */
19194 int
ql_get_cap_ofst(ql_adapter_state_t * ha,uint8_t cap_id)19195 ql_get_cap_ofst(ql_adapter_state_t *ha, uint8_t cap_id)
19196 {
19197 	int	cptr = PCI_CAP_NEXT_PTR_NULL;
19198 
19199 	QL_PRINT_3(ha, "started\n");
19200 
19201 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
19202 		cptr = ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
19203 
19204 		while (cptr != PCI_CAP_NEXT_PTR_NULL) {
19205 			if (ql_pci_config_get8(ha, cptr) == cap_id) {
19206 				break;
19207 			}
19208 			cptr = ql_pci_config_get8(ha, cptr + PCI_CAP_NEXT_PTR);
19209 		}
19210 	}
19211 
19212 	QL_PRINT_3(ha, "done\n");
19213 	return (cptr);
19214 }
19215 
19216 /*
19217  * ql_map_mem_bar
19218  *	Map Mem BAR
19219  *
19220  * Input:
19221  *	ha:		 adapter state pointer.
19222  *	handlep:	access handle pointer.
19223  *	addrp:		address structure pointer.
19224  *	ofst:		BAR offset.
19225  *	len:		address space length.
19226  *
19227  * Returns:
19228  *	DDI_SUCCESS or DDI_FAILURE.
19229  *
19230  * Context:
19231  *	Kernel context.
19232  */
19233 static int
ql_map_mem_bar(ql_adapter_state_t * ha,ddi_acc_handle_t * handlep,caddr_t * addrp,uint32_t ofst,uint32_t len)19234 ql_map_mem_bar(ql_adapter_state_t *ha, ddi_acc_handle_t *handlep,
19235     caddr_t *addrp, uint32_t ofst, uint32_t len)
19236 {
19237 	caddr_t		nreg;
19238 	pci_regspec_t	*reg, *reg2;
19239 	int		rval;
19240 	uint_t		rlen;
19241 	uint32_t	rcnt, w32, nreg_size;
19242 
19243 	QL_PRINT_10(ha, "started\n");
19244 
19245 	/* Check for Mem BAR */
19246 	w32 = ql_pci_config_get32(ha, ofst);
19247 	if (w32 == 0) {
19248 		EL(ha, "no Mem BAR %xh\n", ofst);
19249 		return (DDI_FAILURE);
19250 	}
19251 
19252 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
19253 	if ((rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ha->dip,
19254 	    DDI_PROP_DONTPASS, "reg", (int **)&reg, &rlen)) !=
19255 	    DDI_PROP_SUCCESS) {
19256 		EL(ha, "ddi_prop_lookup_int_array status=%xh\n", rval);
19257 		return (DDI_FAILURE);
19258 	}
19259 	rlen = (uint_t)(rlen * sizeof (int));	/* in bytes */
19260 	rcnt = (uint32_t)(rlen / sizeof (pci_regspec_t));
19261 
19262 	/* Check if register already added. */
19263 	reg2 = reg;
19264 	for (w32 = 0; w32 < rcnt; w32++) {
19265 		if ((reg2->pci_phys_hi & PCI_REG_REG_M) == ofst) {
19266 			EL(ha, "already mapped\n");
19267 			break;
19268 		}
19269 		reg2++;
19270 	}
19271 	if (w32 == rcnt) {
19272 		/*
19273 		 * Allocate memory for the existing reg(s) plus one and then
19274 		 * build it.
19275 		 */
19276 		nreg_size = (uint32_t)(rlen + sizeof (pci_regspec_t));
19277 		nreg = kmem_zalloc(nreg_size, KM_SLEEP);
19278 
19279 		/*
19280 		 * Find a current map memory reg to copy.
19281 		 */
19282 		reg2 = reg;
19283 		while ((reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19284 		    PCI_ADDR_MEM32 && (reg2->pci_phys_hi & PCI_REG_ADDR_M) !=
19285 		    PCI_ADDR_MEM64) {
19286 			reg2++;
19287 			if ((caddr_t)reg2 >= (caddr_t)reg + rlen) {
19288 				reg2 = reg;
19289 				break;
19290 			}
19291 		}
19292 		w32 = (reg2->pci_phys_hi & ~PCI_REG_REG_M) | ofst;
19293 
19294 		bcopy(reg, nreg, rlen);
19295 		reg2 = (pci_regspec_t *)(nreg + rlen);
19296 
19297 		reg2->pci_phys_hi = w32;
19298 		reg2->pci_phys_mid = 0;
19299 		reg2->pci_phys_low = 0;
19300 		reg2->pci_size_hi = 0;
19301 		reg2->pci_size_low = len;
19302 
19303 		/*
19304 		 * Write out the new "reg" property
19305 		 */
19306 		/*LINTED [Solaris DDI_DEV_T_NONE Lint error]*/
19307 		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, ha->dip,
19308 		    "reg", (int *)nreg, (uint_t)(nreg_size / sizeof (int)));
19309 
19310 		w32 = (uint_t)(nreg_size / sizeof (pci_regspec_t) - 1);
19311 		kmem_free((caddr_t)nreg, nreg_size);
19312 	}
19313 
19314 	ddi_prop_free(reg);
19315 
19316 	/* Map register */
19317 	rval = ddi_regs_map_setup(ha->dip, w32, addrp, 0, len,
19318 	    &ql_dev_acc_attr, handlep);
19319 	if (rval != DDI_SUCCESS || *addrp == NULL || *handlep == NULL) {
19320 		EL(ha, "regs_map status=%xh, base=%xh, handle=%xh\n",
19321 		    rval, *addrp, *handlep);
19322 		if (*handlep != NULL) {
19323 			ddi_regs_map_free(handlep);
19324 			*handlep = NULL;
19325 		}
19326 	}
19327 
19328 	QL_PRINT_10(ha, "done\n");
19329 
19330 	return (rval);
19331 }
19332 
19333 /*
19334  * ql_intr_lock
19335  *	Acquires all interrupt locks.
19336  *
19337  * Input:
19338  *	ha:	adapter state pointer.
19339  *
19340  * Context:
19341  *	Kernel/Interrupt context.
19342  */
19343 void
ql_intr_lock(ql_adapter_state_t * ha)19344 ql_intr_lock(ql_adapter_state_t *ha)
19345 {
19346 	uint16_t	cnt;
19347 
19348 	QL_PRINT_3(ha, "started\n");
19349 
19350 	if (ha->rsp_queues != NULL) {
19351 		for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19352 			if (ha->rsp_queues[cnt] != NULL) {
19353 				INDX_INTR_LOCK(ha, cnt);
19354 			}
19355 		}
19356 	}
19357 	QL_PRINT_3(ha, "done\n");
19358 }
19359 
19360 /*
19361  * ql_intr_unlock
19362  *	Releases all interrupt locks.
19363  *
19364  * Input:
19365  *	ha:	adapter state pointer.
19366  *
19367  * Context:
19368  *	Kernel/Interrupt context.
19369  */
19370 void
ql_intr_unlock(ql_adapter_state_t * ha)19371 ql_intr_unlock(ql_adapter_state_t *ha)
19372 {
19373 	uint16_t	cnt;
19374 
19375 	QL_PRINT_3(ha, "started\n");
19376 
19377 	if (ha->rsp_queues != NULL) {
19378 		for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
19379 			if (ha->rsp_queues[cnt] != NULL) {
19380 				INDX_INTR_UNLOCK(ha, cnt);
19381 			}
19382 		}
19383 	}
19384 	QL_PRINT_3(ha, "done\n");
19385 }
19386 
19387 /*
19388  * ql_completion_thread
19389  *	I/O completion thread.
19390  *
19391  * Input:
19392  *	arg:	port info pointer.
19393  *	COMP_Q_LOCK must be acquired prior to call.
19394  *
19395  * Context:
19396  *	Kernel context.
19397  */
static void
ql_completion_thread(void *arg)
{
	ql_srb_t		*sp;
	ql_adapter_state_t	*ha = arg;

	QL_PRINT_3(ha, "started, hsp=%p\n", (void *)&sp);

	COMP_Q_LOCK(ha);
	/* Register this thread as active and initially awake. */
	ha->comp_thds_active++;
	ha->comp_thds_awake++;
	while (!(ha->flags & COMP_THD_TERMINATE)) {
		/* process completion queue items */
		while (ha->comp_q.first != NULL) {
			sp = (ha->comp_q.first)->base_address;
			/* Remove command from completion queue */
			ql_remove_link(&ha->comp_q, &sp->cmd);
			/*
			 * Drop COMP_Q_LOCK around the callback —
			 * presumably pkt_comp may re-enter the driver and
			 * must not be invoked with the queue lock held.
			 */
			COMP_Q_UNLOCK(ha);
			QL_PRINT_3(ha, "pkt_comp, sp=%p, pkt_state=%xh, "
			    "hsp=%p\n", (void*)sp, sp->pkt->pkt_state,
			    (void *)&sp);
			(sp->pkt->pkt_comp)(sp->pkt);
			COMP_Q_LOCK(ha);
		}
		/*
		 * Going to sleep: drop the awake count.  The waker
		 * (ql_io_comp) increments comp_thds_awake before
		 * cv_signal, so the count stays balanced on wakeup.
		 */
		ha->comp_thds_awake--;
		QL_PRINT_3(ha, "sleep, hsp=%p\n", (void *)&sp);
		cv_wait(&ha->cv_comp_thread, &ha->comp_q_mutex);
		QL_PRINT_3(ha, "awoke, hsp=%p\n", (void *)&sp);
	}
	/*
	 * Terminating: undo both the awake count (incremented by whoever
	 * signaled us) and this thread's active count.
	 */
	ha->comp_thds_awake--;
	ha->comp_thds_active--;
	COMP_Q_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
19433 
19434 /*
19435  * ql_io_comp
19436  *	Transport I/O completion
19437  *
19438  * Input:
19439  *	sp:	SRB structure pointer
19440  *
19441  * Context:
19442  *	Kernel context.
19443  */
19444 void
ql_io_comp(ql_srb_t * sp)19445 ql_io_comp(ql_srb_t *sp)
19446 {
19447 	ql_adapter_state_t	*ha = sp->ha->pha;
19448 
19449 	QL_PRINT_3(ha, "started, sp=%ph, d_id=%xh\n", (void*)sp,
19450 	    sp->pkt->pkt_cmd_fhdr.d_id);
19451 
19452 	if (sp->pkt->pkt_comp && !ddi_in_panic()) {
19453 		QL_PRINT_3(ha, "added to comp_q\n");
19454 		COMP_Q_LOCK(ha);
19455 		ql_add_link_b(&ha->comp_q, &sp->cmd);
19456 		if (ha->comp_thds_awake < ha->comp_thds_active) {
19457 			ha->comp_thds_awake++;
19458 			QL_PRINT_3(ha, "signal\n");
19459 			cv_signal(&ha->cv_comp_thread);
19460 		}
19461 		COMP_Q_UNLOCK(ha);
19462 	}
19463 
19464 	QL_PRINT_3(ha, "done\n");
19465 }
19466 
19467 /*
19468  * ql_process_comp_queue
19469  *	Process completion queue entries.
19470  *
19471  * Input:
19472  *	arg:	adapter state pointer.
19473  *
19474  * Context:
19475  *	Kernel context.
19476  */
19477 static void
ql_process_comp_queue(void * arg)19478 ql_process_comp_queue(void *arg)
19479 {
19480 	ql_srb_t		*sp;
19481 	ql_adapter_state_t	*ha = arg;
19482 
19483 	QL_PRINT_3(ha, "started\n");
19484 
19485 	COMP_Q_LOCK(ha);
19486 
19487 	/* process completion queue items */
19488 	while (ha->comp_q.first != NULL) {
19489 		sp = (ha->comp_q.first)->base_address;
19490 		QL_PRINT_3(ha, "sending comp=0x%p\n", (void *)sp);
19491 		/* Remove command from completion queue */
19492 		ql_remove_link(&ha->comp_q, &sp->cmd);
19493 		COMP_Q_UNLOCK(ha);
19494 		(sp->pkt->pkt_comp)(sp->pkt);
19495 		COMP_Q_LOCK(ha);
19496 	}
19497 
19498 	COMP_Q_UNLOCK(ha);
19499 
19500 	QL_PRINT_3(ha, "done\n");
19501 }
19502 
19503 /*
19504  * ql_abort_io
19505  *	Abort I/O.
19506  *
19507  * Input:
19508  *	ha:	adapter state pointer.
19509  *	sp:	SRB pointer.
19510  *
19511  * Returns:
19512  *	ql local function return status code.
19513  *
19514  * Context:
19515  *	Kernel context.
19516  */
static int
ql_abort_io(ql_adapter_state_t *vha, ql_srb_t *sp)
{
	ql_link_t		*link;
	ql_srb_t		*sp2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = QL_FUNCTION_FAILED;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_10(ha, "started, sp=%ph, handle=%xh\n", (void *)sp,
	    sp->handle);

	if ((lq = sp->lun_queue) != NULL) {
		tq = lq->target_queue;
	} else {
		tq = NULL;
	}

	/* Acquire target queue lock. */
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
	}
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		rval = QL_FUNCTION_PARAMETER_ERROR;

		/* Check pending queue for command. */
		for (link = ha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp2 = link->base_address;
			if (sp2 == sp) {
				rval = QL_SUCCESS;
				/* Remove srb from pending command queue */
				ql_remove_link(&ha->pending_cmds, &sp->cmd);
				break;
			}
		}

		/* Not in pending queue; try the device (lun) queue next. */
		if (link == NULL && lq) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp2 = link->base_address;
				if (sp2 == sp) {
					rval = QL_SUCCESS;
					/* Remove srb from device queue. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					sp->flags &= ~SRB_IN_DEVICE_QUEUE;
					break;
				}
			}
		}
	}

	/* Release locks in reverse order of acquisition. */
	REQUEST_RING_UNLOCK(ha);
	if (tq) {
		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (sp->flags & SRB_ISP_COMPLETED || rval == QL_SUCCESS) {
		/* Already completed, or dequeued above: nothing to abort. */
		rval = QL_SUCCESS;
	} else {
		uint32_t	index;

		/* Command is in flight on the ISP; abort it at the HBA. */
		INTR_LOCK(ha);
		sp->flags |= SRB_ABORTING;
		if (sp->handle != 0) {
			index = sp->handle & OSC_INDEX_MASK;
			/* Replace the outstanding slot with a sentinel. */
			if (ha->outstanding_cmds[index] == sp) {
				ha->outstanding_cmds[index] =
				    QL_ABORTED_SRB(ha);
			}
			/* Decrement outstanding commands on device. */
			if (tq != NULL && tq->outcnt != 0) {
				tq->outcnt--;
			}
			if (lq != NULL && sp->flags & SRB_FCP_CMD_PKT &&
			    lq->lun_outcnt != 0) {
				lq->lun_outcnt--;
			}
			/* Remove command from watchdog queue. */
			if (sp->flags & SRB_WATCHDOG_ENABLED) {
				if (tq != NULL) {
					ql_remove_link(&tq->wdg, &sp->wdg);
				}
				sp->flags &= ~SRB_WATCHDOG_ENABLED;
			}
			INTR_UNLOCK(ha);
			/* Mailbox abort; its own status is ignored here. */
			(void) ql_abort_command(ha, sp);
			sp->handle = 0;
		} else {
			INTR_UNLOCK(ha);
		}
		rval = QL_SUCCESS;
	}

	/*
	 * NOTE(review): both branches above end with rval = QL_SUCCESS,
	 * so this failure path appears unreachable as written — possibly
	 * defensive; confirm before removing.
	 */
	if (rval != QL_SUCCESS) {
		EL(ha, "sp=%p not aborted=%xh\n", (void *)sp, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_10(ha, "done\n");
	}
	return (rval);
}
19624 
19625 /*
19626  *  ql_idc
19627  *	Inter driver communication thread.
19628  *
19629  * Input:
19630  *	ha = adapter state pointer.
19631  *
19632  * Context:
19633  *	Kernel context.
19634  */
static void
ql_idc(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	timer = 300;	/* iteration cap; see restart check */

	QL_PRINT_10(ha, "started\n");

	for (;;) {
		/* IDC Stall needed. */
		if (ha->flags & IDC_STALL_NEEDED) {
			/* Consume the request flag under the state lock. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~IDC_STALL_NEEDED;
			ADAPTER_STATE_UNLOCK(ha);
			/* Stall the driver while the IDC operation runs. */
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags |= DRIVER_STALL;
			TASK_DAEMON_UNLOCK(ha);
			if (LOOP_READY(ha)) {
				/*
				 * Ask the firmware for the maximum IDC
				 * timeout if not already at the maximum.
				 */
				if ((ha->idc_mb[1] & IDC_TIMEOUT_MASK) <
				    IDC_TIMEOUT_MASK) {
					ha->idc_mb[1] = (uint16_t)
					    (ha->idc_mb[1] | IDC_TIMEOUT_MASK);
					rval = ql_idc_time_extend(ha);
					if (rval != QL_SUCCESS) {
						EL(ha, "idc_time_extend status"
						    "=%xh\n", rval);
					}
				}
				/* Drain outstanding I/O before the stall. */
				(void) ql_wait_outstanding(ha);
			}
		}

		/* IDC ACK needed. */
		if (ha->flags & IDC_ACK_NEEDED) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~IDC_ACK_NEEDED;
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_idc_ack(ha);
			if (rval != QL_SUCCESS) {
				/* ACK failed: force the restart path. */
				EL(ha, "idc_ack status=%xh\n", rval);
				ADAPTER_STATE_LOCK(ha);
				ha->flags |= IDC_RESTART_NEEDED;
				ADAPTER_STATE_UNLOCK(ha);
			}
		}

		/*
		 * IDC Restart needed.  Exit when the timer expires, the
		 * adapter is suspended, or a restart was requested while
		 * no loopback test is active.
		 */
		if (timer-- == 0 || ha->flags & ADAPTER_SUSPENDED ||
		    (ha->flags & IDC_RESTART_NEEDED &&
		    !(ha->flags & LOOPBACK_ACTIVE))) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~(IDC_RESTART_NEEDED | IDC_STALL_NEEDED |
			    IDC_ACK_NEEDED);
			ADAPTER_STATE_UNLOCK(ha);
			/* Lift the driver stall and resume queued I/O. */
			TASK_DAEMON_LOCK(ha);
			ha->task_daemon_flags &= ~DRIVER_STALL;
			TASK_DAEMON_UNLOCK(ha);
			if (LOOP_READY(ha)) {
				ql_restart_queues(ha);
			}
			break;
		}
		delay(10);
	}

	QL_PRINT_10(ha, "done\n");
}
19702 
19703 /*
19704  * ql_get_lun_addr
 *	Get the lun address.
19706  *
19707  * Input:
19708  *	tq:	target queue pointer.
19709  *	lun:	the lun number.
19710  *
19711  * Returns:
19712  *	the lun address.
19713  *
19714  * Context:
19715  *	Interrupt or Kernel context, no mailbox commands allowed.
19716  */
19717 uint64_t
ql_get_lun_addr(ql_tgt_t * tq,uint16_t lun)19718 ql_get_lun_addr(ql_tgt_t *tq, uint16_t lun)
19719 {
19720 	ql_lun_t		*lq;
19721 	ql_link_t		*link = NULL;
19722 	uint64_t		lun_addr = 0;
19723 	fcp_ent_addr_t		*fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
19724 
19725 	/* If the lun queue exists */
19726 	if (tq) {
19727 		for (link = tq->lun_queues.first; link != NULL;
19728 		    link = link->next) {
19729 			lq = link->base_address;
19730 			if (lq->lun_no == lun) {
19731 				break;
19732 			}
19733 		}
19734 	}
19735 	if (link == NULL) {
19736 		/* create an fcp_ent_addr from the lun number */
19737 		if (MSB(lun)) {
19738 			fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19739 			    (hibyte(lun) | QL_LUN_AM_FLAT));
19740 		} else {
19741 			fcp_ent_addr->ent_addr_0 = CHAR_TO_SHORT(lobyte(lun),
19742 			    hibyte(lun));
19743 		}
19744 	} else {
19745 		lun_addr = lq->lun_addr;
19746 	}
19747 
19748 	return (lun_addr);
19749 }
19750 
19751 
19752 /*
19753  * ql_83xx_binary_fw_dump
19754  *
19755  * Input:
19756  *	ha:	adapter state pointer.
19757  *	fw:	firmware dump context pointer.
19758  *
19759  * Returns:
19760  *	ql local function return status code.
19761  *
19762  * Context:
19763  *	Interrupt or Kernel context, no mailbox commands allowed.
19764  */
19765 static int
ql_83xx_binary_fw_dump(ql_adapter_state_t * ha,ql_83xx_fw_dump_t * fw)19766 ql_83xx_binary_fw_dump(ql_adapter_state_t *ha, ql_83xx_fw_dump_t *fw)
19767 {
19768 	uint32_t	*reg32, cnt, *w32ptr, index, *dp;
19769 	void		*bp;
19770 	clock_t		timer;
19771 	int		rv, rval = QL_SUCCESS;
19772 
19773 	QL_PRINT_3(ha, "started\n");
19774 
19775 	fw->req_q_size[0] = ha->req_q[0]->req_ring.size;
19776 	if (ha->req_q[1] != NULL) {
19777 		fw->req_q_size[1] = ha->req_q[1]->req_ring.size;
19778 	}
19779 	fw->rsp_q_size = ha->rsp_queues[0]->rsp_ring.size * ha->rsp_queues_cnt;
19780 
19781 	fw->hccr = RD32_IO_REG(ha, hccr);
19782 	fw->r2h_status = RD32_IO_REG(ha, risc2host);
19783 	fw->aer_ues = ql_pci_config_get32(ha, 0x104);
19784 
19785 	/* Disable ISP interrupts. */
19786 	ql_disable_intr(ha);
19787 
19788 	/* Pause RISC. */
19789 	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
19790 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
19791 		for (timer = 30000;
19792 		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
19793 		    rval == QL_SUCCESS; timer--) {
19794 			if (timer) {
19795 				drv_usecwait(100);
19796 				if (timer % 10000 == 0) {
19797 					EL(ha, "risc pause %d\n", timer);
19798 				}
19799 			} else {
19800 				EL(ha, "risc pause timeout\n");
19801 				rval = QL_FUNCTION_TIMEOUT;
19802 			}
19803 		}
19804 	}
19805 
19806 	WRT32_IO_REG(ha, io_base_addr, 0x6000);
19807 	WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0);
19808 	WRT_REG_DWORD(ha, ha->iobase + 0xcc, 0);
19809 
19810 	WRT32_IO_REG(ha, io_base_addr, 0x6010);
19811 	WRT_REG_DWORD(ha, ha->iobase + 0xd4, 0);
19812 
19813 	WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19814 	WRT_REG_DWORD(ha, ha->iobase + 0xf0, 0x60000000);
19815 
19816 	/* Host Interface registers */
19817 
19818 	/* HostRisc registers. */
19819 	WRT32_IO_REG(ha, io_base_addr, 0x7000);
19820 	bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
19821 	    16, 32);
19822 	WRT32_IO_REG(ha, io_base_addr, 0x7010);
19823 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19824 	WRT32_IO_REG(ha, io_base_addr, 0x7040);
19825 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19826 
19827 	/* PCIe registers. */
19828 	WRT32_IO_REG(ha, io_base_addr, 0x7c00);
19829 	WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
19830 	bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
19831 	    3, 32);
19832 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
19833 	WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
19834 
19835 	/* Host interface registers. */
19836 	(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
19837 	    sizeof (fw->host_reg) / 4, 32);
19838 
19839 	/* Shadow registers. */
19840 
19841 	WRT32_IO_REG(ha, io_base_addr, 0x0F70);
19842 	RD32_IO_REG(ha, io_base_addr);
19843 
19844 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19845 	WRT_REG_DWORD(ha, reg32, 0xB0000000);
19846 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19847 	fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
19848 
19849 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19850 	WRT_REG_DWORD(ha, reg32, 0xB0100000);
19851 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19852 	fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
19853 
19854 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19855 	WRT_REG_DWORD(ha, reg32, 0xB0200000);
19856 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19857 	fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
19858 
19859 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19860 	WRT_REG_DWORD(ha, reg32, 0xB0300000);
19861 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19862 	fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
19863 
19864 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19865 	WRT_REG_DWORD(ha, reg32, 0xB0400000);
19866 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19867 	fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
19868 
19869 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19870 	WRT_REG_DWORD(ha, reg32, 0xB0500000);
19871 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19872 	fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
19873 
19874 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19875 	WRT_REG_DWORD(ha, reg32, 0xB0600000);
19876 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19877 	fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
19878 
19879 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19880 	WRT_REG_DWORD(ha, reg32, 0xB0700000);
19881 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19882 	fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
19883 
19884 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19885 	WRT_REG_DWORD(ha, reg32, 0xB0800000);
19886 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19887 	fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
19888 
19889 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19890 	WRT_REG_DWORD(ha, reg32, 0xB0900000);
19891 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19892 	fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
19893 
19894 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
19895 	WRT_REG_DWORD(ha, reg32, 0xB0A00000);
19896 	reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
19897 	fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
19898 
19899 	/* RISC I/O register. */
19900 
19901 	WRT32_IO_REG(ha, io_base_addr, 0x0010);
19902 	(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
19903 	    1, 32);
19904 
19905 	/* Mailbox registers. */
19906 
19907 	(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
19908 	    sizeof (fw->mailbox_reg) / 2, 16);
19909 
19910 	/* Transfer sequence registers. */
19911 
19912 	/* XSEQ GP */
19913 	WRT32_IO_REG(ha, io_base_addr, 0xBE00);
19914 	bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
19915 	    16, 32);
19916 	WRT32_IO_REG(ha, io_base_addr, 0xBE10);
19917 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19918 	WRT32_IO_REG(ha, io_base_addr, 0xBE20);
19919 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19920 	WRT32_IO_REG(ha, io_base_addr, 0xBE30);
19921 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19922 	WRT32_IO_REG(ha, io_base_addr, 0xBE40);
19923 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19924 	WRT32_IO_REG(ha, io_base_addr, 0xBE50);
19925 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19926 	WRT32_IO_REG(ha, io_base_addr, 0xBE60);
19927 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19928 	WRT32_IO_REG(ha, io_base_addr, 0xBE70);
19929 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19930 	WRT32_IO_REG(ha, io_base_addr, 0xBF00);
19931 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19932 	WRT32_IO_REG(ha, io_base_addr, 0xBF10);
19933 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19934 	WRT32_IO_REG(ha, io_base_addr, 0xBF20);
19935 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19936 	WRT32_IO_REG(ha, io_base_addr, 0xBF30);
19937 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19938 	WRT32_IO_REG(ha, io_base_addr, 0xBF40);
19939 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19940 	WRT32_IO_REG(ha, io_base_addr, 0xBF50);
19941 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19942 	WRT32_IO_REG(ha, io_base_addr, 0xBF60);
19943 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19944 	WRT32_IO_REG(ha, io_base_addr, 0xBF70);
19945 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19946 
19947 	/* XSEQ-0 */
19948 	WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
19949 	bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0, 16, 32);
19950 	WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
19951 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19952 	WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
19953 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19954 
19955 	/* XSEQ-1 */
19956 	WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
19957 	(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
19958 	    16, 32);
19959 
19960 	/* XSEQ-2 */
19961 	WRT32_IO_REG(ha, io_base_addr, 0xBEF0);
19962 	(void) ql_read_regs(ha, fw->xseq_2_reg, ha->iobase + 0xC0,
19963 	    16, 32);
19964 
19965 	/* Receive sequence registers. */
19966 
19967 	/* RSEQ GP */
19968 	WRT32_IO_REG(ha, io_base_addr, 0xFE00);
19969 	bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0, 16, 32);
19970 	WRT32_IO_REG(ha, io_base_addr, 0xFE10);
19971 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19972 	WRT32_IO_REG(ha, io_base_addr, 0xFE20);
19973 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19974 	WRT32_IO_REG(ha, io_base_addr, 0xFE30);
19975 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19976 	WRT32_IO_REG(ha, io_base_addr, 0xFE40);
19977 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19978 	WRT32_IO_REG(ha, io_base_addr, 0xFE50);
19979 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19980 	WRT32_IO_REG(ha, io_base_addr, 0xFE60);
19981 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19982 	WRT32_IO_REG(ha, io_base_addr, 0xFE70);
19983 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19984 	WRT32_IO_REG(ha, io_base_addr, 0xFF00);
19985 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19986 	WRT32_IO_REG(ha, io_base_addr, 0xFF10);
19987 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19988 	WRT32_IO_REG(ha, io_base_addr, 0xFF20);
19989 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19990 	WRT32_IO_REG(ha, io_base_addr, 0xFF30);
19991 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19992 	WRT32_IO_REG(ha, io_base_addr, 0xFF40);
19993 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19994 	WRT32_IO_REG(ha, io_base_addr, 0xFF50);
19995 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19996 	WRT32_IO_REG(ha, io_base_addr, 0xFF60);
19997 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
19998 	WRT32_IO_REG(ha, io_base_addr, 0xFF70);
19999 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20000 
20001 	/* RSEQ-0 */
20002 	WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
20003 	bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
20004 	    16, 32);
20005 	WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
20006 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20007 
20008 	/* RSEQ-1 */
20009 	WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
20010 	(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
20011 	    sizeof (fw->rseq_1_reg) / 4, 32);
20012 
20013 	/* RSEQ-2 */
20014 	WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
20015 	(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
20016 	    sizeof (fw->rseq_2_reg) / 4, 32);
20017 
20018 	/* RSEQ-3 */
20019 	WRT32_IO_REG(ha, io_base_addr, 0xFEF0);
20020 	(void) ql_read_regs(ha, fw->rseq_3_reg, ha->iobase + 0xC0,
20021 	    sizeof (fw->rseq_3_reg) / 4, 32);
20022 
20023 	/* Auxiliary sequencer registers. */
20024 
20025 	/* ASEQ GP */
20026 	WRT32_IO_REG(ha, io_base_addr, 0xB000);
20027 	bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0, 16, 32);
20028 	WRT32_IO_REG(ha, io_base_addr, 0xB010);
20029 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20030 	WRT32_IO_REG(ha, io_base_addr, 0xB020);
20031 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20032 	WRT32_IO_REG(ha, io_base_addr, 0xB030);
20033 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20034 	WRT32_IO_REG(ha, io_base_addr, 0xB040);
20035 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20036 	WRT32_IO_REG(ha, io_base_addr, 0xB050);
20037 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20038 	WRT32_IO_REG(ha, io_base_addr, 0xB060);
20039 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20040 	WRT32_IO_REG(ha, io_base_addr, 0xB070);
20041 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20042 	WRT32_IO_REG(ha, io_base_addr, 0xB100);
20043 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20044 	WRT32_IO_REG(ha, io_base_addr, 0xB110);
20045 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20046 	WRT32_IO_REG(ha, io_base_addr, 0xB120);
20047 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20048 	WRT32_IO_REG(ha, io_base_addr, 0xB130);
20049 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20050 	WRT32_IO_REG(ha, io_base_addr, 0xB140);
20051 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20052 	WRT32_IO_REG(ha, io_base_addr, 0xB150);
20053 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20054 	WRT32_IO_REG(ha, io_base_addr, 0xB160);
20055 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20056 	WRT32_IO_REG(ha, io_base_addr, 0xB170);
20057 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20058 
20059 	/* ASEQ-0 */
20060 	WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
20061 	bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
20062 	    16, 32);
20063 	WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
20064 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20065 
20066 	/* ASEQ-1 */
20067 	WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
20068 	(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
20069 	    16, 32);
20070 
20071 	/* ASEQ-2 */
20072 	WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
20073 	(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
20074 	    16, 32);
20075 
20076 	/* ASEQ-3 */
20077 	WRT32_IO_REG(ha, io_base_addr, 0xB1F0);
20078 	(void) ql_read_regs(ha, fw->aseq_3_reg, ha->iobase + 0xC0,
20079 	    16, 32);
20080 
20081 	/* Command DMA registers. */
20082 
20083 	WRT32_IO_REG(ha, io_base_addr, 0x7100);
20084 	bp = ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
20085 	    16, 32);
20086 	WRT32_IO_REG(ha, io_base_addr, 0x7120);
20087 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20088 	WRT32_IO_REG(ha, io_base_addr, 0x7130);
20089 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20090 	WRT32_IO_REG(ha, io_base_addr, 0x71f0);
20091 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20092 
20093 	/* Queues. */
20094 
20095 	/* RequestQ0 */
20096 	WRT32_IO_REG(ha, io_base_addr, 0x7200);
20097 	bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
20098 	    8, 32);
20099 	(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20100 
20101 	/* ResponseQ0 */
20102 	WRT32_IO_REG(ha, io_base_addr, 0x7300);
20103 	bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
20104 	    8, 32);
20105 	(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20106 
20107 	/* RequestQ1 */
20108 	WRT32_IO_REG(ha, io_base_addr, 0x7400);
20109 	bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
20110 	    8, 32);
20111 	(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
20112 
20113 	/* Transmit DMA registers. */
20114 
20115 	/* XMT0 */
20116 	WRT32_IO_REG(ha, io_base_addr, 0x7600);
20117 	bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
20118 	    16, 32);
20119 	WRT32_IO_REG(ha, io_base_addr, 0x7610);
20120 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20121 
20122 	/* XMT1 */
20123 	WRT32_IO_REG(ha, io_base_addr, 0x7620);
20124 	bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
20125 	    16, 32);
20126 	WRT32_IO_REG(ha, io_base_addr, 0x7630);
20127 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20128 
20129 	/* XMT2 */
20130 	WRT32_IO_REG(ha, io_base_addr, 0x7640);
20131 	bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
20132 	    16, 32);
20133 	WRT32_IO_REG(ha, io_base_addr, 0x7650);
20134 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20135 
20136 	/* XMT3 */
20137 	WRT32_IO_REG(ha, io_base_addr, 0x7660);
20138 	bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
20139 	    16, 32);
20140 	WRT32_IO_REG(ha, io_base_addr, 0x7670);
20141 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20142 
20143 	/* XMT4 */
20144 	WRT32_IO_REG(ha, io_base_addr, 0x7680);
20145 	bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
20146 	    16, 32);
20147 	WRT32_IO_REG(ha, io_base_addr, 0x7690);
20148 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20149 
20150 	/* XMT Common */
20151 	WRT32_IO_REG(ha, io_base_addr, 0x76A0);
20152 	(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
20153 	    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
20154 
20155 	/* Receive DMA registers. */
20156 
20157 	/* RCVThread0 */
20158 	WRT32_IO_REG(ha, io_base_addr, 0x7700);
20159 	bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
20160 	    ha->iobase + 0xC0, 16, 32);
20161 	WRT32_IO_REG(ha, io_base_addr, 0x7710);
20162 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20163 
20164 	/* RCVThread1 */
20165 	WRT32_IO_REG(ha, io_base_addr, 0x7720);
20166 	bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
20167 	    ha->iobase + 0xC0, 16, 32);
20168 	WRT32_IO_REG(ha, io_base_addr, 0x7730);
20169 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20170 
20171 	/* RISC registers. */
20172 
20173 	/* RISC GP */
20174 	WRT32_IO_REG(ha, io_base_addr, 0x0F00);
20175 	bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0, 16, 32);
20176 	WRT32_IO_REG(ha, io_base_addr, 0x0F10);
20177 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20178 	WRT32_IO_REG(ha, io_base_addr, 0x0F20);
20179 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20180 	WRT32_IO_REG(ha, io_base_addr, 0x0F30);
20181 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20182 	WRT32_IO_REG(ha, io_base_addr, 0x0F40);
20183 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20184 	WRT32_IO_REG(ha, io_base_addr, 0x0F50);
20185 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20186 	WRT32_IO_REG(ha, io_base_addr, 0x0F60);
20187 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20188 	WRT32_IO_REG(ha, io_base_addr, 0x0F70);
20189 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20190 
20191 	/* Local memory controller (LMC) registers. */
20192 
20193 	/* LMC */
20194 	WRT32_IO_REG(ha, io_base_addr, 0x3000);
20195 	bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0, 16, 32);
20196 	WRT32_IO_REG(ha, io_base_addr, 0x3010);
20197 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20198 	WRT32_IO_REG(ha, io_base_addr, 0x3020);
20199 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20200 	WRT32_IO_REG(ha, io_base_addr, 0x3030);
20201 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20202 	WRT32_IO_REG(ha, io_base_addr, 0x3040);
20203 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20204 	WRT32_IO_REG(ha, io_base_addr, 0x3050);
20205 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20206 	WRT32_IO_REG(ha, io_base_addr, 0x3060);
20207 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20208 	WRT32_IO_REG(ha, io_base_addr, 0x3070);
20209 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20210 
20211 	/* Fibre Protocol Module registers. */
20212 
20213 	/* FPM hardware */
20214 	WRT32_IO_REG(ha, io_base_addr, 0x4000);
20215 	bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0, 16, 32);
20216 	WRT32_IO_REG(ha, io_base_addr, 0x4010);
20217 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20218 	WRT32_IO_REG(ha, io_base_addr, 0x4020);
20219 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20220 	WRT32_IO_REG(ha, io_base_addr, 0x4030);
20221 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20222 	WRT32_IO_REG(ha, io_base_addr, 0x4040);
20223 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20224 	WRT32_IO_REG(ha, io_base_addr, 0x4050);
20225 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20226 	WRT32_IO_REG(ha, io_base_addr, 0x4060);
20227 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20228 	WRT32_IO_REG(ha, io_base_addr, 0x4070);
20229 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20230 	WRT32_IO_REG(ha, io_base_addr, 0x4080);
20231 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20232 	WRT32_IO_REG(ha, io_base_addr, 0x4090);
20233 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20234 	WRT32_IO_REG(ha, io_base_addr, 0x40A0);
20235 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20236 	WRT32_IO_REG(ha, io_base_addr, 0x40B0);
20237 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20238 	WRT32_IO_REG(ha, io_base_addr, 0x40C0);
20239 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20240 	WRT32_IO_REG(ha, io_base_addr, 0x40D0);
20241 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20242 	WRT32_IO_REG(ha, io_base_addr, 0x40E0);
20243 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20244 	WRT32_IO_REG(ha, io_base_addr, 0x40F0);
20245 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20246 
20247 	/* Pointer arrays registers */
20248 
20249 	/* RQ0 Array registers. */
20250 	WRT32_IO_REG(ha, io_base_addr, 0x5C00);
20251 	bp = ql_read_regs(ha, fw->rq0_array_reg, ha->iobase + 0xC0,
20252 	    16, 32);
20253 	WRT32_IO_REG(ha, io_base_addr, 0x5C10);
20254 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20255 	WRT32_IO_REG(ha, io_base_addr, 0x5C20);
20256 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20257 	WRT32_IO_REG(ha, io_base_addr, 0x5C30);
20258 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20259 	WRT32_IO_REG(ha, io_base_addr, 0x5C40);
20260 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20261 	WRT32_IO_REG(ha, io_base_addr, 0x5C50);
20262 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20263 	WRT32_IO_REG(ha, io_base_addr, 0x5C60);
20264 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20265 	WRT32_IO_REG(ha, io_base_addr, 0x5C70);
20266 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20267 	WRT32_IO_REG(ha, io_base_addr, 0x5C80);
20268 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20269 	WRT32_IO_REG(ha, io_base_addr, 0x5C90);
20270 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20271 	WRT32_IO_REG(ha, io_base_addr, 0x5CA0);
20272 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20273 	WRT32_IO_REG(ha, io_base_addr, 0x5CB0);
20274 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20275 	WRT32_IO_REG(ha, io_base_addr, 0x5CC0);
20276 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20277 	WRT32_IO_REG(ha, io_base_addr, 0x5CD0);
20278 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20279 	WRT32_IO_REG(ha, io_base_addr, 0x5CE0);
20280 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20281 	WRT32_IO_REG(ha, io_base_addr, 0x5CF0);
20282 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20283 
20284 	/* RQ1 Array registers. */
20285 	WRT32_IO_REG(ha, io_base_addr, 0x5D00);
20286 	bp = ql_read_regs(ha, fw->rq1_array_reg, ha->iobase + 0xC0, 16, 32);
20287 	WRT32_IO_REG(ha, io_base_addr, 0x5D10);
20288 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20289 	WRT32_IO_REG(ha, io_base_addr, 0x5D20);
20290 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20291 	WRT32_IO_REG(ha, io_base_addr, 0x5D30);
20292 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20293 	WRT32_IO_REG(ha, io_base_addr, 0x5D40);
20294 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20295 	WRT32_IO_REG(ha, io_base_addr, 0x5D50);
20296 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20297 	WRT32_IO_REG(ha, io_base_addr, 0x5D60);
20298 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20299 	WRT32_IO_REG(ha, io_base_addr, 0x5D70);
20300 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20301 	WRT32_IO_REG(ha, io_base_addr, 0x5D80);
20302 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20303 	WRT32_IO_REG(ha, io_base_addr, 0x5D90);
20304 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20305 	WRT32_IO_REG(ha, io_base_addr, 0x5DA0);
20306 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20307 	WRT32_IO_REG(ha, io_base_addr, 0x5DB0);
20308 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20309 	WRT32_IO_REG(ha, io_base_addr, 0x5DC0);
20310 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20311 	WRT32_IO_REG(ha, io_base_addr, 0x5DD0);
20312 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20313 	WRT32_IO_REG(ha, io_base_addr, 0x5DE0);
20314 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20315 	WRT32_IO_REG(ha, io_base_addr, 0x5DF0);
20316 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20317 
20318 	/* RP0 Array registers. */
20319 	WRT32_IO_REG(ha, io_base_addr, 0x5E00);
20320 	bp = ql_read_regs(ha, fw->rp0_array_reg, ha->iobase + 0xC0, 16, 32);
20321 	WRT32_IO_REG(ha, io_base_addr, 0x5E10);
20322 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20323 	WRT32_IO_REG(ha, io_base_addr, 0x5E20);
20324 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20325 	WRT32_IO_REG(ha, io_base_addr, 0x5E30);
20326 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20327 	WRT32_IO_REG(ha, io_base_addr, 0x5E40);
20328 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20329 	WRT32_IO_REG(ha, io_base_addr, 0x5E50);
20330 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20331 	WRT32_IO_REG(ha, io_base_addr, 0x5E60);
20332 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20333 	WRT32_IO_REG(ha, io_base_addr, 0x5E70);
20334 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20335 	WRT32_IO_REG(ha, io_base_addr, 0x5E80);
20336 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20337 	WRT32_IO_REG(ha, io_base_addr, 0x5E90);
20338 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20339 	WRT32_IO_REG(ha, io_base_addr, 0x5EA0);
20340 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20341 	WRT32_IO_REG(ha, io_base_addr, 0x5EB0);
20342 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20343 	WRT32_IO_REG(ha, io_base_addr, 0x5EC0);
20344 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20345 	WRT32_IO_REG(ha, io_base_addr, 0x5ED0);
20346 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20347 	WRT32_IO_REG(ha, io_base_addr, 0x5EE0);
20348 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20349 	WRT32_IO_REG(ha, io_base_addr, 0x5EF0);
20350 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20351 
20352 	/* RP1 Array registers. */
20353 	WRT32_IO_REG(ha, io_base_addr, 0x5F00);
20354 	bp = ql_read_regs(ha, fw->rp1_array_reg, ha->iobase + 0xC0, 16, 32);
20355 	WRT32_IO_REG(ha, io_base_addr, 0x5F10);
20356 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20357 	WRT32_IO_REG(ha, io_base_addr, 0x5F20);
20358 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20359 	WRT32_IO_REG(ha, io_base_addr, 0x5F30);
20360 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20361 	WRT32_IO_REG(ha, io_base_addr, 0x5F40);
20362 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20363 	WRT32_IO_REG(ha, io_base_addr, 0x5F50);
20364 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20365 	WRT32_IO_REG(ha, io_base_addr, 0x5F60);
20366 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20367 	WRT32_IO_REG(ha, io_base_addr, 0x5F70);
20368 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20369 	WRT32_IO_REG(ha, io_base_addr, 0x5F80);
20370 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20371 	WRT32_IO_REG(ha, io_base_addr, 0x5F90);
20372 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20373 	WRT32_IO_REG(ha, io_base_addr, 0x5FA0);
20374 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20375 	WRT32_IO_REG(ha, io_base_addr, 0x5FB0);
20376 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20377 	WRT32_IO_REG(ha, io_base_addr, 0x5FC0);
20378 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20379 	WRT32_IO_REG(ha, io_base_addr, 0x5FD0);
20380 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20381 	WRT32_IO_REG(ha, io_base_addr, 0x5FE0);
20382 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20383 	WRT32_IO_REG(ha, io_base_addr, 0x5FF0);
20384 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20385 
20386 	/* AT0 Array Registers */
20387 	WRT32_IO_REG(ha, io_base_addr, 0x7080);
20388 	bp = ql_read_regs(ha, fw->ato_array_reg, ha->iobase + 0xC0, 16, 32);
20389 	WRT32_IO_REG(ha, io_base_addr, 0x7090);
20390 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20391 	WRT32_IO_REG(ha, io_base_addr, 0x70A0);
20392 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20393 	WRT32_IO_REG(ha, io_base_addr, 0x70B0);
20394 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20395 	WRT32_IO_REG(ha, io_base_addr, 0x70C0);
20396 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20397 	WRT32_IO_REG(ha, io_base_addr, 0x70D0);
20398 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20399 	WRT32_IO_REG(ha, io_base_addr, 0x70E0);
20400 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20401 	WRT32_IO_REG(ha, io_base_addr, 0x70F0);
20402 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20403 
20404 	/* I/O queue control registers */
20405 
20406 	/* Queue Control Registers. */
20407 	WRT32_IO_REG(ha, io_base_addr, 0x7800);
20408 	(void) ql_read_regs(ha, fw->queue_control_reg, ha->iobase + 0xC0,
20409 	    16, 32);
20410 
20411 	/* Frame Buffer registers. */
20412 
20413 	/* FB hardware */
20414 	WRT32_IO_REG(ha, io_base_addr, 0x6000);
20415 	bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0, 16, 32);
20416 	WRT32_IO_REG(ha, io_base_addr, 0x6010);
20417 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20418 	WRT32_IO_REG(ha, io_base_addr, 0x6020);
20419 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20420 	WRT32_IO_REG(ha, io_base_addr, 0x6030);
20421 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20422 	WRT32_IO_REG(ha, io_base_addr, 0x6040);
20423 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20424 	WRT32_IO_REG(ha, io_base_addr, 0x6060);
20425 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20426 	WRT32_IO_REG(ha, io_base_addr, 0x6070);
20427 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20428 	WRT32_IO_REG(ha, io_base_addr, 0x6100);
20429 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20430 	WRT32_IO_REG(ha, io_base_addr, 0x6130);
20431 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20432 	WRT32_IO_REG(ha, io_base_addr, 0x6150);
20433 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20434 	WRT32_IO_REG(ha, io_base_addr, 0x6170);
20435 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20436 	WRT32_IO_REG(ha, io_base_addr, 0x6190);
20437 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20438 	WRT32_IO_REG(ha, io_base_addr, 0x61B0);
20439 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20440 	WRT32_IO_REG(ha, io_base_addr, 0x61C0);
20441 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20442 	WRT32_IO_REG(ha, io_base_addr, 0x6530);
20443 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20444 	WRT32_IO_REG(ha, io_base_addr, 0x6540);
20445 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20446 	WRT32_IO_REG(ha, io_base_addr, 0x6550);
20447 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20448 	WRT32_IO_REG(ha, io_base_addr, 0x6560);
20449 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20450 	WRT32_IO_REG(ha, io_base_addr, 0x6570);
20451 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20452 	WRT32_IO_REG(ha, io_base_addr, 0x6580);
20453 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20454 	WRT32_IO_REG(ha, io_base_addr, 0x6590);
20455 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20456 	WRT32_IO_REG(ha, io_base_addr, 0x65A0);
20457 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20458 	WRT32_IO_REG(ha, io_base_addr, 0x65B0);
20459 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20460 	WRT32_IO_REG(ha, io_base_addr, 0x65C0);
20461 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20462 	WRT32_IO_REG(ha, io_base_addr, 0x65D0);
20463 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20464 	WRT32_IO_REG(ha, io_base_addr, 0x65E0);
20465 	bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20466 	WRT32_IO_REG(ha, io_base_addr, 0x6F00);
20467 	(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
20468 
20469 	/* Get the Queue Pointers */
20470 	dp = fw->req_rsp_ext_mem;
20471 	for (index = 0; index < ha->rsp_queues_cnt; index++) {
20472 		if (index == 0) {
20473 			*dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_in);
20474 			LITTLE_ENDIAN_32(dp);
20475 			dp++;
20476 			*dp = RD32_MBAR_REG(ha, ha->req_q[0]->mbar_req_out);
20477 			LITTLE_ENDIAN_32(dp);
20478 			dp++;
20479 		} else if (index == 1) {
20480 			*dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_in);
20481 			LITTLE_ENDIAN_32(dp);
20482 			dp++;
20483 			*dp = RD32_MBAR_REG(ha, ha->req_q[1]->mbar_req_out);
20484 			LITTLE_ENDIAN_32(dp);
20485 			dp++;
20486 		} else {
20487 			*dp++ = 0;
20488 			*dp++ = 0;
20489 		}
20490 		*dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_in);
20491 		LITTLE_ENDIAN_32(dp);
20492 		dp++;
20493 		*dp = RD32_MBAR_REG(ha, ha->rsp_queues[index]->mbar_rsp_out);
20494 		LITTLE_ENDIAN_32(dp);
20495 		dp++;
20496 	}
20497 
20498 	/* Get the request queue */
20499 	(void) ddi_dma_sync(ha->req_q[0]->req_ring.dma_handle, 0, 0,
20500 	    DDI_DMA_SYNC_FORCPU);
20501 	w32ptr = (uint32_t *)ha->req_q[0]->req_ring.bp;
20502 	for (cnt = 0; cnt < fw->req_q_size[0] / 4; cnt++) {
20503 		*dp = *w32ptr++;
20504 		LITTLE_ENDIAN_32(dp);
20505 		dp++;
20506 	}
20507 	if (ha->req_q[1] != NULL) {
20508 		(void) ddi_dma_sync(ha->req_q[1]->req_ring.dma_handle, 0, 0,
20509 		    DDI_DMA_SYNC_FORCPU);
20510 		w32ptr = (uint32_t *)ha->req_q[1]->req_ring.bp;
20511 		for (cnt = 0; cnt < fw->req_q_size[1] / 4; cnt++) {
20512 			*dp = *w32ptr++;
20513 			LITTLE_ENDIAN_32(dp);
20514 			dp++;
20515 		}
20516 	}
20517 
20518 	/* Get the response queues */
20519 	for (index = 0; index < ha->rsp_queues_cnt; index++) {
20520 		(void) ddi_dma_sync(ha->rsp_queues[index]->rsp_ring.dma_handle,
20521 		    0, 0, DDI_DMA_SYNC_FORCPU);
20522 		w32ptr = (uint32_t *)ha->rsp_queues[index]->rsp_ring.bp;
20523 		for (cnt = 0; cnt < ha->rsp_queues[index]->rsp_ring.size / 4;
20524 		    cnt++) {
20525 			*dp = *w32ptr++;
20526 			LITTLE_ENDIAN_32(dp);
20527 			dp++;
20528 		}
20529 	}
20530 
20531 	/* Reset RISC. */
20532 	ql_reset_chip(ha);
20533 
20534 	/* Code RAM. */
20535 	rv = ql_read_risc_ram(ha, 0x20000, sizeof (fw->code_ram) / 4,
20536 	    fw->code_ram);
20537 	if (rval == QL_SUCCESS) {
20538 		rval = rv;
20539 	}
20540 	rv = ql_read_risc_ram(ha, 0x100000,
20541 	    ha->fw_ext_memory_size / 4, dp);
20542 	if (rval == QL_SUCCESS) {
20543 		rval = rv;
20544 	}
20545 
20546 	/* Get the extended trace buffer */
20547 	if (ha->fwexttracebuf.dma_handle != NULL) {
20548 		/* Sync DMA buffer. */
20549 		(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
20550 		    FWEXTSIZE, DDI_DMA_SYNC_FORCPU);
20551 
20552 		w32ptr = ha->fwexttracebuf.bp;
20553 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
20554 			fw->ext_trace_buf[cnt] = *w32ptr++;
20555 		}
20556 	}
20557 
20558 	/* Get the FC event trace buffer */
20559 	if (ha->fwfcetracebuf.dma_handle != NULL) {
20560 		/* Sync DMA buffer. */
20561 		(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
20562 		    FWFCESIZE, DDI_DMA_SYNC_FORCPU);
20563 
20564 		w32ptr = ha->fwfcetracebuf.bp;
20565 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
20566 			fw->fce_trace_buf[cnt] = *w32ptr++;
20567 		}
20568 	}
20569 
20570 	if (rval != QL_SUCCESS) {
20571 		EL(ha, "failed, rval = %xh\n", rval);
20572 	} else {
20573 		/*EMPTY*/
20574 		QL_PRINT_10(ha, "done\n");
20575 	}
20576 	return (QL_SUCCESS);
20577 }
20578 
20579 /*
20580  * ql_83xx_ascii_fw_dump
20581  *	Converts ISP83xx firmware binary dump to ascii.
20582  *
20583  * Input:
20584  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
20586  *
20587  * Returns:
20588  *	Amount of data buffer used.
20589  *
20590  * Context:
20591  *	Kernel context.
20592  */
20593 static size_t
ql_83xx_ascii_fw_dump(ql_adapter_state_t * ha,caddr_t bufp)20594 ql_83xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
20595 {
20596 	uint32_t		cnt, cnt1, len, *dp, *dp2;
20597 	caddr_t			bp = bufp;
20598 	ql_83xx_fw_dump_t	*fw = ha->ql_dump_ptr;
20599 
20600 	QL_PRINT_3(ha, "started\n");
20601 
20602 	if ((len = ha->risc_dump_size) == 0) {
20603 		QL_PRINT_10(ha, "no buffer\n");
20604 		return (0);
20605 	}
20606 	(void) snprintf(bp, len, "\nISP FW Version %d.%02d.%02d Attributes "
20607 	    "%X\n", ha->fw_major_version, ha->fw_minor_version,
20608 	    ha->fw_subminor_version, ha->fw_attributes);
20609 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20610 		return (strlen(bufp));
20611 	}
20612 
20613 	(void) snprintf(bp, len, "\nHCCR Register\n%08x\n", fw->hccr);
20614 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20615 		return (strlen(bufp));
20616 	}
20617 
20618 	(void) snprintf(bp, len, "\nR2H Status Register\n%08x\n",
20619 	    fw->r2h_status);
20620 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20621 		return (strlen(bufp));
20622 	}
20623 
20624 	(void) snprintf(bp, len,
20625 	    "\nAER Uncorrectable Error Status Register\n%08x\n", fw->aer_ues);
20626 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20627 		return (strlen(bufp));
20628 	}
20629 
20630 	(void) snprintf(bp, len, "\nHostRisc Registers");
20631 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20632 		return (strlen(bufp));
20633 	}
20634 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
20635 		if (cnt % 8 == 0) {
20636 			(void) snprintf(bp, len, "\n");
20637 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20638 				return (strlen(bufp));
20639 			}
20640 		}
20641 		(void) snprintf(bp, len, "%08x ", fw->hostrisc_reg[cnt]);
20642 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20643 			return (strlen(bufp));
20644 		}
20645 	}
20646 
20647 	(void) snprintf(bp, len, "\n\nPCIe Registers");
20648 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20649 		return (strlen(bufp));
20650 	}
20651 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
20652 		if (cnt % 8 == 0) {
20653 			(void) snprintf(bp, len, "\n");
20654 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20655 				return (strlen(bufp));
20656 			}
20657 		}
20658 		(void) snprintf(bp, len, "%08x ", fw->pcie_reg[cnt]);
20659 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20660 			return (strlen(bufp));
20661 		}
20662 	}
20663 
20664 	dp = fw->req_rsp_ext_mem;
20665 	for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
20666 		(void) snprintf(bp, len, "\n\nQueue Pointers #%d:\n", cnt);
20667 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20668 			return (strlen(bufp));
20669 		}
20670 		for (cnt1 = 0; cnt1 < 4; cnt1++) {
20671 			(void) snprintf(bp, len, "%08x ", *dp++);
20672 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20673 				return (strlen(bufp));
20674 			}
20675 		}
20676 	}
20677 
20678 	(void) snprintf(bp, len, "\n\nHost Interface Registers");
20679 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20680 		return (strlen(bufp));
20681 	}
20682 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
20683 		if (cnt % 8 == 0) {
20684 			(void) snprintf(bp, len, "\n");
20685 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20686 				return (strlen(bufp));
20687 			}
20688 		}
20689 		(void) snprintf(bp, len, "%08x ", fw->host_reg[cnt]);
20690 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20691 			return (strlen(bufp));
20692 		}
20693 	}
20694 
20695 	(void) snprintf(bp, len, "\n\nShadow Registers");
20696 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20697 		return (strlen(bufp));
20698 	}
20699 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
20700 		if (cnt % 8 == 0) {
20701 			(void) snprintf(bp, len, "\n");
20702 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20703 				return (strlen(bufp));
20704 			}
20705 		}
20706 		(void) snprintf(bp, len, "%08x ", fw->shadow_reg[cnt]);
20707 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20708 			return (strlen(bufp));
20709 		}
20710 	}
20711 
20712 	(void) snprintf(bp, len, "\n\nRISC IO Register\n%08x", fw->risc_io);
20713 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20714 		return (strlen(bufp));
20715 	}
20716 
20717 	(void) snprintf(bp, len, "\n\nMailbox Registers");
20718 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20719 		return (strlen(bufp));
20720 	}
20721 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
20722 		if (cnt % 16 == 0) {
20723 			(void) snprintf(bp, len, "\n");
20724 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20725 				return (strlen(bufp));
20726 			}
20727 		}
20728 		(void) snprintf(bp, len, "%04x ", fw->mailbox_reg[cnt]);
20729 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20730 			return (strlen(bufp));
20731 		}
20732 	}
20733 
20734 	(void) snprintf(bp, len, "\n\nXSEQ GP Registers");
20735 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20736 		return (strlen(bufp));
20737 	}
20738 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
20739 		if (cnt % 8 == 0) {
20740 			(void) snprintf(bp, len, "\n");
20741 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20742 				return (strlen(bufp));
20743 			}
20744 		}
20745 		(void) snprintf(bp, len, "%08x ", fw->xseq_gp_reg[cnt]);
20746 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20747 			return (strlen(bufp));
20748 		}
20749 	}
20750 
20751 	(void) snprintf(bp, len, "\n\nXSEQ-0 Registers");
20752 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20753 		return (strlen(bufp));
20754 	}
20755 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
20756 		if (cnt % 8 == 0) {
20757 			(void) snprintf(bp, len, "\n");
20758 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20759 				return (strlen(bufp));
20760 			}
20761 		}
20762 		(void) snprintf(bp, len, "%08x ", fw->xseq_0_reg[cnt]);
20763 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20764 			return (strlen(bufp));
20765 		}
20766 	}
20767 
20768 	(void) snprintf(bp, len, "\n\nXSEQ-1 Registers");
20769 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20770 		return (strlen(bufp));
20771 	}
20772 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
20773 		if (cnt % 8 == 0) {
20774 			(void) snprintf(bp, len, "\n");
20775 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20776 				return (strlen(bufp));
20777 			}
20778 		}
20779 		(void) snprintf(bp, len, "%08x ", fw->xseq_1_reg[cnt]);
20780 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20781 			return (strlen(bufp));
20782 		}
20783 	}
20784 
20785 	(void) snprintf(bp, len, "\n\nXSEQ-2 Registers");
20786 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20787 		return (strlen(bufp));
20788 	}
20789 	for (cnt = 0; cnt < sizeof (fw->xseq_2_reg) / 4; cnt++) {
20790 		if (cnt % 8 == 0) {
20791 			(void) snprintf(bp, len, "\n");
20792 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20793 				return (strlen(bufp));
20794 			}
20795 		}
20796 		(void) snprintf(bp, len, "%08x ", fw->xseq_2_reg[cnt]);
20797 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20798 			return (strlen(bufp));
20799 		}
20800 	}
20801 
20802 	(void) snprintf(bp, len, "\n\nRSEQ GP Registers");
20803 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20804 		return (strlen(bufp));
20805 	}
20806 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
20807 		if (cnt % 8 == 0) {
20808 			(void) snprintf(bp, len, "\n");
20809 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20810 				return (strlen(bufp));
20811 			}
20812 		}
20813 		(void) snprintf(bp, len, "%08x ", fw->rseq_gp_reg[cnt]);
20814 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20815 			return (strlen(bufp));
20816 		}
20817 	}
20818 
20819 	(void) snprintf(bp, len, "\n\nRSEQ-0 Registers");
20820 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20821 		return (strlen(bufp));
20822 	}
20823 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
20824 		if (cnt % 8 == 0) {
20825 			(void) snprintf(bp, len, "\n");
20826 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20827 				return (strlen(bufp));
20828 			}
20829 		}
20830 		(void) snprintf(bp, len, "%08x ", fw->rseq_0_reg[cnt]);
20831 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20832 			return (strlen(bufp));
20833 		}
20834 	}
20835 
20836 	(void) snprintf(bp, len, "\n\nRSEQ-1 Registers");
20837 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20838 		return (strlen(bufp));
20839 	}
20840 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
20841 		if (cnt % 8 == 0) {
20842 			(void) snprintf(bp, len, "\n");
20843 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20844 				return (strlen(bufp));
20845 			}
20846 		}
20847 		(void) snprintf(bp, len, "%08x ", fw->rseq_1_reg[cnt]);
20848 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20849 			return (strlen(bufp));
20850 		}
20851 	}
20852 
20853 	(void) snprintf(bp, len, "\n\nRSEQ-2 Registers");
20854 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20855 		return (strlen(bufp));
20856 	}
20857 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
20858 		if (cnt % 8 == 0) {
20859 			(void) snprintf(bp, len, "\n");
20860 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20861 				return (strlen(bufp));
20862 			}
20863 		}
20864 		(void) snprintf(bp, len, "%08x ", fw->rseq_2_reg[cnt]);
20865 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20866 			return (strlen(bufp));
20867 		}
20868 	}
20869 
20870 	(void) snprintf(bp, len, "\n\nRSEQ-3 Registers");
20871 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20872 		return (strlen(bufp));
20873 	}
20874 	for (cnt = 0; cnt < sizeof (fw->rseq_3_reg) / 4; cnt++) {
20875 		if (cnt % 8 == 0) {
20876 			(void) snprintf(bp, len, "\n");
20877 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20878 				return (strlen(bufp));
20879 			}
20880 		}
20881 		(void) snprintf(bp, len, "%08x ", fw->rseq_3_reg[cnt]);
20882 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20883 			return (strlen(bufp));
20884 		}
20885 	}
20886 
20887 	(void) snprintf(bp, len, "\n\nASEQ GP Registers");
20888 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20889 		return (strlen(bufp));
20890 	}
20891 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
20892 		if (cnt % 8 == 0) {
20893 			(void) snprintf(bp, len, "\n");
20894 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20895 				return (strlen(bufp));
20896 			}
20897 		}
20898 		(void) snprintf(bp, len, "%08x ", fw->aseq_gp_reg[cnt]);
20899 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20900 			return (strlen(bufp));
20901 		}
20902 	}
20903 
20904 	(void) snprintf(bp, len, "\n\nASEQ-0 Registers");
20905 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20906 		return (strlen(bufp));
20907 	}
20908 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
20909 		if (cnt % 8 == 0) {
20910 			(void) snprintf(bp, len, "\n");
20911 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20912 				return (strlen(bufp));
20913 			}
20914 		}
20915 		(void) snprintf(bp, len, "%08x ", fw->aseq_0_reg[cnt]);
20916 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20917 			return (strlen(bufp));
20918 		}
20919 	}
20920 
20921 	(void) snprintf(bp, len, "\n\nASEQ-1 Registers");
20922 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20923 		return (strlen(bufp));
20924 	}
20925 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
20926 		if (cnt % 8 == 0) {
20927 			(void) snprintf(bp, len, "\n");
20928 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20929 				return (strlen(bufp));
20930 			}
20931 		}
20932 		(void) snprintf(bp, len, "%08x ", fw->aseq_1_reg[cnt]);
20933 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20934 			return (strlen(bufp));
20935 		}
20936 	}
20937 
20938 	(void) snprintf(bp, len, "\n\nASEQ-2 Registers");
20939 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20940 		return (strlen(bufp));
20941 	}
20942 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
20943 		if (cnt % 8 == 0) {
20944 			(void) snprintf(bp, len, "\n");
20945 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20946 				return (strlen(bufp));
20947 			}
20948 		}
20949 		(void) snprintf(bp, len, "%08x ", fw->aseq_2_reg[cnt]);
20950 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20951 			return (strlen(bufp));
20952 		}
20953 	}
20954 
20955 	(void) snprintf(bp, len, "\n\nASEQ-3 Registers");
20956 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20957 		return (strlen(bufp));
20958 	}
20959 	for (cnt = 0; cnt < sizeof (fw->aseq_3_reg) / 4; cnt++) {
20960 		if (cnt % 8 == 0) {
20961 			(void) snprintf(bp, len, "\n");
20962 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20963 				return (strlen(bufp));
20964 			}
20965 		}
20966 		(void) snprintf(bp, len, "%08x ", fw->aseq_3_reg[cnt]);
20967 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20968 			return (strlen(bufp));
20969 		}
20970 	}
20971 
20972 	(void) snprintf(bp, len, "\n\nCommand DMA Registers");
20973 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20974 		return (strlen(bufp));
20975 	}
20976 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
20977 		if (cnt % 8 == 0) {
20978 			(void) snprintf(bp, len, "\n");
20979 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20980 				return (strlen(bufp));
20981 			}
20982 		}
20983 		(void) snprintf(bp, len, "%08x ", fw->cmd_dma_reg[cnt]);
20984 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20985 			return (strlen(bufp));
20986 		}
20987 	}
20988 
20989 	(void) snprintf(bp, len, "\n\nRequest0 Queue DMA Channel Registers");
20990 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20991 		return (strlen(bufp));
20992 	}
20993 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
20994 		if (cnt % 8 == 0) {
20995 			(void) snprintf(bp, len, "\n");
20996 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
20997 				return (strlen(bufp));
20998 			}
20999 		}
21000 		(void) snprintf(bp, len, "%08x ", fw->req0_dma_reg[cnt]);
21001 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21002 			return (strlen(bufp));
21003 		}
21004 	}
21005 
21006 	(void) snprintf(bp, len, "\n\nResponse0 Queue DMA Channel Registers");
21007 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21008 		return (strlen(bufp));
21009 	}
21010 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
21011 		if (cnt % 8 == 0) {
21012 			(void) snprintf(bp, len, "\n");
21013 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21014 				return (strlen(bufp));
21015 			}
21016 		}
21017 		(void) snprintf(bp, len, "%08x ", fw->resp0_dma_reg[cnt]);
21018 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21019 			return (strlen(bufp));
21020 		}
21021 	}
21022 
21023 	(void) snprintf(bp, len, "\n\nRequest1 Queue DMA Channel Registers");
21024 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21025 		return (strlen(bufp));
21026 	}
21027 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
21028 		if (cnt % 8 == 0) {
21029 			(void) snprintf(bp, len, "\n");
21030 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21031 				return (strlen(bufp));
21032 			}
21033 		}
21034 		(void) snprintf(bp, len, "%08x ", fw->req1_dma_reg[cnt]);
21035 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21036 			return (strlen(bufp));
21037 		}
21038 	}
21039 
21040 	(void) snprintf(bp, len, "\n\nXMT0 Data DMA Registers");
21041 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21042 		return (strlen(bufp));
21043 	}
21044 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
21045 		if (cnt % 8 == 0) {
21046 			(void) snprintf(bp, len, "\n");
21047 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21048 				return (strlen(bufp));
21049 			}
21050 		}
21051 		(void) snprintf(bp, len, "%08x ", fw->xmt0_dma_reg[cnt]);
21052 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21053 			return (strlen(bufp));
21054 		}
21055 	}
21056 
21057 	(void) snprintf(bp, len, "\n\nXMT1 Data DMA Registers");
21058 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21059 		return (strlen(bufp));
21060 	}
21061 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
21062 		if (cnt % 8 == 0) {
21063 			(void) snprintf(bp, len, "\n");
21064 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21065 				return (strlen(bufp));
21066 			}
21067 		}
21068 		(void) snprintf(bp, len, "%08x ", fw->xmt1_dma_reg[cnt]);
21069 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21070 			return (strlen(bufp));
21071 		}
21072 	}
21073 
21074 	(void) snprintf(bp, len, "\n\nXMT2 Data DMA Registers");
21075 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21076 		return (strlen(bufp));
21077 	}
21078 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
21079 		if (cnt % 8 == 0) {
21080 			(void) snprintf(bp, len, "\n");
21081 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21082 				return (strlen(bufp));
21083 			}
21084 		}
21085 		(void) snprintf(bp, len, "%08x ", fw->xmt2_dma_reg[cnt]);
21086 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21087 			return (strlen(bufp));
21088 		}
21089 	}
21090 
21091 	(void) snprintf(bp, len, "\n\nXMT3 Data DMA Registers");
21092 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21093 		return (strlen(bufp));
21094 	}
21095 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
21096 		if (cnt % 8 == 0) {
21097 			(void) snprintf(bp, len, "\n");
21098 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21099 				return (strlen(bufp));
21100 			}
21101 		}
21102 		(void) snprintf(bp, len, "%08x ", fw->xmt3_dma_reg[cnt]);
21103 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21104 			return (strlen(bufp));
21105 		}
21106 	}
21107 
21108 	(void) snprintf(bp, len, "\n\nXMT4 Data DMA Registers");
21109 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21110 		return (strlen(bufp));
21111 	}
21112 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
21113 		if (cnt % 8 == 0) {
21114 			(void) snprintf(bp, len, "\n");
21115 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21116 				return (strlen(bufp));
21117 			}
21118 		}
21119 		(void) snprintf(bp, len, "%08x ", fw->xmt4_dma_reg[cnt]);
21120 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21121 			return (strlen(bufp));
21122 		}
21123 	}
21124 
21125 	(void) snprintf(bp, len, "\n\nXMT Data DMA Common Registers");
21126 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21127 		return (strlen(bufp));
21128 	}
21129 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
21130 		if (cnt % 8 == 0) {
21131 			(void) snprintf(bp, len, "\n");
21132 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21133 				return (strlen(bufp));
21134 			}
21135 		}
21136 		(void) snprintf(bp, len, "%08x ", fw->xmt_data_dma_reg[cnt]);
21137 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21138 			return (strlen(bufp));
21139 		}
21140 	}
21141 
21142 	(void) snprintf(bp, len, "\n\nRCV Thread 0 Data DMA Registers");
21143 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21144 		return (strlen(bufp));
21145 	}
21146 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
21147 		if (cnt % 8 == 0) {
21148 			(void) snprintf(bp, len, "\n");
21149 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21150 				return (strlen(bufp));
21151 			}
21152 		}
21153 		(void) snprintf(bp, len, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
21154 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21155 			return (strlen(bufp));
21156 		}
21157 	}
21158 
21159 	(void) snprintf(bp, len, "\n\nRCV Thread 1 Data DMA Registers");
21160 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21161 		return (strlen(bufp));
21162 	}
21163 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
21164 		if (cnt % 8 == 0) {
21165 			(void) snprintf(bp, len, "\n");
21166 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21167 				return (strlen(bufp));
21168 			}
21169 		}
21170 		(void) snprintf(bp, len, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
21171 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21172 			return (strlen(bufp));
21173 		}
21174 	}
21175 
21176 	(void) snprintf(bp, len, "\n\nRISC GP Registers");
21177 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21178 		return (strlen(bufp));
21179 	}
21180 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
21181 		if (cnt % 8 == 0) {
21182 			(void) snprintf(bp, len, "\n");
21183 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21184 				return (strlen(bufp));
21185 			}
21186 		}
21187 		(void) snprintf(bp, len, "%08x ", fw->risc_gp_reg[cnt]);
21188 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21189 			return (strlen(bufp));
21190 		}
21191 	}
21192 
21193 	(void) snprintf(bp, len, "\n\nLMC Registers");
21194 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21195 		return (strlen(bufp));
21196 	}
21197 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
21198 		if (cnt % 8 == 0) {
21199 			(void) snprintf(bp, len, "\n");
21200 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21201 				return (strlen(bufp));
21202 			}
21203 		}
21204 		(void) snprintf(bp, len, "%08x ", fw->lmc_reg[cnt]);
21205 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21206 			return (strlen(bufp));
21207 		}
21208 	}
21209 
21210 	(void) snprintf(bp, len, "\n\nFPM Hardware Registers");
21211 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21212 		return (strlen(bufp));
21213 	}
21214 	cnt1 = (uint32_t)(sizeof (fw->fpm_hdw_reg));
21215 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21216 		if (cnt % 8 == 0) {
21217 			(void) snprintf(bp, len, "\n");
21218 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21219 				return (strlen(bufp));
21220 			}
21221 		}
21222 		(void) snprintf(bp, len, "%08x ", fw->fpm_hdw_reg[cnt]);
21223 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21224 			return (strlen(bufp));
21225 		}
21226 	}
21227 
21228 	(void) snprintf(bp, len, "\n\nRQ0 Array Registers");
21229 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21230 		return (strlen(bufp));
21231 	}
21232 	cnt1 = (uint32_t)(sizeof (fw->rq0_array_reg));
21233 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21234 		if (cnt % 8 == 0) {
21235 			(void) snprintf(bp, len, "\n");
21236 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21237 				return (strlen(bufp));
21238 			}
21239 		}
21240 		(void) snprintf(bp, len, "%08x ", fw->rq0_array_reg[cnt]);
21241 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21242 			return (strlen(bufp));
21243 		}
21244 	}
21245 
21246 	(void) snprintf(bp, len, "\n\nRQ1 Array Registers");
21247 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21248 		return (strlen(bufp));
21249 	}
21250 	cnt1 = (uint32_t)(sizeof (fw->rq1_array_reg));
21251 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21252 		if (cnt % 8 == 0) {
21253 			(void) snprintf(bp, len, "\n");
21254 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21255 				return (strlen(bufp));
21256 			}
21257 		}
21258 		(void) snprintf(bp, len, "%08x ", fw->rq1_array_reg[cnt]);
21259 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21260 			return (strlen(bufp));
21261 		}
21262 	}
21263 
21264 	(void) snprintf(bp, len, "\n\nRP0 Array Registers");
21265 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21266 		return (strlen(bufp));
21267 	}
21268 	cnt1 = (uint32_t)(sizeof (fw->rp0_array_reg));
21269 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21270 		if (cnt % 8 == 0) {
21271 			(void) snprintf(bp, len, "\n");
21272 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21273 				return (strlen(bufp));
21274 			}
21275 		}
21276 		(void) snprintf(bp, len, "%08x ", fw->rp0_array_reg[cnt]);
21277 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21278 			return (strlen(bufp));
21279 		}
21280 	}
21281 
21282 	(void) snprintf(bp, len, "\n\nRP1 Array Registers");
21283 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21284 		return (strlen(bufp));
21285 	}
21286 	cnt1 = (uint32_t)(sizeof (fw->rp1_array_reg));
21287 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21288 		if (cnt % 8 == 0) {
21289 			(void) snprintf(bp, len, "\n");
21290 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21291 				return (strlen(bufp));
21292 			}
21293 		}
21294 		(void) snprintf(bp, len, "%08x ", fw->rp1_array_reg[cnt]);
21295 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21296 			return (strlen(bufp));
21297 		}
21298 	}
21299 
21300 	(void) snprintf(bp, len, "\n\nAT0 Array Registers");
21301 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21302 		return (strlen(bufp));
21303 	}
21304 	cnt1 = (uint32_t)(sizeof (fw->ato_array_reg));
21305 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21306 		if (cnt % 8 == 0) {
21307 			(void) snprintf(bp, len, "\n");
21308 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21309 				return (strlen(bufp));
21310 			}
21311 		}
21312 		(void) snprintf(bp, len, "%08x ", fw->ato_array_reg[cnt]);
21313 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21314 			return (strlen(bufp));
21315 		}
21316 	}
21317 
21318 	(void) snprintf(bp, len, "\n\nQueue Control Registers");
21319 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21320 		return (strlen(bufp));
21321 	}
21322 	cnt1 = (uint32_t)(sizeof (fw->queue_control_reg));
21323 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21324 		if (cnt % 8 == 0) {
21325 			(void) snprintf(bp, len, "\n");
21326 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21327 				return (strlen(bufp));
21328 			}
21329 		}
21330 		(void) snprintf(bp, len, "%08x ", fw->queue_control_reg[cnt]);
21331 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21332 			return (strlen(bufp));
21333 		}
21334 	}
21335 
21336 	(void) snprintf(bp, len, "\n\nFB Hardware Registers");
21337 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21338 		return (strlen(bufp));
21339 	}
21340 	cnt1 = (uint32_t)(sizeof (fw->fb_hdw_reg));
21341 	for (cnt = 0; cnt < cnt1 / 4; cnt++) {
21342 		if (cnt % 8 == 0) {
21343 			(void) snprintf(bp, len, "\n");
21344 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21345 				return (strlen(bufp));
21346 			}
21347 		}
21348 		(void) snprintf(bp, len, "%08x ", fw->fb_hdw_reg[cnt]);
21349 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21350 			return (strlen(bufp));
21351 		}
21352 	}
21353 
21354 	(void) snprintf(bp, len, "\n\nCode RAM");
21355 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21356 		return (strlen(bufp));
21357 	}
21358 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
21359 		if (cnt % 8 == 0) {
21360 			(void) snprintf(bp, len, "\n%08x: ", cnt + 0x20000);
21361 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21362 				return (strlen(bufp));
21363 			}
21364 		}
21365 		(void) snprintf(bp, len, "%08x ", fw->code_ram[cnt]);
21366 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21367 			return (strlen(bufp));
21368 		}
21369 	}
21370 
21371 	(void) snprintf(bp, len, "\n\nExternal Memory");
21372 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21373 		return (strlen(bufp));
21374 	}
21375 	dp = (uint32_t *)((caddr_t)fw->req_rsp_ext_mem + fw->req_q_size[0] +
21376 	    fw->req_q_size[1] + fw->rsp_q_size + (ha->rsp_queues_cnt * 16));
21377 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
21378 		if (cnt % 8 == 0) {
21379 			(void) snprintf(bp, len, "\n%08x: ", cnt + 0x100000);
21380 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21381 				return (strlen(bufp));
21382 			}
21383 		}
21384 		(void) snprintf(bp, len, "%08x ", *dp++);
21385 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21386 			return (strlen(bufp));
21387 		}
21388 	}
21389 
21390 	(void) snprintf(bp, len, "\n\n[<==END] ISP Debug Dump");
21391 	if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21392 		return (strlen(bufp));
21393 	}
21394 
21395 	dp = fw->req_rsp_ext_mem + (ha->rsp_queues_cnt * 4);
21396 	for (cnt = 0; cnt < 2 && fw->req_q_size[cnt]; cnt++) {
21397 		dp2 = dp;
21398 		for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21399 			if (*dp2++) {
21400 				break;
21401 			}
21402 		}
21403 		if (cnt1 == fw->req_q_size[cnt] / 4) {
21404 			dp = dp2;
21405 			continue;
21406 		}
21407 		(void) snprintf(bp, len, "\n\nRequest Queue\nQueue %d:", cnt);
21408 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21409 			return (strlen(bufp));
21410 		}
21411 		for (cnt1 = 0; cnt1 < fw->req_q_size[cnt] / 4; cnt1++) {
21412 			if (cnt1 % 8 == 0) {
21413 				(void) snprintf(bp, len, "\n%08x: ", cnt1);
21414 				if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21415 					return (strlen(bufp));
21416 				}
21417 			}
21418 			(void) snprintf(bp, len, "%08x ", *dp++);
21419 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21420 				return (strlen(bufp));
21421 			}
21422 		}
21423 	}
21424 
21425 	for (cnt = 0; cnt < ha->rsp_queues_cnt; cnt++) {
21426 		dp2 = dp;
21427 		for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21428 		    cnt1++) {
21429 			if (*dp2++) {
21430 				break;
21431 			}
21432 		}
21433 		if (cnt1 == ha->rsp_queues[cnt]->rsp_ring.size / 4) {
21434 			dp = dp2;
21435 			continue;
21436 		}
21437 		(void) snprintf(bp, len, "\n\nResponse Queue\nQueue %d:", cnt);
21438 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21439 			return (strlen(bufp));
21440 		}
21441 		for (cnt1 = 0; cnt1 < ha->rsp_queues[cnt]->rsp_ring.size / 4;
21442 		    cnt1++) {
21443 			if (cnt1 % 8 == 0) {
21444 				(void) snprintf(bp, len, "\n%08x: ", cnt1);
21445 				if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21446 					return (strlen(bufp));
21447 				}
21448 			}
21449 			(void) snprintf(bp, len, "%08x ", *dp++);
21450 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21451 				return (strlen(bufp));
21452 			}
21453 		}
21454 	}
21455 
21456 	if (ha->fwexttracebuf.dma_handle != NULL) {
21457 		uint32_t	cnt_b;
21458 		uint64_t	w64 = (uintptr_t)ha->fwexttracebuf.bp;
21459 
21460 		(void) snprintf(bp, len, "\n\nExtended Trace Buffer Memory");
21461 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21462 			return (strlen(bufp));
21463 		}
21464 		/* show data address as a byte address, data as long words */
21465 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
21466 			cnt_b = cnt * 4;
21467 			if (cnt_b % 32 == 0) {
21468 				(void) snprintf(bp, len, "\n%08x: ",
21469 				    (int)(w64 + cnt_b));
21470 				if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21471 					return (strlen(bufp));
21472 				}
21473 			}
21474 			(void) snprintf(bp, len, "%08x ",
21475 			    fw->ext_trace_buf[cnt]);
21476 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21477 				return (strlen(bufp));
21478 			}
21479 		}
21480 	}
21481 
21482 	if (ha->fwfcetracebuf.dma_handle != NULL) {
21483 		uint32_t	cnt_b;
21484 		uint64_t	w64 = (uintptr_t)ha->fwfcetracebuf.bp;
21485 
21486 		(void) snprintf(bp, len, "\n\nFC Event Trace Buffer Memory");
21487 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21488 			return (strlen(bufp));
21489 		}
21490 		/* show data address as a byte address, data as long words */
21491 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
21492 			cnt_b = cnt * 4;
21493 			if (cnt_b % 32 == 0) {
21494 				(void) snprintf(bp, len, "\n%08x: ",
21495 				    (int)(w64 + cnt_b));
21496 				if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21497 					return (strlen(bufp));
21498 				}
21499 			}
21500 			(void) snprintf(bp, len, "%08x ",
21501 			    fw->fce_trace_buf[cnt]);
21502 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21503 				return (strlen(bufp));
21504 			}
21505 		}
21506 	}
21507 
21508 	QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21509 
21510 	return (strlen(bufp));
21511 }
21512 
21513 
21514 /*
21515  * ql_str_ptr
21516  *	Verifies buffer is not full
21517  *
21518  * Input:
21519  *	ha:	adapter state pointer.
21520  *	bp:	string buffer pointer
21521  *	len:	buffer length
21522  *
21523  * Returns:
21524  *	NULL = buffer full else adjusted buffer pointer
21525  *
21526  * Context:
21527  *	Kernel context.
21528  */
21529 /*ARGSUSED*/
21530 static caddr_t
ql_str_ptr(ql_adapter_state_t * ha,caddr_t bp,uint32_t * len)21531 ql_str_ptr(ql_adapter_state_t *ha, caddr_t bp, uint32_t *len)
21532 {
21533 	uint32_t	i;
21534 
21535 	i = strlen(bp);
21536 	if (i > *len || !(*len -= i)) {
21537 		QL_PRINT_10(ha, "full buffer\n");
21538 		return (NULL);
21539 	}
21540 	return (bp += i);
21541 }
21542 
21543 /*
21544  * ql_27xx_binary_fw_dump
21545  *
21546  * Input:
 *	ha:	adapter state pointer.
21549  *
21550  * Returns:
21551  *	ql local function return status code.
21552  *
21553  * Context:
21554  *	Interrupt or Kernel context, no mailbox commands allowed.
21555  */
static int
ql_27xx_binary_fw_dump(ql_adapter_state_t *ha)
{
	ql_dmp_template_t	*template_buff;
	int			rval;
	uint32_t		cnt, *dp, *bp, tsize;

	QL_PRINT_10(ha, "started\n");

	/*
	 * Fetch the dump template on first use: from flash when the
	 * firmware was loaded from flash, otherwise from the firmware
	 * module image.
	 */
	if (ha->dmp_template.dma_handle == NULL) {
		rval = CFG_IST(ha, CFG_LOAD_FLASH_FW) ?
		    ql_2700_get_flash_dmp_template(ha) :
		    ql_2700_get_module_dmp_template(ha);
		if (rval != QL_SUCCESS) {
			EL(ha, "no dump template, status=%xh\n", rval);
			return (QL_FUNCTION_PARAMETER_ERROR);
		}
	}
	template_buff = ha->dmp_template.bp;
	tsize = template_buff->hdr.size_of_template;

	if (ha->md_capture_size == 0) {
		/*
		 * Sizing pass: copy the template out of the DMA buffer
		 * into regular memory and parse it with a NULL capture
		 * buffer so the parser only totals the capture size.
		 */
		ha->ql_dump_ptr = kmem_zalloc(tsize, KM_NOSLEEP);
		if (ha->ql_dump_ptr == NULL) {
			QL_PRINT_10(ha, "done, failed alloc\n");
			return (QL_MEMORY_ALLOC_FAILED);
		}
		cnt = (uint32_t)(tsize / sizeof (uint32_t));
		dp = (uint32_t *)ha->ql_dump_ptr;
		bp = (uint32_t *)&template_buff->hdr;
		while (cnt--) {
			*dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
		}
		/* NULL buffer = size determination only. */
		ha->md_capture_size = ql_2700_dmp_parse_template(ha,
		    (ql_dt_hdr_t *)ha->ql_dump_ptr, NULL, 0);
		kmem_free(ha->ql_dump_ptr, tsize);
		ha->ql_dump_ptr = NULL;

		if (ha->md_capture_size == 0) {
			return (QL_MEMORY_ALLOC_FAILED);
		}

		/*
		 * Determine ascii dump file size
		 * 2 ascii bytes per binary byte + a space and
		 * a newline every 16 binary bytes
		 */
		ha->risc_dump_size = ha->md_capture_size << 1;
		ha->risc_dump_size += ha->md_capture_size;
		ha->risc_dump_size += ha->md_capture_size / 16 + 1;
		QL_PRINT_10(ha, "md_capture_size=%xh, "
		    "risc_dump_size=%xh\n", ha->md_capture_size,
		    ha->risc_dump_size);
	}

	/* Capture pass: allocate the full binary dump buffer. */
	ha->ql_dump_ptr = kmem_zalloc(ha->md_capture_size, KM_NOSLEEP);
	if (ha->ql_dump_ptr == NULL) {
		QL_PRINT_10(ha, "done, failed alloc\n");
		return (QL_MEMORY_ALLOC_FAILED);
	}
	ha->ql_dump_size = ha->md_capture_size;

	/* Disable ISP interrupts. */
	ql_disable_intr(ha);

	/* Copy the template to the head of the dump buffer. */
	cnt = (uint32_t)(tsize / sizeof (uint32_t));
	dp = (uint32_t *)ha->ql_dump_ptr;
	bp = (uint32_t *)&template_buff->hdr;
	while (cnt--) {
		*dp++ = ddi_get32(ha->dmp_template.acc_handle, bp++);
	}

	/* dp now points just past the template; captured data lands there. */
	(void) ql_2700_dmp_parse_template(ha,
	    (ql_dt_hdr_t *)ha->ql_dump_ptr,
	    (uint8_t *)dp, ha->ql_dump_size);

#ifdef _BIG_ENDIAN
	/*
	 * Byte-swap the template portion (tsize bytes) of the dump on
	 * big endian hosts; the captured data is left as read.
	 */
	cnt = (uint32_t)(tsize / sizeof (uint32_t));
	dp = (uint32_t *)ha->ql_dump_ptr;
	while (cnt--) {
		ql_chg_endian((uint8_t *)dp, 4);
		dp++;
	}
#endif
	QL_PRINT_10(ha, "done\n");
	return (QL_SUCCESS);
}
21643 
21644 /*
21645  * ql_27xx_ascii_fw_dump
21646  *	Converts ISP27xx firmware binary dump to ascii.
21647  *
21648  * Input:
 *	ha:	adapter state pointer.
 *	bufp:	buffer pointer.
21651  *
21652  * Returns:
21653  *	Amount of data buffer used.
21654  *
21655  * Context:
21656  *	Kernel context.
21657  */
21658 static size_t
ql_27xx_ascii_fw_dump(ql_adapter_state_t * ha,caddr_t bufp)21659 ql_27xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
21660 {
21661 	uint32_t	cnt, len, dsize;
21662 	uint8_t		*fw;
21663 	caddr_t		bp;
21664 
21665 	QL_PRINT_10(ha, "started\n");
21666 
21667 	if ((len = ha->risc_dump_size) == 0) {
21668 		QL_PRINT_10(ha, "no buffer\n");
21669 		return (0);
21670 	}
21671 
21672 	dsize = ha->ql_dump_size;
21673 	fw = (uint8_t *)ha->ql_dump_ptr;
21674 	bp = bufp;
21675 
21676 	QL_PRINT_10(ha, "fw_dump_buffer=%ph, fw_bin_dump_size=%xh\n",
21677 	    (void *)ha->ql_dump_ptr, ha->ql_dump_size);
21678 
21679 	/*
21680 	 * 2 ascii bytes per binary byte + a space and
21681 	 * a newline every 16 binary bytes
21682 	 */
21683 	cnt = 0;
21684 	while (cnt < dsize) {
21685 		(void) snprintf(bp, len, "%02x ", *fw++);
21686 		if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21687 			return (strlen(bufp));
21688 		}
21689 		if (++cnt % 16 == 0) {
21690 			(void) snprintf(bp, len, "\n");
21691 			if ((bp = ql_str_ptr(ha, bp, &len)) == NULL) {
21692 				return (strlen(bufp));
21693 			}
21694 		}
21695 	}
21696 	if (cnt % 16 != 0) {
21697 		(void) snprintf(bp, len, "\n");
21698 		bp = ql_str_ptr(ha, bp, &len);
21699 		if (bp == NULL) {
21700 			return (strlen(bufp));
21701 		}
21702 	}
21703 
21704 	QL_PRINT_10(ha, "done=%xh\n", strlen(bufp));
21705 
21706 	return (strlen(bufp));
21707 }
21708 
21709 /* ******************************************************************* */
21710 /* ********************* Dump Template Functions ********************* */
21711 /* ******************************************************************* */
21712 
21713 /*
21714  * ql_2700_get_module_dmp_template
21715  *	Get dump template from firmware module
21716  *
21717  * Input:
21718  *	ha:	adapter state pointer.
21719  *
21720  * Returns:
21721  *	ql local function return status code.
21722  *
21723  * Context:
21724  *	Kernel context.
21725  */
21726 int
ql_2700_get_module_dmp_template(ql_adapter_state_t * ha)21727 ql_2700_get_module_dmp_template(ql_adapter_state_t *ha)
21728 {
21729 	int		rval;
21730 	uint32_t	word_count, cnt, *bp, *dp;
21731 
21732 	QL_PRINT_10(ha, "started\n");
21733 
21734 	if (ha->dmp_template.dma_handle != NULL) {
21735 		return (QL_SUCCESS);
21736 	}
21737 
21738 	if ((word_count = ha->risc_fw[2].length) == 0) {
21739 		EL(ha, "no dump template, length=0\n");
21740 		return (QL_FUNCTION_PARAMETER_ERROR);
21741 	}
21742 
21743 	/* Allocate template buffer. */
21744 	ha->dmp_template.size = word_count << 2;
21745 	ha->dmp_template.type = LITTLE_ENDIAN_DMA;
21746 	ha->dmp_template.max_cookie_count = 1;
21747 	ha->dmp_template.alignment = 8;
21748 	rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
21749 	if (rval != QL_SUCCESS) {
21750 		EL(ha, "unable to allocate template buffer, "
21751 		    "status=%xh\n", rval);
21752 		return (rval);
21753 	}
21754 
21755 	/* Get big endian template. */
21756 	bp = ha->dmp_template.bp;
21757 	dp = (uint32_t *)ha->risc_fw[2].code;
21758 	for (cnt = 0; cnt < word_count; cnt++) {
21759 		ddi_put32(ha->dmp_template.acc_handle, bp, *dp++);
21760 		if (cnt > 6) {
21761 			ql_chg_endian((uint8_t *)bp, 4);
21762 		}
21763 		bp++;
21764 	}
21765 
21766 	QL_PRINT_10(ha, "done\n");
21767 	return (rval);
21768 }
21769 
21770 /*
21771  * ql_2700_get_flash_dmp_template
21772  *	Get dump template from flash
21773  *
21774  * Input:
 *	ha:	adapter state pointer.
21776  *
21777  * Returns:
21778  *	ql local function return status code.
21779  *
21780  * Context:
21781  *	Kernel context.
21782  */
21783 int
ql_2700_get_flash_dmp_template(ql_adapter_state_t * ha)21784 ql_2700_get_flash_dmp_template(ql_adapter_state_t *ha)
21785 {
21786 	int		rval;
21787 	uint32_t	word_count, cnt, *bp;
21788 	uint32_t	faddr = ha->flash_data_addr | ha->flash_fw_addr;
21789 	uint32_t	fdata = 0;
21790 
21791 	QL_PRINT_10(ha, "started, fw_addr=%xh\n", ha->flash_fw_addr);
21792 
21793 	if (ha->dmp_template.dma_handle != NULL) {
21794 		ql_free_phys(ha, &ha->dmp_template);
21795 	}
21796 
21797 	/* First array length */
21798 	rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
21799 	QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21800 	    faddr + 3, fdata);
21801 	if (rval != QL_SUCCESS) {
21802 		EL(ha, "2700_read_flash status=%xh\n", rval);
21803 		return (rval);
21804 	}
21805 	if (fdata == 0 || fdata == 0xffffffff) {
21806 		EL(ha, "Invalid first array length = %xh\n", fdata);
21807 		return (QL_FUNCTION_PARAMETER_ERROR);
21808 	}
21809 	ql_chg_endian((uint8_t *)&fdata, 4);
21810 	QL_PRINT_7(ha, "First array length = %xh\n", fdata);
21811 	faddr += fdata;
21812 
21813 	/* Second array length */
21814 	rval = ql_24xx_read_flash(ha, faddr + 3, &fdata);
21815 	QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21816 	    faddr + 3, fdata);
21817 	if (rval != QL_SUCCESS) {
21818 		EL(ha, "2700_read_flash status=%xh\n", rval);
21819 		return (rval);
21820 	}
21821 	if (fdata == 0 || fdata == 0xffffffff) {
21822 		EL(ha, "Invalid second array length = %xh\n", fdata);
21823 		return (QL_FUNCTION_PARAMETER_ERROR);
21824 	}
21825 	ql_chg_endian((uint8_t *)&fdata, 4);
21826 	QL_PRINT_7(ha, "Second array length = %xh\n", fdata);
21827 	faddr += fdata;
21828 
21829 	/* Third array length (dump template) */
21830 	rval = ql_24xx_read_flash(ha, faddr + 2, &fdata);
21831 	QL_PRINT_7(ha, "read_flash, fw_addr=0x%x, data=0x%x\n",
21832 	    faddr + 2, fdata);
21833 	if (rval != QL_SUCCESS) {
21834 		EL(ha, "2700_read_flash status=%xh\n", rval);
21835 		return (rval);
21836 	}
21837 	if (fdata == 0 || fdata == 0xffffffff) {
21838 		EL(ha, "Invalid third array length = %xh\n", fdata);
21839 		return (QL_FUNCTION_PARAMETER_ERROR);
21840 	}
21841 	ql_chg_endian((uint8_t *)&fdata, 4);
21842 	QL_PRINT_7(ha, "Third array length = %xh\n", fdata);
21843 	word_count = fdata;
21844 
21845 	/* Allocate template buffer. */
21846 	ha->dmp_template.size = word_count << 2;
21847 	ha->dmp_template.type = LITTLE_ENDIAN_DMA;
21848 	ha->dmp_template.max_cookie_count = 1;
21849 	ha->dmp_template.alignment = 8;
21850 	rval = ql_alloc_phys(ha, &ha->dmp_template, KM_SLEEP);
21851 	if (rval != QL_SUCCESS) {
21852 		EL(ha, "unable to allocate template buffer, "
21853 		    "status=%xh\n", rval);
21854 		return (rval);
21855 	}
21856 
21857 	/* Get big endian template. */
21858 	bp = ha->dmp_template.bp;
21859 	for (cnt = 0; cnt < word_count; cnt++) {
21860 		rval = ql_24xx_read_flash(ha, faddr++, &fdata);
21861 		if (rval != QL_SUCCESS) {
21862 			EL(ha, "2700_read_flash status=%xh\n", rval);
21863 			ql_free_phys(ha, &ha->dmp_template);
21864 			return (rval);
21865 		}
21866 		ddi_put32(ha->dmp_template.acc_handle, bp, fdata);
21867 		bp++;
21868 	}
21869 
21870 	QL_PRINT_10(ha, "done\n");
21871 	return (rval);
21872 }
21873 
/*
 * ql_2700_dmp_parse_template
 *	Walks an ISP27xx dump template and processes each entry.  When
 *	dump_buff is NULL this is a sizing pass: entries only report how
 *	many bytes they would capture.  Otherwise each entry captures
 *	its data into dump_buff.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	template_hdr:	in-memory copy of the dump template.
 *	dump_buff:	capture buffer, NULL = size determination.
 *	buff_size:	capture buffer size in bytes.
 *
 * Returns:
 *	Total dump size in bytes (template plus captured data),
 *	0 on template error.
 *
 * Context:
 *	Interrupt or Kernel context.
 */
static uint32_t
ql_2700_dmp_parse_template(ql_adapter_state_t *ha, ql_dt_hdr_t *template_hdr,
    uint8_t *dump_buff, uint32_t buff_size)
{
	int		e_cnt, esize, num_of_entries;
	uint32_t	bsize;
	time_t		time;
	uint8_t		*dbuff, *dbuff_end;
	ql_dt_entry_t	*entry;
	int		sane_end = 0;

	dbuff = dump_buff;	/* dbuff = NULL	size determination. */
	dbuff_end = dump_buff + buff_size;

	/* Record the running firmware version in the template header. */
	template_hdr->ver_attr[0] = ha->fw_major_version;
	template_hdr->ver_attr[1] = ha->fw_minor_version;
	template_hdr->ver_attr[2] = ha->fw_subminor_version;
	template_hdr->ver_attr[3] = ha->fw_attributes;
	template_hdr->ver_attr[4] = ha->fw_ext_attributes;

	QL_PRINT_7(ha, "started, template_hdr=%ph, dump_buff=%ph, "
	    "buff_size=%xh, buff_end=%ph\n", (void *)template_hdr,
	    (void *)dbuff, buff_size, (void *)dbuff_end);

	/* Setup parameters */
	QL_PRINT_7(ha, "type=%d, first_entry_offset=%xh, "
	    "num_of_entries=%xh ver_attr=%xh,%xh,%xh,%xh,%xh\n",
	    template_hdr->type, template_hdr->first_entry_offset,
	    template_hdr->num_of_entries, template_hdr->ver_attr[0],
	    template_hdr->ver_attr[1], template_hdr->ver_attr[2],
	    template_hdr->ver_attr[3], template_hdr->ver_attr[4]);

	if (template_hdr->type != DT_THDR) {
		EL(ha, "Template header not found\n");
		return (0);
	}
	/* Timestamp only on a real capture pass. */
	if (dbuff != NULL) {
		(void) drv_getparm(TIME, &time);
		template_hdr->driver_timestamp = LSD(time);
	}

	num_of_entries = template_hdr->num_of_entries;
	entry = (ql_dt_entry_t *)((caddr_t)template_hdr +
	    template_hdr->first_entry_offset);

	/* Running total starts at the template's own size. */
	bsize = template_hdr->size_of_template;
	for (e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
		QL_PRINT_7(ha, "e_cnt=%xh, entry=%ph, type=%d, size=%xh, "
		    "capture_flags=%xh, driver_flags=%xh, bofst=%xh\n",
		    e_cnt, (void *)entry, entry->h.type, entry->h.size,
		    entry->h.capture_flags, entry->h.driver_flags,
		    dbuff != NULL ? (uintptr_t)dbuff - (uintptr_t)template_hdr :
		    bsize);
		/*
		 * Decode the entry type and process it accordingly
		 */
		esize = 0;
		switch (entry->h.type) {
		case DT_NOP:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
			    e_cnt, entry->h.type);
			break;
		case DT_TEND:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			QL_PRINT_3(ha, "Skipping Entry ID=%d, type=%d\n",
			    e_cnt, entry->h.type);
			sane_end++;
			break;
		case DT_RIOB1:
			esize = ql_2700_dt_riob1(ha, (ql_dt_riob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB1:
			ql_2700_dt_wiob1(ha, (ql_dt_wiob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RIOB2:
			esize = ql_2700_dt_riob2(ha, (ql_dt_riob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB2:
			ql_2700_dt_wiob2(ha, (ql_dt_wiob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCI:
			esize = ql_2700_dt_rpci(ha, (ql_dt_rpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WPCI:
			ql_2700_dt_wpci(ha, (ql_dt_wpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRAM:
			esize = ql_2700_dt_rram(ha, (ql_dt_rram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUE:
			esize = ql_2700_dt_gque(ha, (ql_dt_gque_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GFCE:
			esize = ql_2700_dt_gfce(ha, (ql_dt_gfce_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_PRISC:
			ql_2700_dt_prisc(ha, (ql_dt_prisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRISC:
			ql_2700_dt_rrisc(ha, (ql_dt_rrisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_DINT:
			ql_2700_dt_dint(ha, (ql_dt_dint_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GHBD:
			esize = ql_2700_dt_ghbd(ha, (ql_dt_ghbd_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_SCRA:
			esize = ql_2700_dt_scra(ha, (ql_dt_scra_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRREG:
			esize = ql_2700_dt_rrreg(ha, (ql_dt_rrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WRREG:
			ql_2700_dt_wrreg(ha, (ql_dt_wrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRRAM:
			esize = ql_2700_dt_rrram(ha, (ql_dt_rrram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCIC:
			esize = ql_2700_dt_rpcic(ha, (ql_dt_rpcic_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUES:
			esize = ql_2700_dt_gques(ha, (ql_dt_gques_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WDMP:
			esize = ql_2700_dt_wdmp(ha, (ql_dt_wdmp_t *)entry,
			    dbuff, dbuff_end);
			break;
		default:
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			EL(ha, "Entry ID=%d, type=%d unknown\n", e_cnt,
			    entry->h.type);
			break;
		}
		/* Account for any data this entry captured. */
		if (dbuff != NULL && esize) {
			QL_PRINT_7(ha, "entry=%d, esize=%xh, capture data\n",
			    entry->h.type, esize);
			QL_DUMP_3(dbuff, 8, esize);
			dbuff += esize;
		}
		bsize += esize;
		/* next entry in the template */
		entry = (ql_dt_entry_t *)((caddr_t)entry + entry->h.size);
	}
	/* Exactly one DT_TEND entry is expected. */
	if (sane_end > 1) {
		EL(ha, "Template configuration error. Check Template\n");
	}

	QL_PRINT_7(ha, "done, num of entries=%xh, size=%xh\n",
	    template_hdr->num_of_entries, bsize);
	return (bsize);
}
22054 
/*
 * ql_2700_dt_riob1
 *	Processes a dump template "read I/O block type 1" entry.  Selects
 *	the register window at entry->addr and captures (window address,
 *	register data) pairs, reading entry->reg_size bytes per window
 *	through the PCI-mapped register at entry->pci_offset.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped.
 */
static int
ql_2700_dt_riob1(ql_adapter_state_t *ha, ql_dt_riob1_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i, cnt;
	uint8_t		*bp = dbuff;
	uint32_t	addr = entry->addr;
	uint8_t		*reg = (uint8_t *)ha->iobase + entry->pci_offset;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
	    "reg_count=%x%02xh, pci_offset=%xh\n", (void *)dbuff, entry->addr,
	    entry->reg_size, entry->reg_count_h, entry->reg_count_l,
	    entry->pci_offset);

	/* 16-bit register count is split across two template bytes. */
	cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
	esize = cnt * 4;		/* addr */
	esize += cnt * entry->reg_size;	/* data */

	/* Sizing pass: report space needed without touching hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select register window, then capture address + data pairs. */
	WRT32_IO_REG(ha, io_base_addr, addr);
	while (cnt--) {
		/* Window address is stored little-endian. */
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < entry->reg_size; i++) {
			*bp++ = RD_REG_BYTE(ha, reg++);
		}
		addr++;
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22100 
/*
 * ql_2700_dt_wiob1
 *	Processes a dump template "write I/O block type 1" entry.  Selects
 *	the register window at entry->addr and writes entry->data to the
 *	PCI-mapped register at entry->pci_offset.  Captures no data.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 */
static void
ql_2700_dt_wiob1(ql_adapter_state_t *ha, ql_dt_wiob1_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	uint8_t	*reg = (uint8_t *)ha->iobase + entry->pci_offset;

	QL_PRINT_7(ha, "started, addr=%xh, data=%xh, pci_offset=%xh\n",
	    entry->addr, entry->data, entry->pci_offset);

	/* Sizing pass: nothing to do, entry consumes no buffer space. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Window select must precede the data write. */
	WRT32_IO_REG(ha, io_base_addr, entry->addr);
	WRT_REG_DWORD(ha, reg, entry->data);

	QL_PRINT_7(ha, "done\n");
}
22126 
/*
 * ql_2700_dt_riob2
 *	Processes a dump template "read I/O block type 2" entry.  Like
 *	type 1 but additionally selects a register bank by writing
 *	entry->reg_bank to the register at entry->bank_sel_offset before
 *	capturing (window address, register data) pairs.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped.
 */
static int
ql_2700_dt_riob2(ql_adapter_state_t *ha, ql_dt_riob2_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i, cnt;
	uint8_t		*bp = dbuff;
	uint8_t		*reg = (uint8_t *)ha->iobase + entry->pci_offset;
	uint32_t	addr = entry->addr;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, reg_size=%xh, "
	    "reg_count=%x%02xh, pci_offset=%xh, bank_sel_offset=%xh, "
	    "reg_bank=%xh\n", (void *)dbuff, entry->addr,
	    entry->reg_size, entry->reg_count_h, entry->reg_count_l,
	    entry->pci_offset, entry->bank_sel_offset, entry->reg_bank);

	/* 16-bit register count is split across two template bytes. */
	cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
	esize = cnt * 4;		/* addr */
	esize += cnt * entry->reg_size;	/* data */

	/* Sizing pass: report space needed without touching hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select window, then bank, before reading the registers. */
	WRT32_IO_REG(ha, io_base_addr, addr);
	WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
	while (cnt--) {
		/* Window address is stored little-endian. */
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < entry->reg_size; i++) {
			*bp++ = RD_REG_BYTE(ha, reg++);
		}
		addr++;
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22174 
/*
 * ql_2700_dt_wiob2
 *	Processes a dump template "write I/O block type 2" entry.  Selects
 *	the register window and bank, then writes a 16-bit value (assembled
 *	from entry->data_l/data_h) to the register at entry->pci_offset.
 *	Captures no data.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 */
static void
ql_2700_dt_wiob2(ql_adapter_state_t *ha, ql_dt_wiob2_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	uint16_t	data;
	uint8_t		*reg = (uint8_t *)ha->iobase + entry->pci_offset;

	QL_PRINT_7(ha, "started, addr=%xh, data=%x%02xh, pci_offset=%xhh, "
	    "bank_sel_offset=%xh, reg_bank=%xh\n", entry->addr, entry->data_h,
	    entry->data_l, entry->pci_offset, entry->bank_sel_offset,
	    entry->reg_bank);

	/* Sizing pass: nothing to do, entry consumes no buffer space. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* 16-bit data value is split across two template bytes. */
	data = CHAR_TO_SHORT(entry->data_l, entry->data_h);

	/* Window select, then bank select, then the data write. */
	WRT32_IO_REG(ha, io_base_addr, entry->addr);
	WRT_REG_DWORD(ha, ha->iobase + entry->bank_sel_offset, entry->reg_bank);
	WRT_REG_WORD(ha, reg, data);

	QL_PRINT_7(ha, "done\n");
}
22206 
/*
 * ql_2700_dt_rpci
 *	Processes a dump template "read PCI register" entry.  Captures the
 *	register address followed by 4 bytes read from the PCI-mapped
 *	register at iobase + entry->addr.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped.
 */
static int
ql_2700_dt_rpci(ql_adapter_state_t *ha, ql_dt_rpci_t *entry, uint8_t *dbuff,
    uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i;
	uint8_t		*bp = dbuff;
	uint8_t		*reg = (uint8_t *)ha->iobase + entry->addr;

	QL_PRINT_7(ha, "started, addr=%xh, reg=%ph\n", entry->addr,
	    (void *)reg);

	esize = 4;	/* addr */
	esize += 4;	/* data */

	/* Sizing pass: report space needed without touching hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Register address is stored little-endian, then the 4 data bytes. */
	*bp++ = LSB(LSW(entry->addr));
	*bp++ = MSB(LSW(entry->addr));
	*bp++ = LSB(MSW(entry->addr));
	*bp++ = MSB(MSW(entry->addr));
	for (i = 0; i < 4; i++) {
		*bp++ = RD_REG_BYTE(ha, reg++);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22244 
22245 static void
ql_2700_dt_wpci(ql_adapter_state_t * ha,ql_dt_wpci_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)22246 ql_2700_dt_wpci(ql_adapter_state_t *ha, ql_dt_wpci_t *entry,
22247     uint8_t *dbuff, uint8_t *dbuff_end)
22248 {
22249 	uint8_t	*reg = (uint8_t *)ha->iobase + entry->addr;
22250 
22251 	QL_PRINT_7(ha, "started, addr=%xh, data=%xh, reg=%ph\n",
22252 	    entry->addr, entry->data, (void *)reg);
22253 
22254 	if (dbuff == NULL) {
22255 		QL_PRINT_7(ha, "null buf done\n");
22256 		return;
22257 	}
22258 	if (dbuff >= dbuff_end) {
22259 		EL(ha, "skipped, no buffer space, needed=0\n");
22260 		entry->h.driver_flags = (uint8_t)
22261 		    (entry->h.driver_flags | SKIPPED_FLAG);
22262 		return;
22263 	}
22264 
22265 	WRT_REG_DWORD(ha, reg, entry->data);
22266 
22267 	QL_PRINT_7(ha, "done\n");
22268 }
22269 
/*
 * ql_2700_dt_rram
 *	Processes a dump template "read RISC RAM" entry.  Maps the
 *	template's ram_area code onto a firmware memory range held in the
 *	adapter state, then dumps that range via ql_2700_dump_ram().  The
 *	resolved start/end addresses are written back into the template
 *	entry so the dump records what was actually captured.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped/failed.
 */
static int
ql_2700_dt_rram(ql_adapter_state_t *ha, ql_dt_rram_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize, rval;
	uint32_t	start = entry->start_addr;
	uint32_t	end = entry->end_addr;

	QL_PRINT_7(ha, "started, buf=%ph, ram_area=%xh, start_addr=%xh, "
	    "end_addr=%xh\n", (void *)dbuff, entry->ram_area,
	    entry->start_addr, entry->end_addr);

	/*
	 * ram_area 1 uses the template's own range; 2/3/4 substitute
	 * the firmware memory ranges discovered at initialization.
	 */
	if (entry->ram_area == 2) {
		end = ha->fw_ext_memory_end;
	} else if (entry->ram_area == 3) {
		start = ha->fw_shared_ram_start;
		end = ha->fw_shared_ram_end;
	} else if (entry->ram_area == 4) {
		start = ha->fw_ddr_ram_start;
		end = ha->fw_ddr_ram_end;
	} else if (entry->ram_area != 1) {
		EL(ha, "skipped, unknown RAM_AREA %d\n", entry->ram_area);
		start = 0;
		end = 0;
	}
	/* Range is inclusive in dwords; byte size = (end - start + 1) * 4. */
	esize = end > start ? end - start : 0;
	if (esize) {
		esize = (esize + 1) * 4;
	}

	/* Sizing pass: report space needed without touching hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize == 0 || esize + dbuff >= dbuff_end) {
		if (esize != 0) {
			EL(ha, "skipped, no buffer space, needed=%xh\n",
			    esize);
		} else {
			/*EMPTY*/
			QL_PRINT_7(ha, "skipped, no ram_area=%xh, start=%xh, "
			    "end=%xh\n", entry->ram_area, start, end);
		}
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}
	/* Record the resolved range back into the template entry. */
	entry->end_addr = end;
	entry->start_addr = start;

	if ((rval = ql_2700_dump_ram(ha, MBC_DUMP_RAM_EXTENDED,
	    start, esize / 4, dbuff)) != QL_SUCCESS) {
		EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
		    "esize=0\n", rval, start, esize / 4);
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22330 
/*
 * ql_2700_dt_gque
 *	Processes a dump template "get queues" entry.  Captures the full
 *	contents of the request rings (queue_type 1) or all response rings
 *	(queue_type 2).  Each queue is emitted as a little-endian 16-bit
 *	queue number, 16-bit entry count, then the raw ring entries.
 *	queue_type 3 (ATIO) is not supported and is skipped.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped.
 */
static int
ql_2700_dt_gque(ql_adapter_state_t *ha, ql_dt_gque_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	cnt, q_cnt, e_cnt, i;
	uint8_t		*bp = dbuff, *dp;

	QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
	    (void *)dbuff, entry->num_queues, entry->queue_type);

	if (entry->queue_type == 1) {
		ql_request_q_t	*req_q;

		/* A second request queue exists when multiple rsp queues do. */
		e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* queue entries */

		/* queue size */
		esize += ha->req_q[0]->req_entry_cnt * REQUEST_ENTRY_SIZE;
		if (e_cnt > 1) {
			esize += ha->req_q[1]->req_entry_cnt *
			    REQUEST_ENTRY_SIZE;
		}

		/* Sizing pass: report space needed without copying. */
		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		/* Record actual queue count back into the template entry. */
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
			/* e_cnt is reused here as the per-queue entry count. */
			e_cnt = req_q->req_entry_cnt;
			dp = req_q->req_ring.bp;
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			for (cnt = 0; cnt < e_cnt; cnt++) {
				for (i = 0; i < REQUEST_ENTRY_SIZE; i++) {
					*bp++ = *dp++;
				}
			}
		}
	} else if (entry->queue_type == 2) {
		ql_response_q_t	*rsp_q;

		e_cnt = ha->rsp_queues_cnt;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* queue entries */

		/* queue size */
		for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			esize += rsp_q->rsp_entry_cnt * RESPONSE_ENTRY_SIZE;
		}

		/* Sizing pass: report space needed without copying. */
		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped2, no buffer space, needed=%xh\n",
			    esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			/* e_cnt is reused here as the per-queue entry count. */
			e_cnt = rsp_q->rsp_entry_cnt;
			dp = rsp_q->rsp_ring.bp;
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			for (cnt = 0; cnt < e_cnt; cnt++) {
				for (i = 0; i < RESPONSE_ENTRY_SIZE; i++) {
					*bp++ = *dp++;
				}
			}
		}
	} else if (entry->queue_type == 3) {
		QL_PRINT_7(ha, "skipped, no ATIO queue, esize=0\n");
		if (dbuff != NULL) {
			entry->num_queues = 0;
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	} else {
		EL(ha, "skipped, unknown queue_type %d, esize=0\n",
		    entry->queue_type);
		if (dbuff != NULL) {
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22443 
22444 /*ARGSUSED*/
22445 static int
ql_2700_dt_gfce(ql_adapter_state_t * ha,ql_dt_gfce_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)22446 ql_2700_dt_gfce(ql_adapter_state_t *ha, ql_dt_gfce_t *entry,
22447     uint8_t *dbuff, uint8_t *dbuff_end)
22448 {
22449 	QL_PRINT_7(ha, "started\n");
22450 
22451 	QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22452 	if (dbuff != NULL) {
22453 		entry->h.driver_flags = (uint8_t)
22454 		    (entry->h.driver_flags | SKIPPED_FLAG);
22455 	}
22456 
22457 	return (0);
22458 }
22459 
/*
 * ql_2700_dt_prisc
 *	Processes a dump template "pause RISC" entry.  Requests a RISC
 *	pause via the HCCR register and polls risc2host until the pause
 *	is acknowledged or the poll loop expires.  Captures no data.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 */
static void
ql_2700_dt_prisc(ql_adapter_state_t *ha, ql_dt_prisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t	timer;

	QL_PRINT_7(ha, "started\n");

	/* Sizing pass: nothing to do, entry consumes no buffer space. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll up to 30000 * 100us (~3s) for the pause to latch. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0;
		    timer--) {
			if (timer) {
				drv_usecwait(100);
				/* Periodic progress note once per second. */
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				/* Timed out; continue the dump regardless. */
				EL(ha, "risc pause timeout\n");
				break;
			}
		}
	}

	QL_PRINT_7(ha, "done\n");
}
22499 
/*
 * ql_2700_dt_rrisc
 *	Processes a dump template "reset RISC" entry.  Shuts down DMA,
 *	resets the ISP, waits for the ROM firmware and the reset bit to
 *	settle, then clears FIRMWARE_UP in the adapter state.  Captures
 *	no data.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 */
static void
ql_2700_dt_rrisc(ql_adapter_state_t *ha, ql_dt_rrisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t	timer;

	QL_PRINT_7(ha, "started\n");

	/* Sizing pass: nothing to do, entry consumes no buffer space. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done\n");
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Shutdown DMA. */
	WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN);

	/* Wait for DMA to stop. */
	for (timer = 0; timer < 30000; timer++) {
		if (!(RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Reset the chip. */
	WRT32_IO_REG(ha, ctrl_status, ISP_RESET);
	drv_usecwait(200);

	/* Wait for RISC to recover from reset. */
	for (timer = 30000; timer; timer--) {
		/* ROM status is reported in mailbox 0 after reset. */
		ha->rom_status = RD16_IO_REG(ha, mailbox_out[0]);
		if ((ha->rom_status & MBS_ROM_STATUS_MASK) != MBS_ROM_BUSY) {
			break;
		}
		drv_usecwait(100);
	}

	/* Wait for reset to finish. */
	for (timer = 30000; timer; timer--) {
		if (!(RD32_IO_REG(ha, ctrl_status) & ISP_RESET)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Firmware is no longer running after the reset. */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~FIRMWARE_UP;
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_7(ha, "done\n");
}
22557 
22558 static void
ql_2700_dt_dint(ql_adapter_state_t * ha,ql_dt_dint_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)22559 ql_2700_dt_dint(ql_adapter_state_t *ha, ql_dt_dint_t *entry,
22560     uint8_t *dbuff, uint8_t *dbuff_end)
22561 {
22562 	QL_PRINT_7(ha, "started, pci_offset=%xh, data=%xh\n",
22563 	    entry->pci_offset, entry->data);
22564 
22565 	if (dbuff == NULL) {
22566 		QL_PRINT_7(ha, "null buf done\n");
22567 		return;
22568 	}
22569 	if (dbuff >= dbuff_end) {
22570 		EL(ha, "skipped, no buffer space, needed=0\n");
22571 		entry->h.driver_flags = (uint8_t)
22572 		    (entry->h.driver_flags | SKIPPED_FLAG);
22573 		return;
22574 	}
22575 
22576 	ql_pci_config_put32(ha, entry->pci_offset, entry->data);
22577 
22578 	QL_PRINT_7(ha, "done\n");
22579 }
22580 
22581 /*ARGSUSED*/
22582 static int
ql_2700_dt_ghbd(ql_adapter_state_t * ha,ql_dt_ghbd_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)22583 ql_2700_dt_ghbd(ql_adapter_state_t *ha, ql_dt_ghbd_t *entry,
22584     uint8_t *dbuff, uint8_t *dbuff_end)
22585 {
22586 	QL_PRINT_7(ha, "started\n");
22587 
22588 	QL_PRINT_7(ha, "skipped, not supported\n");
22589 	if (dbuff != NULL) {
22590 		entry->h.driver_flags = (uint8_t)
22591 		    (entry->h.driver_flags | SKIPPED_FLAG);
22592 	}
22593 
22594 	return (0);
22595 }
22596 
22597 /*ARGSUSED*/
22598 static int
ql_2700_dt_scra(ql_adapter_state_t * ha,ql_dt_scra_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)22599 ql_2700_dt_scra(ql_adapter_state_t *ha, ql_dt_scra_t *entry,
22600     uint8_t *dbuff, uint8_t *dbuff_end)
22601 {
22602 	QL_PRINT_7(ha, "started\n");
22603 
22604 	QL_PRINT_7(ha, "skipped, not supported, esize=0\n");
22605 	if (dbuff != NULL) {
22606 		entry->h.driver_flags = (uint8_t)
22607 		    (entry->h.driver_flags | SKIPPED_FLAG);
22608 	}
22609 
22610 	return (0);
22611 }
22612 
/*
 * ql_2700_dt_rrreg
 *	Processes a dump template "read remote register" entry.  Uses the
 *	indirect access pair at iobase offsets 0xc0 (address, with bit 31
 *	set to trigger the read) and 0xc4 (data) under register window
 *	0x40, capturing (address, 32-bit data) pairs.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped.
 */
static int
ql_2700_dt_rrreg(ql_adapter_state_t *ha, ql_dt_rrreg_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i;
	uint8_t		*bp = dbuff;
	uint8_t		*reg = (uint8_t *)ha->iobase + 0xc4;
	uint32_t	addr = entry->addr;
	uint32_t	cnt = entry->count;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
	    (void *)dbuff, entry->addr, entry->count);

	esize = cnt * 4;	/* addr */
	esize += cnt * 4;	/* data */

	/* Sizing pass: report space needed without touching hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select the indirect-access register window. */
	WRT32_IO_REG(ha, io_base_addr, 0x40);
	while (cnt--) {
		/* Bit 31 of the address write latches the remote read. */
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, addr | 0x80000000);
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < 4; i++) {
			*bp++ = RD_REG_BYTE(ha, reg + i);
		}
		addr += 4;
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22657 
22658 static void
ql_2700_dt_wrreg(ql_adapter_state_t * ha,ql_dt_wrreg_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)22659 ql_2700_dt_wrreg(ql_adapter_state_t *ha, ql_dt_wrreg_t *entry,
22660     uint8_t *dbuff, uint8_t *dbuff_end)
22661 {
22662 	QL_PRINT_7(ha, "started, addr=%xh, data=%xh\n", entry->addr,
22663 	    entry->data);
22664 
22665 	if (dbuff == NULL) {
22666 		QL_PRINT_7(ha, "null buf done\n");
22667 		return;
22668 	}
22669 	if (dbuff >= dbuff_end) {
22670 		EL(ha, "skipped, no buffer space, needed=0\n");
22671 		entry->h.driver_flags = (uint8_t)
22672 		    (entry->h.driver_flags | SKIPPED_FLAG);
22673 		return;
22674 	}
22675 
22676 	WRT32_IO_REG(ha, io_base_addr, 0x40);
22677 	WRT_REG_DWORD(ha, ha->iobase + 0xc4, entry->data);
22678 	WRT_REG_DWORD(ha, ha->iobase + 0xc0, entry->addr);
22679 
22680 	QL_PRINT_7(ha, "done\n");
22681 }
22682 
/*
 * ql_2700_dt_rrram
 *	Processes a dump template "read RISC RAM (MPI)" entry.  Dumps
 *	entry->count words starting at entry->addr via ql_2700_dump_ram()
 *	using the MBC_MPI_RAM mailbox command.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped/failed.
 */
static int
ql_2700_dt_rrram(ql_adapter_state_t *ha, ql_dt_rrram_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int	rval, esize;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
	    (void *)dbuff, entry->addr, entry->count);

	esize = entry->count * 4;	/* data */

	/* Sizing pass: report space needed without touching hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	if ((rval = ql_2700_dump_ram(ha, MBC_MPI_RAM, entry->addr,
	    entry->count, dbuff)) != QL_SUCCESS) {
		EL(ha, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
		    "esize=0\n", rval, entry->addr, entry->count);
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22715 
/*
 * ql_2700_dt_rpcic
 *	Processes a dump template "read PCI configuration" entry.
 *	Captures (config-space address, 4 data bytes) pairs read via
 *	ql_pci_config_get8(), entry->count dwords starting at entry->addr.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped.
 */
static int
ql_2700_dt_rpcic(ql_adapter_state_t *ha, ql_dt_rpcic_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	i;
	uint8_t		*bp = dbuff;
	uint32_t	addr = entry->addr;
	uint32_t	cnt = entry->count;

	QL_PRINT_7(ha, "started, buf=%ph, addr=%xh, count=%xh\n",
	    (void *)dbuff, entry->addr, entry->count);

	esize = cnt * 4;	/* addr */
	esize += cnt * 4;	/* data */

	/* Sizing pass: report space needed without touching hardware. */
	if (dbuff == NULL) {
		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	while (cnt--) {
		/* Config address stored little-endian, then 4 data bytes. */
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < 4; i++) {
			*bp++ = ql_pci_config_get8(ha, addr++);
		}
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22756 
/*
 * ql_2700_dt_gques
 *	Processes a dump template "get queue shadows" entry.  Captures the
 *	DMA shadow pointer of each request queue (queue_type 1) or response
 *	queue (queue_type 2).  Each queue is emitted as a little-endian
 *	16-bit queue number, 16-bit shadow entry count (always 1), and the
 *	32-bit shadow value.  queue_type 3 (ATIO) is not supported.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	entry:		dump template entry pointer.
 *	dbuff:		data buffer pointer, NULL = size request only.
 *	dbuff_end:	end of data buffer pointer.
 *
 * Returns:
 *	Number of bytes captured (or needed), 0 = entry skipped.
 */
static int
ql_2700_dt_gques(ql_adapter_state_t *ha, ql_dt_gques_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int		esize;
	uint32_t	q_cnt, e_cnt, data;
	uint8_t		*bp = dbuff;

	QL_PRINT_7(ha, "started, buf=%ph, num_queues=%xh, queue_type=%xh\n",
	    (void *)dbuff, entry->num_queues, entry->queue_type);

	if (entry->queue_type == 1) {
		ql_request_q_t	*req_q;

		/* A second request queue exists when multiple rsp queues do. */
		e_cnt = ha->rsp_queues_cnt > 1 ? 2 : 1;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* shadow entries */

		/* shadow size */
		esize += SHADOW_ENTRY_SIZE;
		if (e_cnt > 1) {
			esize += SHADOW_ENTRY_SIZE;
		}
		/* Sizing pass: report space needed without copying. */
		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		/* Record actual queue count back into the template entry. */
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			req_q = q_cnt == 0 ? ha->req_q[0] : ha->req_q[1];
			e_cnt = 1;
			/* Read the out-pointer shadow via its DMA handle. */
			data = ddi_get32(req_q->req_ring.acc_handle,
			    req_q->req_out_shadow_ptr);
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			*bp++ = LSB(LSW(data));
			*bp++ = MSB(LSW(data));
			*bp++ = LSB(MSW(data));
			*bp++ = MSB(MSW(data));
		}
	} else if (entry->queue_type == 2) {
		ql_response_q_t	*rsp_q;

		e_cnt = ha->rsp_queues_cnt;
		esize = e_cnt * 2;	/* queue number */
		esize += e_cnt * 2;	/* shadow entries */

		/* shadow size */
		for (q_cnt = 0; q_cnt < ha->rsp_queues_cnt; q_cnt++) {
			esize += SHADOW_ENTRY_SIZE;
		}

		/* Sizing pass: report space needed without copying. */
		if (dbuff == NULL) {
			QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
			return (esize);
		}
		if (esize + dbuff >= dbuff_end) {
			EL(ha, "skipped2, no buffer space, needed=%xh\n",
			    esize);
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			return (0);
		}
		entry->num_queues = e_cnt;

		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
			rsp_q = ha->rsp_queues[q_cnt];
			e_cnt = 1;
			/* Read the in-pointer shadow via its DMA handle. */
			data = ddi_get32(rsp_q->rsp_ring.acc_handle,
			    rsp_q->rsp_in_shadow_ptr);
			*bp++ = LSB(q_cnt);
			*bp++ = MSB(q_cnt);
			*bp++ = LSB(e_cnt);
			*bp++ = MSB(e_cnt);
			*bp++ = LSB(LSW(data));
			*bp++ = MSB(LSW(data));
			*bp++ = LSB(MSW(data));
			*bp++ = MSB(MSW(data));
		}
	} else if (entry->queue_type == 3) {
		EL(ha, "skipped, no ATIO queue, esize=0\n");
		if (dbuff != NULL) {
			entry->num_queues = 0;
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	} else {
		EL(ha, "skipped, unknown queue_type %d, esize=0\n",
		    entry->queue_type);
		if (dbuff != NULL) {
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
		}
		return (0);
	}

	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
	return (esize);
}
22866 
22867 static int
ql_2700_dt_wdmp(ql_adapter_state_t * ha,ql_dt_wdmp_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)22868 ql_2700_dt_wdmp(ql_adapter_state_t *ha, ql_dt_wdmp_t *entry,
22869     uint8_t *dbuff, uint8_t *dbuff_end)
22870 {
22871 	int		esize;
22872 	uint8_t		*bp = dbuff;
22873 	uint32_t	data, cnt = entry->length, *dp = entry->data;
22874 
22875 	QL_PRINT_7(ha, "started, buf=%ph, length=%xh\n",
22876 	    (void *)dbuff, entry->length);
22877 
22878 	esize = cnt;
22879 	if (dbuff == NULL) {
22880 		QL_PRINT_7(ha, "null buf done, esize=%xh\n", esize);
22881 		return (esize);
22882 	}
22883 	if (esize + dbuff >= dbuff_end) {
22884 		EL(ha, "skipped, no buffer space, needed=%xh\n", esize);
22885 		entry->h.driver_flags = (uint8_t)
22886 		    (entry->h.driver_flags | SKIPPED_FLAG);
22887 		return (0);
22888 	}
22889 
22890 	while (cnt--) {
22891 		data = *dp++;
22892 		*bp++ = LSB(LSW(data));
22893 		*bp++ = MSB(LSW(data));
22894 		*bp++ = LSB(MSW(data));
22895 		*bp++ = MSB(MSW(data));
22896 	}
22897 	QL_PRINT_7(ha, "%s\n", dbuff);
22898 
22899 	QL_PRINT_7(ha, "done, esize=%xh\n", esize);
22900 	return (esize);
22901 }
22902 
22903 /*
22904  * ql_2700_dump_ram
22905  *	Dumps RAM.
22906  *	Risc interrupts must be disabled when this routine is called.
22907  *
22908  * Input:
22909  *	ha:		adapter state pointer.
22910  *	cmd:		MBC_DUMP_RAM_EXTENDED/MBC_MPI_RAM.
22911  *	risc_address:	RISC code start address.
22912  *	len:		Number of words.
22913  *	bp:		buffer pointer.
22914  *
22915  * Returns:
22916  *	ql local function return status code.
22917  *
22918  * Context:
22919  *	Interrupt or Kernel context, no mailbox commands allowed.
22920  */
static int
ql_2700_dump_ram(ql_adapter_state_t *ha, uint16_t cmd, uint32_t risc_address,
    uint32_t len, uint8_t *bp)
{
	dma_mem_t	mem;
	uint32_t	i, stat, timer;
	uint8_t		*dp;
	int		rval = QL_SUCCESS;

	QL_PRINT_7(ha, "started, cmd=%xh, risc_address=%xh, len=%xh, "
	    "bp=%ph\n", cmd, risc_address, len, (void *)bp);

	/* Allocate a DMA bounce buffer for the firmware to fill. */
	mem.size = len * 4;
	mem.type = LITTLE_ENDIAN_DMA;
	mem.max_cookie_count = 1;
	mem.alignment = 8;
	if ((rval = ql_alloc_phys(ha, &mem, KM_SLEEP)) != QL_SUCCESS) {
		EL(ha, "alloc status=%xh\n", rval);
		return (rval);
	}

	/*
	 * Load the mailbox registers directly (no mailbox framework —
	 * interrupts are disabled during dump): command, 32-bit RISC
	 * address split across mb1/mb8, 64-bit DMA address across
	 * mb2/mb3/mb6/mb7, and word count across mb4/mb5.
	 */
	WRT16_IO_REG(ha, mailbox_in[0], cmd);
	WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
	WRT16_IO_REG(ha, mailbox_in[2], MSW(LSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[3], LSW(LSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[4], MSW(len));
	WRT16_IO_REG(ha, mailbox_in[5], LSW(len));
	WRT16_IO_REG(ha, mailbox_in[6], MSW(MSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[7], LSW(MSD(mem.cookie.dmac_laddress)));
	WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
	if (cmd == MBC_MPI_RAM) {
		/* MPI RAM dumps require BIT_0 in mailbox 9. */
		WRT16_IO_REG(ha, mailbox_in[9], BIT_0);
	}

	/* Ring the doorbell and poll for mailbox command completion. */
	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
	for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
		stat = RD32_IO_REG(ha, risc2host);
		if (stat & RH_RISC_INT) {
			stat &= 0xff;
			/* 1/0x10 = command complete, 2/0x11 = error. */
			if ((stat == 1) || (stat == 0x10)) {
				break;
			} else if ((stat == 2) || (stat == 0x11)) {
				/* Mailbox 0 holds the failure status. */
				rval = RD16_IO_REG(ha, mailbox_out[0]);
				break;
			}
			/* Unrelated interrupt; clear it and keep polling. */
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		}
		drv_usecwait(5);
	}
	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);

	if (timer == 0) {
		QL_PRINT_7(ha, "timeout addr=%xh\n", risc_address);
		rval = QL_FUNCTION_TIMEOUT;
	} else {
		/* Sync DMA memory for CPU access, then copy to caller. */
		(void) ddi_dma_sync(mem.dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
		dp = mem.bp;
		for (i = 0; i < mem.size; i++) {
			*bp++ = *dp++;
		}
	}

	ql_free_phys(ha, &mem);

	QL_PRINT_7(ha, "done\n");
	return (rval);
}
22988