xref: /illumos-gate/usr/src/uts/common/io/comstar/port/qlt/qlt.c (revision 3f3ce7b9c6904157b36f09490c71d18c41312a06)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
29  * Use is subject to license terms.
30  */
31 
32 #include <sys/conf.h>
33 #include <sys/ddi.h>
34 #include <sys/stat.h>
35 #include <sys/pci.h>
36 #include <sys/sunddi.h>
37 #include <sys/modctl.h>
38 #include <sys/file.h>
39 #include <sys/cred.h>
40 #include <sys/byteorder.h>
41 #include <sys/atomic.h>
42 #include <sys/scsi/scsi.h>
43 
44 #include <stmf_defines.h>
45 #include <fct_defines.h>
46 #include <stmf.h>
47 #include <portif.h>
48 #include <fct.h>
49 #include <qlt.h>
50 #include <qlt_dma.h>
51 #include <qlt_ioctl.h>
52 #include <qlt_open.h>
53 #include <stmf_ioctl.h>
54 
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static void qlt_enable_intr(qlt_state_t *);
58 static void qlt_disable_intr(qlt_state_t *);
59 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
60 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
61 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
62     uint32_t word_count, uint32_t risc_addr);
63 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
64 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
65 					uint32_t dma_size);
66 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
67 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
68 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
69 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
70     stmf_state_change_info_t *ssci);
71 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
72 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
73 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
74 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
77 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
78 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
79     uint8_t *rsp);
80 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
81 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
82 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
83 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
84 static void qlt_verify_fw(qlt_state_t *qlt);
85 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
86 fct_status_t qlt_port_start(caddr_t arg);
87 fct_status_t qlt_port_stop(caddr_t arg);
88 fct_status_t qlt_port_online(qlt_state_t *qlt);
89 fct_status_t qlt_port_offline(qlt_state_t *qlt);
90 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
91     fct_link_info_t *li);
92 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
93 static fct_status_t qlt_force_lip(qlt_state_t *);
94 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
95 						fct_flogi_xchg_t *fx);
96 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
97 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
98 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
99     fct_remote_port_t *rp, fct_cmd_t *login);
100 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
101     fct_remote_port_t *rp);
102 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
103 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
104 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
105     fct_cmd_t *cmd, int terminate);
106 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
107 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
108 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
109     fct_cmd_t *cmd, uint32_t flags);
110 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
111 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
113 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
114 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
115 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
116 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
117     stmf_data_buf_t *dbuf, uint32_t ioflags);
118 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
119 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
120 static void qlt_release_intr(qlt_state_t *qlt);
121 static int qlt_setup_interrupts(qlt_state_t *qlt);
122 static void qlt_destroy_mutex(qlt_state_t *qlt);
123 
124 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
125     uint32_t words);
126 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
127     caddr_t buf, uint_t size_left);
128 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
129     caddr_t buf, uint_t size_left);
130 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
131     int count, uint_t size_left);
132 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
133     cred_t *credp, int *rval);
134 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
135 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
136 
137 static int qlt_setup_msi(qlt_state_t *qlt);
138 static int qlt_setup_msix(qlt_state_t *qlt);
139 
140 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
141 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
142 static int qlt_validate_trace_desc(qlt_state_t *qlt);
143 static char *qlt_find_trace_start(qlt_state_t *qlt);
144 
145 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
146 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
147 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
148     char **prop_val);
149 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
150 static int qlt_convert_string_to_ull(char *prop, int radix,
151     u_longlong_t *result);
152 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
153 static int qlt_quiesce(dev_info_t *dip);
154 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
155     uint32_t);
156 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
157     uint32_t *);
158 static void qlt_mps_reset(qlt_state_t *qlt);
159 static void qlt_properties(qlt_state_t *qlt);
160 
161 
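/*
 * SETELSBIT marks an ELS opcode as handled by the driver in a 32-byte
 * bitmap that is later passed to the firmware (see qlt_port_online()).
 * The byte index is ((els >> 3) & 0x1F) and the bit index is (els & 7);
 * e.g. ELS_OP_PLOGI (0x03) sets bit 3 of byte 0.
 */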
162 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
163 	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
164 
165 int qlt_enable_msix = 0;
166 int qlt_enable_msi = 1;
167 
168 
169 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
170 
171 /* Array to quickly calculate next free buf index to use */
172 #if 0
173 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
174 #endif
175 
176 static struct cb_ops qlt_cb_ops = {
177 	qlt_open,
178 	qlt_close,
179 	nodev,
180 	nodev,
181 	nodev,
182 	nodev,
183 	nodev,
184 	qlt_ioctl,
185 	nodev,
186 	nodev,
187 	nodev,
188 	nochpoll,
189 	ddi_prop_op,
190 	0,
191 	D_MP | D_NEW
192 };
193 
194 static struct dev_ops qlt_ops = {
195 	DEVO_REV,
196 	0,
197 	nodev,
198 	nulldev,
199 	nulldev,
200 	qlt_attach,
201 	qlt_detach,
202 	nodev,
203 	&qlt_cb_ops,
204 	NULL,
205 	ddi_power,
206 	qlt_quiesce
207 };
208 
209 #ifndef	PORT_SPEED_10G
210 #define	PORT_SPEED_10G		16
211 #endif
212 
213 static struct modldrv modldrv = {
214 	&mod_driverops,
215 	QLT_NAME" "QLT_VERSION,
216 	&qlt_ops,
217 };
218 
219 static struct modlinkage modlinkage = {
220 	MODREV_1, &modldrv, NULL
221 };
222 
223 void *qlt_state = NULL;
224 kmutex_t qlt_global_lock;
225 static uint32_t qlt_loaded_counter = 0;
226 
227 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
228 			"-X Mode 1 133", "--Invalid--",
229 			"-X Mode 2 66", "-X Mode 2 100",
230 			"-X Mode 2 133", " 66" };
231 
232 /* Always use 64 bit DMA. */
233 static ddi_dma_attr_t qlt_queue_dma_attr = {
234 	DMA_ATTR_V0,		/* dma_attr_version */
235 	0,			/* low DMA address range */
236 	0xffffffffffffffff,	/* high DMA address range */
237 	0xffffffff,		/* DMA counter register */
238 	64,			/* DMA address alignment */
239 	0xff,			/* DMA burstsizes */
240 	1,			/* min effective DMA size */
241 	0xffffffff,		/* max DMA xfer size */
242 	0xffffffff,		/* segment boundary */
243 	1,			/* s/g list length */
244 	1,			/* granularity of device */
245 	0			/* DMA transfer flags */
246 };
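/*
 * Note: a s/g list length of 1 means the queue memory must bind to a single
 * DMA cookie; qlt_attach() fails the attach if ddi_dma_addr_bind_handle()
 * returns more than one cookie.
 */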
247 
248 /* qlogic logging */
249 int enable_extended_logging = 0;
250 
251 static char qlt_provider_name[] = "qlt";
252 static struct stmf_port_provider *qlt_pp;
253 
254 int
255 _init(void)
256 {
257 	int ret;
258 
259 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
260 	if (ret == 0) {
261 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
262 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
263 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
264 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
265 		qlt_pp->pp_name = qlt_provider_name;
266 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
267 			stmf_free(qlt_pp);
268 			mutex_destroy(&qlt_global_lock);
269 			ddi_soft_state_fini(&qlt_state);
270 			return (EIO);
271 		}
272 		ret = mod_install(&modlinkage);
273 		if (ret != 0) {
274 			(void) stmf_deregister_port_provider(qlt_pp);
275 			stmf_free(qlt_pp);
276 			mutex_destroy(&qlt_global_lock);
277 			ddi_soft_state_fini(&qlt_state);
278 		}
279 	}
280 	return (ret);
281 }
282 
283 int
284 _fini(void)
285 {
286 	int ret;
287 
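	/*
	 * qlt_loaded_counter is bumped for every user-uploaded firmware
	 * image (QLT_IOCTL_UPLOAD_FW) and dropped on QLT_IOCTL_CLEAR_FW;
	 * refuse to unload while any such image is still held.
	 */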
288 	if (qlt_loaded_counter)
289 		return (EBUSY);
290 	ret = mod_remove(&modlinkage);
291 	if (ret == 0) {
292 		(void) stmf_deregister_port_provider(qlt_pp);
293 		stmf_free(qlt_pp);
294 		mutex_destroy(&qlt_global_lock);
295 		ddi_soft_state_fini(&qlt_state);
296 	}
297 	return (ret);
298 }
299 
300 int
301 _info(struct modinfo *modinfop)
302 {
303 	return (mod_info(&modlinkage, modinfop));
304 }
305 
306 
307 static int
308 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
309 {
310 	int		instance;
311 	qlt_state_t	*qlt;
312 	ddi_device_acc_attr_t	dev_acc_attr;
313 	uint16_t	did;
314 	uint16_t	val;
315 	uint16_t	mr;
316 	size_t		discard;
317 	uint_t		ncookies;
318 	int		max_read_size;
319 	int		max_payload_size;
320 	fct_status_t	ret;
321 
322 	/* No support for suspend/resume yet */
323 	if (cmd != DDI_ATTACH)
324 		return (DDI_FAILURE);
325 	instance = ddi_get_instance(dip);
326 
327 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
328 		return (DDI_FAILURE);
329 	}
330 
331 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
332 	    NULL) {
333 		goto attach_fail_1;
334 	}
335 
336 	qlt->instance = instance;
337 
338 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
339 	qlt->dip = dip;
340 
341 	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
342 		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
343 		goto attach_fail_1;
344 	}
345 
346 	EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);
347 
348 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
349 		goto attach_fail_2;
350 	}
351 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
352 	if ((did != 0x2422) && (did != 0x2432) &&
353 	    (did != 0x8432) && (did != 0x2532) &&
354 	    (did != 0x8001)) {
355 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
356 		    instance, did);
357 		goto attach_fail_4;
358 	}
359 
360 	if ((did & 0xFF00) == 0x8000)
361 		qlt->qlt_81xx_chip = 1;
362 	else if ((did & 0xFF00) == 0x2500)
363 		qlt->qlt_25xx_chip = 1;
364 
365 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
366 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
367 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
368 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
369 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
370 		goto attach_fail_4;
371 	}
372 	if (did == 0x2422) {
373 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
374 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
375 		pci_bits >>= 8;
376 		pci_bits &= 0xf;
377 		if ((pci_bits == 3) || (pci_bits == 7)) {
378 			cmn_err(CE_NOTE,
379 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
380 			    instance, pci_speeds[pci_bits], pci_bits);
381 		} else {
382 			cmn_err(CE_WARN,
383 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
384 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
385 			    "(Invalid)", ((pci_bits == 0) ||
386 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
387 			    "32 bit slot ") : "", pci_bits);
388 		}
389 	}
390 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
391 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
392 		    (unsigned long long)ret);
393 		goto attach_fail_5;
394 	}
395 
396 	qlt_properties(qlt);
397 
398 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
399 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
400 		goto attach_fail_5;
401 	}
402 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
403 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
404 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
405 	    DDI_SUCCESS) {
406 		goto attach_fail_6;
407 	}
408 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
409 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
410 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
411 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
412 		goto attach_fail_7;
413 	}
414 	if (ncookies != 1)
415 		goto attach_fail_8;
416 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
417 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
418 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
419 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
420 
421 	/* mutexes are initialized in this function */
422 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
423 		goto attach_fail_8;
424 
425 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
426 	    "qlt%d", instance);
427 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
428 	    "%s,0", qlt->qlt_minor_name);
429 
430 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
431 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
432 		goto attach_fail_9;
433 	}
434 
435 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
436 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
437 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
438 
439 	/* Setup PCI cfg space registers */
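	/*
	 * The defaults of 11 below are sentinel values; if the qlt.conf
	 * property is absent, the hardware defaults are left untouched.
	 * For the PCIe chips the writes to config offset 0x54 presumably
	 * target the PCIe Device Control register (max read request size
	 * in bits 14:12, max payload size in bits 7:5); offset 0x4E is a
	 * 2422-specific register with the read size encoding in bits 3:2.
	 */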
440 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
441 	if (max_read_size == 11)
442 		goto over_max_read_xfer_setting;
443 	if (did == 0x2422) {
444 		if (max_read_size == 512)
445 			val = 0;
446 		else if (max_read_size == 1024)
447 			val = 1;
448 		else if (max_read_size == 2048)
449 			val = 2;
450 		else if (max_read_size == 4096)
451 			val = 3;
452 		else {
453 			cmn_err(CE_WARN, "qlt(%d) malformed "
454 			    "pci-max-read-request in qlt.conf. Valid values "
455 			    "for this HBA are 512/1024/2048/4096", instance);
456 			goto over_max_read_xfer_setting;
457 		}
458 		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
459 		mr = (uint16_t)(mr & 0xfff3);
460 		mr = (uint16_t)(mr | (val << 2));
461 		PCICFG_WR16(qlt, 0x4E, mr);
462 	} else if ((did == 0x2432) || (did == 0x8432) ||
463 	    (did == 0x2532) || (did == 0x8001)) {
464 		if (max_read_size == 128)
465 			val = 0;
466 		else if (max_read_size == 256)
467 			val = 1;
468 		else if (max_read_size == 512)
469 			val = 2;
470 		else if (max_read_size == 1024)
471 			val = 3;
472 		else if (max_read_size == 2048)
473 			val = 4;
474 		else if (max_read_size == 4096)
475 			val = 5;
476 		else {
477 			cmn_err(CE_WARN, "qlt(%d) malformed "
478 			    "pci-max-read-request in qlt.conf. Valid values "
479 			    "for this HBA are 128/256/512/1024/2048/4096",
480 			    instance);
481 			goto over_max_read_xfer_setting;
482 		}
483 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
484 		mr = (uint16_t)(mr & 0x8fff);
485 		mr = (uint16_t)(mr | (val << 12));
486 		PCICFG_WR16(qlt, 0x54, mr);
487 	} else {
488 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
489 		    "pci-max-read-request for this device (%x)",
490 		    instance, did);
491 	}
492 over_max_read_xfer_setting:;
493 
494 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
495 	if (max_payload_size == 11)
496 		goto over_max_payload_setting;
497 	if ((did == 0x2432) || (did == 0x8432) ||
498 	    (did == 0x2532) || (did == 0x8001)) {
499 		if (max_payload_size == 128)
500 			val = 0;
501 		else if (max_payload_size == 256)
502 			val = 1;
503 		else if (max_payload_size == 512)
504 			val = 2;
505 		else if (max_payload_size == 1024)
506 			val = 3;
507 		else {
508 			cmn_err(CE_WARN, "qlt(%d) malformed "
509 			    "pcie-max-payload-size in qlt.conf. Valid values "
510 			    "for this HBA are 128/256/512/1024",
511 			    instance);
512 			goto over_max_payload_setting;
513 		}
514 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
515 		mr = (uint16_t)(mr & 0xff1f);
516 		mr = (uint16_t)(mr | (val << 5));
517 		PCICFG_WR16(qlt, 0x54, mr);
518 	} else {
519 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
520 		    "pcie-max-payload-size for this device (%x)",
521 		    instance, did);
522 	}
523 
524 over_max_payload_setting:;
525 
526 	qlt_enable_intr(qlt);
527 
528 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
529 		goto attach_fail_10;
530 
531 	ddi_report_dev(dip);
532 	return (DDI_SUCCESS);
533 
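/*
 * Error unwind: each attach_fail_N label tears down everything that was
 * set up before the corresponding failure point, in reverse order.
 */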
534 attach_fail_10:;
535 	mutex_destroy(&qlt->qlt_ioctl_lock);
536 	cv_destroy(&qlt->mbox_cv);
537 	cv_destroy(&qlt->rp_dereg_cv);
538 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
539 attach_fail_9:;
540 	qlt_destroy_mutex(qlt);
541 	qlt_release_intr(qlt);
542 attach_fail_8:;
543 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
544 attach_fail_7:;
545 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
546 attach_fail_6:;
547 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
548 attach_fail_5:;
549 	ddi_regs_map_free(&qlt->regs_acc_handle);
550 attach_fail_4:;
551 	pci_config_teardown(&qlt->pcicfg_acc_handle);
552 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
553 	(void) qlt_el_trace_desc_dtor(qlt);
554 attach_fail_2:;
555 attach_fail_1:;
556 	ddi_soft_state_free(qlt_state, instance);
557 	return (DDI_FAILURE);
558 }
559 
560 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
561 
562 /* ARGSUSED */
563 static int
564 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
565 {
566 	qlt_state_t *qlt;
567 
568 	int instance;
569 
570 	instance = ddi_get_instance(dip);
571 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
572 	    NULL) {
573 		return (DDI_FAILURE);
574 	}
575 
576 	if (qlt->fw_code01) {
577 		return (DDI_FAILURE);
578 	}
579 
580 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
581 	    qlt->qlt_state_not_acked) {
582 		return (DDI_FAILURE);
583 	}
584 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
585 		return (DDI_FAILURE);
586 	}
587 
588 	qlt_disable_intr(qlt);
589 
590 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
591 	qlt_destroy_mutex(qlt);
592 	qlt_release_intr(qlt);
593 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
594 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
595 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
596 	ddi_regs_map_free(&qlt->regs_acc_handle);
597 	pci_config_teardown(&qlt->pcicfg_acc_handle);
598 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
599 	cv_destroy(&qlt->mbox_cv);
600 	cv_destroy(&qlt->rp_dereg_cv);
601 	(void) qlt_el_trace_desc_dtor(qlt);
602 	ddi_soft_state_free(qlt_state, instance);
603 
604 	return (DDI_SUCCESS);
605 }
606 
607 /*
608  * qlt_quiesce	quiesce a device attached to the system.
609  */
610 static int
611 qlt_quiesce(dev_info_t *dip)
612 {
613 	qlt_state_t	*qlt;
614 	uint32_t	timer;
615 	uint32_t	stat;
616 
617 	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
618 	if (qlt == NULL) {
619 		/* Oh well.... */
620 		return (DDI_SUCCESS);
621 	}
622 
623 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
624 	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
625 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
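	/*
	 * Poll up to 30000 * 100us (~3 seconds) for the RISC to post the
	 * mailbox completion interrupt before forcing the soft reset below.
	 */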
626 	for (timer = 0; timer < 30000; timer++) {
627 		stat = REG_RD32(qlt, REG_RISC_STATUS);
628 		if (stat & RISC_HOST_INTR_REQUEST) {
629 			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
630 				REG_WR32(qlt, REG_HCCR,
631 				    HCCR_CMD(CLEAR_RISC_PAUSE));
632 				break;
633 			}
634 			REG_WR32(qlt, REG_HCCR,
635 			    HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
636 		}
637 		drv_usecwait(100);
638 	}
639 	/* Reset the chip. */
640 	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
641 	    PCI_X_XFER_CTRL);
642 	drv_usecwait(100);
643 
644 	qlt_disable_intr(qlt);
645 
646 	return (DDI_SUCCESS);
647 }
648 
649 static void
650 qlt_enable_intr(qlt_state_t *qlt)
651 {
652 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
653 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
654 	} else {
655 		int i;
656 		for (i = 0; i < qlt->intr_cnt; i++)
657 			(void) ddi_intr_enable(qlt->htable[i]);
658 	}
659 	qlt->qlt_intr_enabled = 1;
660 }
661 
662 static void
663 qlt_disable_intr(qlt_state_t *qlt)
664 {
665 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
666 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
667 	} else {
668 		int i;
669 		for (i = 0; i < qlt->intr_cnt; i++)
670 			(void) ddi_intr_disable(qlt->htable[i]);
671 	}
672 	qlt->qlt_intr_enabled = 0;
673 }
674 
675 static void
676 qlt_release_intr(qlt_state_t *qlt)
677 {
678 	if (qlt->htable) {
679 		int i;
680 		for (i = 0; i < qlt->intr_cnt; i++) {
681 			(void) ddi_intr_remove_handler(qlt->htable[i]);
682 			(void) ddi_intr_free(qlt->htable[i]);
683 		}
684 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
685 	}
686 	qlt->htable = NULL;
687 	qlt->intr_pri = 0;
688 	qlt->intr_cnt = 0;
689 	qlt->intr_size = 0;
690 	qlt->intr_cap = 0;
691 }
692 
693 
694 static void
695 qlt_init_mutex(qlt_state_t *qlt)
696 {
697 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
698 	    INT2PTR(qlt->intr_pri, void *));
699 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
700 	    INT2PTR(qlt->intr_pri, void *));
701 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
702 	    INT2PTR(qlt->intr_pri, void *));
703 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
704 	    INT2PTR(qlt->intr_pri, void *));
705 }
706 
707 static void
708 qlt_destroy_mutex(qlt_state_t *qlt)
709 {
710 	mutex_destroy(&qlt->req_lock);
711 	mutex_destroy(&qlt->preq_lock);
712 	mutex_destroy(&qlt->mbox_lock);
713 	mutex_destroy(&qlt->intr_lock);
714 }
715 
716 
717 static int
718 qlt_setup_msix(qlt_state_t *qlt)
719 {
720 	int count, avail, actual;
721 	int ret;
722 	int itype = DDI_INTR_TYPE_MSIX;
723 	int i;
724 
725 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
726 	if (ret != DDI_SUCCESS || count == 0) {
727 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
728 		    count);
729 		return (DDI_FAILURE);
730 	}
731 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
732 	if (ret != DDI_SUCCESS || avail == 0) {
733 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
734 		    avail);
735 		return (DDI_FAILURE);
736 	}
737 	if (avail < count) {
738 		stmf_trace(qlt->qlt_port_alias,
739 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
740 	}
741 
742 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
743 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
744 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
745 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
746 	/* we need at least 2 interrupt vectors */
747 	if (ret != DDI_SUCCESS || actual < 2) {
748 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
749 		    actual);
750 		ret = DDI_FAILURE;
751 		goto release_intr;
752 	}
753 	if (actual < count) {
754 		EL(qlt, "requested: %d, received: %d\n", count, actual);
755 	}
756 
757 	qlt->intr_cnt = actual;
758 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
759 	if (ret != DDI_SUCCESS) {
760 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
761 		ret = DDI_FAILURE;
762 		goto release_intr;
763 	}
764 	qlt_init_mutex(qlt);
765 	for (i = 0; i < actual; i++) {
766 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
767 		    qlt, INT2PTR((uint_t)i, void *));
768 		if (ret != DDI_SUCCESS) {
769 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
770 			goto release_mutex;
771 		}
772 	}
773 
774 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
775 	qlt->intr_flags |= QLT_INTR_MSIX;
776 	return (DDI_SUCCESS);
777 
778 release_mutex:
779 	qlt_destroy_mutex(qlt);
780 release_intr:
781 	for (i = 0; i < actual; i++)
782 		(void) ddi_intr_free(qlt->htable[i]);
783 #if 0
784 free_mem:
785 #endif
786 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
787 	qlt->htable = NULL;
788 	qlt_release_intr(qlt);
789 	return (ret);
790 }
791 
792 
793 static int
794 qlt_setup_msi(qlt_state_t *qlt)
795 {
796 	int count, avail, actual;
797 	int itype = DDI_INTR_TYPE_MSI;
798 	int ret;
799 	int i;
800 
801 	/* get the # of interrupts */
802 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
803 	if (ret != DDI_SUCCESS || count == 0) {
804 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
805 		    count);
806 		return (DDI_FAILURE);
807 	}
808 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
809 	if (ret != DDI_SUCCESS || avail == 0) {
810 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
811 		    avail);
812 		return (DDI_FAILURE);
813 	}
814 	if (avail < count) {
815 		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
816 	}
817 	/* MSI requires only 1 interrupt. */
818 	count = 1;
819 
820 	/* allocate interrupt */
821 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
822 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
823 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
824 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
825 	if (ret != DDI_SUCCESS || actual == 0) {
826 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
827 		    actual);
828 		ret = DDI_FAILURE;
829 		goto free_mem;
830 	}
831 	if (actual < count) {
832 		EL(qlt, "requested: %d, received: %d\n", count, actual);
833 	}
834 	qlt->intr_cnt = actual;
835 
836 	/*
837 	 * Get priority for first msi, assume remaining are all the same.
838 	 */
839 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
840 	if (ret != DDI_SUCCESS) {
841 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
842 		ret = DDI_FAILURE;
843 		goto release_intr;
844 	}
845 	qlt_init_mutex(qlt);
846 
847 	/* add handler */
848 	for (i = 0; i < actual; i++) {
849 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
850 		    qlt, INT2PTR((uint_t)i, void *));
851 		if (ret != DDI_SUCCESS) {
852 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
853 			goto release_mutex;
854 		}
855 	}
856 
857 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
858 	qlt->intr_flags |= QLT_INTR_MSI;
859 	return (DDI_SUCCESS);
860 
861 release_mutex:
862 	qlt_destroy_mutex(qlt);
863 release_intr:
864 	for (i = 0; i < actual; i++)
865 		(void) ddi_intr_free(qlt->htable[i]);
866 free_mem:
867 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
868 	qlt->htable = NULL;
869 	qlt_release_intr(qlt);
870 	return (ret);
871 }
872 
873 static int
874 qlt_setup_fixed(qlt_state_t *qlt)
875 {
876 	int count;
877 	int actual;
878 	int ret;
879 	int itype = DDI_INTR_TYPE_FIXED;
880 
881 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
882 	/* The FIXED interrupt type provides exactly one interrupt. */
883 	if (ret != DDI_SUCCESS || count != 1) {
884 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
885 		    count);
886 		return (DDI_FAILURE);
887 	}
888 
889 	qlt->intr_size = sizeof (ddi_intr_handle_t);
890 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
891 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
892 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
893 	if (ret != DDI_SUCCESS || actual != 1) {
894 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
895 		    actual);
896 		ret = DDI_FAILURE;
897 		goto free_mem;
898 	}
899 
900 	qlt->intr_cnt = actual;
901 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
902 	if (ret != DDI_SUCCESS) {
903 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
904 		ret = DDI_FAILURE;
905 		goto release_intr;
906 	}
907 	qlt_init_mutex(qlt);
908 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
909 	if (ret != DDI_SUCCESS) {
910 		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
911 		goto release_mutex;
912 	}
913 
914 	qlt->intr_flags |= QLT_INTR_FIXED;
915 	return (DDI_SUCCESS);
916 
917 release_mutex:
918 	qlt_destroy_mutex(qlt);
919 release_intr:
920 	(void) ddi_intr_free(qlt->htable[0]);
921 free_mem:
922 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
923 	qlt->htable = NULL;
924 	qlt_release_intr(qlt);
925 	return (ret);
926 }
927 
928 static int
929 qlt_setup_interrupts(qlt_state_t *qlt)
930 {
931 	int itypes = 0;
932 
933 /*
934  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
935  */
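/*
 * Interrupt type selection falls back in order: MSI-X (if qlt_enable_msix
 * is set), then MSI, then legacy fixed interrupts.
 */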
936 #ifndef __sparc
937 	if (qlt_enable_msi != 0) {
938 #endif
939 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
940 		itypes = DDI_INTR_TYPE_FIXED;
941 	}
942 
943 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
944 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
945 			return (DDI_SUCCESS);
946 	}
947 
948 	if (itypes & DDI_INTR_TYPE_MSI) {
949 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
950 			return (DDI_SUCCESS);
951 	}
952 #ifndef __sparc
953 	}
954 #endif
955 	return (qlt_setup_fixed(qlt));
956 }
957 
958 /*
959  * Fill in the HBA attributes
960  */
961 void
962 qlt_populate_hba_fru_details(struct fct_local_port *port,
963     struct fct_port_attrs *port_attrs)
964 {
965 	caddr_t	bufp;
966 	int len;
967 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
968 
969 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
970 	    "QLogic Corp.");
971 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
972 	    "%s", QLT_NAME);
973 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
974 	    "%s", QLT_VERSION);
975 	port_attrs->serial_number[0] = '\0';
976 	port_attrs->hardware_version[0] = '\0';
977 
978 	(void) snprintf(port_attrs->firmware_version,
979 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
980 	    qlt->fw_minor, qlt->fw_subminor);
981 
982 	/* Get FCode version */
983 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
984 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
985 	    (int *)&len) == DDI_PROP_SUCCESS) {
986 		(void) snprintf(port_attrs->option_rom_version,
987 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
988 		kmem_free(bufp, (uint_t)len);
989 		bufp = NULL;
990 	} else {
991 #ifdef __sparc
992 		(void) snprintf(port_attrs->option_rom_version,
993 		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
994 #else
995 		(void) snprintf(port_attrs->option_rom_version,
996 		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
997 #endif
998 	}
999 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
1000 	    qlt->nvram->subsystem_vendor_id[1] << 8;
1001 
1002 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1003 	    qlt->nvram->max_frame_length[0];
1004 
1005 	port_attrs->supported_cos = 0x10000000;
1006 	port_attrs->supported_speed = PORT_SPEED_1G |
1007 	    PORT_SPEED_2G | PORT_SPEED_4G;
1008 	if (qlt->qlt_25xx_chip)
1009 		port_attrs->supported_speed |= PORT_SPEED_8G;
1010 	if (qlt->qlt_81xx_chip)
1011 		port_attrs->supported_speed = PORT_SPEED_10G;
1012 
1013 	/* limit string length to nvr model_name length */
1014 	len = (qlt->qlt_81xx_chip) ? 16 : 8;
1015 	(void) snprintf(port_attrs->model,
1016 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1017 	    "%s", qlt->nvram->model_name);
1018 
1019 	(void) snprintf(port_attrs->model_description,
1020 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1021 	    FCHBA_MODEL_DESCRIPTION_LEN),
1022 	    "%s", qlt->nvram->model_name);
1023 }
1024 
1025 /* ARGSUSED */
1026 fct_status_t
1027 qlt_info(uint32_t cmd, fct_local_port_t *port,
1028     void *arg, uint8_t *buf, uint32_t *bufsizep)
1029 {
1030 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
1031 	mbox_cmd_t	*mcp;
1032 	fct_status_t	ret = FCT_SUCCESS;
1033 	uint8_t		*p;
1034 	fct_port_link_status_t	*link_status;
1035 
1036 	switch (cmd) {
1037 	case FC_TGT_PORT_RLS:
1038 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1039 			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1040 			    "fct_port_link_status_t=%xh\n", *bufsizep,
1041 			    sizeof (fct_port_link_status_t));
1042 			ret = FCT_FAILURE;
1043 			break;
1044 		}
1045 		/* send mailbox command to get link status */
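		/*
		 * The link status block is 156 bytes; mailbox 8 is loaded
		 * with the buffer size in 32-bit words (156 / 4).
		 */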
1046 		mcp = qlt_alloc_mailbox_command(qlt, 156);
1047 		if (mcp == NULL) {
1048 			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1049 			ret = FCT_ALLOC_FAILURE;
1050 			break;
1051 		}
1052 
1053 		/* GET LINK STATUS count */
1054 		mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
1055 		mcp->to_fw[8] = 156/4;
1056 		mcp->to_fw_mask |= BIT_1 | BIT_8;
1057 		mcp->from_fw_mask |= BIT_1 | BIT_2;
1058 
1059 		ret = qlt_mailbox_command(qlt, mcp);
1060 		if (ret != QLT_SUCCESS) {
1061 			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1062 			qlt_free_mailbox_command(qlt, mcp);
1063 			break;
1064 		}
1065 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1066 
1067 		p = mcp->dbuf->db_sglist[0].seg_addr;
1068 		link_status = (fct_port_link_status_t *)buf;
1069 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1070 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1071 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1072 		link_status->PrimitiveSeqProtocolErrorCount =
1073 		    LE_32(*((uint32_t *)(p + 12)));
1074 		link_status->InvalidTransmissionWordCount =
1075 		    LE_32(*((uint32_t *)(p + 16)));
1076 		link_status->InvalidCRCCount =
1077 		    LE_32(*((uint32_t *)(p + 20)));
1078 
1079 		qlt_free_mailbox_command(qlt, mcp);
1080 		break;
1081 	default:
1082 		EL(qlt, "Unknown cmd=%xh\n", cmd);
1083 		ret = FCT_FAILURE;
1084 		break;
1085 	}
1086 	return (ret);
1087 }
1088 
1089 fct_status_t
1090 qlt_port_start(caddr_t arg)
1091 {
1092 	qlt_state_t *qlt = (qlt_state_t *)arg;
1093 	fct_local_port_t *port;
1094 	fct_dbuf_store_t *fds;
1095 	fct_status_t ret;
1096 
1097 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1098 		return (FCT_FAILURE);
1099 	}
1100 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1101 	if (port == NULL) {
1102 		goto qlt_pstart_fail_1;
1103 	}
1104 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1105 	if (fds == NULL) {
1106 		goto qlt_pstart_fail_2;
1107 	}
1108 	qlt->qlt_port = port;
1109 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
1110 	fds->fds_free_data_buf = qlt_dmem_free;
1111 	fds->fds_fca_private = (void *)qlt;
1112 	/*
1113 	 * Since we keep everything in the state struct and don't allocate any
1114 	 * port private area, just use that pointer to point to the
1115 	 * state struct.
1116 	 */
1117 	port->port_fca_private = qlt;
1118 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
1119 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1120 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1121 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1122 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1123 	port->port_default_alias = qlt->qlt_port_alias;
1124 	port->port_pp = qlt_pp;
1125 	port->port_fds = fds;
1126 	port->port_max_logins = QLT_MAX_LOGINS;
1127 	port->port_max_xchges = QLT_MAX_XCHGES;
1128 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1129 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1130 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1131 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1132 	port->port_get_link_info = qlt_get_link_info;
1133 	port->port_register_remote_port = qlt_register_remote_port;
1134 	port->port_deregister_remote_port = qlt_deregister_remote_port;
1135 	port->port_send_cmd = qlt_send_cmd;
1136 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1137 	port->port_send_cmd_response = qlt_send_cmd_response;
1138 	port->port_abort_cmd = qlt_abort_cmd;
1139 	port->port_ctl = qlt_ctl;
1140 	port->port_flogi_xchg = qlt_do_flogi;
1141 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1142 	port->port_info = qlt_info;
1143 	port->port_fca_version = FCT_FCA_MODREV_1;
1144 
1145 	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1146 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1147 		goto qlt_pstart_fail_2_5;
1148 	}
1149 
1150 	return (QLT_SUCCESS);
1151 #if 0
1152 qlt_pstart_fail_3:
1153 	(void) fct_deregister_local_port(port);
1154 #endif
1155 qlt_pstart_fail_2_5:
1156 	fct_free(fds);
1157 qlt_pstart_fail_2:
1158 	fct_free(port);
1159 	qlt->qlt_port = NULL;
1160 qlt_pstart_fail_1:
1161 	qlt_dmem_fini(qlt);
1162 	return (QLT_FAILURE);
1163 }
1164 
1165 fct_status_t
1166 qlt_port_stop(caddr_t arg)
1167 {
1168 	qlt_state_t *qlt = (qlt_state_t *)arg;
1169 	fct_status_t ret;
1170 
1171 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1172 		EL(qlt, "fct_deregister_local_port status=%llxh\n", ret);
1173 		return (QLT_FAILURE);
1174 	}
1175 	fct_free(qlt->qlt_port->port_fds);
1176 	fct_free(qlt->qlt_port);
1177 	qlt->qlt_port = NULL;
1178 	qlt_dmem_fini(qlt);
1179 	return (QLT_SUCCESS);
1180 }
1181 
1182 /*
1183  * Called by the framework to initialize the HBA.
1184  * Can be called in the middle of I/O. (Why ??)
1185  * Should make sure the state is sane both before and after initialization.
1186  */
1187 fct_status_t
1188 qlt_port_online(qlt_state_t *qlt)
1189 {
1190 	uint64_t	da;
1191 	int		instance, i;
1192 	fct_status_t	ret;
1193 	uint16_t	rcount;
1194 	caddr_t		icb;
1195 	mbox_cmd_t	*mcp;
1196 	uint8_t		*elsbmp;
1197 
1198 	instance = ddi_get_instance(qlt->dip);
1199 
1200 	/* XXX Make sure a sane state */
1201 
1202 	if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
1203 		cmn_err(CE_NOTE, "fw download failed %llx", (long long)ret);
1204 		return (ret);
1205 	}
1206 
1207 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1208 
1209 	/* Get resource count */
1210 	REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
1211 	ret = qlt_raw_mailbox_command(qlt);
1212 	rcount = REG_RD16(qlt, REG_MBOX(3));
1213 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1214 	if (ret != QLT_SUCCESS) {
1215 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1216 		return (ret);
1217 	}
1218 
1219 	/* Enable PUREX */
1220 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
1221 	REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
1222 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1223 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1224 	ret = qlt_raw_mailbox_command(qlt);
1225 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1226 	if (ret != QLT_SUCCESS) {
1227 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1228 		cmn_err(CE_NOTE, "Enable PUREX failed");
1229 		return (ret);
1230 	}
1231 
1232 	/* Pass ELS bitmap to fw */
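	/*
	 * The 32-byte bitmap lives in the pre-allocated queue DMA area at
	 * MBOX_DMA_MEM_OFFSET; its 64-bit physical address is split across
	 * mailbox registers 2/3 (low 32 bits) and 6/7 (high 32 bits).
	 */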
1233 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
1234 	REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
1235 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1236 	bzero(elsbmp, 32);
1237 	da = qlt->queue_mem_cookie.dmac_laddress;
1238 	da += MBOX_DMA_MEM_OFFSET;
1239 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
1240 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
1241 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
1242 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
1243 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1244 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1245 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1246 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1247 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1248 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1249 	SETELSBIT(elsbmp, ELS_OP_SCN);
1250 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1251 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1252 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1253 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1254 	SETELSBIT(elsbmp, ELS_OP_RNID);
1255 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1256 	    DDI_DMA_SYNC_FORDEV);
1257 	ret = qlt_raw_mailbox_command(qlt);
1258 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1259 	if (ret != QLT_SUCCESS) {
1260 		EL(qlt, "qlt_raw_mailbox_command=59h status=%llxh\n", ret);
1261 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1262 		    "elsbmp0=%x elsbmp1=%x", (long long)ret, elsbmp[0],
1263 		    elsbmp[1]);
1264 		return (ret);
1265 	}
1266 
1267 	/* Init queue pointers */
1268 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1269 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1270 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1271 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1272 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1273 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1274 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1275 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1276 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1277 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1278 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1279 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1280 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1281 
1282 	/*
1283 	 * XXX support for tunables. Also should we cache icb ?
1284 	 */
1285 	if (qlt->qlt_81xx_chip) {
1286 		/* allocate extra 64 bytes for Extended init control block */
1287 		mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1288 	} else {
1289 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1290 	}
1291 	if (mcp == NULL) {
1292 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1293 		return (STMF_ALLOC_FAILURE);
1294 	}
1295 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1296 	if (qlt->qlt_81xx_chip) {
1297 		bzero(icb, 0xC0);
1298 	} else {
1299 		bzero(icb, 0x80);
1300 	}
1301 	da = qlt->queue_mem_cookie.dmac_laddress;
1302 	DMEM_WR16(qlt, icb, 1);		/* Version */
1303 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1304 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1305 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1306 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1307 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1308 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1309 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1310 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1311 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1312 	if (!qlt->qlt_81xx_chip) {
1313 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1314 	}
1315 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1316 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1317 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1318 	DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1319 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1320 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1321 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1322 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1323 	if (qlt->qlt_81xx_chip) {
1324 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1325 
1326 		DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1327 		DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1328 		DMEM_WR32(qlt, icb+0x70,
1329 		    qlt81nvr->enode_mac[0] |
1330 		    (qlt81nvr->enode_mac[1] << 8) |
1331 		    (qlt81nvr->enode_mac[2] << 16) |
1332 		    (qlt81nvr->enode_mac[3] << 24));
1333 		DMEM_WR16(qlt, icb+0x74,
1334 		    qlt81nvr->enode_mac[4] |
1335 		    (qlt81nvr->enode_mac[5] << 8));
1336 	} else {
1337 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1338 		    BIT_2 | BIT_1 | BIT_0);
1339 		DMEM_WR32(qlt, icb+0x60, BIT_5);
1340 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1341 		    BIT_4);
1342 	}
1343 
1344 	if (qlt->qlt_81xx_chip) {
1345 		qlt_dmem_bctl_t		*bctl;
1346 		uint32_t		index;
1347 		caddr_t			src;
1348 		caddr_t			dst;
1349 		qlt_nvram_81xx_t	*qlt81nvr;
1350 
1351 		dst = icb+0x80;
1352 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1353 		src = (caddr_t)&qlt81nvr->ext_blk;
1354 		index = sizeof (qlt_ext_icb_81xx_t);
1355 
1356 		/* Use defaults for cases where we find nothing in NVR */
1357 		if (*src == 0) {
1358 			EL(qlt, "nvram eicb=null\n");
1359 			cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1360 			    instance);
1361 			qlt81nvr->ext_blk.version[0] = 1;
1362 /*
1363  * not yet, for !FIP firmware at least
1364  *
1365  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1366  */
1367 #ifdef _LITTLE_ENDIAN
1368 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1369 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1370 #else
1371 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1372 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1373 #endif
1374 		}
1375 
1376 		while (index--) {
1377 			*dst++ = *src++;
1378 		}
1379 
1380 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1381 		da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1382 
1383 		mcp->to_fw[11] = LSW(LSD(da));
1384 		mcp->to_fw[10] = MSW(LSD(da));
1385 		mcp->to_fw[13] = LSW(MSD(da));
1386 		mcp->to_fw[12] = MSW(MSD(da));
1387 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1388 		    0xffff);
1389 
1390 		/* eicb enable */
1391 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1392 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1393 		    BIT_1;
1394 	}
1395 
1396 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1397 	mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
1398 
1399 	/*
1400 	 * This is the 1st command after adapter initialization which will
1401 	 * use interrupts and the regular mailbox interface.
1402 	 */
1403 	qlt->mbox_io_state = MBOX_STATE_READY;
1404 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1405 	/* Issue mailbox to firmware */
1406 	ret = qlt_mailbox_command(qlt, mcp);
1407 	if (ret != QLT_SUCCESS) {
1408 		EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1409 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1410 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1411 	}
1412 
1413 	mcp->to_fw_mask = BIT_0;
1414 	mcp->from_fw_mask = BIT_0 | BIT_1;
1415 	mcp->to_fw[0] = 0x28;
1416 	ret = qlt_mailbox_command(qlt, mcp);
1417 	if (ret != QLT_SUCCESS) {
1418 		EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1419 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options failed %llx", instance,
1420 		    (long long)ret);
1421 	}
1422 
1423 	/*
1424 	 * Report FW versions for 81xx - MPI rev is useful
1425 	 */
1426 	if (qlt->qlt_81xx_chip) {
1427 		mcp->to_fw_mask = BIT_0;
1428 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1429 		    BIT_0;
1430 		mcp->to_fw[0] = 0x8;
1431 		ret = qlt_mailbox_command(qlt, mcp);
1432 		if (ret != QLT_SUCCESS) {
1433 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
1434 		} else {
1435 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1436 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1437 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1438 			    mcp->from_fw[11] & 0xff);
1439 		}
1440 	}
1441 
1442 	qlt_free_mailbox_command(qlt, mcp);
1443 
1444 	for (i = 0; i < 5; i++) {
1445 		qlt->qlt_bufref[i] = 0;
1446 	}
1447 	qlt->qlt_bumpbucket = 0;
1448 	qlt->qlt_pmintry = 0;
1449 	qlt->qlt_pmin_ok = 0;
1450 
1451 	if (ret != QLT_SUCCESS)
1452 		return (ret);
1453 	return (FCT_SUCCESS);
1454 }
1455 
1456 fct_status_t
1457 qlt_port_offline(qlt_state_t *qlt)
1458 {
1459 	int		retries;
1460 
1461 	mutex_enter(&qlt->mbox_lock);
1462 
1463 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1464 		mutex_exit(&qlt->mbox_lock);
1465 		goto poff_mbox_done;
1466 	}
1467 
1468 	/* Wait to grab the mailboxes */
1469 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1470 	    retries++) {
1471 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1472 		if ((retries > 5) ||
1473 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1474 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1475 			mutex_exit(&qlt->mbox_lock);
1476 			goto poff_mbox_done;
1477 		}
1478 	}
1479 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1480 	mutex_exit(&qlt->mbox_lock);
1481 poff_mbox_done:;
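	/*
	 * The sneak counter presumably lets qlt_isr() tolerate any stray
	 * interrupts the chip raises while it is being reset below.
	 */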
1482 	qlt->intr_sneak_counter = 10;
1483 	mutex_enter(&qlt->intr_lock);
1484 	(void) qlt_reset_chip(qlt);
1485 	drv_usecwait(20);
1486 	qlt->intr_sneak_counter = 0;
1487 	mutex_exit(&qlt->intr_lock);
1488 
1489 	return (FCT_SUCCESS);
1490 }
1491 
1492 static fct_status_t
1493 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1494 {
1495 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1496 	mbox_cmd_t *mcp;
1497 	fct_status_t fc_ret;
1498 	fct_status_t ret;
1499 	clock_t et;
1500 
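	/*
	 * Allow up to 5 seconds (retrying every 50ms) for the firmware to
	 * become ready before giving up on MBC_GET_ID.
	 */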
1501 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1502 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1503 link_info_retry:
1504 	mcp->to_fw[0] = MBC_GET_ID;
1505 	mcp->to_fw[9] = 0;
1506 	mcp->to_fw_mask |= BIT_0 | BIT_9;
1507 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1508 	/* Issue mailbox to firmware */
1509 	ret = qlt_mailbox_command(qlt, mcp);
1510 	if (ret != QLT_SUCCESS) {
1511 		EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1512 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1513 			/* Firmware is not ready */
1514 			if (ddi_get_lbolt() < et) {
1515 				delay(drv_usectohz(50000));
1516 				goto link_info_retry;
1517 			}
1518 		}
1519 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1520 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1521 		fc_ret = FCT_FAILURE;
1522 	} else {
1523 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1524 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1525 
1526 		li->port_speed = qlt->link_speed;
1527 		switch (mcp->from_fw[6]) {
1528 		case 1:
1529 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1530 			li->port_fca_flogi_done = 1;
1531 			break;
1532 		case 0:
1533 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1534 			li->port_no_fct_flogi = 1;
1535 			break;
1536 		case 3:
1537 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1538 			li->port_fca_flogi_done = 1;
1539 			break;
1540 		case 2: /*FALLTHROUGH*/
1541 		case 4:
1542 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1543 			li->port_fca_flogi_done = 1;
1544 			break;
1545 		default:
1546 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1547 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1548 		}
1549 		qlt->cur_topology = li->port_topology;
1550 		fc_ret = FCT_SUCCESS;
1551 	}
1552 	qlt_free_mailbox_command(qlt, mcp);
1553 
1554 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1555 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1556 		mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
1557 		mcp->to_fw[1] = 0x7FE;
1558 		mcp->to_fw[9] = 0;
1559 		mcp->to_fw[10] = 0;
1560 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1561 		fc_ret = qlt_mailbox_command(qlt, mcp);
1562 		if (fc_ret != QLT_SUCCESS) {
1563 			EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1564 			    fc_ret);
1565 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1566 			    "database for F_port failed, ret = %llx", fc_ret);
1567 		} else {
1568 			uint8_t *p;
1569 
1570 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1571 			p = mcp->dbuf->db_sglist[0].seg_addr;
1572 			bcopy(p + 0x18, li->port_rpwwn, 8);
1573 			bcopy(p + 0x20, li->port_rnwwn, 8);
1574 		}
1575 		qlt_free_mailbox_command(qlt, mcp);
1576 	}
1577 	return (fc_ret);
1578 }
1579 
1580 static int
1581 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1582 {
1583 	int		instance;
1584 	qlt_state_t	*qlt;
1585 
1586 	if (otype != OTYP_CHR) {
1587 		return (EINVAL);
1588 	}
1589 
1590 	/*
1591 	 * Since this is for debugging only, only allow root to issue ioctls for now
1592 	 */
1593 	if (drv_priv(credp)) {
1594 		return (EPERM);
1595 	}
1596 
1597 	instance = (int)getminor(*devp);
1598 	qlt = ddi_get_soft_state(qlt_state, instance);
1599 	if (qlt == NULL) {
1600 		return (ENXIO);
1601 	}
1602 
1603 	mutex_enter(&qlt->qlt_ioctl_lock);
1604 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1605 		/*
1606 		 * It is already open for exclusive access.
1607 		 * So shut the door on this caller.
1608 		 */
1609 		mutex_exit(&qlt->qlt_ioctl_lock);
1610 		return (EBUSY);
1611 	}
1612 
1613 	if (flag & FEXCL) {
1614 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1615 			/*
1616 			 * Exclusive operation not possible
1617 			 * as it is already opened
1618 			 */
1619 			mutex_exit(&qlt->qlt_ioctl_lock);
1620 			return (EBUSY);
1621 		}
1622 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1623 	}
1624 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1625 	mutex_exit(&qlt->qlt_ioctl_lock);
1626 
1627 	return (0);
1628 }
1629 
1630 /* ARGSUSED */
1631 static int
1632 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1633 {
1634 	int		instance;
1635 	qlt_state_t	*qlt;
1636 
1637 	if (otype != OTYP_CHR) {
1638 		return (EINVAL);
1639 	}
1640 
1641 	instance = (int)getminor(dev);
1642 	qlt = ddi_get_soft_state(qlt_state, instance);
1643 	if (qlt == NULL) {
1644 		return (ENXIO);
1645 	}
1646 
1647 	mutex_enter(&qlt->qlt_ioctl_lock);
1648 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1649 		mutex_exit(&qlt->qlt_ioctl_lock);
1650 		return (ENODEV);
1651 	}
1652 
1653 	/*
1654 	 * There is one hole here: there could be several concurrent shared
1655 	 * open sessions, but we never check for that case.
1656 	 * It will not hurt too much, so disregard it for now.
1657 	 */
1658 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1659 	mutex_exit(&qlt->qlt_ioctl_lock);
1660 
1661 	return (0);
1662 }
1663 
1664 /*
1665  * All of these ioctls are unstable interfaces which are meant to be used
1666  * in a controlled lab environment. No formal testing will be (or needs to
1667  * be) done for these ioctls. Note especially that running with an
1668  * additional uploaded firmware is not supported and is provided here for
1669  * test purposes only.
1670  */
1671 /* ARGSUSED */
1672 static int
1673 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1674     cred_t *credp, int *rval)
1675 {
1676 	qlt_state_t	*qlt;
1677 	int		ret = 0;
1678 #ifdef _LITTLE_ENDIAN
1679 	int		i;
1680 #endif
1681 	stmf_iocdata_t	*iocd;
1682 	void		*ibuf = NULL;
1683 	void		*obuf = NULL;
1684 	uint32_t	*intp;
1685 	qlt_fw_info_t	*fwi;
1686 	mbox_cmd_t	*mcp;
1687 	fct_status_t	st;
1688 	char		info[80];
1689 	fct_status_t	ret2;
1690 
1691 	if (drv_priv(credp) != 0)
1692 		return (EPERM);
1693 
1694 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1695 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1696 	if (ret)
1697 		return (ret);
1698 	iocd->stmf_error = 0;
1699 
1700 	switch (cmd) {
1701 	case QLT_IOCTL_FETCH_FWDUMP:
1702 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1703 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1704 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1705 			ret = EINVAL;
1706 			break;
1707 		}
1708 		mutex_enter(&qlt->qlt_ioctl_lock);
1709 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1710 			mutex_exit(&qlt->qlt_ioctl_lock);
1711 			ret = ENODATA;
1712 			EL(qlt, "no fwdump\n");
1713 			iocd->stmf_error = QLTIO_NO_DUMP;
1714 			break;
1715 		}
1716 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1717 			mutex_exit(&qlt->qlt_ioctl_lock);
1718 			ret = EBUSY;
1719 			EL(qlt, "fwdump inprogress\n");
1720 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1721 			break;
1722 		}
1723 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1724 			mutex_exit(&qlt->qlt_ioctl_lock);
1725 			ret = EEXIST;
1726 			EL(qlt, "fwdump already fetched\n");
1727 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1728 			break;
1729 		}
1730 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1731 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1732 		mutex_exit(&qlt->qlt_ioctl_lock);
1733 
1734 		break;
1735 
1736 	case QLT_IOCTL_TRIGGER_FWDUMP:
1737 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1738 			ret = EACCES;
1739 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1740 			break;
1741 		}
1742 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1743 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1744 		info[79] = 0;
1745 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1746 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1747 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1748 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1749 			    "%llxh\n", ret2);
1750 			ret = EIO;
1751 		}
1752 		break;
1753 	case QLT_IOCTL_UPLOAD_FW:
1754 		if ((iocd->stmf_ibuf_size < 1024) ||
1755 		    (iocd->stmf_ibuf_size & 3)) {
1756 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1757 			    iocd->stmf_ibuf_size);
1758 			ret = EINVAL;
1759 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1760 			break;
1761 		}
1762 		intp = (uint32_t *)ibuf;
1763 #ifdef _LITTLE_ENDIAN
1764 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1765 			intp[i] = BSWAP_32(intp[i]);
1766 		}
1767 #endif
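		/*
		 * Sanity check the image layout assumed below: word 2 is the
		 * load address and word 3 the length (in words) of the first
		 * segment; the second segment header starts right after the
		 * first segment. Word 8 is matched against the chip type.
		 */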
1768 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1769 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1770 		    iocd->stmf_ibuf_size)) {
1771 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1772 			    iocd->stmf_ibuf_size);
1773 			ret = EINVAL;
1774 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1775 			break;
1776 		}
1777 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1778 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1779 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1780 		    ((intp[8] & 3) == 0))) {
1781 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1782 			ret = EACCES;
1783 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1784 			break;
1785 		}
1786 
1787 		/* Everything looks OK, let's copy this firmware */
1788 		if (qlt->fw_code01) {
1789 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1790 			    qlt->fw_length02) << 2);
1791 			qlt->fw_code01 = NULL;
1792 		} else {
1793 			atomic_add_32(&qlt_loaded_counter, 1);
1794 		}
1795 		qlt->fw_length01 = intp[3];
1796 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1797 		    KM_SLEEP);
1798 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1799 		qlt->fw_addr01 = intp[2];
1800 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1801 		qlt->fw_addr02 = qlt->fw_code02[2];
1802 		qlt->fw_length02 = qlt->fw_code02[3];
1803 		break;
1804 
1805 	case QLT_IOCTL_CLEAR_FW:
1806 		if (qlt->fw_code01) {
1807 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1808 			    qlt->fw_length02) << 2);
1809 			qlt->fw_code01 = NULL;
1810 			atomic_add_32(&qlt_loaded_counter, -1);
1811 		}
1812 		break;
1813 
1814 	case QLT_IOCTL_GET_FW_INFO:
1815 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1816 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1817 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1818 			ret = EINVAL;
1819 			break;
1820 		}
1821 		fwi = (qlt_fw_info_t *)obuf;
1822 		if (qlt->qlt_stay_offline) {
1823 			fwi->fwi_stay_offline = 1;
1824 		}
1825 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1826 			fwi->fwi_port_active = 1;
1827 		}
1828 		fwi->fwi_active_major = qlt->fw_major;
1829 		fwi->fwi_active_minor = qlt->fw_minor;
1830 		fwi->fwi_active_subminor = qlt->fw_subminor;
1831 		fwi->fwi_active_attr = qlt->fw_attr;
1832 		if (qlt->fw_code01) {
1833 			fwi->fwi_fw_uploaded = 1;
1834 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1835 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1836 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1837 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1838 		}
1839 		if (qlt->qlt_81xx_chip) {
1840 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1841 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1842 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1843 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1844 		} else if (qlt->qlt_25xx_chip) {
1845 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1846 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1847 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1848 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1849 		} else {
1850 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1851 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1852 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1853 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1854 		}
1855 		break;
1856 
1857 	case QLT_IOCTL_STAY_OFFLINE:
1858 		if (!iocd->stmf_ibuf_size) {
1859 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1860 			    iocd->stmf_ibuf_size);
1861 			ret = EINVAL;
1862 			break;
1863 		}
1864 		if (*((char *)ibuf)) {
1865 			qlt->qlt_stay_offline = 1;
1866 		} else {
1867 			qlt->qlt_stay_offline = 0;
1868 		}
1869 		break;
1870 
1871 	case QLT_IOCTL_MBOX:
1872 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1873 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1874 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1875 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1876 			ret = EINVAL;
1877 			break;
1878 		}
1879 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1880 		if (mcp == NULL) {
1881 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1882 			ret = ENOMEM;
1883 			break;
1884 		}
1885 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1886 		st = qlt_mailbox_command(qlt, mcp);
1887 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1888 		qlt_free_mailbox_command(qlt, mcp);
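		/*
		 * A mailbox-level failure still copies the mailbox registers
		 * back to the caller; report success and let the caller
		 * inspect the returned mailbox contents.
		 */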
1889 		if (st != QLT_SUCCESS) {
1890 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1891 				st = QLT_SUCCESS;
1892 		}
1893 		if (st != QLT_SUCCESS) {
1894 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1895 			ret = EIO;
1896 			switch (st) {
1897 			case QLT_MBOX_NOT_INITIALIZED:
1898 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1899 				break;
1900 			case QLT_MBOX_BUSY:
1901 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1902 				break;
1903 			case QLT_MBOX_TIMEOUT:
1904 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1905 				break;
1906 			case QLT_MBOX_ABORTED:
1907 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1908 				break;
1909 			}
1910 		}
1911 		break;
1912 
1913 	case QLT_IOCTL_ELOG:
1914 		qlt_dump_el_trace_buffer(qlt);
1915 		break;
1916 
1917 	default:
1918 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
1919 		ret = ENOTTY;
1920 	}
1921 
1922 	if (ret == 0) {
1923 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1924 	} else if (iocd->stmf_error) {
1925 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1926 	}
1927 	if (obuf) {
1928 		kmem_free(obuf, iocd->stmf_obuf_size);
1929 		obuf = NULL;
1930 	}
1931 	if (ibuf) {
1932 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1933 		ibuf = NULL;
1934 	}
1935 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1936 	return (ret);
1937 }
1938 
1939 static fct_status_t
1940 qlt_force_lip(qlt_state_t *qlt)
1941 {
1942 	mbox_cmd_t	*mcp;
1943 	fct_status_t	 rval;
1944 
1945 	mcp = qlt_alloc_mailbox_command(qlt, 0);
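	/* Issue the LIP (0x72) mailbox command to force a LIP on the link */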
1946 	mcp->to_fw[0] = 0x0072;
1947 	mcp->to_fw[1] = BIT_4;
1948 	mcp->to_fw[3] = 1;
1949 	mcp->to_fw_mask |= BIT_1 | BIT_3;
1950 	rval = qlt_mailbox_command(qlt, mcp);
1951 	if (rval != FCT_SUCCESS) {
1952 		EL(qlt, "qlt force lip MB failed: rval=%x", rval);
1953 	} else {
1954 		if (mcp->from_fw[0] != 0x4000) {
1955 			QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1956 			    mcp->from_fw[0]);
1957 			rval = FCT_FAILURE;
1958 		}
1959 	}
1960 	qlt_free_mailbox_command(qlt, mcp);
1961 	return (rval);
1962 }
1963 
1964 static void
1965 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1966 {
1967 	stmf_change_status_t		st;
1968 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1969 	qlt_state_t			*qlt;
1970 	fct_status_t			ret;
1971 
1972 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1973 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1974 	    (cmd == FCT_CMD_FORCE_LIP) ||
1975 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1976 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1977 
1978 	qlt = (qlt_state_t *)port->port_fca_private;
1979 	st.st_completion_status = FCT_SUCCESS;
1980 	st.st_additional_info = NULL;
1981 
1982 	switch (cmd) {
1983 	case FCT_CMD_PORT_ONLINE:
1984 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1985 			st.st_completion_status = STMF_ALREADY;
1986 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1987 			st.st_completion_status = FCT_FAILURE;
1988 		if (st.st_completion_status == FCT_SUCCESS) {
1989 			qlt->qlt_state = FCT_STATE_ONLINING;
1990 			qlt->qlt_state_not_acked = 1;
1991 			st.st_completion_status = qlt_port_online(qlt);
1992 			if (st.st_completion_status != STMF_SUCCESS) {
1993 				EL(qlt, "PORT_ONLINE status=%xh\n",
1994 				    st.st_completion_status);
1995 				qlt->qlt_state = FCT_STATE_OFFLINE;
1996 				qlt->qlt_state_not_acked = 0;
1997 			} else {
1998 				qlt->qlt_state = FCT_STATE_ONLINE;
1999 			}
2000 		}
2001 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
2002 		qlt->qlt_change_state_flags = 0;
2003 		break;
2004 
2005 	case FCT_CMD_PORT_OFFLINE:
2006 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
2007 			st.st_completion_status = STMF_ALREADY;
2008 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
2009 			st.st_completion_status = FCT_FAILURE;
2010 		}
2011 		if (st.st_completion_status == FCT_SUCCESS) {
2012 			qlt->qlt_state = FCT_STATE_OFFLINING;
2013 			qlt->qlt_state_not_acked = 1;
2014 
2015 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
2016 				(void) qlt_firmware_dump(port, ssci);
2017 			}
2018 			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
2019 			st.st_completion_status = qlt_port_offline(qlt);
2020 			if (st.st_completion_status != STMF_SUCCESS) {
2021 				EL(qlt, "PORT_OFFLINE status=%xh\n",
2022 				    st.st_completion_status);
2023 				qlt->qlt_state = FCT_STATE_ONLINE;
2024 				qlt->qlt_state_not_acked = 0;
2025 			} else {
2026 				qlt->qlt_state = FCT_STATE_OFFLINE;
2027 			}
2028 		}
2029 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
2030 		break;
2031 
2032 	case FCT_ACK_PORT_ONLINE_COMPLETE:
2033 		qlt->qlt_state_not_acked = 0;
2034 		break;
2035 
2036 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
2037 		qlt->qlt_state_not_acked = 0;
2038 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
2039 		    (qlt->qlt_stay_offline == 0)) {
2040 			if ((ret = fct_port_initialize(port,
2041 			    qlt->qlt_change_state_flags,
2042 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
2043 			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
2044 				EL(qlt, "fct_port_initialize status=%llxh\n",
2045 				    ret);
2046 				cmn_err(CE_WARN, "qlt_ctl: "
2047 				    "fct_port_initialize failed, please use "
2048 				    "stmfstate to start the port-%s manually",
2049 				    qlt->qlt_port_alias);
2050 			}
2051 		}
2052 		break;
2053 
2054 	case FCT_CMD_FORCE_LIP:
2055 		if (qlt->qlt_81xx_chip) {
2056 			EL(qlt, "force lip is an unsupported command "
2057 			    "for this adapter type\n");
2058 		} else {
2059 			*((fct_status_t *)arg) = qlt_force_lip(qlt);
2060 			EL(qlt, "forcelip done\n");
2061 		}
2062 		break;
2063 
2064 	default:
2065 		EL(qlt, "unsupported cmd - 0x%02X", cmd);
2066 		break;
2067 	}
2068 }
2069 
2070 /* ARGSUSED */
2071 static fct_status_t
2072 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2073 {
2074 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
2075 
2076 	EL(qlt, "FLOGI request not supported\n");
2077 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2078 	return (FCT_FAILURE);
2079 }
2080 
2081 /*
2082  * Return a pointer to n entries in the request queue. Assumes that
2083  * the request queue lock is held. Does a very short busy wait if
2084  * fewer entries (or none) are available. Returns NULL if it still cannot
2085  * fulfill the request.
2086  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2087  */
2088 caddr_t
2089 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
2090 {
2091 	int try = 0;
2092 
2093 	while (qlt->req_available < n) {
2094 		uint32_t val1, val2, val3;
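		/*
		 * Read the firmware's out-pointer until we see the same
		 * value three times in a row, so we don't act on a value
		 * that is in the middle of being updated.
		 */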
2095 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2096 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2097 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2098 		if ((val1 != val2) || (val2 != val3))
2099 			continue;
2100 
2101 		qlt->req_ndx_from_fw = val1;
2102 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2103 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2104 		    (REQUEST_QUEUE_ENTRIES - 1));
2105 		if (qlt->req_available < n) {
2106 			if (try < 2) {
2107 				drv_usecwait(100);
2108 				try++;
2109 				continue;
2110 			} else {
2111 				stmf_trace(qlt->qlt_port_alias,
2112 				    "Req Q is full");
2113 				return (NULL);
2114 			}
2115 		}
2116 		break;
2117 	}
2118 	/* We don't change anything until the entries are submitted */
2119 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2120 }
2121 
2122 /*
2123  * Update the request-in pointer to the firmware. Assumes req lock is held.
2124  */
2125 void
2126 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2127 {
2128 	ASSERT(n >= 1);
2129 	qlt->req_ndx_to_fw += n;
2130 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2131 	qlt->req_available -= n;
2132 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2133 }
2134 
2135 
2136 /*
2137  * Return a pointer to n entries in the priority request queue. Assumes that
2138  * the priority request queue lock is held. Does a very short busy wait if
2139  * fewer entries (or none) are available. Returns NULL if it still cannot
2140  * fulfill the request.
2141  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2142  */
2143 caddr_t
2144 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2145 {
2146 	int try = 0;
2147 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2148 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2149 	    (PRIORITY_QUEUE_ENTRIES - 1));
2150 
2151 	while (req_available < n) {
2152 		uint32_t val1, val2, val3;
2153 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2154 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2155 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2156 		if ((val1 != val2) || (val2 != val3))
2157 			continue;
2158 
2159 		qlt->preq_ndx_from_fw = val1;
2160 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2161 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2162 		    (PRIORITY_QUEUE_ENTRIES - 1));
2163 		if (req_available < n) {
2164 			if (try < 2) {
2165 				drv_usecwait(100);
2166 				try++;
2167 				continue;
2168 			} else {
2169 				return (NULL);
2170 			}
2171 		}
2172 		break;
2173 	}
2174 	/* We don't change anything until the entries are submitted */
2175 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2176 }
2177 
2178 /*
2179  * Update the priority request-in pointer to the firmware; preq lock held.
2180  */
2181 void
2182 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2183 {
2184 	ASSERT(n >= 1);
2185 	qlt->preq_ndx_to_fw += n;
2186 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2187 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2188 }
2189 
2190 /*
2191  * - Should not be called from Interrupt.
2192  * - A very hardware specific function. Does not touch driver state.
2193  * - Assumes that interrupts are disabled or not there.
2194  * - Expects that the caller makes sure that all activity has stopped
2195  *   and it is now OK to go ahead and reset the chip. Also, the caller
2196  *   takes care of post-reset damage control.
2197  * - Called by initialize_adapter() and dump_fw() (for reset only).
2198  * - During attach() nothing much is happening and during initialize_adapter()
2199  *   the function (caller) does all the housekeeping so that this function
2200  *   can execute in peace.
2201  * - Returns 0 on success.
2202  */
2203 static fct_status_t
2204 qlt_reset_chip(qlt_state_t *qlt)
2205 {
2206 	int cntr;
2207 
2208 	EL(qlt, "initiated\n");
2209 
2210 	/* XXX: Switch off LEDs */
2211 
2212 	/* Disable Interrupts */
2213 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2214 	(void) REG_RD32(qlt, REG_INTR_CTRL);
2215 	/* Stop DMA */
2216 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2217 
2218 	/* Wait for DMA to be stopped */
2219 	cntr = 0;
2220 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2221 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2222 		cntr++;
2223 		/* 3 sec should be more than enough */
2224 		if (cntr == 300)
2225 			return (QLT_DMA_STUCK);
2226 	}
2227 
2228 	/* Reset the Chip */
2229 	REG_WR32(qlt, REG_CTRL_STATUS,
2230 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2231 
2232 	qlt->qlt_link_up = 0;
2233 
2234 	drv_usecwait(100);
2235 
2236 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2237 	cntr = 0;
2238 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2239 		delay(drv_usectohz(10000));
2240 		cntr++;
2241 		/* 3 sec should be more than enough */
2242 		if (cntr == 300)
2243 			return (QLT_ROM_STUCK);
2244 	}
2245 	/* Disable Interrupts (Probably not needed) */
2246 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2247 
2248 	return (QLT_SUCCESS);
2249 }
2250 /*
2251  * - Should not be called from interrupt context.
2252  * - A very hardware specific function. Does not touch driver state.
2253  * - Assumes that interrupts are disabled or not there.
2254  * - Expects that the caller makes sure that all activity has stopped
2255  *   and that it is OK to reset the chip and reload the firmware; the
2256  *   caller takes care of post-reset damage control.
2257  * - Resets the chip, loads the firmware (the ioctl-uploaded image if one
2258  *   is present, otherwise the built-in image for this chip type),
2259  *   verifies its checksum, starts it executing and reads back the
2260  *   firmware version information.
2261  * - Returns QLT_SUCCESS on success.
2262  */
2263 static fct_status_t
2264 qlt_download_fw(qlt_state_t *qlt)
2265 {
2266 	uint32_t start_addr;
2267 	fct_status_t ret;
2268 
2269 	EL(qlt, "initiated\n");
2270 
2271 	(void) qlt_reset_chip(qlt);
2272 
2273 	if (qlt->qlt_81xx_chip) {
2274 		qlt_mps_reset(qlt);
2275 	}
2276 
2277 	/* Load the two segments */
2278 	if (qlt->fw_code01 != NULL) {
2279 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2280 		    qlt->fw_addr01);
2281 		if (ret == QLT_SUCCESS) {
2282 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2283 			    qlt->fw_length02, qlt->fw_addr02);
2284 		}
2285 		start_addr = qlt->fw_addr01;
2286 	} else if (qlt->qlt_81xx_chip) {
2287 		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2288 		    fw8100_addr01);
2289 		if (ret == QLT_SUCCESS) {
2290 			ret = qlt_load_risc_ram(qlt, fw8100_code02,
2291 			    fw8100_length02, fw8100_addr02);
2292 		}
2293 		start_addr = fw8100_addr01;
2294 	} else if (qlt->qlt_25xx_chip) {
2295 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2296 		    fw2500_addr01);
2297 		if (ret == QLT_SUCCESS) {
2298 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
2299 			    fw2500_length02, fw2500_addr02);
2300 		}
2301 		start_addr = fw2500_addr01;
2302 	} else {
2303 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2304 		    fw2400_addr01);
2305 		if (ret == QLT_SUCCESS) {
2306 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
2307 			    fw2400_length02, fw2400_addr02);
2308 		}
2309 		start_addr = fw2400_addr01;
2310 	}
2311 	if (ret != QLT_SUCCESS) {
2312 		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2313 		return (ret);
2314 	}
2315 
2316 	/* Verify Checksum */
2317 	REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
2318 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2319 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2320 	ret = qlt_raw_mailbox_command(qlt);
2321 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2322 	if (ret != QLT_SUCCESS) {
2323 		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2324 		return (ret);
2325 	}
2326 
2327 	/* Execute firmware */
2328 	REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
2329 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2330 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2331 	REG_WR16(qlt, REG_MBOX(3), 0);
2332 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
2333 	ret = qlt_raw_mailbox_command(qlt);
2334 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2335 	if (ret != QLT_SUCCESS) {
2336 		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2337 		return (ret);
2338 	}
2339 
2340 	/* Get revisions (About Firmware) */
2341 	REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
2342 	ret = qlt_raw_mailbox_command(qlt);
2343 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2344 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2345 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2346 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2347 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2348 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2349 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2350 	if (ret != QLT_SUCCESS) {
2351 		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2352 		return (ret);
2353 	}
2354 
2355 	return (QLT_SUCCESS);
2356 }
2357 
2358 /*
2359  * Used only from qlt_download_fw().
2360  */
2361 static fct_status_t
2362 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2363 				uint32_t word_count, uint32_t risc_addr)
2364 {
2365 	uint32_t words_sent = 0;
2366 	uint32_t words_being_sent;
2367 	uint32_t *cur_host_addr;
2368 	uint32_t cur_risc_addr;
2369 	uint64_t da;
2370 	fct_status_t ret;
2371 
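	/*
	 * Copy the image into the pre-allocated queue DMA buffer in chunks
	 * and ask the firmware to move each chunk into RISC RAM using the
	 * LOAD RAM EXTENDED (0x0B) mailbox command.
	 */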
2372 	while (words_sent < word_count) {
2373 		cur_host_addr = &(host_addr[words_sent]);
2374 		cur_risc_addr = risc_addr + (words_sent << 2);
2375 		words_being_sent = min(word_count - words_sent,
2376 		    TOTAL_DMA_MEM_SIZE >> 2);
2377 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2378 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2379 		    DDI_DEV_AUTOINCR);
2380 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2381 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2382 		da = qlt->queue_mem_cookie.dmac_laddress;
2383 		REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
2384 		REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
2385 		REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
2386 		REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2387 		REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2388 		REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2389 		REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2390 		REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
2391 		REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
2392 		ret = qlt_raw_mailbox_command(qlt);
2393 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2394 		if (ret != QLT_SUCCESS) {
2395 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2396 			    ret);
2397 			return (ret);
2398 		}
2399 		words_sent += words_being_sent;
2400 	}
2401 	return (QLT_SUCCESS);
2402 }
2403 
2404 /*
2405  * Not used during normal operation. Only during driver init.
2406  * Assumes that interrupts are disabled and mailboxes are loaded.
2407  * Just triggers the mailbox command and waits for the completion.
2408  * Also expects that there is nothing else going on and we will only
2409  * get back a mailbox completion from firmware.
2410  * ---DOES NOT CLEAR INTERRUPT---
2411  * Used only from the code path originating from
2412  * qlt_reset_chip_and_download_fw()
2413  */
2414 static fct_status_t
2415 qlt_raw_mailbox_command(qlt_state_t *qlt)
2416 {
2417 	int cntr = 0;
2418 	uint32_t status;
2419 
2420 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2421 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
2422 		cntr++;
2423 		if (cntr == 100) {
2424 			return (QLT_MAILBOX_STUCK);
2425 		}
2426 		delay(drv_usectohz(10000));
2427 	}
2428 	status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);
2429 
2430 	if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
2431 	    (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
2432 	    (status == MBX_CMD_SUCCESSFUL) ||
2433 	    (status == MBX_CMD_NOT_SUCCESSFUL)) {
2434 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2435 		if (mbox0 == QLT_MBX_CMD_SUCCESS) {
2436 			return (QLT_SUCCESS);
2437 		} else {
2438 			return (QLT_MBOX_FAILED | mbox0);
2439 		}
2440 	}
2441 	/* This is unexpected, dump a message */
2442 	cmn_err(CE_WARN, "qlt(%d): Unexpected intr status %llx",
2443 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2444 	return (QLT_UNEXPECTED_RESPONSE);
2445 }
2446 
2447 static mbox_cmd_t *
2448 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2449 {
2450 	mbox_cmd_t *mcp;
2451 
2452 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2453 	if (dma_size) {
2454 		qlt_dmem_bctl_t *bctl;
2455 		uint64_t da;
2456 
2457 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2458 		if (mcp->dbuf == NULL) {
2459 			kmem_free(mcp, sizeof (*mcp));
2460 			return (NULL);
2461 		}
2462 		mcp->dbuf->db_data_size = dma_size;
2463 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2464 
2465 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2466 		da = bctl->bctl_dev_addr;
2467 		/* This is the most common initialization of dma ptrs */
2468 		mcp->to_fw[3] = LSW(LSD(da));
2469 		mcp->to_fw[2] = MSW(LSD(da));
2470 		mcp->to_fw[7] = LSW(MSD(da));
2471 		mcp->to_fw[6] = MSW(MSD(da));
2472 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2473 	}
2474 	mcp->to_fw_mask |= BIT_0;
2475 	mcp->from_fw_mask |= BIT_0;
2476 	return (mcp);
2477 }
2478 
2479 void
2480 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2481 {
2482 	if (mcp->dbuf)
2483 		qlt_i_dmem_free(qlt, mcp->dbuf);
2484 	kmem_free(mcp, sizeof (*mcp));
2485 }
2486 
2487 /*
2488  * This can sleep. Should never be called from interrupt context.
2489  */
2490 static fct_status_t
2491 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2492 {
2493 	int	retries;
2494 	int	i;
2495 	char	info[80];
2496 
2497 	if (curthread->t_flag & T_INTR_THREAD) {
2498 		ASSERT(0);
2499 		return (QLT_MBOX_FAILED);
2500 	}
2501 
2502 	mutex_enter(&qlt->mbox_lock);
2503 	/* See if mailboxes are still uninitialized */
2504 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2505 		mutex_exit(&qlt->mbox_lock);
2506 		return (QLT_MBOX_NOT_INITIALIZED);
2507 	}
2508 
2509 	/* Wait to grab the mailboxes */
2510 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2511 	    retries++) {
2512 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2513 		if ((retries > 5) ||
2514 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2515 			mutex_exit(&qlt->mbox_lock);
2516 			return (QLT_MBOX_BUSY);
2517 		}
2518 	}
2519 	/* Make sure we always ask for mailbox 0 */
2520 	mcp->from_fw_mask |= BIT_0;
2521 
2522 	/* Load mailboxes, set state and generate RISC interrupt */
2523 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2524 	qlt->mcp = mcp;
2525 	for (i = 0; i < MAX_MBOXES; i++) {
2526 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2527 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2528 	}
2529 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2530 
2531 qlt_mbox_wait_loop:;
2532 	/* Wait for mailbox command completion */
2533 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2534 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2535 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2536 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2537 		info[79] = 0;
2538 		qlt->mcp = NULL;
2539 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2540 		mutex_exit(&qlt->mbox_lock);
2541 
2542 		/*
2543 		 * XXX Throw HBA fatal error event
2544 		 */
2545 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2546 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2547 		return (QLT_MBOX_TIMEOUT);
2548 	}
2549 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2550 		goto qlt_mbox_wait_loop;
2551 
2552 	qlt->mcp = NULL;
2553 
2554 	/* Make sure it's a completion */
2555 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2556 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2557 		mutex_exit(&qlt->mbox_lock);
2558 		return (QLT_MBOX_ABORTED);
2559 	}
2560 
2561 	/* MBox command completed. Clear state, return based on mbox 0 */
2562 	/* Mailboxes are already loaded by the interrupt routine */
2563 	qlt->mbox_io_state = MBOX_STATE_READY;
2564 	mutex_exit(&qlt->mbox_lock);
2565 	if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS)
2566 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2567 
2568 	return (QLT_SUCCESS);
2569 }
2570 
2571 /*
2572  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2573  */
2574 /* ARGSUSED */
2575 static uint_t
2576 qlt_isr(caddr_t arg, caddr_t arg2)
2577 {
2578 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2579 	uint32_t	risc_status, intr_type;
2580 	int		i;
2581 	int		intr_loop_count;
2582 	char		info[80];
2583 
2584 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2585 	if (!mutex_tryenter(&qlt->intr_lock)) {
2586 		/*
2587 		 * Normally we will always get this lock. If tryenter is
2588 		 * failing then it means that the driver is trying to do
2589 		 * some cleanup and is masking the intr but some intr
2590 		 * has sneaked in between. See if our device has generated
2591 		 * this intr. If so then wait a bit and return claimed.
2592 		 * If not then return claimed if this is the 1st instance
2593 		 * of an interrupt after the driver has grabbed the lock.
2594 		 */
2595 		if (risc_status & BIT_15) {
2596 			drv_usecwait(10);
2597 			return (DDI_INTR_CLAIMED);
2598 		} else if (qlt->intr_sneak_counter) {
2599 			qlt->intr_sneak_counter--;
2600 			return (DDI_INTR_CLAIMED);
2601 		} else {
2602 			return (DDI_INTR_UNCLAIMED);
2603 		}
2604 	}
2605 	if (((risc_status & BIT_15) == 0) ||
2606 	    (qlt->qlt_intr_enabled == 0)) {
2607 		/*
2608 		 * This might be a pure coincidence: we are operating
2609 		 * in interrupt-disabled mode and another device
2610 		 * sharing the interrupt line has generated an interrupt
2611 		 * while an interrupt from our device might be pending. Just
2612 		 * ignore it and let the code handling the interrupt
2613 		 * disabled mode handle it.
2614 		 */
2615 		mutex_exit(&qlt->intr_lock);
2616 		return (DDI_INTR_UNCLAIMED);
2617 	}
2618 
2619 	/*
2620 	 * XXX: Take care of the MSI case (disable intrs).
2621 	 * It's going to be complicated because of the max-iterations limit:
2622 	 * the HBA will have posted the intr which did not go out on PCI,
2623 	 * but we did not service it either because of max iterations.
2624 	 * Maybe offload the intr to a different thread.
2625 	 */
2626 	intr_loop_count = 0;
2627 
2628 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2629 
2630 intr_again:;
2631 
2632 	/* check for risc pause */
2633 	if (risc_status & BIT_8) {
2634 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
2635 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2636 		    qlt->instance, risc_status);
2637 		(void) snprintf(info, 80, "Risc Pause %08x", risc_status);
2638 		info[79] = 0;
2639 		(void) fct_port_shutdown(qlt->qlt_port,
2640 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2641 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2642 	}
2643 
2644 	/* First check for high performance path */
2645 	intr_type = risc_status & 0xff;
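	/*
	 * 0x1D: ATIO and response queue updates; 0x1C: ATIO queue only;
	 * 0x13: response queue only; 0x12: async event; 0x10/0x11: mailbox
	 * command completion.
	 */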
2646 	if (intr_type == 0x1D) {
2647 		qlt->atio_ndx_from_fw = (uint16_t)
2648 		    REG_RD32(qlt, REG_ATIO_IN_PTR);
2649 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2650 		qlt->resp_ndx_from_fw = risc_status >> 16;
2651 		qlt_handle_atio_queue_update(qlt);
2652 		qlt_handle_resp_queue_update(qlt);
2653 	} else if (intr_type == 0x1C) {
2654 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2655 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2656 		qlt_handle_atio_queue_update(qlt);
2657 	} else if (intr_type == 0x13) {
2658 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2659 		qlt->resp_ndx_from_fw = risc_status >> 16;
2660 		qlt_handle_resp_queue_update(qlt);
2661 	} else if (intr_type == 0x12) {
2662 		uint16_t code = (uint16_t)(risc_status >> 16);
2663 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2664 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2665 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2666 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2667 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2668 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2669 
2670 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2671 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2672 		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2673 		    mbox5, mbox6);
2674 		EL(qlt, "Async event %x mb1=%x mb2=%x, mb3=%x, mb5=%x, mb6=%x",
2675 		    code, mbox1, mbox2, mbox3, mbox5, mbox6);
2676 
2677 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2678 			if (qlt->qlt_link_up) {
2679 				fct_handle_event(qlt->qlt_port,
2680 				    FCT_EVENT_LINK_RESET, 0, 0);
2681 			}
2682 		} else if (code == 0x8012) {
2683 			qlt->qlt_link_up = 0;
2684 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2685 			    0, 0);
2686 		} else if (code == 0x8011) {
2687 			switch (mbox1) {
2688 			case 0: qlt->link_speed = PORT_SPEED_1G;
2689 				break;
2690 			case 1: qlt->link_speed = PORT_SPEED_2G;
2691 				break;
2692 			case 3: qlt->link_speed = PORT_SPEED_4G;
2693 				break;
2694 			case 4: qlt->link_speed = PORT_SPEED_8G;
2695 				break;
2696 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
2697 				break;
2698 			default:
2699 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2700 			}
2701 			qlt->qlt_link_up = 1;
2702 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2703 			    0, 0);
2704 		} else if ((code == 0x8002) || (code == 0x8003) ||
2705 		    (code == 0x8004) || (code == 0x8005)) {
2706 			(void) snprintf(info, 80,
2707 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2708 			    code, mbox1, mbox2, mbox5, mbox6);
2709 			info[79] = 0;
2710 			(void) fct_port_shutdown(qlt->qlt_port,
2711 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2712 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2713 		} else if (code == 0x800F) {
2714 			(void) snprintf(info, 80,
2715 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
2716 			    mbox1, mbox2, mbox3);
2717 
2718 			if (mbox1 != 1) {
2719 				/* issue "verify fw" */
2720 				qlt_verify_fw(qlt);
2721 			}
2722 		} else if (code == 0x8101) {
2723 			(void) snprintf(info, 80,
2724 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2725 			    code, mbox1, mbox2, mbox3);
2726 			info[79] = 0;
2727 
2728 			/* check if "ACK" is required (timeout != 0) */
2729 			if (mbox1 & 0x0f00) {
2730 				caddr_t	req;
2731 
2732 				/*
2733 				 * Ack the request (queue work to do it?)
2734 				 * using a mailbox iocb
2735 				 */
2736 				mutex_enter(&qlt->req_lock);
2737 				req = qlt_get_req_entries(qlt, 1);
2738 				if (req) {
2739 					bzero(req, IOCB_SIZE);
2740 					req[0] = 0x39; req[1] = 1;
2741 					QMEM_WR16(qlt, req+8, 0x101);
2742 					QMEM_WR16(qlt, req+10, mbox1);
2743 					QMEM_WR16(qlt, req+12, mbox2);
2744 					QMEM_WR16(qlt, req+14, mbox3);
2745 					QMEM_WR16(qlt, req+16, mbox4);
2746 					QMEM_WR16(qlt, req+18, mbox5);
2747 					QMEM_WR16(qlt, req+20, mbox6);
2748 					qlt_submit_req_entries(qlt, 1);
2749 				} else {
2750 					(void) snprintf(info, 80,
2751 					    "IDC ACK failed");
2752 					info[79] = 0;
2753 				}
2754 				mutex_exit(&qlt->req_lock);
2755 			}
2756 		}
2757 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2758 		/* Handle mailbox completion */
2759 		mutex_enter(&qlt->mbox_lock);
2760 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2761 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2762 			    " when driver wasn't waiting for it %d",
2763 			    qlt->instance, qlt->mbox_io_state);
2764 		} else {
2765 			for (i = 0; i < MAX_MBOXES; i++) {
2766 				if (qlt->mcp->from_fw_mask &
2767 				    (((uint32_t)1) << i)) {
2768 					qlt->mcp->from_fw[i] =
2769 					    REG_RD16(qlt, REG_MBOX(i));
2770 				}
2771 			}
2772 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2773 		}
2774 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2775 		cv_broadcast(&qlt->mbox_cv);
2776 		mutex_exit(&qlt->mbox_lock);
2777 	} else {
2778 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2779 		    qlt->instance, intr_type);
2780 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2781 	}
2782 
2783 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2784 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2785 	if ((risc_status & BIT_15) &&
2786 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2787 		goto intr_again;
2788 	}
2789 
2790 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2791 
2792 	mutex_exit(&qlt->intr_lock);
2793 	return (DDI_INTR_CLAIMED);
2794 }
2795 
2796 /* **************** NVRAM Functions ********************** */
2797 
2798 fct_status_t
2799 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2800 {
2801 	uint32_t	timer;
2802 
2803 	/* Clear access error flag */
2804 	REG_WR32(qlt, REG_CTRL_STATUS,
2805 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2806 
2807 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2808 
2809 	/* Wait for READ cycle to complete. */
2810 	for (timer = 3000; timer; timer--) {
2811 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2812 			break;
2813 		}
2814 		drv_usecwait(10);
2815 	}
2816 	if (timer == 0) {
2817 		EL(qlt, "flash timeout\n");
2818 		return (QLT_FLASH_TIMEOUT);
2819 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2820 		EL(qlt, "flash access error\n");
2821 		return (QLT_FLASH_ACCESS_ERROR);
2822 	}
2823 
2824 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2825 
2826 	return (QLT_SUCCESS);
2827 }
2828 
2829 fct_status_t
2830 qlt_read_nvram(qlt_state_t *qlt)
2831 {
2832 	uint32_t		index, addr, chksum;
2833 	uint32_t		val, *ptr;
2834 	fct_status_t		ret;
2835 	qlt_nvram_t		*nv;
2836 	uint64_t		empty_node_name = 0;
2837 
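	/*
	 * Pick the flash address of the NVRAM copy for this PCI function;
	 * the offsets differ between 24xx, 25xx and 81xx chips.
	 */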
2838 	if (qlt->qlt_81xx_chip) {
2839 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2840 		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2841 	} else if (qlt->qlt_25xx_chip) {
2842 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2843 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2844 	} else {
2845 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2846 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2847 	}
2848 	mutex_enter(&qlt_global_lock);
2849 
2850 	/* Pause RISC. */
2851 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
2852 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2853 
2854 	/* Get NVRAM data and calculate checksum. */
2855 	ptr = (uint32_t *)qlt->nvram;
2856 	chksum = 0;
2857 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2858 		ret = qlt_read_flash_word(qlt, addr++, &val);
2859 		if (ret != QLT_SUCCESS) {
2860 			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
2861 			mutex_exit(&qlt_global_lock);
2862 			return (ret);
2863 		}
2864 		chksum += val;
2865 		*ptr = LE_32(val);
2866 		ptr++;
2867 	}
2868 
2869 	/* Release RISC Pause */
2870 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
2871 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2872 
2873 	mutex_exit(&qlt_global_lock);
2874 
2875 	/* Sanity check NVRAM Data */
2876 	nv = qlt->nvram;
2877 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2878 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2879 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2880 		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
2881 		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
2882 		    nv->nvram_version[1], nv->nvram_version[0]);
2883 		return (QLT_BAD_NVRAM_DATA);
2884 	}
2885 
2886 	/* If node name is zero, hand craft it from port name */
2887 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2888 		bcopy(nv->port_name, nv->node_name, 8);
2889 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
2890 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
2891 	}
2892 
2893 	return (QLT_SUCCESS);
2894 }
2895 
2896 uint32_t
2897 qlt_sync_atio_queue(qlt_state_t *qlt)
2898 {
2899 	uint32_t total_ent;
2900 
2901 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2902 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2903 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2904 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2905 		    DDI_DMA_SYNC_FORCPU);
2906 	} else {
2907 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2908 		    qlt->atio_ndx_from_fw;
2909 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2910 		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
2911 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2912 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2913 		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
2914 		    DDI_DMA_SYNC_FORCPU);
2915 	}
2916 	return (total_ent);
2917 }
2918 
2919 void
2920 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2921 {
2922 	uint32_t total_ent;
2923 
2924 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2925 		return;
2926 
2927 	total_ent = qlt_sync_atio_queue(qlt);
2928 
2929 	do {
2930 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2931 		    qlt->atio_ndx_to_fw << 6];
2932 		uint32_t ent_cnt;
2933 
2934 		ent_cnt = (uint32_t)(atio[1]);
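		/*
		 * atio[1] is the number of 64-byte entries this IOCB spans.
		 * If not all of them have been synced yet, leave the rest
		 * for the next queue update.
		 */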
2935 		if (ent_cnt > total_ent) {
2936 			break;
2937 		}
2938 		switch ((uint8_t)(atio[0])) {
2939 		case 0x0d:	/* INOT */
2940 			qlt_handle_inot(qlt, atio);
2941 			break;
2942 		case 0x06:	/* ATIO */
2943 			qlt_handle_atio(qlt, atio);
2944 			break;
2945 		default:
2946 			EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2947 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2948 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2949 			break;
2950 		}
2951 		qlt->atio_ndx_to_fw = (uint16_t)(
2952 		    (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2953 		total_ent -= ent_cnt;
2954 	} while (total_ent > 0);
2955 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2956 }
2957 
2958 uint32_t
2959 qlt_sync_resp_queue(qlt_state_t *qlt)
2960 {
2961 	uint32_t total_ent;
2962 
2963 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2964 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2965 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2966 		    RESPONSE_QUEUE_OFFSET
2967 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2968 		    DDI_DMA_SYNC_FORCPU);
2969 	} else {
2970 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2971 		    qlt->resp_ndx_from_fw;
2972 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2973 		    RESPONSE_QUEUE_OFFSET
2974 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2975 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2976 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2977 		    RESPONSE_QUEUE_OFFSET,
2978 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2979 	}
2980 	return (total_ent);
2981 }
2982 
2983 void
2984 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2985 {
2986 	uint32_t total_ent;
2987 	uint8_t c;
2988 
2989 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2990 		return;
2991 
2992 	total_ent = qlt_sync_resp_queue(qlt);
2993 
2994 	do {
2995 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
2996 		uint32_t ent_cnt;
2997 
2998 		ent_cnt = (uint32_t)(resp[1]);
2999 		if (ent_cnt > total_ent) {
3000 			break;
3001 		}
3002 		switch ((uint8_t)(resp[0])) {
3003 		case 0x12:	/* CTIO completion */
3004 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
3005 			break;
3006 		case 0x0e:	/* NACK */
3007 			/* Do Nothing */
3008 			break;
3009 		case 0x1b:	/* Verify FW */
3010 			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
3011 			break;
3012 		case 0x29:	/* CT PassThrough */
3013 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
3014 			break;
3015 		case 0x33:	/* Abort IO IOCB completion */
3016 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
3017 			break;
3018 		case 0x51:	/* PUREX */
3019 			qlt_handle_purex(qlt, (uint8_t *)resp);
3020 			break;
3021 		case 0x52:
3022 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
3023 			break;
3024 		case 0x53:	/* ELS passthrough */
3025 			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
3026 			if (c == 0) {
3027 				qlt_handle_sol_els_completion(qlt,
3028 				    (uint8_t *)resp);
3029 			} else if (c == 3) {
3030 				qlt_handle_unsol_els_abort_completion(qlt,
3031 				    (uint8_t *)resp);
3032 			} else {
3033 				qlt_handle_unsol_els_completion(qlt,
3034 				    (uint8_t *)resp);
3035 			}
3036 			break;
3037 		case 0x54:	/* ABTS received */
3038 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
3039 			break;
3040 		case 0x55:	/* ABTS completion */
3041 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
3042 			break;
3043 		default:
3044 			EL(qlt, "response entry=%xh\n", resp[0]);
3045 			break;
3046 		}
3047 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
3048 		    (RESPONSE_QUEUE_ENTRIES - 1);
3049 		total_ent -= ent_cnt;
3050 	} while (total_ent > 0);
3051 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
3052 }
3053 
3054 fct_status_t
3055 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
3056 				uint16_t *ret_handle)
3057 {
3058 	fct_status_t ret;
3059 	mbox_cmd_t *mcp;
3060 	uint16_t n;
3061 	uint16_t h;
3062 	uint32_t ent_id;
3063 	uint8_t *p;
3064 	int found = 0;
3065 
3066 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
3067 	if (mcp == NULL) {
3068 		return (STMF_ALLOC_FAILURE);
3069 	}
3070 	mcp->to_fw[0] = MBC_GET_ID_LIST;
3071 	mcp->to_fw[8] = 2048 * 8;
3072 	mcp->to_fw[9] = 0;
3073 	mcp->to_fw_mask |= BIT_9 | BIT_8;
3074 	mcp->from_fw_mask |= BIT_1 | BIT_2;
3075 
3076 	ret = qlt_mailbox_command(qlt, mcp);
3077 	if (ret != QLT_SUCCESS) {
3078 		EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
3079 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
3080 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
3081 		    mcp->from_fw[1], mcp->from_fw[2]);
3082 		qlt_free_mailbox_command(qlt, mcp);
3083 		return (ret);
3084 	}
3085 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
3086 	p = mcp->dbuf->db_sglist[0].seg_addr;
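	/*
	 * The returned ID list is an array of 8-byte entries: a 24-bit
	 * port ID followed by a 16-bit handle. Scan it for the given ID
	 * and check for handle conflicts along the way.
	 */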
3087 	for (n = 0; n < mcp->from_fw[1]; n++) {
3088 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
3089 		h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
3090 		if (ent_id == id) {
3091 			found = 1;
3092 			*ret_handle = h;
3093 			if ((cmd_handle != FCT_HANDLE_NONE) &&
3094 			    (cmd_handle != h)) {
3095 				cmn_err(CE_WARN, "login for portid %x came in "
3096 				    "with handle %x, while the portid was "
3097 				    "already using a different handle %x",
3098 				    id, cmd_handle, h);
3099 				qlt_free_mailbox_command(qlt, mcp);
3100 				return (QLT_FAILURE);
3101 			}
3102 			break;
3103 		}
3104 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
3105 			cmn_err(CE_WARN, "login for portid %x came in with "
3106 			    "handle %x, while the handle was already in use "
3107 			    "for portid %x", id, cmd_handle, ent_id);
3108 			qlt_free_mailbox_command(qlt, mcp);
3109 			return (QLT_FAILURE);
3110 		}
3111 		p += 8;
3112 	}
3113 	if (!found) {
3114 		*ret_handle = cmd_handle;
3115 	}
3116 	qlt_free_mailbox_command(qlt, mcp);
3117 	return (FCT_SUCCESS);
3118 }
3119 
3120 /* ARGSUSED */
3121 fct_status_t
3122 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3123 				fct_cmd_t *login)
3124 {
3125 	uint8_t *p;
3126 
3127 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3128 	p[0] = ELS_OP_PLOGI;
3129 	*((uint16_t *)(&p[4])) = 0x2020;
3130 	p[7] = 3;
3131 	p[8] = 0x88;
3132 	p[10] = 8;
3133 	p[13] = 0xff; p[15] = 0x1f;
3134 	p[18] = 7; p[19] = 0xd0;
3135 
3136 	bcopy(port->port_pwwn, p + 20, 8);
3137 	bcopy(port->port_nwwn, p + 28, 8);
3138 
3139 	p[68] = 0x80;
3140 	p[74] = 8;
3141 	p[77] = 0xff;
3142 	p[81] = 1;
3143 
3144 	return (FCT_SUCCESS);
3145 }
3146 
3147 /* ARGSUSED */
3148 fct_status_t
3149 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3150 				fct_cmd_t *login)
3151 {
3152 	return (FCT_SUCCESS);
3153 }
3154 
3155 fct_status_t
3156 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3157     fct_cmd_t *login)
3158 {
3159 	uint16_t h;
3160 	fct_status_t ret;
3161 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3162 
3163 	switch (rp->rp_id) {
3164 	case 0xFFFFFC:	h = 0x7FC; break;
3165 	case 0xFFFFFD:	h = 0x7FD; break;
3166 	case 0xFFFFFE:	h = 0x7FE; break;
3167 	case 0xFFFFFF:	h = 0x7FF; break;
3168 	default:
3169 		ret = qlt_portid_to_handle(qlt, rp->rp_id,
3170 		    login->cmd_rp_handle, &h);
3171 		if (ret != FCT_SUCCESS) {
3172 			EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3173 			return (ret);
3174 		}
3175 	}
3176 
3177 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
3178 		ret = qlt_fill_plogi_req(port, rp, login);
3179 	} else {
3180 		ret = qlt_fill_plogi_resp(port, rp, login);
3181 	}
3182 
3183 	if (ret != FCT_SUCCESS) {
3184 		EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3185 		return (ret);
3186 	}
3187 
3188 	if (h == FCT_HANDLE_NONE)
3189 		return (FCT_SUCCESS);
3190 
3191 	if (rp->rp_handle == FCT_HANDLE_NONE) {
3192 		rp->rp_handle = h;
3193 		return (FCT_SUCCESS);
3194 	}
3195 
3196 	if (rp->rp_handle == h)
3197 		return (FCT_SUCCESS);
3198 
3199 	EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3200 	return (FCT_FAILURE);
3201 }
3202 /* invoked in single thread */
3203 fct_status_t
3204 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3205 {
3206 	uint8_t *req;
3207 	qlt_state_t *qlt;
3208 	clock_t	dereg_req_timer;
3209 	fct_status_t ret;
3210 
3211 	qlt = (qlt_state_t *)port->port_fca_private;
3212 
3213 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3214 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
3215 		return (FCT_SUCCESS);
3216 	ASSERT(qlt->rp_id_in_dereg == 0);
3217 
3218 	mutex_enter(&qlt->preq_lock);
3219 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3220 	if (req == NULL) {
3221 		mutex_exit(&qlt->preq_lock);
3222 		return (FCT_BUSY);
3223 	}
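	/*
	 * Build a logout IOCB (0x52) for this handle and wait for the
	 * interrupt path to signal the completion through rp_dereg_cv.
	 */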
3224 	bzero(req, IOCB_SIZE);
3225 	req[0] = 0x52; req[1] = 1;
3226 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
3227 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3228 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3229 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3230 	qlt->rp_id_in_dereg = rp->rp_id;
3231 	qlt_submit_preq_entries(qlt, 1);
3232 
3233 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3234 	if (cv_timedwait(&qlt->rp_dereg_cv,
3235 	    &qlt->preq_lock, dereg_req_timer) > 0) {
3236 		ret = qlt->rp_dereg_status;
3237 	} else {
3238 		ret = FCT_BUSY;
3239 	}
3240 	qlt->rp_dereg_status = 0;
3241 	qlt->rp_id_in_dereg = 0;
3242 	mutex_exit(&qlt->preq_lock);
3243 	return (ret);
3244 }
3245 
3246 /*
3247  * Pass received ELS up to framework.
3248  */
3249 static void
3250 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3251 {
3252 	fct_cmd_t		*cmd;
3253 	fct_els_t		*els;
3254 	qlt_cmd_t		*qcmd;
3255 	uint32_t		payload_size;
3256 	uint32_t		remote_portid;
3257 	uint8_t			*pldptr, *bndrptr;
3258 	int			i, off;
3259 	uint16_t		iocb_flags;
3260 	char			info[160];
3261 
3262 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3263 	    ((uint32_t)(resp[0x1A])) << 16;
3264 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3265 	if (iocb_flags & BIT_15) {
3266 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3267 	} else {
3268 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3269 	}
3270 
3271 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
3272 		EL(qlt, "payload is too large = %xh\n", payload_size);
3273 		cmn_err(CE_WARN, "handle_purex: payload is too large");
3274 		goto cmd_null;
3275 	}
3276 
3277 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3278 	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3279 	if (cmd == NULL) {
3280 		EL(qlt, "fct_alloc cmd==NULL\n");
3281 cmd_null:;
3282 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
3283 		    "allocate space for fct_cmd", (void *)qlt);
3284 		info[159] = 0;
3285 		(void) fct_port_shutdown(qlt->qlt_port,
3286 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3287 		return;
3288 	}
3289 
3290 	cmd->cmd_port = qlt->qlt_port;
3291 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3292 	if (cmd->cmd_rp_handle == 0xFFFF) {
3293 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3294 	}
3295 
3296 	els = (fct_els_t *)cmd->cmd_specific;
3297 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3298 	els->els_req_size = (uint16_t)payload_size;
3299 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3300 	    GET_STRUCT_SIZE(qlt_cmd_t));
3301 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3302 	cmd->cmd_rportid = remote_portid;
3303 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3304 	    ((uint32_t)(resp[0x16])) << 16;
3305 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3306 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3307 	pldptr = &resp[0x2C];
3308 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
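	/*
	 * The payload may span multiple response queue entries. Wrap around
	 * at the end of the queue and skip the 4-byte header at the start
	 * of each continuation entry.
	 */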
3309 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3310 		/* Take care of fw's swapping of payload */
3311 		els->els_req_payload[i] = pldptr[3];
3312 		els->els_req_payload[i+1] = pldptr[2];
3313 		els->els_req_payload[i+2] = pldptr[1];
3314 		els->els_req_payload[i+3] = pldptr[0];
3315 		pldptr += 4;
3316 		if (pldptr == bndrptr)
3317 			pldptr = (uint8_t *)qlt->resp_ptr;
3318 		off += 4;
3319 		if (off >= IOCB_SIZE) {
3320 			off = 4;
3321 			pldptr += 4;
3322 		}
3323 	}
3324 	fct_post_rcvd_cmd(cmd, 0);
3325 }
3326 
3327 fct_status_t
3328 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3329 {
3330 	qlt_state_t	*qlt;
3331 	char		info[160];
3332 
3333 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3334 
3335 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3336 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3337 			EL(qlt, "ioflags = %xh\n", ioflags);
3338 			goto fatal_panic;
3339 		} else {
3340 			return (qlt_send_status(qlt, cmd));
3341 		}
3342 	}
3343 
3344 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3345 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3346 			goto fatal_panic;
3347 		} else {
3348 			return (qlt_send_els_response(qlt, cmd));
3349 		}
3350 	}
3351 
3352 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3353 		cmd->cmd_handle = 0;
3354 	}
3355 
3356 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3357 		return (qlt_send_abts_response(qlt, cmd, 0));
3358 	} else {
3359 		EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3360 		ASSERT(0);
3361 		return (FCT_FAILURE);
3362 	}
3363 
3364 fatal_panic:;
3365 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
3366 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3367 	    ioflags);
3368 	info[159] = 0;
3369 	(void) fct_port_shutdown(qlt->qlt_port,
3370 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3371 	return (FCT_FAILURE);
3372 }
3373 
3374 /* ARGSUSED */
3375 fct_status_t
3376 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
3377 {
3378 	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
3379 	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3380 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3381 	uint8_t *req;
3382 	uint16_t flags;
3383 
3384 	if (dbuf->db_handle == 0)
3385 		qcmd->dbuf = dbuf;
3386 	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3387 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
3388 		flags = (uint16_t)(flags | 2);
3389 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
3390 	} else {
3391 		flags = (uint16_t)(flags | 1);
3392 	}
3393 
3394 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
3395 		flags = (uint16_t)(flags | BIT_15);
3396 
3397 	mutex_enter(&qlt->req_lock);
3398 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3399 	if (req == NULL) {
3400 		mutex_exit(&qlt->req_lock);
3401 		return (FCT_BUSY);
3402 	}
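	/*
	 * Build a CTIO type 7 (0x12) entry describing this data buffer and
	 * hand it to the firmware; the completion comes back through
	 * qlt_handle_ctio_completion().
	 */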
3403 	bzero(req, IOCB_SIZE);
3404 	req[0] = 0x12; req[1] = 0x1;
3405 	req[2] = dbuf->db_handle;
3406 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
3407 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
3408 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
3409 	req[12] = 1;
3410 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
3411 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
3412 	QMEM_WR16(qlt, req+0x1A, flags);
3413 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
3414 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
3415 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
3416 	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
3417 	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
3418 	qlt_submit_req_entries(qlt, 1);
3419 	mutex_exit(&qlt->req_lock);
3420 
3421 	return (STMF_SUCCESS);
3422 }
3423 
3424 /*
3425  * We must construct a proper FCP_RSP_IU now. Here we only focus on
3426  * the handling of FCP_SNS_INFO. If there are protocol failures (FCP_RSP_INFO),
3427  * they should have been caught before we got here.
3428  */
3429 fct_status_t
3430 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3431 {
3432 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
3433 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
3434 	qlt_dmem_bctl_t *bctl;
3435 	uint32_t size;
3436 	uint8_t *req, *fcp_rsp_iu;
3437 	uint8_t *psd, sensbuf[24];		/* sense data */
3438 	uint16_t flags;
3439 	uint16_t scsi_status;
3440 	int use_mode2;
3441 	int ndx;
3442 
3443 	/*
3444 	 * Take the fast path for a non-CHECK-CONDITION status
3445 	 */
3446 	if (task->task_scsi_status != STATUS_CHECK) {
3447 		/*
3448 		 * We will use mode1
3449 		 */
3450 		flags = (uint16_t)(BIT_6 | BIT_15 |
3451 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3452 		scsi_status = (uint16_t)task->task_scsi_status;
3453 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3454 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3455 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3456 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3457 		}
3458 		qcmd->dbuf_rsp_iu = NULL;
3459 
3460 		/*
3461 		 * Fill out the CTIO type 7 IOCB
3462 		 */
3463 		mutex_enter(&qlt->req_lock);
3464 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3465 		if (req == NULL) {
3466 			mutex_exit(&qlt->req_lock);
3467 			return (FCT_BUSY);
3468 		}
3469 
3470 		/*
3471 		 * Common fields
3472 		 */
3473 		bzero(req, IOCB_SIZE);
3474 		req[0x00] = 0x12;
3475 		req[0x01] = 0x1;
3476 		req[0x02] = BIT_7;	/* indicate if it's a pure status req */
3477 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3478 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3479 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3480 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3481 
3482 		/*
3483 		 * Mode-specific fields
3484 		 */
3485 		QMEM_WR16(qlt, req + 0x1A, flags);
3486 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3487 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3488 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3489 
3490 		/*
3491 		 * Trigger FW to send SCSI status out
3492 		 */
3493 		qlt_submit_req_entries(qlt, 1);
3494 		mutex_exit(&qlt->req_lock);
3495 		return (STMF_SUCCESS);
3496 	}
3497 
3498 	ASSERT(task->task_scsi_status == STATUS_CHECK);
3499 	/*
3500 	 * Decide which SCSI status mode should be used
3501 	 */
3502 	use_mode2 = (task->task_sense_length > 24);
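	/*
	 * Mode 1 carries the SCSI status and up to 24 bytes of sense data
	 * inline in the CTIO itself; mode 2 builds a complete FCP_RSP IU in a
	 * separately allocated DMA buffer and hands the firmware a pointer to
	 * it, which is needed when the sense data is longer than 24 bytes.
	 */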
3503 
3504 	/*
3505 	 * Prepare required information per the SCSI status mode
3506 	 */
3507 	flags = (uint16_t)(BIT_15 |
3508 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3509 	if (use_mode2) {
3510 		flags = (uint16_t)(flags | BIT_7);
3511 
3512 		size = task->task_sense_length;
3513 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3514 		    task->task_sense_length, &size, 0);
3515 		if (!qcmd->dbuf_rsp_iu) {
3516 			return (FCT_ALLOC_FAILURE);
3517 		}
3518 
3519 		/*
3520 		 * Start to construct FCP_RSP IU
3521 		 */
3522 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3523 		bzero(fcp_rsp_iu, 24);
3524 
3525 		/*
3526 		 * FCP_RSP IU flags, byte10
3527 		 */
3528 		fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3529 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3530 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3531 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3532 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3533 		}
3534 
3535 		/*
3536 		 * SCSI status code, byte11
3537 		 */
3538 		fcp_rsp_iu[11] = task->task_scsi_status;
3539 
3540 		/*
3541 		 * FCP_RESID (Overrun or underrun)
3542 		 */
3543 		fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3544 		fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3545 		fcp_rsp_iu[14] = (uint8_t)((task->task_resid >>  8) & 0xFF);
3546 		fcp_rsp_iu[15] = (uint8_t)((task->task_resid >>  0) & 0xFF);
3547 
3548 		/*
3549 		 * FCP_SNS_LEN
3550 		 */
3551 		fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3552 		    0xFF);
3553 		fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3554 		    0xFF);
3555 
3556 		/*
3557 		 * FCP_RSP_LEN: left at zero, since no FCP_RSP_INFO is included
3558 		 */
3562 		/*
3563 		 * FCP_SNS_INFO
3564 		 */
3565 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3566 		    task->task_sense_length);
3567 
3568 		/*
3569 		 * Ensure dma data consistency
3570 		 */
3571 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3572 	} else {
3573 		flags = (uint16_t)(flags | BIT_6);
3574 
3575 		scsi_status = (uint16_t)task->task_scsi_status;
3576 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3577 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3578 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3579 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3580 		}
3581 		if (task->task_sense_length) {
3582 			scsi_status = (uint16_t)(scsi_status | BIT_9);
3583 		}
3584 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3585 		qcmd->dbuf_rsp_iu = NULL;
3586 	}
3587 
3588 	/*
3589 	 * Fill out the CTIO type 7 IOCB
3590 	 */
3591 	mutex_enter(&qlt->req_lock);
3592 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3593 	if (req == NULL) {
3594 		mutex_exit(&qlt->req_lock);
3595 		if (use_mode2) {
3596 			qlt_dmem_free(cmd->cmd_port->port_fds,
3597 			    qcmd->dbuf_rsp_iu);
3598 			qcmd->dbuf_rsp_iu = NULL;
3599 		}
3600 		return (FCT_BUSY);
3601 	}
3602 
3603 	/*
3604 	 * Common fields
3605 	 */
3606 	bzero(req, IOCB_SIZE);
3607 	req[0x00] = 0x12;
3608 	req[0x01] = 0x1;
3609 	req[0x02] = BIT_7;	/* to indicate if it's a pure status req */
3610 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3611 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3612 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3613 	if (use_mode2) {
3614 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3615 	}
3616 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3617 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3618 
3619 	/*
3620 	 * Mode-specific fields
3621 	 */
3622 	if (!use_mode2) {
3623 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3624 	}
3625 	QMEM_WR16(qlt, req + 0x1A, flags);
3626 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3627 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3628 	if (use_mode2) {
3629 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3630 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3631 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3632 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3633 	} else {
3634 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3635 		psd = req+0x28;
3636 
3637 		/*
3638 		 * Data in sense buf is always big-endian, data in IOCB
3639 		 * should always be little-endian, so we must do swapping.
3640 		 */
3641 		size = ((task->task_sense_length + 3) & (~3));
3642 		for (ndx = 0; ndx < size; ndx += 4) {
3643 			psd[ndx + 0] = sensbuf[ndx + 3];
3644 			psd[ndx + 1] = sensbuf[ndx + 2];
3645 			psd[ndx + 2] = sensbuf[ndx + 1];
3646 			psd[ndx + 3] = sensbuf[ndx + 0];
3647 		}
3648 	}
3649 
3650 	/*
3651 	 * Trigger FW to send SCSI status out
3652 	 */
3653 	qlt_submit_req_entries(qlt, 1);
3654 	mutex_exit(&qlt->req_lock);
3655 
3656 	return (STMF_SUCCESS);
3657 }
3658 
3659 fct_status_t
3660 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3661 {
3662 	qlt_cmd_t	*qcmd;
3663 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3664 	uint8_t *req, *addr;
3665 	qlt_dmem_bctl_t *bctl;
3666 	uint32_t minsize;
3667 	uint8_t elsop, req1f;
3668 
3669 	addr = els->els_resp_payload;
3670 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3671 
3672 	minsize = els->els_resp_size;
3673 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3674 	if (qcmd->dbuf == NULL)
3675 		return (FCT_BUSY);
3676 
3677 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3678 
3679 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3680 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3681 
3682 	if (addr[0] == 0x02) {	/* ACC */
3683 		req1f = BIT_5;
3684 	} else {
3685 		req1f = BIT_6;
3686 	}
3687 	elsop = els->els_req_payload[0];
3688 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3689 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3690 		req1f = (uint8_t)(req1f | BIT_4);
3691 	}
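	/*
	 * Build an ELS Pass-Through IOCB (entry type 0x53) to send the
	 * response. req1f selects whether the payload is an accept (BIT_5,
	 * when the first payload byte is LS_ACC) or a reject (BIT_6); BIT_4
	 * is added for PRLI/PRLO/TPRLO/LOGO, presumably so the firmware can
	 * update its login state (an assumption based on the opcodes
	 * involved).
	 */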
3692 
3693 	mutex_enter(&qlt->req_lock);
3694 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3695 	if (req == NULL) {
3696 		mutex_exit(&qlt->req_lock);
3697 		qlt_dmem_free(NULL, qcmd->dbuf);
3698 		qcmd->dbuf = NULL;
3699 		return (FCT_BUSY);
3700 	}
3701 	bzero(req, IOCB_SIZE);
3702 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3703 	req[0x16] = elsop; req[0x1f] = req1f;
3704 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3705 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3706 	QMEM_WR16(qlt, (&req[0xC]), 1);
3707 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3708 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3709 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3710 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
3711 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
3712 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
3713 	}
3714 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3715 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3716 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3717 	qlt_submit_req_entries(qlt, 1);
3718 	mutex_exit(&qlt->req_lock);
3719 
3720 	return (FCT_SUCCESS);
3721 }
3722 
3723 fct_status_t
3724 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3725 {
3726 	qlt_abts_cmd_t *qcmd;
3727 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3728 	uint8_t *req;
3729 	uint32_t lportid;
3730 	uint32_t fctl;
3731 	int i;
3732 
3733 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3734 
3735 	mutex_enter(&qlt->req_lock);
3736 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3737 	if (req == NULL) {
3738 		mutex_exit(&qlt->req_lock);
3739 		return (FCT_BUSY);
3740 	}
3741 	bcopy(qcmd->buf, req, IOCB_SIZE);
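	/*
	 * The saved inbound ABTS IOCB is used as a template for the response
	 * (entry type 0x55). The local and remote port IDs are swapped and
	 * the F_CTL bits adjusted below so that the BA_ACC (or BA_RJT when
	 * terminating the exchange) travels back toward the ABTS originator.
	 */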
3742 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3743 	fctl = QMEM_RD32(qlt, req+0x1C);
3744 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3745 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3746 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3747 	if (cmd->cmd_rp)
3748 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3749 	else
3750 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3751 	if (terminate) {
3752 		QMEM_WR16(qlt, (&req[0xC]), 1);
3753 	}
3754 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3755 	req[0x17] = abts->abts_resp_rctl;
3756 	QMEM_WR32(qlt, req+0x18, lportid);
3757 	QMEM_WR32(qlt, req+0x1C, fctl);
3758 	req[0x23]++;
3759 	for (i = 0; i < 12; i += 4) {
3760 		/* Take care of firmware's LE requirement */
3761 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3762 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3763 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3764 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3765 	}
3766 	qlt_submit_req_entries(qlt, 1);
3767 	mutex_exit(&qlt->req_lock);
3768 
3769 	return (FCT_SUCCESS);
3770 }
3771 
3772 static void
3773 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3774 {
3775 	int i;
3776 	uint32_t d;
3777 	caddr_t req;
3778 	/* Just put it on the request queue */
3779 	mutex_enter(&qlt->req_lock);
3780 	req = qlt_get_req_entries(qlt, 1);
3781 	if (req == NULL) {
3782 		mutex_exit(&qlt->req_lock);
3783 		/* XXX handle this */
3784 		return;
3785 	}
3786 	for (i = 0; i < 16; i++) {
3787 		d = QMEM_RD32(qlt, inot);
3788 		inot += 4;
3789 		QMEM_WR32(qlt, req, d);
3790 		req += 4;
3791 	}
3792 	req -= 64;
3793 	req[0] = 0x0e;
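	/*
	 * Rewriting the entry type to 0x0e turns the copied immediate notify
	 * into a Notify Acknowledge IOCB, which is how the firmware is told
	 * that the notification has been handled.
	 */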
3794 	qlt_submit_req_entries(qlt, 1);
3795 	mutex_exit(&qlt->req_lock);
3796 }
3797 
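/*
 * Maps the task attribute code from the FCP_CMND IU (the low 3 bits of the
 * attribute byte) to the STMF task_flags attribute values; see its use in
 * qlt_handle_atio() below.
 */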
3798 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3799 static void
3800 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3801 {
3802 	fct_cmd_t	*cmd;
3803 	scsi_task_t	*task;
3804 	qlt_cmd_t	*qcmd;
3805 	uint32_t	rportid, fw_xchg_addr;
3806 	uint8_t		*p, *q, *req, tm;
3807 	uint16_t	cdb_size, flags, oxid;
3808 	char		info[160];
3809 
3810 	/*
3811 	 * If either a bidirectional xfer is requested or there is an extended
3812 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3813 	 */
3814 	cdb_size = 16;
3815 	if (atio[0x20 + 11] >= 3) {
3816 		uint8_t b = atio[0x20 + 11];
3817 		uint16_t b1;
3818 		if ((b & 3) == 3) {
3819 			EL(qlt, "bidirectional I/O not supported\n");
3820 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3821 			    "received, dropping the cmd as bidirectional "
3822 			    "transfers are not yet supported", qlt->instance);
3823 			/* XXX abort the I/O */
3824 			return;
3825 		}
3826 		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3827 		/*
3828 		 * Verify that we have enough entries. Without an additional CDB,
3829 		 * everything fits nicely within the same 64 bytes, so the
3830 		 * additional CDB size is essentially the number of additional
3831 		 * bytes we need.
3832 		 */
3833 		b1 = (uint16_t)b;
3834 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3835 			EL(qlt, "extended cdb received\n");
3836 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3837 			    "cdb (cdb size = %d bytes), however the firmware "
3838 			    "did not DMA the entire FCP_CMD IU, entry count "
3839 			    "is %d while it should be %d", qlt->instance,
3840 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3841 			/* XXX abort the I/O */
3842 			return;
3843 		}
3844 	}
3845 
3846 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3847 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3848 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3849 	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3850 
3851 	if (fw_xchg_addr == 0xFFFFFFFF) {
3852 		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3853 		cmd = NULL;
3854 	} else {
3855 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3856 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3857 		if (cmd == NULL) {
3858 			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3859 		}
3860 	}
3861 	if (cmd == NULL) {
3862 		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3863 		/* Abort this IO */
3864 		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3865 
3866 		mutex_enter(&qlt->req_lock);
3867 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3868 		if (req == NULL) {
3869 			mutex_exit(&qlt->req_lock);
3870 
3871 			(void) snprintf(info, 160,
3872 			    "qlt_handle_atio: qlt-%p, can't "
3873 			    "allocate space for scsi_task", (void *)qlt);
3874 			info[159] = 0;
3875 			(void) fct_port_shutdown(qlt->qlt_port,
3876 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3877 			return;
3878 		}
3879 		bzero(req, IOCB_SIZE);
3880 		req[0] = 0x12; req[1] = 0x1;
3881 		QMEM_WR32(qlt, req+4, 0);
3882 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3883 		    rportid));
3884 		QMEM_WR16(qlt, req+10, 60);
3885 		QMEM_WR32(qlt, req+0x10, rportid);
3886 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3887 		QMEM_WR16(qlt, req+0x1A, flags);
3888 		QMEM_WR16(qlt, req+0x20, oxid);
3889 		qlt_submit_req_entries(qlt, 1);
3890 		mutex_exit(&qlt->req_lock);
3891 
3892 		return;
3893 	}
3894 
3895 	task = (scsi_task_t *)cmd->cmd_specific;
3896 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3897 	qcmd->fw_xchg_addr = fw_xchg_addr;
3898 	qcmd->param.atio_byte3 = atio[3];
3899 	cmd->cmd_oxid = oxid;
3900 	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3901 	    atio[8+19]);
3902 	cmd->cmd_rportid = rportid;
3903 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3904 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3905 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3906 	/* Don't do a 64 byte read as this is IOMMU */
3907 	q = atio+0x28;
3908 	/* XXX Handle fcp_cntl */
3909 	task->task_cmd_seq_no = (uint32_t)(*q++);
3910 	task->task_csn_size = 8;
3911 	task->task_flags = qlt_task_flags[(*q++) & 7];
3912 	tm = *q++;
3913 	if (tm) {
3914 		if (tm & BIT_1)
3915 			task->task_mgmt_function = TM_ABORT_TASK_SET;
3916 		else if (tm & BIT_2)
3917 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
3918 		else if (tm & BIT_4)
3919 			task->task_mgmt_function = TM_LUN_RESET;
3920 		else if (tm & BIT_5)
3921 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
3922 		else if (tm & BIT_6)
3923 			task->task_mgmt_function = TM_CLEAR_ACA;
3924 		else
3925 			task->task_mgmt_function = TM_ABORT_TASK;
3926 	}
3927 	task->task_max_nbufs = STMF_BUFS_MAX;
3928 	task->task_csn_size = 8;
3929 	task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
3930 	p = task->task_cdb;
3931 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3932 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3933 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3934 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
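	/*
	 * Any CDB bytes beyond the first 16, and the 4-byte expected transfer
	 * length that follows them, continue into the next ATIO queue
	 * entries; wrap back to the start of the ATIO queue if we run off
	 * its end.
	 */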
3935 	if (cdb_size > 16) {
3936 		uint16_t xtra = (uint16_t)(cdb_size - 16);
3937 		uint16_t i;
3938 		uint8_t cb[4];
3939 
3940 		while (xtra) {
3941 			*p++ = *q++;
3942 			xtra--;
3943 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3944 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3945 				q = (uint8_t *)qlt->queue_mem_ptr +
3946 				    ATIO_QUEUE_OFFSET;
3947 			}
3948 		}
3949 		for (i = 0; i < 4; i++) {
3950 			cb[i] = *q++;
3951 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3952 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3953 				q = (uint8_t *)qlt->queue_mem_ptr +
3954 				    ATIO_QUEUE_OFFSET;
3955 			}
3956 		}
3957 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
3958 		    (((uint32_t)cb[1]) << 16) |
3959 		    (((uint32_t)cb[2]) << 8) | cb[3];
3960 	} else {
3961 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
3962 		    (((uint32_t)q[1]) << 16) |
3963 		    (((uint32_t)q[2]) << 8) | q[3];
3964 	}
3965 	fct_post_rcvd_cmd(cmd, 0);
3966 }
3967 
3968 static void
3969 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
3970 {
3971 	uint16_t status;
3972 	uint32_t portid;
3973 	uint32_t subcode1, subcode2;
3974 
3975 	status = QMEM_RD16(qlt, rsp+8);
3976 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
3977 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
3978 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
3979 
3980 	mutex_enter(&qlt->preq_lock);
3981 	if (portid != qlt->rp_id_in_dereg) {
3982 		int instance = ddi_get_instance(qlt->dip);
3983 
3984 		EL(qlt, "implicit logout received portid = %xh\n", portid);
3985 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
3986 		    " received when driver wasn't waiting for it",
3987 		    instance, portid);
3988 		mutex_exit(&qlt->preq_lock);
3989 		return;
3990 	}
3991 
3992 	if (status != 0) {
3993 		EL(qlt, "implicit logout completed for %xh with status %xh, "
3994 		    "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
3995 		    subcode2);
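		/*
		 * Status 0x31 with subcode 0x0a is treated as success here,
		 * presumably meaning the port was already logged out (an
		 * assumption based on this check).
		 */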
3996 		if (status == 0x31 && subcode1 == 0x0a) {
3997 			qlt->rp_dereg_status = FCT_SUCCESS;
3998 		} else {
3999 			EL(qlt, "implicit logout portid=%xh, status=%xh, "
4000 			    "subcode1=%xh, subcode2=%xh\n", portid, status,
4001 			    subcode1, subcode2);
4002 			qlt->rp_dereg_status =
4003 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
4004 		}
4005 	} else {
4006 		qlt->rp_dereg_status = FCT_SUCCESS;
4007 	}
4008 	cv_signal(&qlt->rp_dereg_cv);
4009 	mutex_exit(&qlt->preq_lock);
4010 }
4011 
4012 /*
4013  * Note that when an ELS is aborted, the regular or aborted completion
4014  * (if any) gets posted before the abort IOCB comes back on the response queue.
4015  */
4016 static void
4017 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4018 {
4019 	char		info[160];
4020 	fct_cmd_t	*cmd;
4021 	qlt_cmd_t	*qcmd;
4022 	uint32_t	hndl;
4023 	uint32_t	subcode1, subcode2;
4024 	uint16_t	status;
4025 
4026 	hndl = QMEM_RD32(qlt, rsp+4);
4027 	status = QMEM_RD16(qlt, rsp+8);
4028 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4029 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4030 
4031 	if (!CMD_HANDLE_VALID(hndl)) {
4032 		EL(qlt, "handle = %xh\n", hndl);
4033 		/*
4034 		 * This cannot happen for unsol els completion. This can
4035 		 * only happen when abort for an unsol els completes.
4036 		 * This condition indicates a firmware bug.
4037 		 */
4038 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
4039 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4040 		    hndl, status, subcode1, subcode2, (void *)rsp);
4041 		info[159] = 0;
4042 		(void) fct_port_shutdown(qlt->qlt_port,
4043 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4044 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4045 		return;
4046 	}
4047 
4048 	if (status == 5) {
4049 		/*
4050 		 * When an unsolicited ELS is aborted, the abort is done
4051 		 * by an ELSPT IOCB with abort control. This is the aborted IOCB
4052 		 * and not the abortee. We will do the cleanup when the
4053 		 * IOCB which caused the abort returns.
4054 		 */
4055 		EL(qlt, "status = %xh\n", status);
4056 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
4057 		return;
4058 	}
4059 
4060 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4061 	if (cmd == NULL) {
4062 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4063 		/*
4064 		 * Now why would this happen ???
4065 		 */
4066 		(void) snprintf(info, 160,
4067 		    "qlt_handle_unsol_els_completion: can not "
4068 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4069 		    (void *)rsp);
4070 		info[159] = 0;
4071 		(void) fct_port_shutdown(qlt->qlt_port,
4072 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4073 
4074 		return;
4075 	}
4076 
4077 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4078 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4079 	if (qcmd->flags & QLT_CMD_ABORTING) {
4080 		/*
4081 		 * This is the same case as "if (status == 5)" above. The
4082 		 * only difference is that in this case the firmware actually
4083 		 * finished sending the response. So the abort attempt will
4084 		 * come back with status ?. We will handle it there.
4085 		 */
4086 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
4087 		    "abort it");
4088 		return;
4089 	}
4090 
4091 	if (qcmd->dbuf != NULL) {
4092 		qlt_dmem_free(NULL, qcmd->dbuf);
4093 		qcmd->dbuf = NULL;
4094 	}
4095 
4096 	if (status == 0) {
4097 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4098 	} else {
4099 		fct_send_response_done(cmd,
4100 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4101 	}
4102 }
4103 
4104 static void
4105 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4106 {
4107 	char		info[160];
4108 	fct_cmd_t	*cmd;
4109 	qlt_cmd_t	*qcmd;
4110 	uint32_t	hndl;
4111 	uint32_t	subcode1, subcode2;
4112 	uint16_t	status;
4113 
4114 	hndl = QMEM_RD32(qlt, rsp+4);
4115 	status = QMEM_RD16(qlt, rsp+8);
4116 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4117 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4118 
4119 	if (!CMD_HANDLE_VALID(hndl)) {
4120 		EL(qlt, "handle = %xh\n", hndl);
4121 		ASSERT(hndl == 0);
4122 		/*
4123 		 * Someone has requested to abort it, but no one is waiting for
4124 		 * this completion.
4125 		 */
4126 		if ((status != 0) && (status != 8)) {
4127 			EL(qlt, "status = %xh\n", status);
4128 			/*
4129 			 * There could be exchange resource leakage, so
4130 			 * throw HBA fatal error event now
4131 			 */
4132 			(void) snprintf(info, 160,
4133 			    "qlt_handle_unsol_els_abort_completion: "
4134 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4135 			    hndl, status, subcode1, subcode2, (void *)rsp);
4136 			info[159] = 0;
4137 			(void) fct_port_shutdown(qlt->qlt_port,
4138 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4139 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4140 			return;
4141 		}
4142 
4143 		return;
4144 	}
4145 
4146 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4147 	if (cmd == NULL) {
4148 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4149 		/*
4150 		 * Why would this happen ??
4151 		 */
4152 		(void) snprintf(info, 160,
4153 		    "qlt_handle_unsol_els_abort_completion: can not get "
4154 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4155 		    (void *)rsp);
4156 		info[159] = 0;
4157 		(void) fct_port_shutdown(qlt->qlt_port,
4158 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4159 
4160 		return;
4161 	}
4162 
4163 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4164 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4165 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4166 
4167 	if (qcmd->dbuf != NULL) {
4168 		qlt_dmem_free(NULL, qcmd->dbuf);
4169 		qcmd->dbuf = NULL;
4170 	}
4171 
4172 	if (status == 0) {
4173 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4174 	} else if (status == 8) {
4175 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4176 	} else {
4177 		fct_cmd_fca_aborted(cmd,
4178 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4179 	}
4180 }
4181 
4182 static void
4183 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4184 {
4185 	char		info[160];
4186 	fct_cmd_t	*cmd;
4187 	fct_els_t	*els;
4188 	qlt_cmd_t	*qcmd;
4189 	uint32_t	hndl;
4190 	uint32_t	subcode1, subcode2;
4191 	uint16_t	status;
4192 
4193 	hndl = QMEM_RD32(qlt, rsp+4);
4194 	status = QMEM_RD16(qlt, rsp+8);
4195 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4196 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4197 
4198 	if (!CMD_HANDLE_VALID(hndl)) {
4199 		EL(qlt, "handle = %xh\n", hndl);
4200 		/*
4201 		 * This cannot happen for sol els completion.
4202 		 */
4203 		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
4204 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4205 		    hndl, status, subcode1, subcode2, (void *)rsp);
4206 		info[159] = 0;
4207 		(void) fct_port_shutdown(qlt->qlt_port,
4208 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4209 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4210 		return;
4211 	}
4212 
4213 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4214 	if (cmd == NULL) {
4215 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4216 		(void) snprintf(info, 160,
4217 		    "qlt_handle_sol_els_completion: can not "
4218 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4219 		    (void *)rsp);
4220 		info[159] = 0;
4221 		(void) fct_port_shutdown(qlt->qlt_port,
4222 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4223 
4224 		return;
4225 	}
4226 
4227 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
4228 	els = (fct_els_t *)cmd->cmd_specific;
4229 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4230 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
4231 
4232 	if (qcmd->flags & QLT_CMD_ABORTING) {
4233 		/*
4234 		 * We will handle it when the ABORT IO IOCB returns.
4235 		 */
4236 		return;
4237 	}
4238 
4239 	if (qcmd->dbuf != NULL) {
4240 		if (status == 0) {
4241 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4242 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4243 			    qcmd->param.resp_offset,
4244 			    els->els_resp_payload, els->els_resp_size);
4245 		}
4246 		qlt_dmem_free(NULL, qcmd->dbuf);
4247 		qcmd->dbuf = NULL;
4248 	}
4249 
4250 	if (status == 0) {
4251 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4252 	} else {
4253 		fct_send_cmd_done(cmd,
4254 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4255 	}
4256 }
4257 
4258 static void
4259 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
4260 {
4261 	fct_cmd_t	*cmd;
4262 	fct_sol_ct_t	*ct;
4263 	qlt_cmd_t	*qcmd;
4264 	uint32_t	 hndl;
4265 	uint16_t	 status;
4266 	char		 info[160];
4267 
4268 	hndl = QMEM_RD32(qlt, rsp+4);
4269 	status = QMEM_RD16(qlt, rsp+8);
4270 
4271 	if (!CMD_HANDLE_VALID(hndl)) {
4272 		EL(qlt, "handle = %xh\n", hndl);
4273 		/*
4274 		 * Solicited commands will always have a valid handle.
4275 		 */
4276 		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
4277 		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4278 		info[159] = 0;
4279 		(void) fct_port_shutdown(qlt->qlt_port,
4280 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4281 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4282 		return;
4283 	}
4284 
4285 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4286 	if (cmd == NULL) {
4287 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4288 		(void) snprintf(info, 160,
4289 		    "qlt_handle_ct_completion: cannot find "
4290 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4291 		    (void *)rsp);
4292 		info[159] = 0;
4293 		(void) fct_port_shutdown(qlt->qlt_port,
4294 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4295 
4296 		return;
4297 	}
4298 
4299 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4300 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4301 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
4302 
4303 	if (qcmd->flags & QLT_CMD_ABORTING) {
4304 		/*
4305 		 * We will handle it when the ABORT IO IOCB returns.
4306 		 */
4307 		return;
4308 	}
4309 
4310 	ASSERT(qcmd->dbuf);
4311 	if (status == 0) {
4312 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4313 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4314 		    qcmd->param.resp_offset,
4315 		    ct->ct_resp_payload, ct->ct_resp_size);
4316 	}
4317 	qlt_dmem_free(NULL, qcmd->dbuf);
4318 	qcmd->dbuf = NULL;
4319 
4320 	if (status == 0) {
4321 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4322 	} else {
4323 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4324 	}
4325 }
4326 
4327 static void
4328 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
4329 {
4330 	fct_cmd_t	*cmd;
4331 	scsi_task_t	*task;
4332 	qlt_cmd_t	*qcmd;
4333 	stmf_data_buf_t	*dbuf;
4334 	fct_status_t	fc_st;
4335 	uint32_t	iof = 0;
4336 	uint32_t	hndl;
4337 	uint16_t	status;
4338 	uint16_t	flags;
4339 	uint8_t		abort_req;
4340 	uint8_t		n;
4341 	char		info[160];
4342 
4343 	/* XXX: Check validity of the IOCB by checking 4th byte. */
4344 	hndl = QMEM_RD32(qlt, rsp+4);
4345 	status = QMEM_RD16(qlt, rsp+8);
4346 	flags = QMEM_RD16(qlt, rsp+0x1a);
4347 	n = rsp[2];
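	/*
	 * rsp[2] carries back the dbuf handle that was placed in byte 2 of
	 * the CTIO request. BIT_7 there (set for status-only CTIOs) or an
	 * abort request means this completion is not for a data transfer.
	 */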
4348 
4349 	if (!CMD_HANDLE_VALID(hndl)) {
4350 		EL(qlt, "handle = %xh\n", hndl);
4351 		ASSERT(hndl == 0);
4352 		/*
4353 		 * Someone has requested to abort it, but no one is waiting for
4354 		 * this completion.
4355 		 */
4356 		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
4357 		    (void *)rsp);
4358 		if ((status != 1) && (status != 2)) {
4359 			EL(qlt, "status = %xh\n", status);
4360 			/*
4361 			 * There could be exchange resource leakage, so
4362 			 * throw HBA fatal error event now
4363 			 */
4364 			(void) snprintf(info, 160,
4365 			    "qlt_handle_ctio_completion: hndl-"
4366 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4367 			info[159] = 0;
4368 			(void) fct_port_shutdown(qlt->qlt_port,
4369 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4370 
4371 		}
4372 
4373 		return;
4374 	}
4375 
4376 	if (flags & BIT_14) {
4377 		abort_req = 1;
4378 		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
4379 		    (void *)rsp);
4380 	} else {
4381 		abort_req = 0;
4382 	}
4383 
4384 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4385 	if (cmd == NULL) {
4386 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4387 		(void) snprintf(info, 160,
4388 		    "qlt_handle_ctio_completion: cannot find "
4389 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4390 		    (void *)rsp);
4391 		info[159] = 0;
4392 		(void) fct_port_shutdown(qlt->qlt_port,
4393 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4394 
4395 		return;
4396 	}
4397 
4398 	task = (scsi_task_t *)cmd->cmd_specific;
4399 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4400 	if (qcmd->dbuf_rsp_iu) {
4401 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
4402 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
4403 		qcmd->dbuf_rsp_iu = NULL;
4404 	}
4405 
4406 	if ((status == 1) || (status == 2)) {
4407 		if (abort_req) {
4408 			fc_st = FCT_ABORT_SUCCESS;
4409 			iof = FCT_IOF_FCA_DONE;
4410 		} else {
4411 			fc_st = FCT_SUCCESS;
4412 			if (flags & BIT_15) {
4413 				iof = FCT_IOF_FCA_DONE;
4414 			}
4415 		}
4416 	} else {
4417 		EL(qlt, "status = %xh\n", status);
4418 		if ((status == 8) && abort_req) {
4419 			fc_st = FCT_NOT_FOUND;
4420 			iof = FCT_IOF_FCA_DONE;
4421 		} else {
4422 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
4423 		}
4424 	}
4425 	dbuf = NULL;
4426 	if (((n & BIT_7) == 0) && (!abort_req)) {
4427 		/* A completion of data xfer */
4428 		if (n == 0) {
4429 			dbuf = qcmd->dbuf;
4430 		} else {
4431 			dbuf = stmf_handle_to_buf(task, n);
4432 		}
4433 
4434 		ASSERT(dbuf != NULL);
4435 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
4436 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
4437 		if (flags & BIT_15) {
4438 			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
4439 			    DB_STATUS_GOOD_SENT);
4440 		}
4441 
4442 		dbuf->db_xfer_status = fc_st;
4443 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
4444 		return;
4445 	}
4446 	if (!abort_req) {
4447 		/*
4448 		 * This was just a pure status xfer.
4449 		 */
4450 		fct_send_response_done(cmd, fc_st, iof);
4451 		return;
4452 	}
4453 
4454 	fct_cmd_fca_aborted(cmd, fc_st, iof);
4455 }
4456 
4457 static void
4458 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4459 {
4460 	char		info[80];
4461 	fct_cmd_t	*cmd;
4462 	qlt_cmd_t	*qcmd;
4463 	uint32_t	h;
4464 	uint16_t	status;
4465 
4466 	h = QMEM_RD32(qlt, rsp+4);
4467 	status = QMEM_RD16(qlt, rsp+8);
4468 
4469 	if (!CMD_HANDLE_VALID(h)) {
4470 		EL(qlt, "handle = %xh\n", h);
4471 		/*
4472 		 * Solicited commands always have a valid handle.
4473 		 */
4474 		(void) snprintf(info, 80,
4475 		    "qlt_handle_sol_abort_completion: hndl-"
4476 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
4477 		info[79] = 0;
4478 		(void) fct_port_shutdown(qlt->qlt_port,
4479 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4480 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4481 		return;
4482 	}
4483 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4484 	if (cmd == NULL) {
4485 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
4486 		/*
4487 		 * What happened to the cmd ??
4488 		 */
4489 		(void) snprintf(info, 80,
4490 		    "qlt_handle_sol_abort_completion: cannot "
4491 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4492 		    (void *)rsp);
4493 		info[79] = 0;
4494 		(void) fct_port_shutdown(qlt->qlt_port,
4495 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4496 
4497 		return;
4498 	}
4499 
4500 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4501 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
4502 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4503 	if (qcmd->dbuf != NULL) {
4504 		qlt_dmem_free(NULL, qcmd->dbuf);
4505 		qcmd->dbuf = NULL;
4506 	}
4507 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4508 	if (status == 0) {
4509 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4510 	} else if (status == 0x31) {
4511 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4512 	} else {
4513 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4514 	}
4515 }
4516 
4517 static void
4518 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
4519 {
4520 	qlt_abts_cmd_t	*qcmd;
4521 	fct_cmd_t	*cmd;
4522 	uint32_t	remote_portid;
4523 	char		info[160];
4524 
4525 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4526 	    ((uint32_t)(resp[0x1A])) << 16;
4527 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4528 	    sizeof (qlt_abts_cmd_t), 0);
4529 	if (cmd == NULL) {
4530 		EL(qlt, "fct_alloc cmd==NULL\n");
4531 		(void) snprintf(info, 160,
4532 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
4533 		    "allocate space for fct_cmd", (void *)qlt);
4534 		info[159] = 0;
4535 		(void) fct_port_shutdown(qlt->qlt_port,
4536 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4537 		return;
4538 	}
4539 
4540 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
4541 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
4542 	bcopy(resp, qcmd->buf, IOCB_SIZE);
4543 	cmd->cmd_port = qlt->qlt_port;
4544 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4545 	if (cmd->cmd_rp_handle == 0xFFFF)
4546 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4547 
4548 	cmd->cmd_rportid = remote_portid;
4549 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4550 	    ((uint32_t)(resp[0x16])) << 16;
4551 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4552 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
4553 	fct_post_rcvd_cmd(cmd, 0);
4554 }
4555 
4556 static void
4557 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4558 {
4559 	uint16_t status;
4560 	char	info[80];
4561 
4562 	status = QMEM_RD16(qlt, resp+8);
4563 
4564 	if ((status == 0) || (status == 5)) {
4565 		return;
4566 	}
4567 	EL(qlt, "status = %xh\n", status);
4568 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4569 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4570 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4571 	info[79] = 0;
4572 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4573 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4574 }
4575 
4576 #ifdef	DEBUG
4577 uint32_t qlt_drop_abort_counter = 0;
4578 #endif
4579 
4580 fct_status_t
4581 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4582 {
4583 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4584 
4585 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4586 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4587 		return (FCT_NOT_FOUND);
4588 	}
4589 
4590 #ifdef DEBUG
4591 	if (qlt_drop_abort_counter > 0) {
4592 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4593 			return (FCT_SUCCESS);
4594 	}
4595 #endif
4596 
4597 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4598 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4599 	}
4600 
4601 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4602 		cmd->cmd_handle = 0;
4603 	}
4604 
4605 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4606 		return (qlt_send_abts_response(qlt, cmd, 1));
4607 	}
4608 
4609 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4610 		return (qlt_abort_purex(qlt, cmd));
4611 	}
4612 
4613 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4614 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4615 		return (qlt_abort_sol_cmd(qlt, cmd));
4616 	}
4617 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4618 
4619 	ASSERT(0);
4620 	return (FCT_FAILURE);
4621 }
4622 
4623 fct_status_t
4624 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4625 {
4626 	uint8_t *req;
4627 	qlt_cmd_t *qcmd;
4628 
4629 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4630 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4631 	EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
4632 
4633 	mutex_enter(&qlt->req_lock);
4634 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4635 	if (req == NULL) {
4636 		mutex_exit(&qlt->req_lock);
4637 
4638 		return (FCT_BUSY);
4639 	}
4640 	bzero(req, IOCB_SIZE);
4641 	req[0] = 0x33; req[1] = 1;
4642 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4643 	if (cmd->cmd_rp) {
4644 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4645 	} else {
4646 		QMEM_WR16(qlt, req+8, 0xFFFF);
4647 	}
4648 
4649 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4650 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4651 	qlt_submit_req_entries(qlt, 1);
4652 	mutex_exit(&qlt->req_lock);
4653 
4654 	return (FCT_SUCCESS);
4655 }
4656 
4657 fct_status_t
4658 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4659 {
4660 	uint8_t *req;
4661 	qlt_cmd_t *qcmd;
4662 	fct_els_t *els;
4663 	uint8_t elsop, req1f;
4664 
4665 	els = (fct_els_t *)cmd->cmd_specific;
4666 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4667 	elsop = els->els_req_payload[0];
4668 	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
4669 	    elsop);
4670 	req1f = 0x60;	/* Terminate xchg */
4671 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4672 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4673 		req1f = (uint8_t)(req1f | BIT_4);
4674 	}
4675 
4676 	mutex_enter(&qlt->req_lock);
4677 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4678 	if (req == NULL) {
4679 		mutex_exit(&qlt->req_lock);
4680 
4681 		return (FCT_BUSY);
4682 	}
4683 
4684 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4685 	bzero(req, IOCB_SIZE);
4686 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4687 	req[0x16] = elsop; req[0x1f] = req1f;
4688 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4689 	if (cmd->cmd_rp) {
4690 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4691 		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
4692 	} else {
4693 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4694 		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
4695 	}
4696 
4697 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4698 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4699 	qlt_submit_req_entries(qlt, 1);
4700 	mutex_exit(&qlt->req_lock);
4701 
4702 	return (FCT_SUCCESS);
4703 }
4704 
4705 fct_status_t
4706 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4707 {
4708 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4709 	uint8_t *req;
4710 	uint16_t flags;
4711 
4712 	flags = (uint16_t)(BIT_14 |
4713 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
4714 	EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4715 
4716 	mutex_enter(&qlt->req_lock);
4717 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4718 	if (req == NULL) {
4719 		mutex_exit(&qlt->req_lock);
4720 
4721 		return (FCT_BUSY);
4722 	}
4723 
4724 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4725 	bzero(req, IOCB_SIZE);
4726 	req[0] = 0x12; req[1] = 0x1;
4727 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4728 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4729 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4730 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4731 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4732 	QMEM_WR16(qlt, req+0x1A, flags);
4733 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4734 	qlt_submit_req_entries(qlt, 1);
4735 	mutex_exit(&qlt->req_lock);
4736 
4737 	return (FCT_SUCCESS);
4738 }
4739 
4740 fct_status_t
4741 qlt_send_cmd(fct_cmd_t *cmd)
4742 {
4743 	qlt_state_t *qlt;
4744 
4745 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4746 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4747 		return (qlt_send_els(qlt, cmd));
4748 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4749 		return (qlt_send_ct(qlt, cmd));
4750 	}
4751 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4752 
4753 	ASSERT(0);
4754 	return (FCT_FAILURE);
4755 }
4756 
4757 fct_status_t
4758 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4759 {
4760 	uint8_t *req;
4761 	fct_els_t *els;
4762 	qlt_cmd_t *qcmd;
4763 	stmf_data_buf_t *buf;
4764 	qlt_dmem_bctl_t *bctl;
4765 	uint32_t sz, minsz;
4766 
4767 	els = (fct_els_t *)cmd->cmd_specific;
4768 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4769 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4770 	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
4771 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4772 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4773 	if (buf == NULL) {
4774 		return (FCT_BUSY);
4775 	}
4776 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4777 
4778 	qcmd->dbuf = buf;
4779 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4780 	    els->els_req_size);
4781 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
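	/*
	 * A single DMA buffer holds both the ELS request payload (at offset
	 * 0) and the response (at resp_offset, rounded up to an 8-byte
	 * boundary). The ELS Pass-Through IOCB below carries one DSD for
	 * each.
	 */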
4782 
4783 	mutex_enter(&qlt->req_lock);
4784 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4785 	if (req == NULL) {
4786 		qlt_dmem_free(NULL, buf);
4787 		mutex_exit(&qlt->req_lock);
4788 		return (FCT_BUSY);
4789 	}
4790 	bzero(req, IOCB_SIZE);
4791 	req[0] = 0x53; req[1] = 1;
4792 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4793 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4794 	QMEM_WR16(qlt, (&req[0xC]), 1);
4795 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4796 	QMEM_WR16(qlt, (&req[0x14]), 1);
4797 	req[0x16] = els->els_req_payload[0];
4798 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4799 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
4800 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
4801 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
4802 	}
4803 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4804 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4805 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4806 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4807 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4808 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4809 	    qcmd->param.resp_offset));
4810 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4811 	qlt_submit_req_entries(qlt, 1);
4812 	mutex_exit(&qlt->req_lock);
4813 
4814 	return (FCT_SUCCESS);
4815 }
4816 
4817 fct_status_t
4818 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4819 {
4820 	uint8_t *req;
4821 	fct_sol_ct_t *ct;
4822 	qlt_cmd_t *qcmd;
4823 	stmf_data_buf_t *buf;
4824 	qlt_dmem_bctl_t *bctl;
4825 	uint32_t sz, minsz;
4826 
4827 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4828 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4829 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4830 	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
4831 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4832 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4833 	if (buf == NULL) {
4834 		return (FCT_BUSY);
4835 	}
4836 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4837 
4838 	qcmd->dbuf = buf;
4839 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4840 	    ct->ct_req_size);
4841 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4842 
4843 	mutex_enter(&qlt->req_lock);
4844 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4845 	if (req == NULL) {
4846 		qlt_dmem_free(NULL, buf);
4847 		mutex_exit(&qlt->req_lock);
4848 		return (FCT_BUSY);
4849 	}
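	/*
	 * Entry type 0x29 (CT pass-through); as in the ELS case, one DSD
	 * points at the CT request and a second at the response area within
	 * the same DMA buffer.
	 */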
4850 	bzero(req, IOCB_SIZE);
4851 	req[0] = 0x29; req[1] = 1;
4852 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4853 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4854 	QMEM_WR16(qlt, (&req[0xC]), 1);
4855 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4856 	QMEM_WR16(qlt, (&req[0x14]), 1);
4857 
4858 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4859 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4860 
4861 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4862 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4863 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4864 	    qcmd->param.resp_offset));		/* RESPONSE DSD */
4865 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4866 
4867 	qlt_submit_req_entries(qlt, 1);
4868 	mutex_exit(&qlt->req_lock);
4869 
4870 	return (FCT_SUCCESS);
4871 }
4872 
4873 
4874 /*
4875  * All QLT_FIRMWARE_* errors are mainly handled in this function.
4876  * It cannot be called in interrupt context.
4877  *
4878  * FWDUMP's purpose is to serve the ioctl interface, so we use qlt_ioctl_flags
4879  * and qlt_ioctl_lock.
4880  */
4881 static fct_status_t
4882 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4883 {
4884 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4885 	int		i;
4886 	int		retries, n;
4887 	uint_t		size_left;
4888 	char		c = ' ';
4889 	uint32_t	addr, endaddr, words_to_read;
4890 	caddr_t		buf;
4891 	fct_status_t	ret;
4892 
4893 	mutex_enter(&qlt->qlt_ioctl_lock);
4894 	/*
4895 	 * To make sure that there's no outstanding dumping task
4896 	 */
4897 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4898 		mutex_exit(&qlt->qlt_ioctl_lock);
4899 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4900 		    qlt->qlt_ioctl_flags);
4901 		EL(qlt, "outstanding\n");
4902 		return (FCT_FAILURE);
4903 	}
4904 
4905 	/*
4906 	 * To make sure not to overwrite existing dump
4907 	 */
4908 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4909 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4910 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4911 		 * If we already have one dump, but it wasn't triggered by the user
4912 		 * and the user hasn't fetched it, we shouldn't dump again.
4913 		 * and the user hasn't fetched it, we shouldn't dump again.
4914 		 */
4915 		mutex_exit(&qlt->qlt_ioctl_lock);
4916 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4917 		    qlt->qlt_ioctl_flags);
4918 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
4919 		    "is one already outstanding.", qlt->instance);
4920 		return (FCT_FAILURE);
4921 	}
4922 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
4923 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
4924 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
4925 	} else {
4926 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
4927 	}
4928 	mutex_exit(&qlt->qlt_ioctl_lock);
4929 
4930 	size_left = QLT_FWDUMP_BUFSIZE;
4931 	if (!qlt->qlt_fwdump_buf) {
4932 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
4933 		/*
4934 		 * This is the only place where we allocate the dump buffer. After
4935 		 * it's allocated, we will use it until the port is detached.
4936 		 */
4937 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
4938 	}
4939 
4940 	/*
4941 	 * Start to dump firmware
4942 	 */
4943 	buf = (caddr_t)qlt->qlt_fwdump_buf;
4944 
4945 	/*
4946 	 * Print the ISP firmware revision number and attributes information,
4947 	 * then read the RISC to Host Status register.
4948 	 */
4949 	n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
4950 	    "Attributes %04x\n\nR2H Status Register\n%08x",
4951 	    qlt->fw_major, qlt->fw_minor,
4952 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
4953 	buf += n; size_left -= n;
4954 
4955 	/*
4956 	 * Before pausing the RISC, make sure no mailbox can execute
4957 	 */
4958 	mutex_enter(&qlt->mbox_lock);
4959 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
4960 		/*
4961 		 * Wait to grab the mailboxes
4962 		 */
4963 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
4964 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
4965 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
4966 			    ddi_get_lbolt() + drv_usectohz(1000000));
4967 			if (retries > 5) {
4968 				mutex_exit(&qlt->mbox_lock);
4969 				EL(qlt, "can't drain out mailbox commands\n");
4970 				goto dump_fail;
4971 			}
4972 		}
4973 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
4974 		cv_broadcast(&qlt->mbox_cv);
4975 	}
4976 	mutex_exit(&qlt->mbox_lock);
4977 
4978 	/*
4979 	 * Pause the RISC processor
4980 	 */
4981 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4982 
4983 	/*
4984 	 * Wait for the RISC processor to pause
4985 	 */
4986 	for (i = 0; i < 200; i++) {
4987 		if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
4988 			break;
4989 		}
4990 		drv_usecwait(1000);
4991 	}
4992 	if (i == 200) {
4993 		EL(qlt, "can't pause\n");
4994 		return (FCT_FAILURE);
4995 	}
4996 
4997 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
4998 		goto over_25xx_specific_dump;
4999 	}
5000 	n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
5001 	buf += n; size_left -= n;
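	/*
	 * Throughout the rest of this dump, a write to register 0x54 selects
	 * which internal register bank shows up in the window that
	 * qlt_fwdump_dump_regs() then reads (an assumption based on how the
	 * pairs of writes and reads are used here).
	 */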
5002 	REG_WR32(qlt, 0x54, 0x7000);
5003 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5004 	buf += n; size_left -= n;
5005 	REG_WR32(qlt, 0x54, 0x7010);
5006 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5007 	buf += n; size_left -= n;
5008 	REG_WR32(qlt, 0x54, 0x7C00);
5009 
5010 	n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
5011 	buf += n; size_left -= n;
5012 	REG_WR32(qlt, 0xC0, 0x1);
5013 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
5014 	buf += n; size_left -= n;
5015 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
5016 	buf += n; size_left -= n;
5017 	REG_WR32(qlt, 0xC0, 0x0);
5018 
5019 over_25xx_specific_dump:;
5020 	n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
5021 	buf += n; size_left -= n;
5022 	/*
5023 	 * Capture data from 32 registers
5024 	 */
5025 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
5026 	buf += n; size_left -= n;
5027 
5028 	/*
5029 	 * Disable interrupts
5030 	 */
5031 	REG_WR32(qlt, 0xc, 0);
5032 
5033 	/*
5034 	 * Shadow registers
5035 	 */
5036 	n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
5037 	buf += n; size_left -= n;
5038 
5039 	REG_WR32(qlt, 0x54, 0xF70);
5040 	addr = 0xb0000000;
5041 	for (i = 0; i < 0xb; i++) {
5042 		if ((!qlt->qlt_25xx_chip) &&
5043 		    (!qlt->qlt_81xx_chip) &&
5044 		    (i >= 7)) {
5045 			break;
5046 		}
5047 		if (i && ((i & 7) == 0)) {
5048 			n = (int)snprintf(buf, size_left, "\n");
5049 			buf += n; size_left -= n;
5050 		}
5051 		REG_WR32(qlt, 0xF0, addr);
5052 		n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
5053 		buf += n; size_left -= n;
5054 		addr += 0x100000;
5055 	}
5056 
5057 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5058 		REG_WR32(qlt, 0x54, 0x10);
5059 		n = (int)snprintf(buf, size_left,
5060 		    "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
5061 		buf += n; size_left -= n;
5062 	}
5063 
5064 	/*
5065 	 * Mailbox registers
5066 	 */
5067 	n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
5068 	buf += n; size_left -= n;
5069 	for (i = 0; i < 32; i += 2) {
5070 		if ((i + 2) & 15) {
5071 			c = ' ';
5072 		} else {
5073 			c = '\n';
5074 		}
5075 		n = (int)snprintf(buf, size_left, "%04x %04x%c",
5076 		    REG_RD16(qlt, 0x80 + (i << 1)),
5077 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
5078 		buf += n; size_left -= n;
5079 	}
5080 
5081 	/*
5082 	 * Transfer sequence registers
5083 	 */
5084 	n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
5085 	buf += n; size_left -= n;
5086 
5087 	REG_WR32(qlt, 0x54, 0xBF00);
5088 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5089 	buf += n; size_left -= n;
5090 	REG_WR32(qlt, 0x54, 0xBF10);
5091 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5092 	buf += n; size_left -= n;
5093 	REG_WR32(qlt, 0x54, 0xBF20);
5094 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5095 	buf += n; size_left -= n;
5096 	REG_WR32(qlt, 0x54, 0xBF30);
5097 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5098 	buf += n; size_left -= n;
5099 	REG_WR32(qlt, 0x54, 0xBF40);
5100 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5101 	buf += n; size_left -= n;
5102 	REG_WR32(qlt, 0x54, 0xBF50);
5103 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5104 	buf += n; size_left -= n;
5105 	REG_WR32(qlt, 0x54, 0xBF60);
5106 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5107 	buf += n; size_left -= n;
5108 	REG_WR32(qlt, 0x54, 0xBF70);
5109 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5110 	buf += n; size_left -= n;
5111 	n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5112 	buf += n; size_left -= n;
5113 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5114 		REG_WR32(qlt, 0x54, 0xBFC0);
5115 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5116 		buf += n; size_left -= n;
5117 		REG_WR32(qlt, 0x54, 0xBFD0);
5118 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5119 		buf += n; size_left -= n;
5120 	}
5121 	REG_WR32(qlt, 0x54, 0xBFE0);
5122 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5123 	buf += n; size_left -= n;
5124 	n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5125 	buf += n; size_left -= n;
5126 	REG_WR32(qlt, 0x54, 0xBFF0);
5127 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5128 	buf += n; size_left -= n;
5129 
5130 	/*
5131 	 * Receive sequence registers
5132 	 */
5133 	n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5134 	buf += n; size_left -= n;
5135 	REG_WR32(qlt, 0x54, 0xFF00);
5136 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5137 	buf += n; size_left -= n;
5138 	REG_WR32(qlt, 0x54, 0xFF10);
5139 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5140 	buf += n; size_left -= n;
5141 	REG_WR32(qlt, 0x54, 0xFF20);
5142 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5143 	buf += n; size_left -= n;
5144 	REG_WR32(qlt, 0x54, 0xFF30);
5145 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5146 	buf += n; size_left -= n;
5147 	REG_WR32(qlt, 0x54, 0xFF40);
5148 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5149 	buf += n; size_left -= n;
5150 	REG_WR32(qlt, 0x54, 0xFF50);
5151 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5152 	buf += n; size_left -= n;
5153 	REG_WR32(qlt, 0x54, 0xFF60);
5154 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5155 	buf += n; size_left -= n;
5156 	REG_WR32(qlt, 0x54, 0xFF70);
5157 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5158 	buf += n; size_left -= n;
5159 	n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5160 	buf += n; size_left -= n;
5161 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5162 		REG_WR32(qlt, 0x54, 0xFFC0);
5163 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5164 		buf += n; size_left -= n;
5165 	}
5166 	REG_WR32(qlt, 0x54, 0xFFD0);
5167 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5168 	buf += n; size_left -= n;
5169 	n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5170 	buf += n; size_left -= n;
5171 	REG_WR32(qlt, 0x54, 0xFFE0);
5172 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5173 	buf += n; size_left -= n;
5174 	n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5175 	buf += n; size_left -= n;
5176 	REG_WR32(qlt, 0x54, 0xFFF0);
5177 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5178 	buf += n; size_left -= n;
5179 
5180 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5181 		goto over_aseq_regs;
5182 
5183 	/*
5184 	 * Auxiliary sequencer registers
5185 	 */
5186 	n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5187 	buf += n; size_left -= n;
5188 	REG_WR32(qlt, 0x54, 0xB000);
5189 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5190 	buf += n; size_left -= n;
5191 	REG_WR32(qlt, 0x54, 0xB010);
5192 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5193 	buf += n; size_left -= n;
5194 	REG_WR32(qlt, 0x54, 0xB020);
5195 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5196 	buf += n; size_left -= n;
5197 	REG_WR32(qlt, 0x54, 0xB030);
5198 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5199 	buf += n; size_left -= n;
5200 	REG_WR32(qlt, 0x54, 0xB040);
5201 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5202 	buf += n; size_left -= n;
5203 	REG_WR32(qlt, 0x54, 0xB050);
5204 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5205 	buf += n; size_left -= n;
5206 	REG_WR32(qlt, 0x54, 0xB060);
5207 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5208 	buf += n; size_left -= n;
5209 	REG_WR32(qlt, 0x54, 0xB070);
5210 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5211 	buf += n; size_left -= n;
5212 	n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5213 	buf += n; size_left -= n;
5214 	REG_WR32(qlt, 0x54, 0xB0C0);
5215 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5216 	buf += n; size_left -= n;
5217 	REG_WR32(qlt, 0x54, 0xB0D0);
5218 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5219 	buf += n; size_left -= n;
5220 	n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5221 	buf += n; size_left -= n;
5222 	REG_WR32(qlt, 0x54, 0xB0E0);
5223 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5224 	buf += n; size_left -= n;
5225 	n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5226 	buf += n; size_left -= n;
5227 	REG_WR32(qlt, 0x54, 0xB0F0);
5228 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5229 	buf += n; size_left -= n;
5230 
5231 over_aseq_regs:;
5232 
5233 	/*
5234 	 * Command DMA registers
5235 	 */
5236 	n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5237 	buf += n; size_left -= n;
5238 	REG_WR32(qlt, 0x54, 0x7100);
5239 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5240 	buf += n; size_left -= n;
5241 
5242 	/*
5243 	 * Queues
5244 	 */
5245 	n = (int)snprintf(buf, size_left,
5246 	    "\nRequest0 Queue DMA Channel registers\n");
5247 	buf += n; size_left -= n;
5248 	REG_WR32(qlt, 0x54, 0x7200);
5249 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5250 	buf += n; size_left -= n;
5251 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5252 	buf += n; size_left -= n;
5253 
5254 	n = (int)snprintf(buf, size_left,
5255 	    "\n\nResponse0 Queue DMA Channel registers\n");
5256 	buf += n; size_left -= n;
5257 	REG_WR32(qlt, 0x54, 0x7300);
5258 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5259 	buf += n; size_left -= n;
5260 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5261 	buf += n; size_left -= n;
5262 
5263 	n = (int)snprintf(buf, size_left,
5264 	    "\n\nRequest1 Queue DMA Channel registers\n");
5265 	buf += n; size_left -= n;
5266 	REG_WR32(qlt, 0x54, 0x7400);
5267 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5268 	buf += n; size_left -= n;
5269 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5270 	buf += n; size_left -= n;
5271 
5272 	/*
5273 	 * Transmit DMA registers
5274 	 */
5275 	n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5276 	buf += n; size_left -= n;
5277 	REG_WR32(qlt, 0x54, 0x7600);
5278 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5279 	buf += n; size_left -= n;
5280 	REG_WR32(qlt, 0x54, 0x7610);
5281 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5282 	buf += n; size_left -= n;
5283 	n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5284 	buf += n; size_left -= n;
5285 	REG_WR32(qlt, 0x54, 0x7620);
5286 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5287 	buf += n; size_left -= n;
5288 	REG_WR32(qlt, 0x54, 0x7630);
5289 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5290 	buf += n; size_left -= n;
5291 	n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5292 	buf += n; size_left -= n;
5293 	REG_WR32(qlt, 0x54, 0x7640);
5294 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5295 	buf += n; size_left -= n;
5296 	REG_WR32(qlt, 0x54, 0x7650);
5297 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5298 	buf += n; size_left -= n;
5299 	n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5300 	buf += n; size_left -= n;
5301 	REG_WR32(qlt, 0x54, 0x7660);
5302 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5303 	buf += n; size_left -= n;
5304 	REG_WR32(qlt, 0x54, 0x7670);
5305 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5306 	buf += n; size_left -= n;
5307 	n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5308 	buf += n; size_left -= n;
5309 	REG_WR32(qlt, 0x54, 0x7680);
5310 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5311 	buf += n; size_left -= n;
5312 	REG_WR32(qlt, 0x54, 0x7690);
5313 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5314 	buf += n; size_left -= n;
5315 	n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5316 	buf += n; size_left -= n;
5317 	REG_WR32(qlt, 0x54, 0x76A0);
5318 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5319 	buf += n; size_left -= n;
5320 
5321 	/*
5322 	 * Receive DMA registers
5323 	 */
5324 	n = (int)snprintf(buf, size_left,
5325 	    "\nRCV Thread 0 Data DMA registers\n");
5326 	buf += n; size_left -= n;
5327 	REG_WR32(qlt, 0x54, 0x7700);
5328 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5329 	buf += n; size_left -= n;
5330 	REG_WR32(qlt, 0x54, 0x7710);
5331 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5332 	buf += n; size_left -= n;
5333 	n = (int)snprintf(buf, size_left,
5334 	    "\nRCV Thread 1 Data DMA registers\n");
5335 	buf += n; size_left -= n;
5336 	REG_WR32(qlt, 0x54, 0x7720);
5337 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5338 	buf += n; size_left -= n;
5339 	REG_WR32(qlt, 0x54, 0x7730);
5340 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5341 	buf += n; size_left -= n;
5342 
5343 	/*
5344 	 * RISC registers
5345 	 */
5346 	n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5347 	buf += n; size_left -= n;
5348 	REG_WR32(qlt, 0x54, 0x0F00);
5349 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5350 	buf += n; size_left -= n;
5351 	REG_WR32(qlt, 0x54, 0x0F10);
5352 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5353 	buf += n; size_left -= n;
5354 	REG_WR32(qlt, 0x54, 0x0F20);
5355 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5356 	buf += n; size_left -= n;
5357 	REG_WR32(qlt, 0x54, 0x0F30);
5358 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5359 	buf += n; size_left -= n;
5360 	REG_WR32(qlt, 0x54, 0x0F40);
5361 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5362 	buf += n; size_left -= n;
5363 	REG_WR32(qlt, 0x54, 0x0F50);
5364 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5365 	buf += n; size_left -= n;
5366 	REG_WR32(qlt, 0x54, 0x0F60);
5367 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5368 	buf += n; size_left -= n;
5369 	REG_WR32(qlt, 0x54, 0x0F70);
5370 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5371 	buf += n; size_left -= n;
5372 
5373 	/*
5374 	 * Local memory controller registers
5375 	 */
5376 	n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5377 	buf += n; size_left -= n;
5378 	REG_WR32(qlt, 0x54, 0x3000);
5379 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5380 	buf += n; size_left -= n;
5381 	REG_WR32(qlt, 0x54, 0x3010);
5382 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5383 	buf += n; size_left -= n;
5384 	REG_WR32(qlt, 0x54, 0x3020);
5385 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5386 	buf += n; size_left -= n;
5387 	REG_WR32(qlt, 0x54, 0x3030);
5388 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5389 	buf += n; size_left -= n;
5390 	REG_WR32(qlt, 0x54, 0x3040);
5391 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5392 	buf += n; size_left -= n;
5393 	REG_WR32(qlt, 0x54, 0x3050);
5394 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5395 	buf += n; size_left -= n;
5396 	REG_WR32(qlt, 0x54, 0x3060);
5397 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5398 	buf += n; size_left -= n;
5399 
5400 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5401 		REG_WR32(qlt, 0x54, 0x3070);
5402 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5403 		buf += n; size_left -= n;
5404 	}
5405 
5406 	/*
5407 	 * Fibre protocol module registers
5408 	 */
5409 	n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5410 	buf += n; size_left -= n;
5411 	REG_WR32(qlt, 0x54, 0x4000);
5412 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5413 	buf += n; size_left -= n;
5414 	REG_WR32(qlt, 0x54, 0x4010);
5415 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5416 	buf += n; size_left -= n;
5417 	REG_WR32(qlt, 0x54, 0x4020);
5418 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5419 	buf += n; size_left -= n;
5420 	REG_WR32(qlt, 0x54, 0x4030);
5421 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5422 	buf += n; size_left -= n;
5423 	REG_WR32(qlt, 0x54, 0x4040);
5424 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5425 	buf += n; size_left -= n;
5426 	REG_WR32(qlt, 0x54, 0x4050);
5427 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5428 	buf += n; size_left -= n;
5429 	REG_WR32(qlt, 0x54, 0x4060);
5430 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5431 	buf += n; size_left -= n;
5432 	REG_WR32(qlt, 0x54, 0x4070);
5433 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5434 	buf += n; size_left -= n;
5435 	REG_WR32(qlt, 0x54, 0x4080);
5436 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5437 	buf += n; size_left -= n;
5438 	REG_WR32(qlt, 0x54, 0x4090);
5439 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5440 	buf += n; size_left -= n;
5441 	REG_WR32(qlt, 0x54, 0x40A0);
5442 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5443 	buf += n; size_left -= n;
5444 	REG_WR32(qlt, 0x54, 0x40B0);
5445 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5446 	buf += n; size_left -= n;
5447 	if (qlt->qlt_81xx_chip) {
5448 		REG_WR32(qlt, 0x54, 0x40C0);
5449 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5450 		buf += n; size_left -= n;
5451 		REG_WR32(qlt, 0x54, 0x40D0);
5452 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5453 		buf += n; size_left -= n;
5454 	}
5455 
5456 	/*
5457 	 * Fibre buffer registers
5458 	 */
5459 	n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5460 	buf += n; size_left -= n;
5461 	REG_WR32(qlt, 0x54, 0x6000);
5462 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5463 	buf += n; size_left -= n;
5464 	REG_WR32(qlt, 0x54, 0x6010);
5465 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5466 	buf += n; size_left -= n;
5467 	REG_WR32(qlt, 0x54, 0x6020);
5468 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5469 	buf += n; size_left -= n;
5470 	REG_WR32(qlt, 0x54, 0x6030);
5471 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5472 	buf += n; size_left -= n;
5473 	REG_WR32(qlt, 0x54, 0x6040);
5474 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5475 	buf += n; size_left -= n;
5476 	REG_WR32(qlt, 0x54, 0x6100);
5477 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5478 	buf += n; size_left -= n;
5479 	REG_WR32(qlt, 0x54, 0x6130);
5480 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5481 	buf += n; size_left -= n;
5482 	REG_WR32(qlt, 0x54, 0x6150);
5483 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5484 	buf += n; size_left -= n;
5485 	REG_WR32(qlt, 0x54, 0x6170);
5486 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5487 	buf += n; size_left -= n;
5488 	REG_WR32(qlt, 0x54, 0x6190);
5489 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5490 	buf += n; size_left -= n;
5491 	REG_WR32(qlt, 0x54, 0x61B0);
5492 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5493 	buf += n; size_left -= n;
5494 	if (qlt->qlt_81xx_chip) {
5495 		REG_WR32(qlt, 0x54, 0x61C0);
5496 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5497 		buf += n; size_left -= n;
5498 	}
5499 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5500 		REG_WR32(qlt, 0x54, 0x6F00);
5501 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5502 		buf += n; size_left -= n;
5503 	}
5504 
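	/*
	 * Reset the chip (under intr_lock) before dumping RISC RAM below;
	 * intr_sneak_counter is apparently used to let the ISR tolerate any
	 * stray interrupts generated by the reset.
	 */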
5505 	qlt->intr_sneak_counter = 10;
5506 	mutex_enter(&qlt->intr_lock);
5507 	(void) qlt_reset_chip(qlt);
5508 	drv_usecwait(20);
5509 	qlt->intr_sneak_counter = 0;
5510 	mutex_exit(&qlt->intr_lock);
5511 
5512 	/*
5513 	 * Memory
5514 	 */
5515 	n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5516 	buf += n; size_left -= n;
5517 
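	/*
	 * Code RAM occupies RISC word addresses 0x20000-0x21FFF.  It is read
	 * in MBOX_DMA_MEM_SIZE-sized chunks through the mailbox DMA buffer
	 * (qlt_read_risc_ram) and formatted into the dump.
	 */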
5518 	addr = 0x20000;
5519 	endaddr = 0x22000;
5520 	words_to_read = 0;
5521 	while (addr < endaddr) {
5522 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5523 		if ((words_to_read + addr) > endaddr) {
5524 			words_to_read = endaddr - addr;
5525 		}
5526 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5527 		    QLT_SUCCESS) {
5528 			EL(qlt, "Error reading risc ram - CODE RAM status="
5529 			    "%llxh\n", ret);
5530 			goto dump_fail;
5531 		}
5532 
5533 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5534 		buf += n; size_left -= n;
5535 
5536 		if (size_left < 100000) {
5537 			EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5538 			    size_left);
5539 			goto dump_ok;
5540 		}
5541 		addr += words_to_read;
5542 	}
5543 
5544 	n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5545 	buf += n; size_left -= n;
5546 
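	/*
	 * External memory starts at RISC word address 0x100000 and ends at
	 * the firmware-reported end address (fw_endaddrhi:fw_endaddrlo),
	 * rounded up to an eight-word boundary.
	 */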
5547 	addr = 0x100000;
5548 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5549 	endaddr++;
5550 	if (endaddr & 7) {
5551 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
5552 	}
5553 
5554 	words_to_read = 0;
5555 	while (addr < endaddr) {
5556 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5557 		if ((words_to_read + addr) > endaddr) {
5558 			words_to_read = endaddr - addr;
5559 		}
5560 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5561 		    QLT_SUCCESS) {
5562 			EL(qlt, "Error reading risc ram - EXT RAM status="
5563 			    "%llxh\n", ret);
5564 			goto dump_fail;
5565 		}
5566 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5567 		buf += n; size_left -= n;
5568 		if (size_left < 100000) {
5569 			EL(qlt, "run out of space - EXT RAM\n");
5570 			goto dump_ok;
5571 		}
5572 		addr += words_to_read;
5573 	}
5574 
5575 	/*
5576 	 * Label the end tag
5577 	 */
5578 	n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5579 	buf += n; size_left -= n;
5580 
5581 	/*
5582 	 * Queue dumping
5583 	 */
5584 	n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5585 	buf += n; size_left -= n;
5586 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5587 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
5588 	buf += n; size_left -= n;
5589 
5590 	n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5591 	buf += n; size_left -= n;
5592 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5593 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5594 	buf += n; size_left -= n;
5595 
5596 	n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5597 	buf += n; size_left -= n;
5598 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5599 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5600 	buf += n; size_left -= n;
5601 
5602 	n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5603 	buf += n; size_left -= n;
5604 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5605 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5606 	buf += n; size_left -= n;
5607 
5608 	/*
5609 	 * Label dump reason
5610 	 */
5611 	n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5612 	    qlt->qlt_port_alias, ssci->st_additional_info);
5613 	buf += n; size_left -= n;
5614 
5615 dump_ok:
5616 	EL(qlt, "left-%d\n", size_left);
5617 
5618 	mutex_enter(&qlt->qlt_ioctl_lock);
5619 	qlt->qlt_ioctl_flags &=
5620 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5621 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5622 	mutex_exit(&qlt->qlt_ioctl_lock);
5623 	return (FCT_SUCCESS);
5624 
5625 dump_fail:
5626 	EL(qlt, "dump not done\n");
5627 	mutex_enter(&qlt->qlt_ioctl_lock);
5628 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5629 	mutex_exit(&qlt->qlt_ioctl_lock);
5630 	return (FCT_FAILURE);
5631 }
5632 
5633 static int
5634 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5635     uint_t size_left)
5636 {
5637 	int		i;
5638 	int		n;
5639 	char		c = ' ';
5640 
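	/* Dump 'count' 32-bit registers from 'startaddr', eight per line. */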
5641 	for (i = 0, n = 0; i < count; i++) {
5642 		if ((i + 1) & 7) {
5643 			c = ' ';
5644 		} else {
5645 			c = '\n';
5646 		}
5647 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5648 		    "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5649 	}
5650 	return (n);
5651 }
5652 
5653 static int
5654 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5655     caddr_t buf, uint_t size_left)
5656 {
5657 	int		i;
5658 	int		n;
5659 	char		c = ' ';
5660 	uint32_t	*ptr;
5661 
5662 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5663 	for (i = 0, n = 0; i < words; i++) {
5664 		if ((i & 7) == 0) {
5665 			n = (int)(n + (int)snprintf(&buf[n],
5666 			    (uint_t)(size_left - n), "%08x: ", addr + i));
5667 		}
5668 		if ((i + 1) & 7) {
5669 			c = ' ';
5670 		} else {
5671 			c = '\n';
5672 		}
5673 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5674 		    "%08x%c", ptr[i], c));
5675 	}
5676 	return (n);
5677 }
5678 
5679 static int
5680 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5681     uint_t size_left)
5682 {
5683 	int		i;
5684 	int		n;
5685 	char		c = ' ';
5686 	int		words;
5687 	uint16_t	*ptr;
5688 	uint16_t	w;
5689 
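	/* Each queue entry is dumped as 32 16-bit words (64 bytes). */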
5690 	words = entries * 32;
5691 	ptr = (uint16_t *)qadr;
5692 	for (i = 0, n = 0; i < words; i++) {
5693 		if ((i & 7) == 0) {
5694 			n = (int)(n + (int)snprintf(&buf[n],
5695 			    (uint_t)(size_left - n), "%05x: ", i));
5696 		}
5697 		if ((i + 1) & 7) {
5698 			c = ' ';
5699 		} else {
5700 			c = '\n';
5701 		}
5702 		w = QMEM_RD16(qlt, &ptr[i]);
5703 		n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5704 		    w, c));
5705 	}
5706 	return (n);
5707 }
5708 
5709 /*
5710  * Only called by debug dump. Interrupts are disabled and the mailboxes,
5711  * along with the mailbox RAM, are available.
5712  * Copy data from RISC RAM to system memory
5713  */
5714 static fct_status_t
5715 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5716 {
5717 	uint64_t	da;
5718 	fct_status_t	ret;
5719 
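	/*
	 * MBC_DUMP_RAM_EXTENDED: the mailbox registers set up below describe
	 * a transfer of 'words' 32-bit words starting at RISC address 'addr'
	 * into the mailbox DMA area of the queue memory, which is synced for
	 * the CPU on success.
	 */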
5720 	REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
5721 	da = qlt->queue_mem_cookie.dmac_laddress;
5722 	da += MBOX_DMA_MEM_OFFSET;
5723 
5724 	/* System destination address */
5725 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
5726 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
5727 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
5728 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
5729 
5730 	/* Length */
5731 	REG_WR16(qlt, REG_MBOX(5), LSW(words));
5732 	REG_WR16(qlt, REG_MBOX(4), MSW(words));
5733 
5734 	/* RISC source address */
5735 	REG_WR16(qlt, REG_MBOX(1), LSW(addr));
5736 	REG_WR16(qlt, REG_MBOX(8), MSW(addr));
5737 
5738 	ret = qlt_raw_mailbox_command(qlt);
5739 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
5740 	if (ret == QLT_SUCCESS) {
5741 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5742 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5743 	} else {
5744 		EL(qlt, "qlt_raw_mailbox_command=MBC_DUMP_RAM_EXTENDED status=%llxh\n", ret);
5745 	}
5746 	return (ret);
5747 }
5748 
5749 static void
5750 qlt_verify_fw(qlt_state_t *qlt)
5751 {
5752 	caddr_t req;
5753 	/* Just put it on the request queue */
5754 	mutex_enter(&qlt->req_lock);
5755 	req = qlt_get_req_entries(qlt, 1);
5756 	if (req == NULL) {
5757 		mutex_exit(&qlt->req_lock);
5758 		/* XXX handle this */
5759 		return;
5760 	}
5761 
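	/*
	 * Hand-build the IOCB: byte 0 is the entry type (0x1b, which judging
	 * by the surrounding function names is a verify-firmware request)
	 * and byte 1 is the entry count; the remaining fields are filled in
	 * through the QMEM_* accessors below.
	 */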
5762 	bzero(req, IOCB_SIZE);
5763 
5764 	req[0] = 0x1b;
5765 	req[1] = 1;
5766 
5767 	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
5768 	QMEM_WR16(qlt, (&req[0x8]), 1);    /*  options - don't update */
5769 	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
5770 
5771 	qlt_submit_req_entries(qlt, 1);
5772 	mutex_exit(&qlt->req_lock);
5773 }
5774 
5775 static void
5776 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5777 {
5778 	uint16_t	status;
5779 	char		info[80];
5780 
5781 	status = QMEM_RD16(qlt, rsp+8);
5782 	if (status != 0) {
5783 		(void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
5784 		    "status:%x, rsp:%p", status, (void *)rsp);
5785 		if (status == 3) {
5786 			uint16_t error_code;
5787 
5788 			error_code = QMEM_RD16(qlt, rsp+0xA);
5789 			(void) snprintf(info, 80, "qlt_handle_verify_fw_"
5790 			    "completion: error code:%x", error_code);
5791 		}
5792 	}
5793 }
5794 
5795 /*
5796  * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5797  *
5798  * Input:	Pointer to the adapter state structure.
5799  * Returns:	Success or Failure.
5800  * Context:	Kernel context.
5801  */
5802 static int
5803 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5804 {
5805 	int	rval = DDI_SUCCESS;
5806 
5807 	qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5808 	    kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5809 
5810 	if (qlt->el_trace_desc == NULL) {
5811 		cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5812 		    qlt->instance);
5813 		rval = DDI_FAILURE;
5814 	} else {
5815 		qlt->el_trace_desc->next = 0;
5816 		qlt->el_trace_desc->trace_buffer =
5817 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5818 
5819 		if (qlt->el_trace_desc->trace_buffer == NULL) {
5820 			cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5821 			    qlt->instance);
5822 			kmem_free(qlt->el_trace_desc,
5823 			    sizeof (qlt_el_trace_desc_t));
5824 			qlt->el_trace_desc = NULL;
5825 			rval = DDI_FAILURE;
5826 		} else {
5827 			qlt->el_trace_desc->trace_buffer_size =
5828 			    EL_TRACE_BUF_SIZE;
5829 			mutex_init(&qlt->el_trace_desc->mutex, NULL,
5830 			    MUTEX_DRIVER, NULL);
5831 		}
5832 	}
5833 
5834 	return (rval);
5835 }
5836 
5837 /*
5838  * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5839  *
5840  * Input:	Pointer to the adapter state structure.
5841  * Returns:	Success or Failure.
5842  * Context:	Kernel context.
5843  */
5844 static int
5845 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5846 {
5847 	int	rval = DDI_SUCCESS;
5848 
5849 	if (qlt->el_trace_desc == NULL) {
5850 		cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5851 		    qlt->instance);
5852 		rval = DDI_FAILURE;
5853 	} else {
5854 		if (qlt->el_trace_desc->trace_buffer != NULL) {
5855 			kmem_free(qlt->el_trace_desc->trace_buffer,
5856 			    qlt->el_trace_desc->trace_buffer_size);
5857 		}
5858 		mutex_destroy(&qlt->el_trace_desc->mutex);
5859 		kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5860 		qlt->el_trace_desc = NULL;
5861 	}
5862 
5863 	return (rval);
5864 }
5865 
5866 /*
5867  * qlt_el_msg
5868  *	Extended logging message
5869  *
5870  * Input:
5871  *	qlt:	adapter state pointer.
5872  *	fn:	function name.
5873  *	ce:	level
5874  *	...:	Variable argument list.
5875  *
5876  * Context:
5877  *	Kernel/Interrupt context.
5878  */
5879 void
5880 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5881 {
5882 	char		*s, *fmt = 0, *fmt1 = 0;
5883 	char		fmt2[EL_BUFFER_RESERVE];
5884 	int		rval, tmp;
5885 	int		tracing = 0;
5886 	va_list		vl;
5887 
5888 	/* Tracing is the default but it can be disabled. */
5889 	if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5890 		tracing = 1;
5891 
5892 		mutex_enter(&qlt->el_trace_desc->mutex);
5893 
5894 		/*
5895 		 * Ensure enough space for the string. Wrap to
5896 		 * start when default message allocation size
5897 		 * would overrun the end.
5898 		 */
5899 		if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5900 		    qlt->el_trace_desc->trace_buffer_size) {
5901 			fmt = qlt->el_trace_desc->trace_buffer;
5902 			qlt->el_trace_desc->next = 0;
5903 		} else {
5904 			fmt = qlt->el_trace_desc->trace_buffer +
5905 			    qlt->el_trace_desc->next;
5906 		}
5907 	}
5908 
5909 	/* if no buffer use the stack */
5910 	if (fmt == NULL) {
5911 		fmt = fmt2;
5912 	}
5913 
5914 	va_start(vl, ce);
5915 
5916 	s = va_arg(vl, char *);
5917 
5918 	rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
5919 	    "QEL qlt(%d): %s, ", qlt->instance, fn);
5920 	fmt1 = fmt + rval;
5921 	tmp = (int)vsnprintf(fmt1,
5922 	    (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
5923 	rval += tmp;
5924 
5925 	/*
5926 	 * Calculate the offset where the next message will go,
5927 	 * skipping the NULL.
5928 	 */
5929 	if (tracing) {
5930 		uint16_t next = (uint16_t)(rval += 1);
5931 		qlt->el_trace_desc->next += next;
5932 		mutex_exit(&qlt->el_trace_desc->mutex);
5933 	}
5934 
5935 	if (enable_extended_logging) {
5936 		cmn_err(ce, "%s", fmt);
5937 	}
5938 
5939 	va_end(vl);
5940 }
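
/*
 * A typical call, as seen throughout this file via the EL() macro (which
 * presumably supplies the function name and a cmn_err level), looks like:
 *
 *	EL(qlt, "qlt_raw_mailbox_command=MBC_WRITE_RAM_EXTENDED status"
 *	    "=%llxh\n", ret);
 *
 * The first variable argument is the format string; the remaining
 * arguments are consumed by vsnprintf() above.
 */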
5941 
5942 /*
5943  * qlt_dump_el_trace_buffer
5944  *	 Outputs extended logging trace buffer.
5945  *
5946  * Input:
5947  *	qlt:	adapter state pointer.
5948  */
5949 void
5950 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
5951 {
5952 	char		*dump_start = NULL;
5953 	char		*dump_current = NULL;
5954 	char		*trace_start;
5955 	char		*trace_end;
5956 	int		wrapped = 0;
5957 	int		rval;
5958 
5959 	mutex_enter(&qlt->el_trace_desc->mutex);
5960 
5961 	rval = qlt_validate_trace_desc(qlt);
5962 	if (rval != DDI_SUCCESS) {
5963 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
5964 		    qlt->instance);
5965 	} else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
5966 		dump_current = dump_start;
5967 		trace_start = qlt->el_trace_desc->trace_buffer;
5968 		trace_end = trace_start +
5969 		    qlt->el_trace_desc->trace_buffer_size;
5970 
5971 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
5972 		    qlt->instance,
5973 		    (void *)dump_start, (void *)trace_start);
5974 
5975 		while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
5976 		    (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
5977 			/* Show it... */
5978 			cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
5979 			    dump_current);
5980 			/* Make the next the current */
5981 			dump_current += (strlen(dump_current) + 1);
5982 			/* check for wrap */
5983 			if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
5984 				dump_current = trace_start;
5985 				wrapped = 1;
5986 			} else if (wrapped) {
5987 				/* Don't go past next. */
5988 				if ((trace_start + qlt->el_trace_desc->next) <=
5989 				    dump_current) {
5990 					break;
5991 				}
5992 		} else if (*dump_current == '\0') {
5993 				break;
5994 			}
5995 		}
5996 	}
5997 	mutex_exit(&qlt->el_trace_desc->mutex);
5998 }
5999 
6000 /*
6001  * qlt_validate_trace_desc
6002  *	 Ensures the extended logging trace descriptor is good.
6003  *
6004  * Input:
6005  *	qlt:	adapter state pointer.
6006  *
6007  * Returns:
6008  *	DDI_SUCCESS if the trace descriptor and its buffer exist, else
6009  *	DDI_FAILURE.
6009  */
6010 static int
6011 qlt_validate_trace_desc(qlt_state_t *qlt)
6012 {
6013 	int	rval = DDI_SUCCESS;
6014 
6015 	if (qlt->el_trace_desc == NULL) {
6016 		rval = DDI_FAILURE;
6017 	} else if (qlt->el_trace_desc->trace_buffer == NULL) {
6018 		rval = DDI_FAILURE;
6019 	}
6020 	return (rval);
6021 }
6022 
6023 /*
6024  * qlt_find_trace_start
6025  *	 Locate the oldest extended logging trace entry.
6026  *
6027  * Input:
6028  *	qlt:	adapter state pointer.
6029  *
6030  * Returns:
6031  *	Pointer to a string.
6032  *
6033  * Context:
6034  *	Kernel/Interrupt context.
6035  */
6036 static char *
6037 qlt_find_trace_start(qlt_state_t *qlt)
6038 {
6039 	char	*trace_start = 0;
6040 	char	*trace_next  = 0;
6041 
6042 	trace_next = qlt->el_trace_desc->trace_buffer +
6043 	    qlt->el_trace_desc->next;
6044 
6045 	/*
6046 	 * If the buffer has not wrapped, next will point at a null, so the
6047 	 * start is the beginning of the buffer.  If next points at a char,
6048 	 * then we must traverse the buffer until a null is detected; that
6049 	 * will be the beginning of the oldest whole entry in the buffer,
6050 	 * which is the start.
6051 	 */
6052 
6053 	if ((trace_next + EL_BUFFER_RESERVE) >=
6054 	    (qlt->el_trace_desc->trace_buffer +
6055 	    qlt->el_trace_desc->trace_buffer_size)) {
6056 		trace_start = qlt->el_trace_desc->trace_buffer;
6057 	} else if (*trace_next != '\0') {
6058 		trace_start = trace_next + (strlen(trace_next) + 1);
6059 	} else {
6060 		trace_start = qlt->el_trace_desc->trace_buffer;
6061 	}
6062 	return (trace_start);
6063 }
6064 
6065 
6066 static int
6067 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
6068 {
6069 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
6070 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
6071 }
6072 
6073 static int
6074 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6075 {
6076 	return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
6077 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, prop_val));
6078 }
6079 
6080 static int
6081 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
6082 {
6083 	char		inst_prop[256];
6084 	int		val;
6085 
6086 	/*
6087 	 * Get adapter instance specific parameters. If the instance
6088 	 * specific parameter isn't there, try the global parameter.
6089 	 */
6090 
6091 	(void) sprintf(inst_prop, "hba%d-%s", qlt->instance, prop);
6092 
6093 	if ((val = qlt_read_int_prop(qlt, inst_prop, defval)) == defval) {
6094 		val = qlt_read_int_prop(qlt, prop, defval);
6095 	}
6096 
6097 	return (val);
6098 }
6099 
6100 static int
6101 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6102 {
6103 	char		instance_prop[256];
6104 
6105 	/* Get adapter instance specific parameter. */
6106 	(void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
6107 	return (qlt_read_string_prop(qlt, instance_prop, prop_val));
6108 }
6109 
6110 static int
6111 qlt_convert_string_to_ull(char *prop, int radix,
6112     u_longlong_t *result)
6113 {
6114 	return (ddi_strtoull((const char *)prop, 0, radix, result));
6115 }
6116 
6117 static boolean_t
6118 qlt_wwn_overload_prop(qlt_state_t *qlt)
6119 {
6120 	char		*prop_val = 0;
6121 	int		rval;
6122 	int		radix;
6123 	u_longlong_t	wwnn = 0, wwpn = 0;
6124 	boolean_t	overloaded = FALSE;
6125 
6126 	radix = 16;
6127 
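	/*
	 * Look for per-instance "adapter-wwnn" and "adapter-wwpn" string
	 * properties (resolved as hba<instance>-adapter-wwnn, etc., by
	 * qlt_read_string_instance_prop(), typically set in the driver's
	 * .conf file) holding hex strings; if both parse, they replace the
	 * NVRAM node and port names below.
	 */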
6128 	rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
6129 	if (rval == DDI_PROP_SUCCESS) {
6130 		rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
6131 	}
6132 	if (rval == DDI_PROP_SUCCESS) {
6133 		rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
6134 		    &prop_val);
6135 		if (rval == DDI_PROP_SUCCESS) {
6136 			rval = qlt_convert_string_to_ull(prop_val, radix,
6137 			    &wwpn);
6138 		}
6139 	}
6140 	if (rval == DDI_PROP_SUCCESS) {
6141 		overloaded = TRUE;
6142 		/* Overload the current node/port name nvram copy */
6143 		bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
6144 		BIG_ENDIAN_64(qlt->nvram->node_name);
6145 		bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
6146 		BIG_ENDIAN_64(qlt->nvram->port_name);
6147 	}
6148 	return (overloaded);
6149 }
6150 
6151 /*
6152  * prop_text - Return a pointer to a string describing the status
6153  *
6154  * Input:	prop_status = the return status from a property function.
6155  * Returns:	pointer to a string.
6156  * Context:	Kernel context.
6157  */
6158 char *
6159 prop_text(int prop_status)
6160 {
6161 	string_table_t *entry = &prop_status_tbl[0];
6162 
6163 	return (value2string(entry, prop_status, 0xFFFF));
6164 }
6165 
6166 /*
6167  * value2string	Return a pointer to a string associated with the value
6168  *
6169  * Input:	entry = the value to string table
6170  *		value = the value
6171  * Returns:	pointer to a string.
6172  * Context:	Kernel context.
6173  */
6174 char *
6175 value2string(string_table_t *entry, int value, int delimiter)
6176 {
6177 	for (; entry->value != delimiter; entry++) {
6178 		if (entry->value == value) {
6179 			break;
6180 		}
6181 	}
6182 	return (entry->string);
6183 }
6184 
6185 /*
6186  * qlt_chg_endian Change endianness of byte array.
6187  *
6188  * Input:	buf = array pointer.
6189  *		size = size of array in bytes.
6190  *
6191  * Context:	Interrupt or Kernel context.
6192  */
6193 void
6194 qlt_chg_endian(uint8_t buf[], size_t size)
6195 {
6196 	uint8_t byte;
6197 	size_t  cnt1;
6198 	size_t  cnt;
6199 
6200 	cnt1 = size - 1;
6201 	for (cnt = 0; cnt < size / 2; cnt++) {
6202 		byte = buf[cnt1];
6203 		buf[cnt1] = buf[cnt];
6204 		buf[cnt] = byte;
6205 		cnt1--;
6206 	}
6207 }
6208 
6209 /*
6210  * qlt_mps_reset
6211  *	Reset MPS for FCoE functions.
6212  *
6213  * Input:
6214  *	qlt = adapter state pointer.
6215  *
6216  * Context:
6217  *	Kernel context.
6218  */
6219 static void
6220 qlt_mps_reset(qlt_state_t *qlt)
6221 {
6222 	uint32_t	data, dctl = 1000;
6223 
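	/*
	 * Grab what appears to be a firmware handshake semaphore at RISC RAM
	 * word 0x7c00: write 1 and re-read (up to ~1000 attempts) until bit 0
	 * is set.  The word is cleared again before returning.
	 */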
6224 	do {
6225 		if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
6226 		    QLT_SUCCESS) {
6227 			return;
6228 		}
6229 		if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
6230 		    QLT_SUCCESS) {
6231 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6232 			return;
6233 		}
6234 	} while (!(data & BIT_0));
6235 
6236 	if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
6237 		dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
6238 		if ((data & 0xe0) != (dctl & 0xe0)) {
6239 			data &= 0xff1f;
6240 			data |= dctl & 0xe0;
6241 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
6242 		}
6243 	}
6244 	(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6245 }
6246 
6247 /*
6248  * qlt_raw_wrt_risc_ram_word
6249  *	Write RISC RAM word.
6250  *
6251  * Input:	qlt:		adapter state pointer.
6252  *		risc_address:	risc ram word address.
6253  *		data:		data.
6254  *
6255  * Returns:	qlt local function return status code.
6256  *
6257  * Context:	Kernel context.
6258  */
6259 static fct_status_t
6260 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6261     uint32_t data)
6262 {
6263 	fct_status_t	ret;
6264 
6265 	REG_WR16(qlt, REG_MBOX(0), MBC_WRITE_RAM_EXTENDED);
6266 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6267 	REG_WR16(qlt, REG_MBOX(2), LSW(data));
6268 	REG_WR16(qlt, REG_MBOX(3), MSW(data));
6269 	REG_WR16(qlt, REG_MBOX(8), MSW(risc_address));
6270 	ret = qlt_raw_mailbox_command(qlt);
6271 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6272 	if (ret != QLT_SUCCESS) {
6273 		EL(qlt, "qlt_raw_mailbox_command=MBC_WRITE_RAM_EXTENDED status"
6274 		    "=%llxh\n", ret);
6275 	}
6276 	return (ret);
6277 }
6278 
6279 /*
6280  * qlt_raw_rd_risc_ram_word
6281  *	Read RISC RAM word.
6282  *
6283  * Input:	qlt:		adapter state pointer.
6284  *		risc_address:	risc ram word address.
6285  *		data:		data pointer.
6286  *
6287  * Returns:	qlt local function return status code.
6288  *
6289  * Context:	Kernel context.
6290  */
6291 static fct_status_t
6292 qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6293     uint32_t *data)
6294 {
6295 	fct_status_t	ret;
6296 
6297 	REG_WR16(qlt, REG_MBOX(0), MBC_READ_RAM_EXTENDED);
6298 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6299 	REG_WR16(qlt, REG_MBOX(2), MSW(risc_address));
6300 	ret = qlt_raw_mailbox_command(qlt);
6301 	*data = REG_RD16(qlt, REG_MBOX(2));
6302 	*data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
6303 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6304 	if (ret != QLT_SUCCESS) {
6305 		EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
6306 		    "=%llxh\n", ret);
6307 	}
6308 	return (ret);
6309 }
6310 
6311 static void
6312 qlt_properties(qlt_state_t *qlt)
6313 {
6314 	int32_t		cnt = 0;
6315 	int32_t		defval = 0xffff;
6316 
6317 	if (qlt_wwn_overload_prop(qlt) == TRUE) {
6318 		EL(qlt, "wwnn overloaded.\n");
6319 	}
6320 
6321 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
6322 	    defval) {
6323 		qlt->qlt_bucketcnt[0] = cnt;
6324 		EL(qlt, "2k bucket o/l=%d\n", cnt);
6325 	}
6326 
6327 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
6328 	    defval) {
6329 		qlt->qlt_bucketcnt[1] = cnt;
6330 		EL(qlt, "8k bucket o/l=%d\n", cnt);
6331 	}
6332 
6333 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
6334 	    defval) {
6335 		qlt->qlt_bucketcnt[2] = cnt;
6336 		EL(qlt, "64k bucket o/l=%d\n", cnt);
6337 	}
6338 
6339 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
6340 	    defval) {
6341 		qlt->qlt_bucketcnt[3] = cnt;
6342 		EL(qlt, "128k bucket o/l=%d\n", cnt);
6343 	}
6344 
6345 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
6346 	    defval) {
6347 		qlt->qlt_bucketcnt[4] = cnt;
6348 		EL(qlt, "256k bucket o/l=%d\n", cnt);
6349 	}
6350 }
6351