1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/ddi.h>
28 #include <sys/stat.h>
29 #include <sys/pci.h>
30 #include <sys/sunddi.h>
31 #include <sys/modctl.h>
32 #include <sys/file.h>
33 #include <sys/cred.h>
34 #include <sys/byteorder.h>
35 #include <sys/atomic.h>
36 #include <sys/scsi/scsi.h>
37 
38 #include <stmf_defines.h>
39 #include <fct_defines.h>
40 #include <stmf.h>
41 #include <portif.h>
42 #include <fct.h>
43 #include <qlt.h>
44 #include <qlt_dma.h>
45 #include <qlt_ioctl.h>
46 #include <stmf_ioctl.h>
47 
48 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
49 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
50 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
51     int reset_only);
52 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
53     uint32_t word_count, uint32_t risc_addr);
54 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
55 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
56 					uint32_t dma_size);
57 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
58 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
59 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
60 static fct_status_t qlt_initialize_adapter(fct_local_port_t *port);
61 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
62     stmf_state_change_info_t *ssci);
63 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
64 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
65 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
66 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
67 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
68 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
69 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
70 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
71     uint8_t *rsp);
72 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
73 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
74 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
75 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
76     int reset_only);
77 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
78     uint32_t word_count, uint32_t risc_addr);
79 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
80 fct_status_t qlt_port_start(caddr_t arg);
81 fct_status_t qlt_port_stop(caddr_t arg);
82 fct_status_t qlt_port_online(qlt_state_t *qlt);
83 fct_status_t qlt_port_offline(qlt_state_t *qlt);
84 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
85     fct_link_info_t *li);
86 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
87 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
88 						fct_flogi_xchg_t *fx);
89 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
90 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
91 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
92     fct_remote_port_t *rp, fct_cmd_t *login);
93 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
94     fct_remote_port_t *rp);
95 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
96 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
97 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
98     fct_cmd_t *cmd, int terminate);
99 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
100 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
101 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
102     fct_cmd_t *cmd, uint32_t flags);
103 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
104 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
105 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
106 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
107 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
108 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
109 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
110     stmf_data_buf_t *dbuf, uint32_t ioflags);
111 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
112 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
113 static void qlt_release_intr(qlt_state_t *qlt);
114 static int qlt_setup_interrupts(qlt_state_t *qlt);
115 static void qlt_destroy_mutex(qlt_state_t *qlt);
116 
117 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
118     uint32_t words);
119 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
120     caddr_t buf, int size_left);
121 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
122     caddr_t buf, int size_left);
123 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
124     int count, int size_left);
125 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
126     cred_t *credp, int *rval);
127 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
128 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
129 
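/*
 * SETELSBIT marks one ELS opcode in a byte-array bitmap: the opcode's upper
 * bits pick the byte ((els >> 3), clamped to a 32-byte map by the 0x1F mask)
 * and its low three bits pick the bit within that byte. As a worked example,
 * an opcode of 0x61 lands in byte 0x0C at bit 1. The 32-byte size matches
 * the bitmap handed to the firmware in qlt_port_online().
 */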
130 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] |= \
131 				    ((uint8_t)1) << ((els) & 7)
132 
133 int qlt_enable_msix = 0;
134 
135 /* Array to quickly calculate next free buf index to use */
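/*
 * The table maps a 4-bit "in use" bitmap n to the index of its lowest clear
 * bit, i.e. the next free slot; 0xff means all four slots are taken. For
 * example, entry 0xb (binary 1011) is 2, the first bit that is still clear.
 */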
136 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
137 
138 static struct cb_ops qlt_cb_ops = {
139 	qlt_open,
140 	qlt_close,
141 	nodev,
142 	nodev,
143 	nodev,
144 	nodev,
145 	nodev,
146 	qlt_ioctl,
147 	nodev,
148 	nodev,
149 	nodev,
150 	nochpoll,
151 	ddi_prop_op,
152 	0,
153 	D_MP | D_NEW
154 };
155 
156 static struct dev_ops qlt_ops = {
157 	DEVO_REV,
158 	0,
159 	nodev,
160 	nulldev,
161 	nulldev,
162 	qlt_attach,
163 	qlt_detach,
164 	nodev,
165 	&qlt_cb_ops,
166 	NULL,
167 	ddi_power
168 };
169 
170 #define	QLT_NAME    "COMSTAR QLT"
171 #define	QLT_VERSION "1.0"
172 
173 static struct modldrv modldrv = {
174 	&mod_driverops,
175 	QLT_NAME,
176 	&qlt_ops,
177 };
178 
179 static struct modlinkage modlinkage = {
180 	MODREV_1, &modldrv, NULL
181 };
182 
183 void *qlt_state = NULL;
184 kmutex_t qlt_global_lock;
185 static uint32_t qlt_loaded_counter = 0;
186 
187 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
188 			"-X Mode 1 133", "--Invalid--",
189 			"-X Mode 2 66", "-X Mode 2 100",
190 			"-X Mode 2 133", " 66" };
191 
192 /* Always use 64 bit DMA. */
193 static ddi_dma_attr_t qlt_queue_dma_attr = {
194 	DMA_ATTR_V0,		/* dma_attr_version */
195 	0,			/* low DMA address range */
196 	0xffffffffffffffff,	/* high DMA address range */
197 	0xffffffff,		/* DMA counter register */
198 	64,			/* DMA address alignment */
199 	0xff,			/* DMA burstsizes */
200 	1,			/* min effective DMA size */
201 	0xffffffff,		/* max DMA xfer size */
202 	0xffffffff,		/* segment boundary */
203 	1,			/* s/g list length */
204 	1,			/* granularity of device */
205 	0			/* DMA transfer flags */
206 };
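/*
 * Note that dma_attr_sgllen is 1, so the whole queue area must bind to a
 * single contiguous cookie; qlt_attach() checks that exactly one cookie
 * came back and fails the attach otherwise.
 */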
207 
208 /* qlogic logging */
209 int enable_extended_logging = 0;
210 
211 static char qlt_provider_name[] = "qlt";
212 static struct stmf_port_provider *qlt_pp;
213 
214 int
215 _init(void)
216 {
217 	int ret;
218 
219 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
220 	if (ret == 0) {
221 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
222 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
223 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
224 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
225 		qlt_pp->pp_name = qlt_provider_name;
226 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
227 			stmf_free(qlt_pp);
228 			mutex_destroy(&qlt_global_lock);
229 			ddi_soft_state_fini(&qlt_state);
230 			return (EIO);
231 		}
232 		ret = mod_install(&modlinkage);
233 		if (ret != 0) {
234 			(void) stmf_deregister_port_provider(qlt_pp);
235 			stmf_free(qlt_pp);
236 			mutex_destroy(&qlt_global_lock);
237 			ddi_soft_state_fini(&qlt_state);
238 		}
239 	}
240 	return (ret);
241 }
242 
243 int
244 _fini(void)
245 {
246 	int ret;
247 
248 	if (qlt_loaded_counter)
249 		return (EBUSY);
250 	ret = mod_remove(&modlinkage);
251 	if (ret == 0) {
252 		(void) stmf_deregister_port_provider(qlt_pp);
253 		stmf_free(qlt_pp);
254 		mutex_destroy(&qlt_global_lock);
255 		ddi_soft_state_fini(&qlt_state);
256 	}
257 	return (ret);
258 }
259 
260 int
261 _info(struct modinfo *modinfop)
262 {
263 	return (mod_info(&modlinkage, modinfop));
264 }
265 
266 int
267 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
268 {
269 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
270 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
271 }
272 
273 static int
274 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
275 {
276 	int		instance;
277 	qlt_state_t	*qlt;
278 	ddi_device_acc_attr_t	dev_acc_attr;
279 	uint16_t	did;
280 	uint16_t	val;
281 	uint16_t	mr;
282 	size_t		discard;
283 	uint_t		ncookies;
284 	int		max_read_size;
285 	int		max_payload_size;
286 	fct_status_t	ret;
287 
288 	/* No support for suspend resume yet */
289 	if (cmd != DDI_ATTACH)
290 		return (DDI_FAILURE);
291 	instance = ddi_get_instance(dip);
292 
293 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
294 		return (DDI_FAILURE);
295 	}
296 
297 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance))
298 	    == NULL) {
299 		goto attach_fail_1;
300 	}
301 	qlt->instance = instance;
302 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
303 	qlt->dip = dip;
304 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
305 		goto attach_fail_2;
306 	}
307 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
308 	if ((did != 0x2422) && (did != 0x2432) &&
309 	    (did != 0x2522) && (did != 0x2532)) {
310 		cmn_err(CE_WARN, "qlt(%d): unknown devid (%x), failing attach",
311 		    instance, did);
312 		goto attach_fail_4;
313 	}
314 	if ((did & 0xFF00) == 0x2500)
315 		qlt->qlt_25xx_chip = 1;
316 
317 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
318 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
319 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
320 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
321 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
322 		goto attach_fail_4;
323 	}
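	/*
	 * For the 2422 (which appears to be the PCI-X variant), bits 11:8 of
	 * the control/status register encode the bus mode and index the
	 * pci_speeds[] table above; the 133 MHz modes (3 and 7) are logged
	 * as a note, everything else as a warning.
	 */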
324 	if (did == 0x2422) {
325 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
326 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
327 		pci_bits >>= 8;
328 		pci_bits &= 0xf;
329 		if ((pci_bits == 3) || (pci_bits == 7)) {
330 			cmn_err(CE_NOTE,
331 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
332 			    instance, pci_speeds[pci_bits], pci_bits);
333 		} else {
334 			cmn_err(CE_WARN,
335 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
336 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
337 			    "(Invalid)", ((pci_bits == 0) ||
338 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
339 			    "32 bit slot ") : "", pci_bits);
340 		}
341 	}
342 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
343 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
344 		    (unsigned long long)ret);
345 		goto attach_fail_5;
346 	}
347 
348 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
349 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
350 		goto attach_fail_5;
351 	}
352 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
353 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
354 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
355 	    DDI_SUCCESS) {
356 		goto attach_fail_6;
357 	}
358 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
359 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
360 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
361 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
362 		goto attach_fail_7;
363 	}
364 	if (ncookies != 1)
365 		goto attach_fail_8;
366 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
367 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
368 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
369 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
370 
371 	/* the mutexes are initialized inside qlt_setup_interrupts() */
372 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
373 		goto attach_fail_8;
374 
375 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
376 	    "qlt%d", instance);
377 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
378 	    "%s,0", qlt->qlt_minor_name);
379 
380 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
381 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
382 		goto attach_fail_9;
383 	}
384 
385 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
386 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
387 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
388 
389 	/* Setup PCI cfg space registers */
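	/*
	 * A default of 11 acts as a sentinel for "property not set in
	 * qlt.conf", in which case the hardware default is left alone. When
	 * set, the value is encoded into the config-space word written
	 * below: e.g. on a 2432/2532, pci-max-read-request=2048 becomes
	 * val 4 and is placed into bits 14:12 of config offset 0x54.
	 */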
390 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
391 	if (max_read_size == 11)
392 		goto over_max_read_xfer_setting;
393 	if (did == 0x2422) {
394 		if (max_read_size == 512)
395 			val = 0;
396 		else if (max_read_size == 1024)
397 			val = 1;
398 		else if (max_read_size == 2048)
399 			val = 2;
400 		else if (max_read_size == 4096)
401 			val = 3;
402 		else {
403 			cmn_err(CE_WARN, "qlt(%d) malformed "
404 			    "pci-max-read-request in qlt.conf. Valid values "
405 			    "for this HBA are 512/1024/2048/4096", instance);
406 			goto over_max_read_xfer_setting;
407 		}
408 		mr = PCICFG_RD16(qlt, 0x4E);
409 		mr &= 0xfff3;
410 		mr |= (val << 2);
411 		PCICFG_WR16(qlt, 0x4E, mr);
412 	} else if ((did == 0x2432) || (did == 0x2532)) {
413 		if (max_read_size == 128)
414 			val = 0;
415 		else if (max_read_size == 256)
416 			val = 1;
417 		else if (max_read_size == 512)
418 			val = 2;
419 		else if (max_read_size == 1024)
420 			val = 3;
421 		else if (max_read_size == 2048)
422 			val = 4;
423 		else if (max_read_size == 4096)
424 			val = 5;
425 		else {
426 			cmn_err(CE_WARN, "qlt(%d) malformed "
427 			    "pci-max-read-request in qlt.conf. Valid values "
428 			    "for this HBA are 128/256/512/1024/2048/4096",
429 			    instance);
430 			goto over_max_read_xfer_setting;
431 		}
432 		mr = PCICFG_RD16(qlt, 0x54);
433 		mr &= 0x8fff;
434 		mr |= (val << 12);
435 		PCICFG_WR16(qlt, 0x54, mr);
436 	} else {
437 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
438 		    "pci-max-read-request for this device (%x)",
439 		    instance, did);
440 	}
441 over_max_read_xfer_setting:;
442 
443 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
444 	if (max_payload_size == 11)
445 		goto over_max_payload_setting;
446 	if ((did == 0x2432) || (did == 0x2532)) {
447 		if (max_payload_size == 128)
448 			val = 0;
449 		else if (max_payload_size == 256)
450 			val = 1;
451 		else if (max_payload_size == 512)
452 			val = 2;
453 		else if (max_payload_size == 1024)
454 			val = 3;
455 		else {
456 			cmn_err(CE_WARN, "qlt(%d) malformed "
457 			    "pcie-max-payload-size in qlt.conf. Valid values "
458 			    "for this HBA are 128/256/512/1024",
459 			    instance);
460 			goto over_max_payload_setting;
461 		}
462 		mr = PCICFG_RD16(qlt, 0x54);
463 		mr &= 0xff1f;
464 		mr |= (val << 5);
465 		PCICFG_WR16(qlt, 0x54, mr);
466 	} else {
467 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
468 		    "pcie-max-payload-size for this device (%x)",
469 		    instance, did);
470 	}
471 
472 over_max_payload_setting:;
473 
474 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
475 		goto attach_fail_10;
476 
477 	ddi_report_dev(dip);
478 	return (DDI_SUCCESS);
479 
480 attach_fail_10:;
481 	mutex_destroy(&qlt->qlt_ioctl_lock);
482 	cv_destroy(&qlt->mbox_cv);
483 	cv_destroy(&qlt->rp_dereg_cv);
484 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
485 attach_fail_9:;
486 	qlt_destroy_mutex(qlt);
487 	qlt_release_intr(qlt);
488 attach_fail_8:;
489 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
490 attach_fail_7:;
491 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
492 attach_fail_6:;
493 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
494 attach_fail_5:;
495 	ddi_regs_map_free(&qlt->regs_acc_handle);
496 attach_fail_4:;
497 	pci_config_teardown(&qlt->pcicfg_acc_handle);
498 attach_fail_2:;
499 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
500 attach_fail_1:;
501 	ddi_soft_state_free(qlt_state, instance);
502 	return (DDI_FAILURE);
503 }
504 
505 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
506 
507 /* ARGSUSED */
508 static int
509 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
510 {
511 	qlt_state_t *qlt;
512 
513 	int instance;
514 
515 	instance = ddi_get_instance(dip);
516 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance))
517 	    == NULL) {
518 		return (DDI_FAILURE);
519 	}
520 
521 	if (qlt->fw_code01) {
522 		return (DDI_FAILURE);
523 	}
524 
525 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
526 	    qlt->qlt_state_not_acked) {
527 		return (DDI_FAILURE);
528 	}
529 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS)
530 		return (DDI_FAILURE);
531 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
532 	qlt_destroy_mutex(qlt);
533 	qlt_release_intr(qlt);
534 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
535 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
536 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
537 	ddi_regs_map_free(&qlt->regs_acc_handle);
538 	pci_config_teardown(&qlt->pcicfg_acc_handle);
539 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
540 	cv_destroy(&qlt->mbox_cv);
541 	cv_destroy(&qlt->rp_dereg_cv);
542 	ddi_soft_state_free(qlt_state, instance);
543 
544 	return (DDI_SUCCESS);
545 }
546 
547 static void
548 qlt_enable_intr(qlt_state_t *qlt)
549 {
550 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
551 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
552 	} else {
553 		int i;
554 		for (i = 0; i < qlt->intr_cnt; i++)
555 			(void) ddi_intr_enable(qlt->htable[i]);
556 	}
557 }
558 
559 static void
560 qlt_disable_intr(qlt_state_t *qlt)
561 {
562 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
563 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
564 	} else {
565 		int i;
566 		for (i = 0; i < qlt->intr_cnt; i++)
567 			(void) ddi_intr_disable(qlt->htable[i]);
568 	}
569 }
570 
571 static void
572 qlt_release_intr(qlt_state_t *qlt)
573 {
574 	if (qlt->htable) {
575 		int i;
576 		for (i = 0; i < qlt->intr_cnt; i++) {
577 			(void) ddi_intr_remove_handler(qlt->htable[i]);
578 			(void) ddi_intr_free(qlt->htable[i]);
579 		}
580 		kmem_free(qlt->htable, qlt->intr_size);
581 	}
582 	qlt->htable = NULL;
583 	qlt->intr_pri = 0;
584 	qlt->intr_cnt = 0;
585 	qlt->intr_size = 0;
586 	qlt->intr_cap = 0;
587 }
588 
589 
590 static void
591 qlt_init_mutex(qlt_state_t *qlt)
592 {
593 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
594 	    INT2PTR(qlt->intr_pri, void *));
595 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
596 	    INT2PTR(qlt->intr_pri, void *));
597 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
598 	    INT2PTR(qlt->intr_pri, void *));
599 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
600 	    INT2PTR(qlt->intr_pri, void *));
601 }
602 
603 static void
604 qlt_destroy_mutex(qlt_state_t *qlt)
605 {
606 	mutex_destroy(&qlt->req_lock);
607 	mutex_destroy(&qlt->preq_lock);
608 	mutex_destroy(&qlt->mbox_lock);
609 	mutex_destroy(&qlt->intr_lock);
610 }
611 
612 
613 static int
614 qlt_setup_msix(qlt_state_t *qlt)
615 {
616 	int count, avail, actual;
617 	int ret;
618 	int itype = DDI_INTR_TYPE_MSIX;
619 	int i;
620 
621 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
622 	if (ret != DDI_SUCCESS || count == 0) {
623 		return (DDI_FAILURE);
624 	}
625 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
626 	if (ret != DDI_SUCCESS || avail == 0) {
627 		return (DDI_FAILURE);
628 	}
629 	if (avail < count) {
630 		stmf_trace(qlt->qlt_port_alias,
631 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
632 	}
633 
634 	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
635 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
636 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
637 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
638 	/* we need at least 2 interrupt vectors */
639 	if (ret != DDI_SUCCESS || actual < 2) {
640 		ret = DDI_FAILURE;
641 		goto release_intr;
642 	}
643 	if (actual < count) {
644 		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msix: "
645 		    "requested: %d, received: %d\n",
646 		    count, actual);
647 	}
648 
649 	qlt->intr_cnt = actual;
650 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
651 	if (ret != DDI_SUCCESS) {
652 		ret = DDI_FAILURE;
653 		goto release_intr;
654 	}
655 	qlt_init_mutex(qlt);
656 	for (i = 0; i < actual; i++) {
657 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
658 		    qlt, INT2PTR(i, void *));
659 		if (ret != DDI_SUCCESS)
660 			goto release_mutex;
661 	}
662 
663 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
664 	qlt->intr_flags |= QLT_INTR_MSIX;
665 	return (DDI_SUCCESS);
666 
667 release_mutex:
668 	qlt_destroy_mutex(qlt);
669 release_intr:
670 	for (i = 0; i < actual; i++)
671 		(void) ddi_intr_free(qlt->htable[i]);
672 free_mem:
673 	kmem_free(qlt->htable, qlt->intr_size);
674 	qlt->htable = NULL;
675 	qlt_release_intr(qlt);
676 	return (ret);
677 }
678 
679 
680 static int
681 qlt_setup_msi(qlt_state_t *qlt)
682 {
683 	int count, avail, actual;
684 	int itype = DDI_INTR_TYPE_MSI;
685 	int ret;
686 	int i;
687 
688 	/* get the # of interrupts */
689 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
690 	if (ret != DDI_SUCCESS || count == 0) {
691 		return (DDI_FAILURE);
692 	}
693 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
694 	if (ret != DDI_SUCCESS || avail == 0) {
695 		return (DDI_FAILURE);
696 	}
697 	if (avail < count) {
698 		QLT_LOG(qlt->qlt_port_alias,
699 		    "qlt_setup_msi: nintrs=%d, avail=%d", count, avail);
700 	}
701 	/* Only one MSI interrupt is needed. */
702 	count = 1;
703 
704 	/* allocate interrupt */
705 	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
706 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
707 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
708 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
709 	if (ret != DDI_SUCCESS || actual == 0) {
710 		ret = DDI_FAILURE;
711 		goto free_mem;
712 	}
713 	if (actual < count) {
714 		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msi: "
715 		    "requested: %d, received:%d",
716 		    count, actual);
717 	}
718 	qlt->intr_cnt = actual;
719 
720 	/*
721 	 * Get priority for first msi, assume remaining are all the same.
722 	 */
723 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
724 	if (ret != DDI_SUCCESS) {
725 		ret = DDI_FAILURE;
726 		goto release_intr;
727 	}
728 	qlt_init_mutex(qlt);
729 
730 	/* add handler */
731 	for (i = 0; i < actual; i++) {
732 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
733 		    qlt, INT2PTR(i, void *));
734 		if (ret != DDI_SUCCESS)
735 			goto release_mutex;
736 	}
737 
738 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
739 	qlt->intr_flags |= QLT_INTR_MSI;
740 	return (DDI_SUCCESS);
741 
742 release_mutex:
743 	qlt_destroy_mutex(qlt);
744 release_intr:
745 	for (i = 0; i < actual; i++)
746 		(void) ddi_intr_free(qlt->htable[i]);
747 free_mem:
748 	kmem_free(qlt->htable, qlt->intr_size);
749 	qlt->htable = NULL;
750 	qlt_release_intr(qlt);
751 	return (ret);
752 }
753 
754 static int
755 qlt_setup_fixed(qlt_state_t *qlt)
756 {
757 	int count;
758 	int actual;
759 	int ret;
760 	int itype = DDI_INTR_TYPE_FIXED;
761 
762 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
763 	/* The FIXED interrupt type supports only a single vector. */
764 	if (ret != DDI_SUCCESS || count != 1) {
765 		return (DDI_FAILURE);
766 	}
767 
768 	qlt->intr_size = sizeof (ddi_intr_handle_t);
769 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
770 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
771 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
772 	if (ret != DDI_SUCCESS || actual != 1) {
773 		ret = DDI_FAILURE;
774 		goto free_mem;
775 	}
776 
777 	qlt->intr_cnt = actual;
778 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
779 	if (ret != DDI_SUCCESS) {
780 		ret = DDI_FAILURE;
781 		goto release_intr;
782 	}
783 	qlt_init_mutex(qlt);
784 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
785 	if (ret != DDI_SUCCESS)
786 		goto release_mutex;
787 
788 	qlt->intr_flags |= QLT_INTR_FIXED;
789 	return (DDI_SUCCESS);
790 
791 release_mutex:
792 	qlt_destroy_mutex(qlt);
793 release_intr:
794 	(void) ddi_intr_free(qlt->htable[0]);
795 free_mem:
796 	kmem_free(qlt->htable, qlt->intr_size);
797 	qlt->htable = NULL;
798 	qlt_release_intr(qlt);
799 	return (ret);
800 }
801 
802 
803 static int
804 qlt_setup_interrupts(qlt_state_t *qlt)
805 {
806 #if defined(__sparc)
807 	int itypes = 0;
808 #endif
809 
810 /*
811  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198). So use
812  * MSI for sparc only for now.
813  */
814 #if defined(__sparc)
815 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
816 		itypes = DDI_INTR_TYPE_FIXED;
817 	}
818 
819 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
820 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
821 			return (DDI_SUCCESS);
822 	}
823 	if (itypes & DDI_INTR_TYPE_MSI) {
824 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
825 			return (DDI_SUCCESS);
826 	}
827 #endif
828 	return (qlt_setup_fixed(qlt));
829 }
830 
831 /*
832  * Fill in the HBA attributes
833  */
834 void
835 qlt_populate_hba_fru_details(struct fct_local_port *port,
836     struct fct_port_attrs *port_attrs)
837 {
838 	caddr_t	bufp;
839 	int len;
840 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
841 
842 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
843 	    "QLogic Corp.");
844 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
845 	    "%s", QLT_NAME);
846 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
847 	    "%s", QLT_VERSION);
848 	port_attrs->serial_number[0] = '\0';
849 	port_attrs->hardware_version[0] = '\0';
850 
851 	(void) snprintf(port_attrs->firmware_version,
852 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
853 	    qlt->fw_minor, qlt->fw_subminor);
854 
855 	/* Get FCode version */
856 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
857 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
858 	    (int *)&len) == DDI_PROP_SUCCESS) {
859 		(void) snprintf(port_attrs->option_rom_version,
860 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
861 		kmem_free(bufp, len);
862 		bufp = NULL;
863 	} else {
864 #ifdef __sparc
865 #define	FCHBA_OPTION_ROM_ERR_TEXT	"No Fcode found"
866 #else
867 #define	FCHBA_OPTION_ROM_ERR_TEXT	"N/A"
868 #endif
869 		(void) snprintf(port_attrs->option_rom_version,
870 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s",
871 		    FCHBA_OPTION_ROM_ERR_TEXT);
872 	}
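	/*
	 * These NVRAM fields are stored as raw byte pairs with the low byte
	 * first, so the 16-bit values are assembled by hand here.
	 */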
873 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
874 	    qlt->nvram->subsystem_vendor_id[1] << 8;
875 
876 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
877 	    qlt->nvram->max_frame_length[0];
878 
879 	port_attrs->supported_cos = 0x10000000;
880 	port_attrs->supported_speed = PORT_SPEED_1G |
881 	    PORT_SPEED_2G | PORT_SPEED_4G;
882 	if (qlt->qlt_25xx_chip)
883 		port_attrs->supported_speed |= PORT_SPEED_8G;
884 
885 	(void) snprintf(port_attrs->model, FCHBA_MODEL_LEN, "%s",
886 	    qlt->nvram->model_name);
887 	(void) snprintf(port_attrs->model_description,
888 	    FCHBA_MODEL_DESCRIPTION_LEN, "%s", qlt->nvram->model_name);
889 }
890 
891 /* ARGSUSED */
892 fct_status_t
893 qlt_info(uint32_t cmd, fct_local_port_t *port,
894     void *arg, uint8_t *buf, uint32_t *bufsizep)
895 {
896 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
897 	mbox_cmd_t	*mcp;
898 	fct_status_t	ret = FCT_SUCCESS;
899 	uint8_t		*p;
900 	fct_port_link_status_t	*link_status;
901 
902 	switch (cmd) {
903 	case FC_TGT_PORT_RLS:
904 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
905 			ret = FCT_FAILURE;
906 			break;
907 		}
908 		/* send mailbox command to get link status */
909 		mcp = qlt_alloc_mailbox_command(qlt, 156);
910 		if (mcp == NULL) {
911 			ret = FCT_ALLOC_FAILURE;
912 			break;
913 		}
914 
915 		/* GET LINK STATUS count */
916 		mcp->to_fw[0] = 0x6d;
917 		mcp->to_fw[8] = 156/4;
918 		mcp->to_fw_mask |= BIT_1 | BIT_8;
919 		mcp->from_fw_mask |= BIT_1 | BIT_2;
920 
921 		ret = qlt_mailbox_command(qlt, mcp);
922 		if (ret != QLT_SUCCESS) {
923 			qlt_free_mailbox_command(qlt, mcp);
924 			break;
925 		}
926 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
927 
928 		p = mcp->dbuf->db_sglist[0].seg_addr;
929 		link_status = (fct_port_link_status_t *)buf;
930 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
931 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
932 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
933 		link_status->PrimitiveSeqProtocolErrorCount =
934 		    LE_32(*((uint32_t *)(p + 12)));
935 		link_status->InvalidTransmissionWordCount =
936 		    LE_32(*((uint32_t *)(p + 16)));
937 		link_status->InvalidCRCCount =
938 		    LE_32(*((uint32_t *)(p + 20)));
939 
940 		qlt_free_mailbox_command(qlt, mcp);
941 		break;
942 	default:
943 		ret = FCT_FAILURE;
944 		break;
945 	}
946 	return (ret);
947 }
948 
949 fct_status_t
950 qlt_port_start(caddr_t arg)
951 {
952 	qlt_state_t *qlt = (qlt_state_t *)arg;
953 	fct_local_port_t *port;
954 	fct_dbuf_store_t *fds;
955 
956 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
957 		return (FCT_FAILURE);
958 	}
959 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
960 	if (port == NULL) {
961 		goto qlt_pstart_fail_1;
962 	}
963 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
964 	if (fds == NULL) {
965 		goto qlt_pstart_fail_2;
966 	}
967 	qlt->qlt_port = port;
968 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
969 	fds->fds_free_data_buf = qlt_dmem_free;
970 	fds->fds_fca_private = (void *)qlt;
971 	/*
972 	 * Since we keep everything in the state struct and don't allocate any
973 	 * port private area, just use that pointer to point to the
974 	 * state struct.
975 	 */
976 	port->port_fca_private = qlt;
977 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
978 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
979 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
980 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
981 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
982 	port->port_default_alias = qlt->qlt_port_alias;
983 	port->port_pp = qlt_pp;
984 	port->port_fds = fds;
985 	port->port_max_logins = QLT_MAX_LOGINS;
986 	port->port_max_xchges = QLT_MAX_XCHGES;
987 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
988 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
989 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
990 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
991 	port->port_get_link_info = qlt_get_link_info;
992 	port->port_register_remote_port = qlt_register_remote_port;
993 	port->port_deregister_remote_port = qlt_deregister_remote_port;
994 	port->port_send_cmd = qlt_send_cmd;
995 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
996 	port->port_send_cmd_response = qlt_send_cmd_response;
997 	port->port_abort_cmd = qlt_abort_cmd;
998 	port->port_ctl = qlt_ctl;
999 	port->port_flogi_xchg = qlt_do_flogi;
1000 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1001 	port->port_info = qlt_info;
1002 
1003 	if (fct_register_local_port(port) != FCT_SUCCESS) {
1004 		goto qlt_pstart_fail_2_5;
1005 	}
1006 
1007 	return (QLT_SUCCESS);
1008 
1009 qlt_pstart_fail_3:
1010 	(void) fct_deregister_local_port(port);
1011 qlt_pstart_fail_2_5:
1012 	fct_free(fds);
1013 qlt_pstart_fail_2:
1014 	fct_free(port);
1015 	qlt->qlt_port = NULL;
1016 qlt_pstart_fail_1:
1017 	qlt_dmem_fini(qlt);
1018 	return (QLT_FAILURE);
1019 }
1020 
1021 fct_status_t
1022 qlt_port_stop(caddr_t arg)
1023 {
1024 	qlt_state_t *qlt = (qlt_state_t *)arg;
1025 
1026 	if (fct_deregister_local_port(qlt->qlt_port) != FCT_SUCCESS)
1027 		return (QLT_FAILURE);
1028 	fct_free(qlt->qlt_port->port_fds);
1029 	fct_free(qlt->qlt_port);
1030 	qlt->qlt_port = NULL;
1031 	qlt_dmem_fini(qlt);
1032 	return (QLT_SUCCESS);
1033 }
1034 
1035 /*
1036  * Called by the framework to initialize the HBA.
1037  * Can be called in the middle of I/O. (Why ??)
1038  * Should ensure a sane state both before and after the initialization.
1039  */
1040 fct_status_t
1041 qlt_port_online(qlt_state_t *qlt)
1042 {
1043 	uint64_t	da;
1044 	int		instance;
1045 	fct_status_t	ret;
1046 	uint16_t	rcount;
1047 	caddr_t		icb;
1048 	mbox_cmd_t	*mcp;
1049 	uint8_t		*elsbmp;
1050 
1051 	instance = ddi_get_instance(qlt->dip);
1052 
1053 	/* XXX Make sure a sane state */
1054 
1055 	if ((ret = qlt_reset_chip_and_download_fw(qlt, 0)) != QLT_SUCCESS) {
1056 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1057 		return (ret);
1058 	}
1059 
1060 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1061 
1062 	/* Get resource count */
1063 	REG_WR16(qlt, REG_MBOX(0), 0x42);
1064 	ret = qlt_raw_mailbox_command(qlt);
1065 	rcount = REG_RD16(qlt, REG_MBOX(3));
1066 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1067 	if (ret != QLT_SUCCESS)
1068 		return (ret);
1069 
1070 	/* Enable PUREX */
1071 	REG_WR16(qlt, REG_MBOX(0), 0x38);
1072 	REG_WR16(qlt, REG_MBOX(1), 0x0400);
1073 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1074 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1075 	ret = qlt_raw_mailbox_command(qlt);
1076 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1077 	if (ret != QLT_SUCCESS) {
1078 		cmn_err(CE_NOTE, "Enable PUREX failed");
1079 		return (ret);
1080 	}
1081 
1082 	/* Pass ELS bitmap to fw */
1083 	REG_WR16(qlt, REG_MBOX(0), 0x59);
1084 	REG_WR16(qlt, REG_MBOX(1), 0x0500);
1085 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1086 	bzero(elsbmp, 32);
1087 	da = qlt->queue_mem_cookie.dmac_laddress;
1088 	da += MBOX_DMA_MEM_OFFSET;
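	/*
	 * The 64-bit DMA address of the bitmap is handed to the firmware in
	 * 16-bit pieces: mailbox 3 gets bits 15:0, mailbox 2 bits 31:16,
	 * mailbox 7 bits 47:32 and mailbox 6 bits 63:48, which is what the
	 * successive 16-bit shifts below implement.
	 */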
1089 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1090 	da >>= 16;
1091 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1092 	da >>= 16;
1093 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1094 	da >>= 16;
1095 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1096 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1097 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1098 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1099 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1100 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1101 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1102 	SETELSBIT(elsbmp, ELS_OP_SCN);
1103 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1104 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1105 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1106 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1107 	SETELSBIT(elsbmp, ELS_OP_RNID);
1108 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1109 	    DDI_DMA_SYNC_FORDEV);
1110 	ret = qlt_raw_mailbox_command(qlt);
1111 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1112 	if (ret != QLT_SUCCESS) {
1113 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1114 		    "elsbmp0=%x elsbmp1=%x", (long long)ret, elsbmp[0],
1115 		    elsbmp[1]);
1116 		return (ret);
1117 	}
1118 
1119 	/* Init queue pointers */
1120 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1121 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1122 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1123 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1124 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1125 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1126 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1127 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1128 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1129 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1130 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1131 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1132 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1133 
1134 	/*
1135 	 * XXX support for tunables. Also should we cache icb ?
1136 	 */
1137 	mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1138 	if (mcp == NULL) {
1139 		return (STMF_ALLOC_FAILURE);
1140 	}
1141 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1142 	bzero(icb, 0x80);
1143 	da = qlt->queue_mem_cookie.dmac_laddress;
1144 	DMEM_WR16(qlt, icb, 1);		/* Version */
1145 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1146 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1147 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1148 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1149 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1150 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1151 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1152 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1153 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1154 	DMEM_WR16(qlt, icb+0x28, 100);	/* ms of NOS/OLS for Link down */
1155 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1156 	DMEM_WR64(qlt, icb+0x2c, da+REQUEST_QUEUE_OFFSET);
1157 	DMEM_WR64(qlt, icb+0x34, da+RESPONSE_QUEUE_OFFSET);
1158 	DMEM_WR64(qlt, icb+0x3c, da+PRIORITY_QUEUE_OFFSET);
1159 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1160 	DMEM_WR64(qlt, icb+0x50, da+ATIO_QUEUE_OFFSET);
1161 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1162 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1163 	DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1164 	    BIT_2 | BIT_1 | BIT_0);
1165 	DMEM_WR32(qlt, icb+0x60, BIT_5);
1166 	DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 | BIT_4);
1167 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1168 	mcp->to_fw[0] = 0x60;
1169 
1170 	/*
1171 	 * This is the first command after adapter initialization that will
1172 	 * use interrupts and the regular mailbox interface.
1173 	 */
1174 	qlt->mbox_io_state = MBOX_STATE_READY;
1175 	qlt_enable_intr(qlt);
1176 	qlt->qlt_intr_enabled = 1;
1177 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1178 	/* Issue mailbox to firmware */
1179 	ret = qlt_mailbox_command(qlt, mcp);
1180 	if (ret != QLT_SUCCESS) {
1181 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1182 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1183 	}
1184 
1185 	mcp->to_fw_mask = BIT_0;
1186 	mcp->from_fw_mask = BIT_0 | BIT_1;
1187 	mcp->to_fw[0] = 0x28;
1188 	ret = qlt_mailbox_command(qlt, mcp);
1189 	if (ret != QLT_SUCCESS) {
1190 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1191 		    (long long)ret);
1192 	}
1193 
1194 	qlt_free_mailbox_command(qlt, mcp);
1195 	if (ret != QLT_SUCCESS)
1196 		return (ret);
1197 	return (FCT_SUCCESS);
1198 }
1199 
1200 fct_status_t
1201 qlt_port_offline(qlt_state_t *qlt)
1202 {
1203 	int		retries;
1204 
1205 	mutex_enter(&qlt->mbox_lock);
1206 
1207 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1208 		mutex_exit(&qlt->mbox_lock);
1209 		goto poff_mbox_done;
1210 	}
1211 
1212 	/* Wait to grab the mailboxes */
1213 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1214 	    retries++) {
1215 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1216 		if ((retries > 5) ||
1217 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1218 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1219 			mutex_exit(&qlt->mbox_lock);
1220 			goto poff_mbox_done;
1221 		}
1222 	}
1223 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1224 	mutex_exit(&qlt->mbox_lock);
1225 poff_mbox_done:;
1226 	qlt->intr_sneak_counter = 10;
1227 	qlt_disable_intr(qlt);
1228 	mutex_enter(&qlt->intr_lock);
1229 	qlt->qlt_intr_enabled = 0;
1230 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
1231 	drv_usecwait(20);
1232 	qlt->intr_sneak_counter = 0;
1233 	mutex_exit(&qlt->intr_lock);
1234 
1235 	return (FCT_SUCCESS);
1236 }
1237 
1238 static fct_status_t
1239 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1240 {
1241 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1242 	mbox_cmd_t *mcp;
1243 	fct_status_t fc_ret;
1244 	fct_status_t ret;
1245 	clock_t et;
1246 
1247 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1248 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1249 link_info_retry:
1250 	mcp->to_fw[0] = 0x20;
1251 	mcp->to_fw_mask |= BIT_0;
1252 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1253 	/* Issue mailbox to firmware */
1254 	ret = qlt_mailbox_command(qlt, mcp);
1255 	if (ret != QLT_SUCCESS) {
1256 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1257 			/* Firmware is not ready */
1258 			if (ddi_get_lbolt() < et) {
1259 				delay(drv_usectohz(50000));
1260 				goto link_info_retry;
1261 			}
1262 		}
1263 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1264 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1265 		fc_ret = FCT_FAILURE;
1266 	} else {
1267 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1268 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1269 
1270 		li->port_speed = qlt->link_speed;
1271 		switch (mcp->from_fw[6]) {
1272 		case 1:
1273 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1274 			li->port_fca_flogi_done = 1;
1275 			break;
1276 		case 0:
1277 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1278 			li->port_no_fct_flogi = 1;
1279 			break;
1280 		case 3:
1281 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1282 			li->port_fca_flogi_done = 1;
1283 			break;
1284 		case 2: /*FALLTHROUGH*/
1285 		case 4:
1286 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1287 			li->port_fca_flogi_done = 1;
1288 			break;
1289 		default:
1290 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1291 			QLT_LOG(qlt->qlt_port_alias, "Unknown port topology "
1292 			    "reported by fw %x", mcp->from_fw[6]);
1293 		}
1294 		qlt->cur_topology = li->port_topology;
1295 		fc_ret = FCT_SUCCESS;
1296 	}
1297 	qlt_free_mailbox_command(qlt, mcp);
1298 
1299 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1300 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1301 		mcp->to_fw[0] = 0x64;
1302 		mcp->to_fw[1] = 0x7FE;
1303 		mcp->to_fw[10] = 0;
1304 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_10;
1305 		fc_ret = qlt_mailbox_command(qlt, mcp);
1306 		if (fc_ret != QLT_SUCCESS) {
1307 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1308 			    "database for F_port failed, ret = %llx", fc_ret);
1309 		} else {
1310 			uint8_t *p;
1311 
1312 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1313 			p = mcp->dbuf->db_sglist[0].seg_addr;
1314 			bcopy(p + 0x18, li->port_rpwwn, 8);
1315 			bcopy(p + 0x20, li->port_rnwwn, 8);
1316 		}
1317 		qlt_free_mailbox_command(qlt, mcp);
1318 	}
1319 	return (fc_ret);
1320 }
1321 
1322 static int
1323 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1324 {
1325 	int		instance;
1326 	qlt_state_t	*qlt;
1327 
1328 	if (otype != OTYP_CHR) {
1329 		return (EINVAL);
1330 	}
1331 
1332 	/*
1333 	 * Since this is for debugging only, restrict ioctls to root for now.
1334 	 */
1335 	if (drv_priv(credp)) {
1336 		return (EPERM);
1337 	}
1338 
1339 	instance = (int)getminor(*devp);
1340 	qlt = ddi_get_soft_state(qlt_state, instance);
1341 	if (qlt == NULL) {
1342 		return (ENXIO);
1343 	}
1344 
1345 	mutex_enter(&qlt->qlt_ioctl_lock);
1346 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1347 		/*
1348 		 * It is already open for exclusive access.
1349 		 * So shut the door on this caller.
1350 		 */
1351 		mutex_exit(&qlt->qlt_ioctl_lock);
1352 		return (EBUSY);
1353 	}
1354 
1355 	if (flag & FEXCL) {
1356 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1357 			/*
1358 			 * Exclusive operation not possible
1359 			 * as it is already opened
1360 			 */
1361 			mutex_exit(&qlt->qlt_ioctl_lock);
1362 			return (EBUSY);
1363 		}
1364 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1365 	}
1366 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1367 	mutex_exit(&qlt->qlt_ioctl_lock);
1368 
1369 	return (0);
1370 }
1371 
1372 /* ARGSUSED */
1373 static int
1374 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1375 {
1376 	int		instance;
1377 	qlt_state_t	*qlt;
1378 
1379 	if (otype != OTYP_CHR) {
1380 		return (EINVAL);
1381 	}
1382 
1383 	instance = (int)getminor(dev);
1384 	qlt = ddi_get_soft_state(qlt_state, instance);
1385 	if (qlt == NULL) {
1386 		return (ENXIO);
1387 	}
1388 
1389 	mutex_enter(&qlt->qlt_ioctl_lock);
1390 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1391 		mutex_exit(&qlt->qlt_ioctl_lock);
1392 		return (ENODEV);
1393 	}
1394 
1395 	/*
1396 	 * There is a small hole here: several concurrent shared open
1397 	 * sessions could exist, but that case is never checked. It will
1398 	 * not hurt much, so disregard it for now.
1399 	 */
1400 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1401 	mutex_exit(&qlt->qlt_ioctl_lock);
1402 
1403 	return (0);
1404 }
1405 
1406 /*
1407  * All of these ioctls are unstable interfaces meant to be used in a
1408  * controlled lab environment. No formal testing will be (or needs to be)
1409  * done for these ioctls. Note especially that running with an additional
1410  * uploaded firmware is not supported and is provided here for test
1411  * purposes only.
1412  */
1413 /* ARGSUSED */
1414 static int
1415 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1416     cred_t *credp, int *rval)
1417 {
1418 	qlt_state_t	*qlt;
1419 	int		ret = 0;
1420 #ifdef _LITTLE_ENDIAN
1421 	int		i;
1422 #endif
1423 	stmf_iocdata_t	*iocd;
1424 	void		*ibuf = NULL;
1425 	void		*obuf = NULL;
1426 	uint32_t	*intp;
1427 	qlt_fw_info_t	*fwi;
1428 	mbox_cmd_t	*mcp;
1429 	fct_status_t	st;
1430 	char		info[80];
1431 
1432 	if (drv_priv(credp) != 0)
1433 		return (EPERM);
1434 
1435 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1436 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1437 	if (ret)
1438 		return (ret);
1439 	iocd->stmf_error = 0;
1440 
1441 	switch (cmd) {
1442 	case QLT_IOCTL_FETCH_FWDUMP:
1443 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1444 			ret = EINVAL;
1445 			break;
1446 		}
1447 		mutex_enter(&qlt->qlt_ioctl_lock);
1448 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1449 			mutex_exit(&qlt->qlt_ioctl_lock);
1450 			ret = ENODATA;
1451 			iocd->stmf_error = QLTIO_NO_DUMP;
1452 			break;
1453 		}
1454 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1455 			mutex_exit(&qlt->qlt_ioctl_lock);
1456 			ret = EBUSY;
1457 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1458 			break;
1459 		}
1460 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1461 			mutex_exit(&qlt->qlt_ioctl_lock);
1462 			ret = EEXIST;
1463 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1464 			break;
1465 		}
1466 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1467 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1468 		mutex_exit(&qlt->qlt_ioctl_lock);
1469 
1470 		break;
1471 
1472 	case QLT_IOCTL_TRIGGER_FWDUMP:
1473 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1474 			ret = EACCES;
1475 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1476 			break;
1477 		}
1478 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1479 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1480 		info[79] = 0;
1481 		if (fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_USER_REQUEST |
1482 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP,
1483 		    info) != FCT_SUCCESS) {
1484 			ret = EIO;
1485 		}
1486 		break;
1487 	case QLT_IOCTL_UPLOAD_FW:
1488 		if ((iocd->stmf_ibuf_size < 1024) ||
1489 		    (iocd->stmf_ibuf_size & 3)) {
1490 			ret = EINVAL;
1491 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1492 			break;
1493 		}
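		/*
		 * The uploaded image is treated as two concatenated segments
		 * of 32-bit words; word 2 of a segment header is used later
		 * as its RISC load address and word 3 is its length in
		 * words, with the second segment starting right after the
		 * first. The checks below verify that the two lengths add
		 * up to the total ibuf size.
		 */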
1494 		intp = (uint32_t *)ibuf;
1495 #ifdef _LITTLE_ENDIAN
1496 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1497 			intp[i] = BSWAP_32(intp[i]);
1498 		}
1499 #endif
1500 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1501 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1502 		    iocd->stmf_ibuf_size)) {
1503 			ret = EINVAL;
1504 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1505 			break;
1506 		}
1507 		if ((qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1508 		    (!qlt->qlt_25xx_chip && ((intp[8] & 3) == 0))) {
1509 			ret = EACCES;
1510 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1511 			break;
1512 		}
1513 
1514 		/* Everything looks ok, lets copy this firmware */
1515 		if (qlt->fw_code01) {
1516 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1517 			    qlt->fw_length02) << 2);
1518 			qlt->fw_code01 = NULL;
1519 		} else {
1520 			atomic_add_32(&qlt_loaded_counter, 1);
1521 		}
1522 		qlt->fw_length01 = intp[3];
1523 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1524 		    KM_SLEEP);
1525 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1526 		qlt->fw_addr01 = intp[2];
1527 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1528 		qlt->fw_addr02 = qlt->fw_code02[2];
1529 		qlt->fw_length02 = qlt->fw_code02[3];
1530 		break;
1531 
1532 	case QLT_IOCTL_CLEAR_FW:
1533 		if (qlt->fw_code01) {
1534 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1535 			    qlt->fw_length02) << 2);
1536 			qlt->fw_code01 = NULL;
1537 			atomic_add_32(&qlt_loaded_counter, -1);
1538 		}
1539 		break;
1540 
1541 	case QLT_IOCTL_GET_FW_INFO:
1542 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1543 			ret = EINVAL;
1544 			break;
1545 		}
1546 		fwi = (qlt_fw_info_t *)obuf;
1547 		if (qlt->qlt_stay_offline) {
1548 			fwi->fwi_stay_offline = 1;
1549 		}
1550 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1551 			fwi->fwi_port_active = 1;
1552 		}
1553 		fwi->fwi_active_major = qlt->fw_major;
1554 		fwi->fwi_active_minor = qlt->fw_minor;
1555 		fwi->fwi_active_subminor = qlt->fw_subminor;
1556 		fwi->fwi_active_attr = qlt->fw_attr;
1557 		if (qlt->fw_code01) {
1558 			fwi->fwi_fw_uploaded = 1;
1559 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1560 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1561 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1562 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1563 		}
1564 		if (qlt->qlt_25xx_chip) {
1565 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1566 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1567 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1568 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1569 		} else {
1570 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1571 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1572 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1573 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1574 		}
1575 		break;
1576 
1577 	case QLT_IOCTL_STAY_OFFLINE:
1578 		if (!iocd->stmf_ibuf_size) {
1579 			ret = EINVAL;
1580 			break;
1581 		}
1582 		if (*((char *)ibuf)) {
1583 			qlt->qlt_stay_offline = 1;
1584 		} else {
1585 			qlt->qlt_stay_offline = 0;
1586 		}
1587 		break;
1588 
1589 	case QLT_IOCTL_MBOX:
1590 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1591 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1592 			ret = EINVAL;
1593 			break;
1594 		}
1595 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1596 		if (mcp == NULL) {
1597 			ret = ENOMEM;
1598 			break;
1599 		}
1600 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1601 		st = qlt_mailbox_command(qlt, mcp);
1602 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1603 		qlt_free_mailbox_command(qlt, mcp);
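		/*
		 * A QLT_MBOX_FAILED status (with what looks like the
		 * firmware completion code in the low 16 bits) is not
		 * treated as an ioctl error here: the raw mailbox registers
		 * were already copied back to obuf above, so the caller can
		 * decode the failure from the returned mailboxes itself.
		 */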
1604 		if (st != QLT_SUCCESS) {
1605 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1606 				st = QLT_SUCCESS;
1607 		}
1608 		if (st != QLT_SUCCESS) {
1609 			ret = EIO;
1610 			switch (st) {
1611 			case QLT_MBOX_NOT_INITIALIZED:
1612 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1613 				break;
1614 			case QLT_MBOX_BUSY:
1615 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1616 				break;
1617 			case QLT_MBOX_TIMEOUT:
1618 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1619 				break;
1620 			case QLT_MBOX_ABORTED:
1621 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1622 				break;
1623 			}
1624 		}
1625 		break;
1626 
1627 	default:
1628 		QLT_LOG(qlt->qlt_port_alias, "qlt_ioctl: ioctl-0x%02X", cmd);
1629 		ret = ENOTTY;
1630 	}
1631 
1632 	if (ret == 0) {
1633 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1634 	} else if (iocd->stmf_error) {
1635 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1636 	}
1637 	if (obuf) {
1638 		kmem_free(obuf, iocd->stmf_obuf_size);
1639 		obuf = NULL;
1640 	}
1641 	if (ibuf) {
1642 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1643 		ibuf = NULL;
1644 	}
1645 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1646 	return (ret);
1647 }
1648 
1649 static fct_status_t
1650 qlt_force_lip(qlt_state_t *qlt)
1651 {
1652 	mbox_cmd_t	*mcp;
1653 	fct_status_t	 rval;
1654 
1655 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1656 	mcp->to_fw[0] = 0x0072;
1657 	mcp->to_fw[1] = BIT_4;
1658 	mcp->to_fw[3] = 1;
1659 	mcp->to_fw_mask |= BIT_1 | BIT_3;
1660 	rval = qlt_mailbox_command(qlt, mcp);
1661 	if (rval != FCT_SUCCESS) {
1662 		QLT_LOG(qlt->qlt_port_alias, "qlt FLIP MB failed: rval=%llx",
		    (long long)rval);
1663 	} else {
1664 		if (mcp->from_fw[0] != 0x4000) {
1665 			QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1666 			    mcp->from_fw[0]);
1667 			rval = FCT_FAILURE;
1668 		}
1669 	}
1670 	qlt_free_mailbox_command(qlt, mcp);
1671 	return (rval);
1672 }
1673 
1674 static void
1675 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1676 {
1677 	stmf_change_status_t		 st;
1678 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1679 	qlt_state_t			*qlt;
1680 
1681 	qlt = (qlt_state_t *)port->port_fca_private;
1682 	st.st_completion_status = FCT_SUCCESS;
1683 	st.st_additional_info = NULL;
1684 
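	/*
	 * The PORT_ONLINE/OFFLINE requests below move the port through the
	 * ONLINING/OFFLINING intermediate states and leave
	 * qlt_state_not_acked set until the framework sends back the
	 * matching FCT_ACK_PORT_*_COMPLETE event.
	 */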
1685 	switch (cmd) {
1686 	case FCT_CMD_PORT_ONLINE:
1687 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1688 			st.st_completion_status = STMF_ALREADY;
1689 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1690 			st.st_completion_status = FCT_FAILURE;
1691 		if (st.st_completion_status == FCT_SUCCESS) {
1692 			qlt->qlt_state = FCT_STATE_ONLINING;
1693 			qlt->qlt_state_not_acked = 1;
1694 			st.st_completion_status = qlt_port_online(qlt);
1695 			if (st.st_completion_status != STMF_SUCCESS) {
1696 				qlt->qlt_state = FCT_STATE_OFFLINE;
1697 				qlt->qlt_state_not_acked = 0;
1698 			} else {
1699 				qlt->qlt_state = FCT_STATE_ONLINE;
1700 			}
1701 		}
1702 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
1703 		qlt->qlt_change_state_flags = 0;
1704 		break;
1705 
1706 	case FCT_CMD_PORT_OFFLINE:
1707 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
1708 			st.st_completion_status = STMF_ALREADY;
1709 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
1710 			st.st_completion_status = FCT_FAILURE;
1711 		}
1712 		if (st.st_completion_status == FCT_SUCCESS) {
1713 			qlt->qlt_state = FCT_STATE_OFFLINING;
1714 			qlt->qlt_state_not_acked = 1;
1715 
1716 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
1717 				(void) qlt_firmware_dump(port, ssci);
1718 			}
1719 			qlt->qlt_change_state_flags = ssci->st_rflags;
1720 			st.st_completion_status = qlt_port_offline(qlt);
1721 			if (st.st_completion_status != STMF_SUCCESS) {
1722 				qlt->qlt_state = FCT_STATE_ONLINE;
1723 				qlt->qlt_state_not_acked = 0;
1724 			} else {
1725 				qlt->qlt_state = FCT_STATE_OFFLINE;
1726 			}
1727 		}
1728 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
1729 		break;
1730 
1731 	case FCT_ACK_PORT_ONLINE_COMPLETE:
1732 		qlt->qlt_state_not_acked = 0;
1733 		break;
1734 
1735 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
1736 		qlt->qlt_state_not_acked = 0;
1737 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
1738 		    (qlt->qlt_stay_offline == 0)) {
1739 			if (fct_port_initialize(port,
1740 			    qlt->qlt_change_state_flags,
			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
			    "with RFLAG_RESET") != FCT_SUCCESS) {
				cmn_err(CE_WARN, "qlt_ctl: "
				    "fct_port_initialize failed, please use "
				    "stmfstate to start the port-%s manually",
				    qlt->qlt_port_alias);
1747 			}
1748 		}
1749 		break;
1750 
1751 	case FCT_CMD_FORCE_LIP:
1752 		*((fct_status_t *)arg) = qlt_force_lip(qlt);
1753 		QLT_LOG(qlt->qlt_port_alias, "qlt_ctl: forcelip done");
1754 		break;
1755 
1756 	default:
		QLT_LOG(qlt->qlt_port_alias, "qlt_ctl: unsupported-0x%02X",
		    cmd);
1758 		break;
1759 	}
1760 }
1761 
1762 /* ARGSUSED */
1763 static fct_status_t
1764 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
1765 {
1766 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
1767 	return (FCT_FAILURE);
1768 }
1769 
1770 /*
 * Return a pointer to n entries in the request queue. Assumes that the
 * request queue lock is held. Does a very short busy wait if fewer
 * entries (or none) are available. Returns NULL if it still cannot
 * fulfill the request.
1775  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
1776  */
1777 caddr_t
1778 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
1779 {
1780 	int try = 0;
1781 
1782 	while (qlt->req_available < n) {
1783 		uint32_t val1, val2, val3;
1784 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1785 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1786 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1787 		if ((val1 != val2) || (val2 != val3))
1788 			continue;
1789 
1790 		qlt->req_ndx_from_fw = val1;
1791 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
1792 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
1793 		    (REQUEST_QUEUE_ENTRIES - 1));
1794 		if (qlt->req_available < n) {
1795 			if (try < 2) {
1796 				drv_usecwait(100);
1797 				try++;
1798 				continue;
1799 			} else {
1800 				stmf_trace(qlt->qlt_port_alias,
1801 				    "Req Q is full");
1802 				return (NULL);
1803 			}
1804 		}
1805 		break;
1806 	}
	/* We don't change anything until the entries are submitted */
1808 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
1809 }
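/*
 * Worked example of the free-entry arithmetic above (assuming a power-of-two
 * REQUEST_QUEUE_ENTRIES, e.g. 512): with req_ndx_to_fw = 510 and
 * req_ndx_from_fw = 5, the ring holds (510 - 5) & 511 = 505 in-flight slots,
 * so req_available = 512 - 1 - 505 = 6. One slot is always kept unused so
 * that "in == out" unambiguously means an empty queue.
 */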
1810 
1811 /*
 * Updates the request queue in-pointer to the firmware. Assumes that the
 * request queue lock is held.
1813  */
1814 void
1815 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
1816 {
1817 	ASSERT(n >= 1);
1818 	qlt->req_ndx_to_fw += n;
1819 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
1820 	qlt->req_available -= n;
1821 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
1822 }
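/*
 * A minimal usage sketch (this mirrors how the callers in this file use the
 * two routines above; it is not a new code path): the request queue lock
 * must be held across both calls so the reserved slots cannot be handed to
 * another thread before they are submitted.
 *
 *	mutex_enter(&qlt->req_lock);
 *	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
 *	if (req == NULL) {
 *		mutex_exit(&qlt->req_lock);
 *		return (FCT_BUSY);
 *	}
 *	bzero(req, IOCB_SIZE);
 *	(fill in the IOCB fields here)
 *	qlt_submit_req_entries(qlt, 1);
 *	mutex_exit(&qlt->req_lock);
 */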
1823 
1824 
1825 /*
 * Return a pointer to n entries in the priority request queue. Assumes that
 * the priority request queue lock is held. Does a very short busy wait if
 * fewer entries (or none) are available. Returns NULL if it still cannot
 * fulfill the request.
1830  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
1831  */
1832 caddr_t
1833 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
1834 {
1835 	int try = 0;
1836 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
1837 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
1838 	    (PRIORITY_QUEUE_ENTRIES - 1));
1839 
1840 	while (req_available < n) {
1841 		uint32_t val1, val2, val3;
1842 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1843 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1844 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1845 		if ((val1 != val2) || (val2 != val3))
1846 			continue;
1847 
1848 		qlt->preq_ndx_from_fw = val1;
1849 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
1850 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
1851 		    (PRIORITY_QUEUE_ENTRIES - 1));
1852 		if (req_available < n) {
1853 			if (try < 2) {
1854 				drv_usecwait(100);
1855 				try++;
1856 				continue;
1857 			} else {
1858 				return (NULL);
1859 			}
1860 		}
1861 		break;
1862 	}
	/* We don't change anything until the entries are submitted */
1864 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
1865 }
1866 
1867 /*
 * Updates the priority request queue in-pointer to the firmware. Assumes
 * that the priority request queue lock is held.
1869  */
1870 void
1871 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
1872 {
1873 	ASSERT(n >= 1);
1874 	qlt->preq_ndx_to_fw += n;
1875 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
1876 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
1877 }
1878 
1879 /*
 * - Should not be called from interrupt context.
 * - A very hardware-specific function. Does not touch driver state.
 * - Assumes that interrupts are disabled or not set up yet.
 * - Expects that the caller has made sure that all activity has stopped
 *   and it is OK now to go ahead and reset the chip. The caller also
 *   takes care of post-reset damage control.
 * - Called by initialize_adapter() and dump_fw() (for reset only).
 * - During attach() nothing much is happening and during
 *   initialize_adapter() the caller does all the housekeeping so that this
 *   function can execute in peace.
 * - Returns QLT_SUCCESS (0) on success.
1891  */
1892 static fct_status_t
1893 qlt_reset_chip_and_download_fw(qlt_state_t *qlt, int reset_only)
1894 {
1895 	int cntr;
1896 	uint32_t start_addr;
1897 	fct_status_t ret;
1898 
1899 	/* XXX: Switch off LEDs */
1900 
1901 	/* Disable Interrupts */
1902 	REG_WR32(qlt, REG_INTR_CTRL, 0);
1903 	(void) REG_RD32(qlt, REG_INTR_CTRL);
1904 	/* Stop DMA */
1905 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
1906 
1907 	/* Wait for DMA to be stopped */
1908 	cntr = 0;
1909 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
1910 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
1911 		cntr++;
1912 		/* 3 sec should be more than enough */
1913 		if (cntr == 300)
1914 			return (QLT_DMA_STUCK);
1915 	}
1916 
1917 	/* Reset the Chip */
1918 	REG_WR32(qlt, REG_CTRL_STATUS,
1919 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
1920 
1921 	qlt->qlt_link_up = 0;
1922 
1923 	drv_usecwait(100);
1924 
1925 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
1926 	cntr = 0;
1927 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
1928 		delay(drv_usectohz(10000));
1929 		cntr++;
1930 		/* 3 sec should be more than enough */
1931 		if (cntr == 300)
1932 			return (QLT_ROM_STUCK);
1933 	}
1934 	/* Disable Interrupts (Probably not needed) */
1935 	REG_WR32(qlt, REG_INTR_CTRL, 0);
1936 	if (reset_only)
1937 		return (QLT_SUCCESS);
1938 
1939 	/* Load the two segments */
1940 	if (qlt->fw_code01 != NULL) {
1941 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
1942 		    qlt->fw_addr01);
1943 		if (ret == QLT_SUCCESS) {
1944 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
1945 			    qlt->fw_length02, qlt->fw_addr02);
1946 		}
1947 		start_addr = qlt->fw_addr01;
1948 	} else if (qlt->qlt_25xx_chip) {
1949 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
1950 		    fw2500_addr01);
1951 		if (ret == QLT_SUCCESS) {
1952 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
1953 			    fw2500_length02, fw2500_addr02);
1954 		}
1955 		start_addr = fw2500_addr01;
1956 	} else {
1957 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
1958 		    fw2400_addr01);
1959 		if (ret == QLT_SUCCESS) {
1960 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
1961 			    fw2400_length02, fw2400_addr02);
1962 		}
1963 		start_addr = fw2400_addr01;
1964 	}
1965 	if (ret != QLT_SUCCESS)
1966 		return (ret);
1967 
1968 	/* Verify Checksum */
1969 	REG_WR16(qlt, REG_MBOX(0), 7);
1970 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
1971 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
1972 	ret = qlt_raw_mailbox_command(qlt);
1973 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1974 	if (ret != QLT_SUCCESS)
1975 		return (ret);
1976 
1977 	/* Execute firmware */
1978 	REG_WR16(qlt, REG_MBOX(0), 2);
1979 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
1980 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
1981 	REG_WR16(qlt, REG_MBOX(3), 0);
1982 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
1983 	ret = qlt_raw_mailbox_command(qlt);
1984 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1985 	if (ret != QLT_SUCCESS)
1986 		return (ret);
1987 
1988 	/* Get revisions (About Firmware) */
1989 	REG_WR16(qlt, REG_MBOX(0), 8);
1990 	ret = qlt_raw_mailbox_command(qlt);
1991 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
1992 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
1993 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
1994 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
1995 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
1996 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
1997 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1998 	if (ret != QLT_SUCCESS)
1999 		return (ret);
2000 
2001 	return (QLT_SUCCESS);
2002 }
2003 
2004 /*
2005  * Used only from qlt_reset_chip_and_download_fw().
2006  */
2007 static fct_status_t
2008 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2009 				uint32_t word_count, uint32_t risc_addr)
2010 {
2011 	uint32_t words_sent = 0;
2012 	uint32_t words_being_sent;
2013 	uint32_t *cur_host_addr;
2014 	uint32_t cur_risc_addr;
2015 	uint64_t da;
2016 	fct_status_t ret;
2017 
2018 	while (words_sent < word_count) {
2019 		cur_host_addr = &(host_addr[words_sent]);
2020 		cur_risc_addr = risc_addr + (words_sent << 2);
2021 		words_being_sent = min(word_count - words_sent,
2022 		    TOTAL_DMA_MEM_SIZE >> 2);
2023 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2024 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2025 		    DDI_DEV_AUTOINCR);
2026 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2027 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2028 		da = qlt->queue_mem_cookie.dmac_laddress;
		REG_WR16(qlt, REG_MBOX(0), 0x0B);	/* Load RISC RAM ext. */
		/*
		 * Both halves of the RISC address must track the current
		 * chunk; using risc_addr here would break loads that span
		 * more than one DMA chunk.
		 */
		REG_WR16(qlt, REG_MBOX(1), cur_risc_addr & 0xffff);
		REG_WR16(qlt, REG_MBOX(8), ((cur_risc_addr >> 16) & 0xffff));
2032 		REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
2033 		da >>= 16;
2034 		REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
2035 		da >>= 16;
2036 		REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
2037 		da >>= 16;
2038 		REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
2039 		REG_WR16(qlt, REG_MBOX(5), words_being_sent & 0xffff);
2040 		REG_WR16(qlt, REG_MBOX(4), (words_being_sent >> 16) & 0xffff);
2041 		ret = qlt_raw_mailbox_command(qlt);
2042 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2043 		if (ret != QLT_SUCCESS)
2044 			return (ret);
2045 		words_sent += words_being_sent;
2046 	}
2047 	return (QLT_SUCCESS);
2048 }
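/*
 * Worked example of the mailbox layout used by qlt_load_risc_ram() above
 * (assuming the usual 64-bit DMA address split across four 16-bit
 * mailboxes): for da = 0x0000123456789ABC the writes above yield
 * MBOX(3) = 0x9ABC, MBOX(2) = 0x5678, MBOX(7) = 0x1234 and MBOX(6) = 0x0000,
 * while MBOX(5)/MBOX(4) carry the low/high halves of the 32-bit word count.
 */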
2049 
2050 /*
2051  * Not used during normal operation. Only during driver init.
2052  * Assumes that interrupts are disabled and mailboxes are loaded.
 * Just triggers the mailbox command and waits for the completion.
 * Also expects that there is nothing else going on and we will only
2055  * get back a mailbox completion from firmware.
2056  * ---DOES NOT CLEAR INTERRUPT---
2057  * Used only from the code path originating from
2058  * qlt_reset_chip_and_download_fw()
2059  */
2060 static fct_status_t
2061 qlt_raw_mailbox_command(qlt_state_t *qlt)
2062 {
2063 	int cntr = 0;
2064 	uint32_t status;
2065 
2066 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2067 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_INTR_REQUEST) == 0) {
2068 		cntr++;
2069 		if (cntr == 100)
2070 			return (QLT_MAILBOX_STUCK);
2071 		delay(drv_usectohz(10000));
2072 	}
2073 	status = (REG_RD32(qlt, REG_RISC_STATUS) & 0xff);
2074 	if ((status == 1) || (status == 2) ||
2075 	    (status == 0x10) || (status == 0x11)) {
2076 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2077 		if (mbox0 == 0x4000)
2078 			return (QLT_SUCCESS);
2079 		else
2080 			return (QLT_MBOX_FAILED | mbox0);
2081 	}
	/* This is unexpected, dump a message */
	cmn_err(CE_WARN, "qlt(%d): Unexpected interrupt status %llx",
	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2085 	return (QLT_UNEXPECTED_RESPONSE);
2086 }
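/*
 * A minimal sketch of how the callers drive a raw mailbox exchange (this
 * mirrors the "Verify Checksum" step in qlt_reset_chip_and_download_fw();
 * nothing here is a new interface):
 *
 *	REG_WR16(qlt, REG_MBOX(0), 7);
 *	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
 *	REG_WR16(qlt, REG_MBOX(2), start_addr & 0xffff);
 *	ret = qlt_raw_mailbox_command(qlt);
 *	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
 *	if (ret != QLT_SUCCESS)
 *		return (ret);
 */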
2087 
2088 static mbox_cmd_t *
2089 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2090 {
2091 	mbox_cmd_t *mcp;
2092 
2093 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2094 	if (dma_size) {
2095 		qlt_dmem_bctl_t *bctl;
2096 		uint64_t da;
2097 
2098 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2099 		if (mcp->dbuf == NULL) {
2100 			kmem_free(mcp, sizeof (*mcp));
2101 			return (NULL);
2102 		}
2103 		mcp->dbuf->db_data_size = dma_size;
2104 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2105 
2106 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2107 		da = bctl->bctl_dev_addr;
2108 		/* This is the most common initialization of dma ptrs */
2109 		mcp->to_fw[3] = da & 0xffff;
2110 		da >>= 16;
2111 		mcp->to_fw[2] = da & 0xffff;
2112 		da >>= 16;
2113 		mcp->to_fw[7] = da & 0xffff;
2114 		da >>= 16;
2115 		mcp->to_fw[6] = da & 0xffff;
2116 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2117 	}
2118 	mcp->to_fw_mask |= BIT_0;
2119 	mcp->from_fw_mask |= BIT_0;
2120 	return (mcp);
2121 }
2122 
2123 void
2124 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2125 {
2126 	if (mcp->dbuf)
2127 		qlt_i_dmem_free(qlt, mcp->dbuf);
2128 	kmem_free(mcp, sizeof (*mcp));
2129 }
2130 
2131 /*
2132  * This can sleep. Should never be called from interrupt context.
2133  */
2134 static fct_status_t
2135 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2136 {
2137 	int	retries;
2138 	int	i;
2139 	char	info[80];
2140 
2141 	if (curthread->t_flag & T_INTR_THREAD) {
2142 		ASSERT(0);
2143 		return (QLT_MBOX_FAILED);
2144 	}
2145 
2146 	mutex_enter(&qlt->mbox_lock);
2147 	/* See if mailboxes are still uninitialized */
2148 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2149 		mutex_exit(&qlt->mbox_lock);
2150 		return (QLT_MBOX_NOT_INITIALIZED);
2151 	}
2152 
2153 	/* Wait to grab the mailboxes */
2154 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2155 	    retries++) {
2156 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2157 		if ((retries > 5) ||
2158 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2159 			mutex_exit(&qlt->mbox_lock);
2160 			return (QLT_MBOX_BUSY);
2161 		}
2162 	}
2163 	/* Make sure we always ask for mailbox 0 */
2164 	mcp->from_fw_mask |= BIT_0;
2165 
2166 	/* Load mailboxes, set state and generate RISC interrupt */
2167 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2168 	qlt->mcp = mcp;
2169 	for (i = 0; i < MAX_MBOXES; i++) {
2170 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2171 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2172 	}
2173 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2174 
2175 qlt_mbox_wait_loop:;
2176 	/* Wait for mailbox command completion */
2177 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2178 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2179 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2180 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2181 		info[79] = 0;
2182 		qlt->mcp = NULL;
2183 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2184 		mutex_exit(&qlt->mbox_lock);
2185 
2186 		/*
2187 		 * XXX Throw HBA fatal error event
2188 		 */
2189 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2190 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2191 		return (QLT_MBOX_TIMEOUT);
2192 	}
2193 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2194 		goto qlt_mbox_wait_loop;
2195 
2196 	qlt->mcp = NULL;
2197 
2198 	/* Make sure its a completion */
2199 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2200 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2201 		mutex_exit(&qlt->mbox_lock);
2202 		return (QLT_MBOX_ABORTED);
2203 	}
2204 
	/* Mailbox command completed. Clear state, return based on mbox 0 */
2206 	/* Mailboxes are already loaded by interrupt routine */
2207 	qlt->mbox_io_state = MBOX_STATE_READY;
2208 	mutex_exit(&qlt->mbox_lock);
2209 	if (mcp->from_fw[0] != 0x4000)
2210 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2211 
2212 	return (QLT_SUCCESS);
2213 }
2214 
2215 /*
2216  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2217  */
2218 /* ARGSUSED */
2219 static uint_t
2220 qlt_isr(caddr_t arg, caddr_t arg2)
2221 {
2222 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2223 	int		instance;
2224 	uint32_t	risc_status, intr_type;
2225 	int		i;
2226 	int		intr_loop_count;
2227 	char		info[80];
2228 
2229 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2230 	if (!mutex_tryenter(&qlt->intr_lock)) {
2231 		/*
		 * Normally we will always get this lock. If tryenter
		 * fails, it means that the driver is trying to do some
		 * cleanup and is masking the intr, but an intr has sneaked
		 * in between. See if our device has generated this intr.
		 * If so, wait a bit and return claimed. If not, return
		 * claimed if this is the first instance of an interrupt
		 * after the driver has grabbed the lock.
2239 		 */
2240 		if (risc_status & BIT_15) {
2241 			drv_usecwait(10);
2242 			return (DDI_INTR_CLAIMED);
2243 		} else if (qlt->intr_sneak_counter) {
2244 			qlt->intr_sneak_counter--;
2245 			return (DDI_INTR_CLAIMED);
2246 		} else {
2247 			return (DDI_INTR_UNCLAIMED);
2248 		}
2249 	}
2250 	if (((risc_status & BIT_15) == 0) ||
2251 	    (qlt->qlt_intr_enabled == 0)) {
2252 		/*
		 * This might be pure coincidence: we are operating in
		 * interrupt-disabled mode and another device sharing the
		 * interrupt line has generated an interrupt while an
		 * interrupt from our device might be pending. Just
		 * ignore it and let the code handling the interrupt-
		 * disabled mode handle it.
2259 		 */
2260 		mutex_exit(&qlt->intr_lock);
2261 		return (DDI_INTR_UNCLAIMED);
2262 	}
2263 
2264 	/*
	 * XXX Take care of the MSI case and disable intrs properly.
	 * It's going to be complicated because of the max iterations:
	 * the HBA will have posted an intr which did not go out on PCI,
	 * but we did not service it either because of the max iterations.
	 * Maybe offload the intr to a different thread.
2270 	 */
2271 	instance = ddi_get_instance(qlt->dip);
2272 	intr_loop_count = 0;
2273 
2274 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2275 
2276 intr_again:;
2277 	/* First check for high performance path */
2278 	intr_type = risc_status & 0xff;
2279 	if (intr_type == 0x1C) {
2280 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2281 		qlt->atio_ndx_from_fw = risc_status >> 16;
2282 		qlt_handle_atio_queue_update(qlt);
2283 	} else if (intr_type == 0x13) {
2284 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2285 		qlt->resp_ndx_from_fw = risc_status >> 16;
2286 		qlt_handle_resp_queue_update(qlt);
2287 		/* XXX what about priority queue */
2288 	} else if (intr_type == 0x1D) {
2289 		qlt->atio_ndx_from_fw = REG_RD32(qlt, REG_ATIO_IN_PTR);
2290 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2291 		qlt->resp_ndx_from_fw = risc_status >> 16;
2292 		qlt_handle_atio_queue_update(qlt);
2293 		qlt_handle_resp_queue_update(qlt);
2294 	} else if (intr_type == 0x12) {
2295 		uint16_t code = risc_status >> 16;
2296 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2297 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2298 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2299 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2300 
2301 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2302 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2303 		    " mb5=%x, mb6=%x", code, mbox1, mbox2, mbox5, mbox6);
2304 		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
2305 		    " mb5=%x, mb6=%x", instance, code, mbox1, mbox2, mbox5,
2306 		    mbox6);
2307 
2308 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2309 			if (qlt->qlt_link_up) {
2310 				fct_handle_event(qlt->qlt_port,
2311 				    FCT_EVENT_LINK_RESET, 0, 0);
2312 			}
2313 		} else if (code == 0x8012) {
2314 			qlt->qlt_link_up = 0;
2315 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2316 			    0, 0);
2317 		} else if (code == 0x8011) {
2318 			switch (mbox1) {
2319 			case 0: qlt->link_speed = PORT_SPEED_1G;
2320 				break;
2321 			case 1: qlt->link_speed = PORT_SPEED_2G;
2322 				break;
2323 			case 3: qlt->link_speed = PORT_SPEED_4G;
2324 				break;
2325 			case 4: qlt->link_speed = PORT_SPEED_8G;
2326 				break;
2327 			default:
2328 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2329 			}
2330 			qlt->qlt_link_up = 1;
2331 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2332 			    0, 0);
2333 		} else if (code == 0x8002) {
2334 			(void) snprintf(info, 80,
2335 			    "Got 8002, mb1=%x mb2=%x mb5=%x mb6=%x",
2336 			    mbox1, mbox2, mbox5, mbox6);
2337 			info[79] = 0;
2338 			(void) fct_port_shutdown(qlt->qlt_port,
2339 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2340 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2341 		}
2342 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2343 		/* Handle mailbox completion */
2344 		mutex_enter(&qlt->mbox_lock);
2345 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2346 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2347 			    " when driver wasn't waiting for it %d",
2348 			    instance, qlt->mbox_io_state);
2349 		} else {
2350 			for (i = 0; i < MAX_MBOXES; i++) {
2351 				if (qlt->mcp->from_fw_mask &
2352 				    (((uint32_t)1) << i)) {
2353 					qlt->mcp->from_fw[i] =
2354 					    REG_RD16(qlt, REG_MBOX(i));
2355 				}
2356 			}
2357 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2358 		}
2359 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2360 		cv_broadcast(&qlt->mbox_cv);
2361 		mutex_exit(&qlt->mbox_lock);
2362 	} else {
2363 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2364 		    instance, intr_type);
2365 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2366 	}
2367 
2368 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2369 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2370 	if ((risc_status & BIT_15) &&
2371 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2372 		goto intr_again;
2373 	}
2374 
2375 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2376 
2377 	mutex_exit(&qlt->intr_lock);
2378 	return (DDI_INTR_CLAIMED);
2379 }
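/*
 * Example of the RISC status decoding done in qlt_isr() above: a risc_status
 * value of 0x00A4801C has BIT_15 set (interrupt pending), interrupt type
 * 0x1C in the low byte (ATIO queue update) and the new firmware in-pointer
 * 0x00A4 in the upper 16 bits, which gets copied into atio_ndx_from_fw
 * before the ATIO queue is walked.
 */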
2380 
2381 /* **************** NVRAM Functions ********************** */
2382 
2383 fct_status_t
2384 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2385 {
2386 	uint32_t	timer;
2387 
2388 	/* Clear access error flag */
2389 	REG_WR32(qlt, REG_CTRL_STATUS,
2390 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2391 
2392 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2393 
2394 	/* Wait for READ cycle to complete. */
2395 	for (timer = 3000; timer; timer--) {
2396 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2397 			break;
2398 		}
2399 		drv_usecwait(10);
2400 	}
2401 	if (timer == 0) {
2402 		return (QLT_FLASH_TIMEOUT);
2403 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2404 		return (QLT_FLASH_ACCESS_ERROR);
2405 	}
2406 
2407 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2408 
2409 	return (QLT_SUCCESS);
2410 }
2411 
2412 fct_status_t
2413 qlt_read_nvram(qlt_state_t *qlt)
2414 {
2415 	uint32_t		index, addr, chksum;
2416 	uint32_t		val, *ptr;
2417 	fct_status_t		ret;
2418 	qlt_nvram_t		*nv;
2419 	uint64_t		empty_node_name = 0;
2420 
2421 	if (qlt->qlt_25xx_chip) {
2422 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2423 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2424 	} else {
2425 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2426 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2427 	}
2428 	mutex_enter(&qlt_global_lock);
2429 
2430 	/* Pause RISC. */
2431 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
2432 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2433 
2434 	/* Get NVRAM data and calculate checksum. */
2435 	ptr = (uint32_t *)qlt->nvram;
2436 	chksum = 0;
2437 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2438 		ret = qlt_read_flash_word(qlt, addr++, &val);
2439 		if (ret != QLT_SUCCESS) {
2440 			mutex_exit(&qlt_global_lock);
2441 			return (ret);
2442 		}
2443 		chksum += val;
2444 		*ptr = LE_32(val);
2445 		ptr++;
2446 	}
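	/*
	 * Note: a QLogic NVRAM image is expected to be laid out so that its
	 * 32-bit words sum to zero (mod 2^32); a nonzero chksum therefore
	 * flags a corrupt image in the sanity check below.
	 */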
2447 
2448 	/* Release RISC Pause */
2449 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_PAUSE);
2450 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2451 
2452 	mutex_exit(&qlt_global_lock);
2453 
2454 	/* Sanity check NVRAM Data */
2455 	nv = qlt->nvram;
2456 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2457 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2458 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2459 		return (QLT_BAD_NVRAM_DATA);
2460 	}
2461 
2462 	/* If node name is zero, hand craft it from port name */
2463 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2464 		bcopy(nv->port_name, nv->node_name, 8);
2465 		nv->node_name[0] = nv->node_name[0] & ~BIT_0;
2466 		nv->port_name[0] = nv->node_name[0] | BIT_0;
2467 	}
2468 
2469 	return (QLT_SUCCESS);
2470 }
2471 
2472 uint32_t
2473 qlt_sync_atio_queue(qlt_state_t *qlt)
2474 {
2475 	uint32_t total_ent;
2476 
2477 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2478 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2479 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2480 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2481 		    DDI_DMA_SYNC_FORCPU);
2482 	} else {
2483 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2484 		    qlt->atio_ndx_from_fw;
2485 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2486 		    + (qlt->atio_ndx_to_fw << 6), (ATIO_QUEUE_ENTRIES -
2487 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2488 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2489 		    ATIO_QUEUE_OFFSET,
2490 		    qlt->atio_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2491 	}
2492 	return (total_ent);
2493 }
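/*
 * Worked example of the wrap-around handling in qlt_sync_atio_queue()
 * (assuming a power-of-two ATIO_QUEUE_ENTRIES; 8 is used here purely for
 * illustration): with atio_ndx_to_fw = 6 and atio_ndx_from_fw = 2, the new
 * entries occupy slots 6, 7, 0 and 1, so two syncs are issued -- one for the
 * (8 - 6) = 2 trailing slots and one for the 2 leading slots -- and the
 * function returns total_ent = 4.
 */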
2494 
2495 void
2496 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2497 {
2498 	uint32_t total_ent;
2499 
2500 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2501 		return;
2502 
2503 	total_ent = qlt_sync_atio_queue(qlt);
2504 
2505 	do {
2506 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2507 		    qlt->atio_ndx_to_fw << 6];
2508 		uint32_t ent_cnt;
2509 
2510 		ent_cnt = (uint32_t)(atio[1]);
2511 		if (ent_cnt > total_ent) {
2512 			break;
2513 		}
2514 		switch ((uint8_t)(atio[0])) {
2515 		case 0x0d:	/* INOT */
2516 			qlt_handle_inot(qlt, atio);
2517 			break;
2518 		case 0x06:	/* ATIO */
2519 			qlt_handle_atio(qlt, atio);
2520 			break;
2521 		default:
2522 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2523 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2524 			break;
2525 		}
2526 		qlt->atio_ndx_to_fw = (qlt->atio_ndx_to_fw + ent_cnt) &
2527 		    (ATIO_QUEUE_ENTRIES - 1);
2528 		total_ent -= ent_cnt;
2529 	} while (total_ent > 0);
2530 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2531 }
2532 
2533 uint32_t
2534 qlt_sync_resp_queue(qlt_state_t *qlt)
2535 {
2536 	uint32_t total_ent;
2537 
2538 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2539 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2540 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2541 		    RESPONSE_QUEUE_OFFSET
2542 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2543 		    DDI_DMA_SYNC_FORCPU);
2544 	} else {
2545 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2546 		    qlt->resp_ndx_from_fw;
2547 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2548 		    RESPONSE_QUEUE_OFFSET
2549 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2550 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2551 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2552 		    RESPONSE_QUEUE_OFFSET,
2553 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2554 	}
2555 	return (total_ent);
2556 }
2557 
2558 void
2559 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2560 {
2561 	uint32_t total_ent;
2562 	uint8_t c;
2563 
2564 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2565 		return;
2566 
2567 	total_ent = qlt_sync_resp_queue(qlt);
2568 
2569 	do {
2570 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
2571 		uint32_t ent_cnt;
2572 
2573 		ent_cnt = (uint32_t)(resp[1]);
2574 		if (ent_cnt > total_ent) {
2575 			break;
2576 		}
2577 		switch ((uint8_t)(resp[0])) {
2578 		case 0x12:	/* CTIO completion */
2579 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
2580 			break;
2581 		case 0x0e:	/* NACK */
2582 			/* Do Nothing */
2583 			break;
2584 		case 0x29:	/* CT PassThrough */
2585 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
2586 			break;
2587 		case 0x33:	/* Abort IO IOCB completion */
2588 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
2589 			break;
2590 		case 0x51:	/* PUREX */
2591 			qlt_handle_purex(qlt, (uint8_t *)resp);
2592 			break;
2593 		case 0x52:
2594 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
2595 			break;
2596 		case 0x53:	/* ELS passthrough */
2597 			c = ((uint8_t)resp[0x1f]) >> 5;
2598 			if (c == 0) {
2599 				qlt_handle_sol_els_completion(qlt,
2600 				    (uint8_t *)resp);
2601 			} else if (c == 3) {
2602 				qlt_handle_unsol_els_abort_completion(qlt,
2603 				    (uint8_t *)resp);
2604 			} else {
2605 				qlt_handle_unsol_els_completion(qlt,
2606 				    (uint8_t *)resp);
2607 			}
2608 			break;
2609 		case 0x54:	/* ABTS received */
2610 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
2611 			break;
2612 		case 0x55:	/* ABTS completion */
2613 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
2614 			break;
2615 		}
2616 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
2617 		    (RESPONSE_QUEUE_ENTRIES - 1);
2618 		total_ent -= ent_cnt;
2619 	} while (total_ent > 0);
2620 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
2621 }
2622 
2623 fct_status_t
2624 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
2625 				uint16_t *ret_handle)
2626 {
2627 	fct_status_t ret;
2628 	mbox_cmd_t *mcp;
2629 	uint16_t n;
2630 	uint16_t h;
2631 	uint32_t ent_id;
2632 	uint8_t *p;
2633 	int found = 0;
2634 
2635 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
2636 	if (mcp == NULL) {
2637 		return (STMF_ALLOC_FAILURE);
2638 	}
2639 	mcp->to_fw[0] = 0x7C;	/* GET ID LIST */
2640 	mcp->to_fw[8] = 2048 * 8;
2641 	mcp->to_fw_mask |= BIT_8;
2642 	mcp->from_fw_mask |= BIT_1 | BIT_2;
2643 
2644 	ret = qlt_mailbox_command(qlt, mcp);
2645 	if (ret != QLT_SUCCESS) {
2646 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
2647 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
2648 		    mcp->from_fw[1], mcp->from_fw[2]);
2649 		qlt_free_mailbox_command(qlt, mcp);
2650 		return (ret);
2651 	}
2652 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
2653 	p = mcp->dbuf->db_sglist[0].seg_addr;
2654 	for (n = 0; n < mcp->from_fw[1]; n++) {
2655 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
2656 		h = (uint16_t)p[4] | (((uint16_t)p[5]) << 8);
2657 		if (ent_id == id) {
2658 			found = 1;
2659 			*ret_handle = h;
2660 			if ((cmd_handle != FCT_HANDLE_NONE) &&
2661 			    (cmd_handle != h)) {
2662 				cmn_err(CE_WARN, "login for portid %x came in "
2663 				    "with handle %x, while the portid was "
2664 				    "already using a different handle %x",
2665 				    id, cmd_handle, h);
2666 				qlt_free_mailbox_command(qlt, mcp);
2667 				return (QLT_FAILURE);
2668 			}
2669 			break;
2670 		}
2671 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
2672 			cmn_err(CE_WARN, "login for portid %x came in with "
2673 			    "handle %x, while the handle was already in use "
2674 			    "for portid %x", id, cmd_handle, ent_id);
2675 			qlt_free_mailbox_command(qlt, mcp);
2676 			return (QLT_FAILURE);
2677 		}
2678 		p += 8;
2679 	}
2680 	if (!found) {
2681 		*ret_handle = cmd_handle;
2682 	}
2683 	qlt_free_mailbox_command(qlt, mcp);
2684 	return (FCT_SUCCESS);
2685 }
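/*
 * The loop in qlt_portid_to_handle() assumes the GET ID LIST (0x7C) response
 * format in which each returned entry is 8 bytes: a little-endian 24-bit
 * port ID in bytes 0-2 followed by a 16-bit login handle in bytes 4-5 (the
 * remaining bytes are unused here). For example, an entry of
 * 56 34 12 00 05 00 00 00 describes port ID 0x123456 with handle 5.
 */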
2686 
2687 /* ARGSUSED */
2688 fct_status_t
2689 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
2690 				fct_cmd_t *login)
2691 {
2692 	uint8_t *p;
2693 
2694 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
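	/*
	 * The bytes below hand-craft a PLOGI payload: the ELS command code,
	 * then FC-LS common service parameters (the numeric values are what
	 * this driver advertises, not something read from NVRAM), the port
	 * and node WWNs at offsets 20 and 28, and class-3 service parameters
	 * starting at offset 68.
	 */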
2695 	p[0] = ELS_OP_PLOGI;
2696 	*((uint16_t *)(&p[4])) = 0x2020;
2697 	p[7] = 3;
2698 	p[8] = 0x88;
2699 	p[10] = 8;
2700 	p[13] = 0xff; p[15] = 0x1f;
2701 	p[18] = 7; p[19] = 0xd0;
2702 
2703 	bcopy(port->port_pwwn, p + 20, 8);
2704 	bcopy(port->port_nwwn, p + 28, 8);
2705 
2706 	p[68] = 0x80;
2707 	p[74] = 8;
2708 	p[77] = 0xff;
2709 	p[81] = 1;
2710 
2711 	return (FCT_SUCCESS);
2712 }
2713 
2714 /* ARGSUSED */
2715 fct_status_t
2716 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
2717 				fct_cmd_t *login)
2718 {
2719 	return (FCT_SUCCESS);
2720 }
2721 
2722 fct_status_t
2723 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
2724 				fct_cmd_t *login)
2725 {
2726 	uint16_t h;
2727 	fct_status_t ret;
2728 
2729 	switch (rp->rp_id) {
2730 	case 0xFFFFFC:	h = 0x7FC; break;
2731 	case 0xFFFFFD:	h = 0x7FD; break;
2732 	case 0xFFFFFE:	h = 0x7FE; break;
2733 	case 0xFFFFFF:	h = 0x7FF; break;
2734 	default:
2735 		ret = qlt_portid_to_handle(
2736 		    (qlt_state_t *)port->port_fca_private, rp->rp_id,
2737 		    login->cmd_rp_handle, &h);
2738 		if (ret != FCT_SUCCESS)
2739 			return (ret);
2740 	}
2741 
2742 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
2743 		ret = qlt_fill_plogi_req(port, rp, login);
2744 	} else {
2745 		ret = qlt_fill_plogi_resp(port, rp, login);
2746 	}
2747 
2748 	if (ret != FCT_SUCCESS)
2749 		return (ret);
2750 
2751 	if (h == FCT_HANDLE_NONE)
2752 		return (FCT_SUCCESS);
2753 
2754 	if (rp->rp_handle == FCT_HANDLE_NONE) {
2755 		rp->rp_handle = h;
2756 		return (FCT_SUCCESS);
2757 	}
2758 
2759 	if (rp->rp_handle == h)
2760 		return (FCT_SUCCESS);
2761 
2762 	return (FCT_FAILURE);
2763 }
2764 /* invoked in single thread */
2765 fct_status_t
2766 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
2767 {
2768 	uint8_t *req;
2769 	qlt_state_t *qlt;
2770 	clock_t	dereg_req_timer;
2771 	fct_status_t ret;
2772 
2773 	qlt = (qlt_state_t *)port->port_fca_private;
2774 
2775 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
2776 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
2777 		return (FCT_SUCCESS);
2778 	ASSERT(qlt->rp_id_in_dereg == 0);
2779 
2780 	mutex_enter(&qlt->preq_lock);
2781 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
2782 	if (req == NULL) {
2783 		mutex_exit(&qlt->preq_lock);
2784 		return (FCT_BUSY);
2785 	}
2786 	bzero(req, IOCB_SIZE);
2787 	req[0] = 0x52; req[1] = 1;
2788 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
2789 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
2790 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
2791 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
2792 	qlt->rp_id_in_dereg = rp->rp_id;
2793 	qlt_submit_preq_entries(qlt, 1);
2794 
2795 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
2796 	if (cv_timedwait(&qlt->rp_dereg_cv,
2797 	    &qlt->preq_lock, dereg_req_timer) > 0) {
2798 		ret = qlt->rp_dereg_status;
2799 	} else {
2800 		ret = FCT_BUSY;
2801 	}
2802 	qlt->rp_dereg_status = 0;
2803 	qlt->rp_id_in_dereg = 0;
2804 	mutex_exit(&qlt->preq_lock);
2805 	return (ret);
2806 }
2807 
2808 /*
2809  * Pass received ELS up to framework.
2810  */
2811 static void
2812 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
2813 {
2814 	fct_cmd_t		*cmd;
2815 	fct_els_t		*els;
2816 	qlt_cmd_t		*qcmd;
2817 	uint32_t		payload_size;
2818 	uint32_t		remote_portid;
2819 	uint8_t			*pldptr, *bndrptr;
2820 	int			i, off;
2821 	uint16_t		iocb_flags;
2822 	char			info[160];
2823 
2824 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
2825 	    ((uint32_t)(resp[0x1A])) << 16;
2826 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
2827 	if (iocb_flags & BIT_15) {
2828 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
2829 	} else {
2830 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
2831 	}
2832 
2833 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
2834 		cmn_err(CE_WARN, "handle_purex: payload is too large");
2835 		goto cmd_null;
2836 	}
2837 
2838 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS, payload_size +
2839 	    GET_STRUCT_SIZE(qlt_cmd_t), 0);
2840 	if (cmd == NULL) {
2841 cmd_null:;
2842 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
2843 		    "allocate space for fct_cmd", (void *)qlt);
2844 		info[159] = 0;
2845 		(void) fct_port_shutdown(qlt->qlt_port,
2846 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
2847 		return;
2848 	}
2849 
2850 	cmd->cmd_port = qlt->qlt_port;
2851 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
2852 	if (cmd->cmd_rp_handle == 0xFFFF) {
2853 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
2854 	}
2855 
2856 	els = (fct_els_t *)cmd->cmd_specific;
2857 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
2858 	els->els_req_size = payload_size;
2859 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
2860 	    GET_STRUCT_SIZE(qlt_cmd_t));
2861 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
2862 	cmd->cmd_rportid = remote_portid;
2863 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
2864 	    ((uint32_t)(resp[0x16])) << 16;
2865 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
2866 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
2867 	pldptr = &resp[0x2C];
2868 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
2869 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
2870 		/* Take care of fw's swapping of payload */
2871 		els->els_req_payload[i] = pldptr[3];
2872 		els->els_req_payload[i+1] = pldptr[2];
2873 		els->els_req_payload[i+2] = pldptr[1];
2874 		els->els_req_payload[i+3] = pldptr[0];
2875 		pldptr += 4;
2876 		if (pldptr == bndrptr)
2877 			pldptr = (uint8_t *)qlt->resp_ptr;
2878 		off += 4;
2879 		if (off >= IOCB_SIZE) {
2880 			off = 4;
2881 			pldptr += 4;
2882 		}
2883 	}
2884 	fct_post_rcvd_cmd(cmd, 0);
2885 }
2886 
2887 fct_status_t
2888 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
2889 {
2890 	qlt_state_t	*qlt;
2891 	char		info[160];
2892 
2893 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
2894 
2895 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
2896 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2897 			goto fatal_panic;
2898 		} else {
2899 			return (qlt_send_status(qlt, cmd));
2900 		}
2901 	}
2902 
2903 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
2904 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2905 			goto fatal_panic;
2906 		} else {
2907 			return (qlt_send_els_response(qlt, cmd));
2908 		}
2909 	}
2910 
2911 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2912 		cmd->cmd_handle = 0;
2913 	}
2914 
2915 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
2916 		return (qlt_send_abts_response(qlt, cmd, 0));
2917 	} else {
2918 		ASSERT(0);
2919 		return (FCT_FAILURE);
2920 	}
2921 
2922 fatal_panic:;
2923 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
2924 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
2925 	    ioflags);
2926 	info[159] = 0;
2927 	(void) fct_port_shutdown(qlt->qlt_port,
2928 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
2929 	return (FCT_FAILURE);
2930 }
2931 
2932 /* ARGSUSED */
2933 fct_status_t
2934 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
2935 {
2936 	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
2937 	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
2938 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
2939 	uint8_t *req;
2940 	uint16_t flags;
2941 
2942 	if (dbuf->db_handle == 0)
2943 		qcmd->dbuf = dbuf;
2944 	flags = ((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5;
2945 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
2946 		flags |= 2;
2947 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
2948 	} else {
2949 		flags |= 1;
2950 	}
2951 
2952 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
2953 		flags |= BIT_15;
2954 
2955 	mutex_enter(&qlt->req_lock);
2956 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
2957 	if (req == NULL) {
2958 		mutex_exit(&qlt->req_lock);
2959 		return (FCT_BUSY);
2960 	}
2961 	bzero(req, IOCB_SIZE);
2962 	req[0] = 0x12; req[1] = 0x1;
2963 	req[2] = dbuf->db_handle;
2964 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
2965 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
2966 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
2967 	req[12] = 1;
2968 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
2969 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
2970 	QMEM_WR16(qlt, req+0x1A, flags);
2971 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
2972 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
2973 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
2974 	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
2975 	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
2976 	qlt_submit_req_entries(qlt, 1);
2977 	mutex_exit(&qlt->req_lock);
2978 
2979 	return (STMF_SUCCESS);
2980 }
2981 
2982 /*
 * We must construct a proper FCP_RSP IU now. Here we only focus on
 * the handling of FCP_SNS_INFO. If there were protocol failures
 * (FCP_RSP_INFO), they should have been caught before we got here.
2986  */
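/*
 * Two CTIO status modes are used below: mode 1 embeds the SCSI status and up
 * to 24 bytes of sense data directly in the CTIO type 7 IOCB, while mode 2
 * builds a full FCP_RSP IU in DMA-able memory and points the IOCB at it.
 * Mode 2 is needed only when the sense data does not fit in the IOCB
 * (task_sense_length > 24).
 */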
2987 fct_status_t
2988 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
2989 {
2990 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
2991 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
2992 	qlt_dmem_bctl_t *bctl;
2993 	uint32_t size;
2994 	uint8_t *req, *fcp_rsp_iu;
2995 	uint8_t *psd, sensbuf[24];		/* sense data */
2996 	uint16_t flags;
2997 	uint16_t scsi_status;
2998 	int use_mode2;
2999 	int ndx;
3000 
3001 	/*
	 * Fast path for anything other than CHECK CONDITION status
3003 	 */
3004 	if (task->task_scsi_status != STATUS_CHECK) {
3005 		/*
3006 		 * We will use mode1
3007 		 */
3008 		flags = BIT_6 | BIT_15 |
3009 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3010 		scsi_status = (uint16_t)task->task_scsi_status;
3011 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3012 			scsi_status |= BIT_10;
3013 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3014 			scsi_status |= BIT_11;
3015 		}
3016 		qcmd->dbuf_rsp_iu = NULL;
3017 
3018 		/*
3019 		 * Fillout CTIO type 7 IOCB
3020 		 */
3021 		mutex_enter(&qlt->req_lock);
3022 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3023 		if (req == NULL) {
3024 			mutex_exit(&qlt->req_lock);
3025 			return (FCT_BUSY);
3026 		}
3027 
3028 		/*
3029 		 * Common fields
3030 		 */
3031 		bzero(req, IOCB_SIZE);
3032 		req[0x00] = 0x12;
3033 		req[0x01] = 0x1;
3034 		req[0x02] = BIT_7;	/* indicate if it's a pure status req */
3035 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3036 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3037 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3038 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3039 
3040 		/*
3041 		 * Mode-specific fields
3042 		 */
3043 		QMEM_WR16(qlt, req + 0x1A, flags);
3044 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3045 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3046 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3047 
3048 		/*
3049 		 * Trigger FW to send SCSI status out
3050 		 */
3051 		qlt_submit_req_entries(qlt, 1);
3052 		mutex_exit(&qlt->req_lock);
3053 		return (STMF_SUCCESS);
3054 	}
3055 
3056 	ASSERT(task->task_scsi_status == STATUS_CHECK);
3057 	/*
	 * Decide which SCSI status mode should be used
3059 	 */
3060 	use_mode2 = (task->task_sense_length > 24);
3061 
3062 	/*
3063 	 * Prepare required information per the SCSI status mode
3064 	 */
3065 	flags = BIT_15 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3066 	if (use_mode2) {
3067 		flags |= BIT_7;
3068 
		/* FCP_RSP IU = 24-byte header + sense data */
		size = 24 + task->task_sense_length;
		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt, size, &size, 0);
3072 		if (!qcmd->dbuf_rsp_iu) {
3073 			return (FCT_ALLOC_FAILURE);
3074 		}
3075 
3076 		/*
3077 		 * Start to construct FCP_RSP IU
3078 		 */
3079 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3080 		bzero(fcp_rsp_iu, 24);
3081 
3082 		/*
3083 		 * FCP_RSP IU flags, byte10
3084 		 */
3085 		fcp_rsp_iu[10] |= BIT_1;
3086 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3087 			fcp_rsp_iu[10] |= BIT_2;
3088 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3089 			fcp_rsp_iu[10] |= BIT_3;
3090 		}
3091 
3092 		/*
3093 		 * SCSI status code, byte11
3094 		 */
3095 		fcp_rsp_iu[11] = task->task_scsi_status;
3096 
3097 		/*
3098 		 * FCP_RESID (Overrun or underrun)
3099 		 */
3100 		fcp_rsp_iu[12] = (task->task_resid >> 24) & 0xFF;
3101 		fcp_rsp_iu[13] = (task->task_resid >> 16) & 0xFF;
3102 		fcp_rsp_iu[14] = (task->task_resid >>  8) & 0xFF;
3103 		fcp_rsp_iu[15] = (task->task_resid >>  0) & 0xFF;
3104 
3105 		/*
3106 		 * FCP_SNS_LEN
3107 		 */
3108 		fcp_rsp_iu[18] = (task->task_sense_length >> 8) & 0xFF;
3109 		fcp_rsp_iu[19] = (task->task_sense_length >> 0) & 0xFF;
3110 
3111 		/*
3112 		 * FCP_RSP_LEN
3113 		 */
3114 		/*
3115 		 * no FCP_RSP_INFO
3116 		 */
3117 		/*
3118 		 * FCP_SNS_INFO
3119 		 */
3120 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3121 		    task->task_sense_length);
3122 
3123 		/*
3124 		 * Ensure dma data consistency
3125 		 */
3126 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3127 	} else {
3128 		flags |= BIT_6;
3129 
3130 		scsi_status = (uint16_t)task->task_scsi_status;
3131 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3132 			scsi_status |= BIT_10;
3133 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3134 			scsi_status |= BIT_11;
3135 		}
3136 		if (task->task_sense_length) {
3137 			scsi_status |= BIT_9;
3138 		}
3139 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3140 		qcmd->dbuf_rsp_iu = NULL;
3141 	}
3142 
3143 	/*
3144 	 * Fillout CTIO type 7 IOCB
3145 	 */
3146 	mutex_enter(&qlt->req_lock);
3147 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3148 	if (req == NULL) {
3149 		mutex_exit(&qlt->req_lock);
3150 		if (use_mode2) {
3151 			qlt_dmem_free(cmd->cmd_port->port_fds,
3152 			    qcmd->dbuf_rsp_iu);
3153 			qcmd->dbuf_rsp_iu = NULL;
3154 		}
3155 		return (FCT_BUSY);
3156 	}
3157 
3158 	/*
3159 	 * Common fields
3160 	 */
3161 	bzero(req, IOCB_SIZE);
3162 	req[0x00] = 0x12;
3163 	req[0x01] = 0x1;
3164 	req[0x02] = BIT_7;	/* to indicate if it's a pure status req */
3165 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3166 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3167 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3168 	if (use_mode2) {
3169 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3170 	}
3171 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3172 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3173 
3174 	/*
3175 	 * Mode-specific fields
3176 	 */
3177 	if (!use_mode2) {
3178 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3179 	}
3180 	QMEM_WR16(qlt, req + 0x1A, flags);
3181 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3182 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3183 	if (use_mode2) {
3184 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3185 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3186 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3187 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3188 	} else {
3189 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3190 		psd = req+0x28;
3191 
3192 		/*
3193 		 * Data in sense buf is always big-endian, data in IOCB
3194 		 * should always be little-endian, so we must do swapping.
3195 		 */
3196 		size = ((task->task_sense_length + 3) & (~3));
3197 		for (ndx = 0; ndx < size; ndx += 4) {
3198 			psd[ndx + 0] = sensbuf[ndx + 3];
3199 			psd[ndx + 1] = sensbuf[ndx + 2];
3200 			psd[ndx + 2] = sensbuf[ndx + 1];
3201 			psd[ndx + 3] = sensbuf[ndx + 0];
3202 		}
3203 	}
3204 
3205 	/*
3206 	 * Trigger FW to send SCSI status out
3207 	 */
3208 	qlt_submit_req_entries(qlt, 1);
3209 	mutex_exit(&qlt->req_lock);
3210 
3211 	return (STMF_SUCCESS);
3212 }
3213 
3214 fct_status_t
3215 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3216 {
3217 	qlt_cmd_t	*qcmd;
3218 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3219 	uint8_t *req, *addr;
3220 	qlt_dmem_bctl_t *bctl;
3221 	uint32_t minsize;
3222 	uint8_t elsop, req1f;
3223 
3224 	addr = els->els_resp_payload;
3225 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3226 
3227 	minsize = els->els_resp_size;
3228 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3229 	if (qcmd->dbuf == NULL)
3230 		return (FCT_BUSY);
3231 
3232 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3233 
3234 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3235 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3236 
3237 	if (addr[0] == 0x02) {	/* ACC */
3238 		req1f = BIT_5;
3239 	} else {
3240 		req1f = BIT_6;
3241 	}
3242 	elsop = els->els_req_payload[0];
3243 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3244 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3245 		req1f |= BIT_4;
3246 	}
3247 
3248 	mutex_enter(&qlt->req_lock);
3249 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3250 	if (req == NULL) {
3251 		mutex_exit(&qlt->req_lock);
3252 		qlt_dmem_free(NULL, qcmd->dbuf);
3253 		qcmd->dbuf = NULL;
3254 		return (FCT_BUSY);
3255 	}
3256 	bzero(req, IOCB_SIZE);
3257 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3258 	req[0x16] = elsop; req[0x1f] = req1f;
3259 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3260 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3261 	QMEM_WR16(qlt, (&req[0xC]), 1);
3262 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3263 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3264 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3265 		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
3266 		req[0x1c] = cmd->cmd_lportid & 0xff;
3267 		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
3268 	}
3269 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3270 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3271 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3272 	qlt_submit_req_entries(qlt, 1);
3273 	mutex_exit(&qlt->req_lock);
3274 
3275 	return (FCT_SUCCESS);
3276 }
3277 
3278 fct_status_t
3279 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3280 {
3281 	qlt_abts_cmd_t *qcmd;
3282 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3283 	uint8_t *req;
3284 	uint32_t lportid;
3285 	uint32_t fctl;
3286 	int i;
3287 
3288 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3289 
3290 	mutex_enter(&qlt->req_lock);
3291 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3292 	if (req == NULL) {
3293 		mutex_exit(&qlt->req_lock);
3294 		return (FCT_BUSY);
3295 	}
3296 	bcopy(qcmd->buf, req, IOCB_SIZE);
3297 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3298 	fctl = QMEM_RD32(qlt, req+0x1C);
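	/*
	 * Reuse the F_CTL from the received ABTS, flipping the
	 * exchange-context bit and clearing the sequence-context bit so the
	 * BA_ACC/BA_RJT flows back toward the originator. BIT_19 and BIT_16
	 * are presumed to be the End_Sequence and Sequence_Initiative bits
	 * of the FC frame header F_CTL field (an assumption based on the
	 * FC-FS F_CTL layout, not something spelled out in this driver).
	 */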
3299 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3300 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3301 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3302 	if (cmd->cmd_rp)
3303 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3304 	else
3305 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3306 	if (terminate) {
3307 		QMEM_WR16(qlt, (&req[0xC]), 1);
3308 	}
3309 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3310 	req[0x17] = abts->abts_resp_rctl;
3311 	QMEM_WR32(qlt, req+0x18, lportid);
3312 	QMEM_WR32(qlt, req+0x1C, fctl);
3313 	req[0x23]++;
3314 	for (i = 0; i < 12; i += 4) {
3315 		/* Take care of firmware's LE requirement */
3316 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3317 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3318 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3319 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3320 	}
3321 	qlt_submit_req_entries(qlt, 1);
3322 	mutex_exit(&qlt->req_lock);
3323 
3324 	return (FCT_SUCCESS);
3325 }
3326 
3327 static void
3328 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3329 {
3330 	int i;
3331 	uint32_t d;
3332 	caddr_t req;
3333 	/* Just put it on the request queue */
3334 	mutex_enter(&qlt->req_lock);
3335 	req = qlt_get_req_entries(qlt, 1);
3336 	if (req == NULL) {
3337 		mutex_exit(&qlt->req_lock);
3338 		/* XXX handle this */
3339 		return;
3340 	}
3341 	for (i = 0; i < 16; i++) {
3342 		d = QMEM_RD32(qlt, inot);
3343 		inot += 4;
3344 		QMEM_WR32(qlt, req, d);
3345 		req += 4;
3346 	}
3347 	req -= 64;
3348 	req[0] = 0x0e;
3349 	qlt_submit_req_entries(qlt, 1);
3350 	mutex_exit(&qlt->req_lock);
3351 }
3352 
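/*
 * Translation table indexed by the FCP_CNTL task-attribute code taken from
 * the ATIO, yielding the STMF task-attribute value stored in task_flags.
 * The numeric encodings on both sides are defined by the FCP and STMF
 * headers; this table is assumed to map the SIMPLE, HEAD OF QUEUE, ORDERED,
 * ACA and UNTAGGED codes onto their STMF equivalents.
 */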
3353 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3354 static void
3355 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3356 {
3357 	fct_cmd_t	*cmd;
3358 	scsi_task_t	*task;
3359 	qlt_cmd_t	*qcmd;
3360 	uint32_t	rportid, fw_xchg_addr;
3361 	uint8_t		*p, *q, *req, tm;
3362 	uint16_t	cdb_size, flags, oxid;
3363 	char		info[160];
3364 
3365 	/*
	 * If either a bidirectional transfer is requested or there is an
	 * extended CDB, atio[0x20 + 11] will be greater than or equal to 3.
3368 	 */
3369 	cdb_size = 16;
3370 	if (atio[0x20 + 11] >= 3) {
3371 		uint8_t b = atio[0x20 + 11];
3372 		uint16_t b1;
3373 		if ((b & 3) == 3) {
			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
			    "received, dropping the cmd as bidirectional "
			    "transfers are not yet supported", qlt->instance);
3377 			/* XXX abort the I/O */
3378 			return;
3379 		}
3380 		cdb_size += b & 0xfc;
3381 		/*
		 * Verify that we have enough entries. Without an additional
		 * CDB, everything fits nicely within the same 64 bytes, so
		 * the additional CDB size is essentially the number of
		 * additional bytes we need.
3386 		 */
3387 		b1 = (uint16_t)b;
3388 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
			    "cdb (cdb size = %d bytes), however the firmware "
			    "did not DMA the entire FCP_CMD IU, entry count "
			    "is %d while it should be %d", qlt->instance,
			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3394 			/* XXX abort the I/O */
3395 			return;
3396 		}
3397 	}
3398 
3399 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3400 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3401 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3402 	oxid = (((uint16_t)atio[8 + 16]) << 8) | atio[8+17];
3403 
3404 	if (fw_xchg_addr == 0xFFFFFFFF) {
3405 		cmd = NULL;
3406 	} else {
3407 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3408 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3409 	}
3410 	if (cmd == NULL) {
3411 		/* Abort this IO */
3412 		flags = BIT_14 | ((atio[3] & 0xF0) << 5);
3413 
3414 		mutex_enter(&qlt->req_lock);
3415 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3416 		if (req == NULL) {
3417 			mutex_exit(&qlt->req_lock);
3418 
3419 			(void) snprintf(info, 160,
3420 			    "qlt_handle_atio: qlt-%p, can't "
3421 			    "allocate space for scsi_task", (void *)qlt);
3422 			info[159] = 0;
3423 			(void) fct_port_shutdown(qlt->qlt_port,
3424 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3425 			return;
3426 		}
3427 		bzero(req, IOCB_SIZE);
3428 		req[0] = 0x12; req[1] = 0x1;
3429 		QMEM_WR32(qlt, req+4, 0);
3430 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3431 		    rportid));
3432 		QMEM_WR16(qlt, req+10, 60);
3433 		QMEM_WR32(qlt, req+0x10, rportid);
3434 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3435 		QMEM_WR16(qlt, req+0x1A, flags);
3436 		QMEM_WR16(qlt, req+0x20, oxid);
3437 		qlt_submit_req_entries(qlt, 1);
3438 		mutex_exit(&qlt->req_lock);
3439 
3440 		return;
3441 	}
3442 
3443 	task = (scsi_task_t *)cmd->cmd_specific;
3444 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3445 	qcmd->fw_xchg_addr = fw_xchg_addr;
3446 	qcmd->param.atio_byte3 = atio[3];
3447 	cmd->cmd_oxid = oxid;
3448 	cmd->cmd_rxid = (((uint16_t)atio[8 + 18]) << 8) | atio[8+19];
3449 	cmd->cmd_rportid = rportid;
3450 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3451 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3452 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	/* Don't do a 64 byte read as this is IOMMU */
3454 	q = atio+0x28;
3455 	/* XXX Handle fcp_cntl */
3456 	task->task_cmd_seq_no = (uint32_t)(*q++);
3457 	task->task_csn_size = 8;
3458 	task->task_flags = qlt_task_flags[(*q++) & 7];
3459 	tm = *q++;
3460 	if (tm) {
3461 		if (tm & BIT_1)
3462 			task->task_mgmt_function = TM_ABORT_TASK_SET;
3463 		else if (tm & BIT_2)
3464 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
3465 		else if (tm & BIT_4)
3466 			task->task_mgmt_function = TM_LUN_RESET;
3467 		else if (tm & BIT_5)
3468 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
3469 		else if (tm & BIT_6)
3470 			task->task_mgmt_function = TM_CLEAR_ACA;
3471 		else
3472 			task->task_mgmt_function = TM_ABORT_TASK;
3473 	}
3474 	task->task_max_nbufs = STMF_BUFS_MAX;
3475 	task->task_csn_size = 8;
3476 	task->task_flags |= ((*q++) & 3) << 5;
3477 	p = task->task_cdb;
3478 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3479 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3480 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3481 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3482 	if (cdb_size > 16) {
3483 		uint16_t xtra = cdb_size - 16;
3484 		uint16_t i;
3485 		uint8_t cb[4];
3486 
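		/*
		 * The extended CDB bytes can run past the end of the
		 * circular ATIO queue (ATIO_QUEUE_ENTRIES entries of 64
		 * bytes each), so wrap q back to the start of the queue
		 * whenever it reaches the end.
		 */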
3487 		while (xtra) {
3488 			*p++ = *q++;
3489 			xtra--;
3490 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3491 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3492 				q = (uint8_t *)qlt->queue_mem_ptr +
3493 				    ATIO_QUEUE_OFFSET;
3494 			}
3495 		}
3496 		for (i = 0; i < 4; i++) {
3497 			cb[i] = *q++;
3498 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3499 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3500 				q = (uint8_t *)qlt->queue_mem_ptr +
3501 				    ATIO_QUEUE_OFFSET;
3502 			}
3503 		}
3504 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
3505 		    (((uint32_t)cb[1]) << 16) |
3506 		    (((uint32_t)cb[2]) << 8) | cb[3];
3507 	} else {
3508 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
3509 		    (((uint32_t)q[1]) << 16) |
3510 		    (((uint32_t)q[2]) << 8) | q[3];
3511 	}
3512 	fct_post_rcvd_cmd(cmd, 0);
3513 }
3514 
3515 static void
3516 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
3517 {
3518 	uint16_t status;
3519 	uint32_t portid;
3520 	uint32_t subcode1, subcode2;
3521 
3522 	status = QMEM_RD16(qlt, rsp+8);
3523 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
3524 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
3525 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
3526 
3527 	mutex_enter(&qlt->preq_lock);
3528 	if (portid != qlt->rp_id_in_dereg) {
3529 		int instance = ddi_get_instance(qlt->dip);
3530 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
3531 		    " received when driver wasn't waiting for it",
3532 		    instance, portid);
3533 		mutex_exit(&qlt->preq_lock);
3534 		return;
3535 	}
3536 
3537 	if (status != 0) {
3538 		QLT_LOG(qlt->qlt_port_alias, "implicit logout completed "
3539 		    "for 0x%x with status %x, subcode1 %x subcode2 %x",
3540 		    portid, status, subcode1, subcode2);
3541 		if (status == 0x31 && subcode1 == 0x0a)
3542 			qlt->rp_dereg_status = FCT_SUCCESS;
3543 		else
3544 			qlt->rp_dereg_status =
3545 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
3546 	} else {
3547 		qlt->rp_dereg_status = FCT_SUCCESS;
3548 	}
3549 	cv_signal(&qlt->rp_dereg_cv);
3550 	mutex_exit(&qlt->preq_lock);
3551 }
3552 
3553 /*
3554  * Note that when an ELS is aborted, the regular or aborted completion
3555  * (if any) gets posted before the abort IOCB comes back on response queue.
3556  */
3557 static void
3558 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3559 {
3560 	char		info[160];
3561 	fct_cmd_t	*cmd;
3562 	qlt_cmd_t	*qcmd;
3563 	uint32_t	hndl;
3564 	uint32_t	subcode1, subcode2;
3565 	uint16_t	status;
3566 
3567 	hndl = QMEM_RD32(qlt, rsp+4);
3568 	status = QMEM_RD16(qlt, rsp+8);
3569 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3570 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3571 
3572 	if (!CMD_HANDLE_VALID(hndl)) {
3573 		/*
3574 		 * This cannot happen for an unsol ELS completion. It can
3575 		 * only happen when the abort for an unsol ELS completes.
3576 		 * This condition indicates a firmware bug.
3577 		 */
3578 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
3579 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3580 		    hndl, status, subcode1, subcode2, (void *)rsp);
3581 		info[159] = 0;
3582 		(void) fct_port_shutdown(qlt->qlt_port,
3583 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3584 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3585 		return;
3586 	}
3587 
3588 	if (status == 5) {
3589 		/*
3590 		 * When an unsolicited ELS is aborted, the abort is done by
3591 		 * an ELSPT IOCB with abort control. This completion is for
3592 		 * the aborted ELS, not for the abort request; we will do the
3593 		 * cleanup when the IOCB which caused the abort returns.
3594 		 */
3595 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
3596 		return;
3597 	}
3598 
3599 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3600 	if (cmd == NULL) {
3601 		/*
3602 		 * Now why would this happen ???
3603 		 */
3604 		(void) snprintf(info, 160,
3605 		    "qlt_handle_unsol_els_completion: can not "
3606 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3607 		    (void *)rsp);
3608 		info[159] = 0;
3609 		(void) fct_port_shutdown(qlt->qlt_port,
3610 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3611 
3612 		return;
3613 	}
3614 
3615 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3616 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3617 	if (qcmd->flags & QLT_CMD_ABORTING) {
3618 		/*
3619 		 * This is the same case as "if (status == 5)" above. The
3620 		 * only difference is that in this case the firmware actually
3621 		 * finished sending the response. The abort attempt will come
3622 		 * back with its own status; we will handle it there.
3623 		 */
3624 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
3625 		    "abort it");
3626 		return;
3627 	}
3628 
3629 	if (qcmd->dbuf != NULL) {
3630 		qlt_dmem_free(NULL, qcmd->dbuf);
3631 		qcmd->dbuf = NULL;
3632 	}
3633 
3634 	if (status == 0) {
3635 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3636 	} else {
3637 		fct_send_response_done(cmd,
3638 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3639 	}
3640 }
3641 
3642 static void
3643 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
3644 {
3645 	char		info[160];
3646 	fct_cmd_t	*cmd;
3647 	qlt_cmd_t	*qcmd;
3648 	uint32_t	hndl;
3649 	uint32_t	subcode1, subcode2;
3650 	uint16_t	status;
3651 
3652 	hndl = QMEM_RD32(qlt, rsp+4);
3653 	status = QMEM_RD16(qlt, rsp+8);
3654 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3655 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3656 
3657 	if (!CMD_HANDLE_VALID(hndl)) {
3658 		ASSERT(hndl == 0);
3659 		/*
3660 		 * Someone has requested to abort it, but no one is waiting for
3661 		 * this completion.
3662 		 */
3663 		if ((status != 0) && (status != 8)) {
3664 			/*
3665 			 * There could be exchange resource leakage, so
3666 			 * throw HBA fatal error event now
3667 			 */
3668 			(void) snprintf(info, 160,
3669 			    "qlt_handle_unsol_els_abort_completion: "
3670 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3671 			    hndl, status, subcode1, subcode2, (void *)rsp);
3672 			info[159] = 0;
3673 			(void) fct_port_shutdown(qlt->qlt_port,
3674 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3675 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3676 			return;
3677 		}
3678 
3679 		return;
3680 	}
3681 
3682 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3683 	if (cmd == NULL) {
3684 		/*
3685 		 * Why would this happen ??
3686 		 */
3687 		(void) snprintf(info, 160,
3688 		    "qlt_handle_unsol_els_abort_completion: can not get "
3689 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3690 		    (void *)rsp);
3691 		info[159] = 0;
3692 		(void) fct_port_shutdown(qlt->qlt_port,
3693 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3694 
3695 		return;
3696 	}
3697 
3698 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3699 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3700 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
3701 
3702 	if (qcmd->dbuf != NULL) {
3703 		qlt_dmem_free(NULL, qcmd->dbuf);
3704 		qcmd->dbuf = NULL;
3705 	}
3706 
3707 	if (status == 0) {
3708 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
3709 	} else if (status == 8) {
3710 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
3711 	} else {
3712 		fct_cmd_fca_aborted(cmd,
3713 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3714 	}
3715 }
3716 
3717 static void
3718 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3719 {
3720 	char		info[160];
3721 	fct_cmd_t	*cmd;
3722 	fct_els_t	*els;
3723 	qlt_cmd_t	*qcmd;
3724 	uint32_t	hndl;
3725 	uint32_t	subcode1, subcode2;
3726 	uint16_t	status;
3727 
3728 	hndl = QMEM_RD32(qlt, rsp+4);
3729 	status = QMEM_RD16(qlt, rsp+8);
3730 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3731 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3732 
3733 	if (!CMD_HANDLE_VALID(hndl)) {
3734 		/*
3735 		 * This cannot happen for sol els completion.
3736 		 */
3737 		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
3738 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3739 		    hndl, status, subcode1, subcode2, (void *)rsp);
3740 		info[159] = 0;
3741 		(void) fct_port_shutdown(qlt->qlt_port,
3742 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3743 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3744 		return;
3745 	}
3746 
3747 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3748 	if (cmd == NULL) {
3749 		(void) snprintf(info, 160,
3750 		    "qlt_handle_sol_els_completion: can not "
3751 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3752 		    (void *)rsp);
3753 		info[159] = 0;
3754 		(void) fct_port_shutdown(qlt->qlt_port,
3755 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3756 
3757 		return;
3758 	}
3759 
3760 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
3761 	els = (fct_els_t *)cmd->cmd_specific;
3762 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3763 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
3764 
3765 	if (qcmd->flags & QLT_CMD_ABORTING) {
3766 		/*
3767 		 * We will handle it when the ABORT IO IOCB returns.
3768 		 */
3769 		return;
3770 	}
3771 
3772 	if (qcmd->dbuf != NULL) {
3773 		if (status == 0) {
3774 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
3775 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
3776 			    qcmd->param.resp_offset,
3777 			    els->els_resp_payload, els->els_resp_size);
3778 		}
3779 		qlt_dmem_free(NULL, qcmd->dbuf);
3780 		qcmd->dbuf = NULL;
3781 	}
3782 
3783 	if (status == 0) {
3784 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3785 	} else {
3786 		fct_send_cmd_done(cmd,
3787 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3788 	}
3789 }
3790 
3791 static void
3792 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
3793 {
3794 	fct_cmd_t	*cmd;
3795 	fct_sol_ct_t	*ct;
3796 	qlt_cmd_t	*qcmd;
3797 	uint32_t	 hndl;
3798 	uint16_t	 status;
3799 	char		 info[160];
3800 
3801 	hndl = QMEM_RD32(qlt, rsp+4);
3802 	status = QMEM_RD16(qlt, rsp+8);
3803 
3804 	if (!CMD_HANDLE_VALID(hndl)) {
3805 		/*
3806 		 * Solicited commands will always have a valid handle.
3807 		 */
3808 		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
3809 		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3810 		info[159] = 0;
3811 		(void) fct_port_shutdown(qlt->qlt_port,
3812 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3813 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3814 		return;
3815 	}
3816 
3817 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3818 	if (cmd == NULL) {
3819 		(void) snprintf(info, 160,
3820 		    "qlt_handle_ct_completion: cannot find "
3821 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3822 		    (void *)rsp);
3823 		info[159] = 0;
3824 		(void) fct_port_shutdown(qlt->qlt_port,
3825 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3826 
3827 		return;
3828 	}
3829 
3830 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
3831 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3832 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
3833 
3834 	if (qcmd->flags & QLT_CMD_ABORTING) {
3835 		/*
3836 		 * We will handle it when the ABORT IO IOCB returns.
3837 		 */
3838 		return;
3839 	}
3840 
3841 	ASSERT(qcmd->dbuf);
3842 	if (status == 0) {
3843 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
3844 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
3845 		    qcmd->param.resp_offset,
3846 		    ct->ct_resp_payload, ct->ct_resp_size);
3847 	}
3848 	qlt_dmem_free(NULL, qcmd->dbuf);
3849 	qcmd->dbuf = NULL;
3850 
3851 	if (status == 0) {
3852 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3853 	} else {
3854 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
3855 	}
3856 }
3857 
3858 static void
3859 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
3860 {
3861 	fct_cmd_t	*cmd;
3862 	scsi_task_t	*task;
3863 	qlt_cmd_t	*qcmd;
3864 	stmf_data_buf_t	*dbuf;
3865 	fct_status_t	fc_st;
3866 	uint32_t	iof = 0;
3867 	uint32_t	hndl;
3868 	uint16_t	status;
3869 	uint16_t	flags;
3870 	uint8_t		abort_req;
3871 	uint8_t		n;
3872 	char		info[160];
3873 
3874 	/* XXX: Check validity of the IOCB by checking 4th byte. */
3875 	hndl = QMEM_RD32(qlt, rsp+4);
3876 	status = QMEM_RD16(qlt, rsp+8);
3877 	flags = QMEM_RD16(qlt, rsp+0x1a);
3878 	n = rsp[2];
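	/*
	 * rsp[2] echoes the data-buffer handle presumably stored in the
	 * CTIO when it was queued: 0 means qcmd->dbuf, other values index
	 * the task's buffers via stmf_handle_to_buf(), and BIT_7 marks a
	 * CTIO that carried no data buffer (status only), as the checks
	 * further down assume.
	 */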
3879 
3880 	if (!CMD_HANDLE_VALID(hndl)) {
3881 		ASSERT(hndl == 0);
3882 		/*
3883 		 * Someone has requested to abort it, but no one is waiting for
3884 		 * this completion.
3885 		 */
3886 		QLT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
3887 		    "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3888 		if ((status != 1) && (status != 2)) {
3889 			/*
3890 			 * There could be exchange resource leakage, so
3891 			 * throw HBA fatal error event now
3892 			 */
3893 			(void) snprintf(info, 160,
3894 			    "qlt_handle_ctio_completion: hndl-"
3895 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3896 			info[159] = 0;
3897 			(void) fct_port_shutdown(qlt->qlt_port,
3898 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3899 
3900 		}
3901 
3902 		return;
3903 	}
3904 
3905 	if (flags & BIT_14) {
3906 		abort_req = 1;
3907 		QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
3908 		    "abort: hndl-%x, status-%x, rsp-%p", hndl, status,
3909 		    (void *)rsp);
3910 	} else {
3911 		abort_req = 0;
3912 	}
3913 
3914 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3915 	if (cmd == NULL) {
3916 		(void) snprintf(info, 160,
3917 		    "qlt_handle_ctio_completion: cannot find "
3918 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3919 		    (void *)rsp);
3920 		info[159] = 0;
3921 		(void) fct_port_shutdown(qlt->qlt_port,
3922 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3923 
3924 		return;
3925 	}
3926 
3927 	task = (scsi_task_t *)cmd->cmd_specific;
3928 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3929 	if (qcmd->dbuf_rsp_iu) {
3930 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
3931 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
3932 		qcmd->dbuf_rsp_iu = NULL;
3933 	}
3934 
3935 	if ((status == 1) || (status == 2)) {
3936 		if (abort_req) {
3937 			fc_st = FCT_ABORT_SUCCESS;
3938 			iof = FCT_IOF_FCA_DONE;
3939 		} else {
3940 			fc_st = FCT_SUCCESS;
3941 			if (flags & BIT_15) {
3942 				iof = FCT_IOF_FCA_DONE;
3943 			}
3944 		}
3945 	} else {
3946 		if ((status == 8) && abort_req) {
3947 			fc_st = FCT_NOT_FOUND;
3948 			iof = FCT_IOF_FCA_DONE;
3949 		} else {
3950 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
3951 		}
3952 	}
3953 	dbuf = NULL;
3954 	if (((n & BIT_7) == 0) && (!abort_req)) {
3955 		/* A completion of data xfer */
3956 		if (n == 0) {
3957 			dbuf = qcmd->dbuf;
3958 		} else {
3959 			dbuf = stmf_handle_to_buf(task, n);
3960 		}
3961 
3962 		ASSERT(dbuf != NULL);
3963 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
3964 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
3965 		if (flags & BIT_15) {
3966 			dbuf->db_flags |= DB_STATUS_GOOD_SENT;
3967 		}
3968 
3969 		dbuf->db_xfer_status = fc_st;
3970 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
3971 		return;
3972 	}
3973 	if (!abort_req) {
3974 		/*
3975 		 * This was just a pure status xfer.
3976 		 */
3977 		fct_send_response_done(cmd, fc_st, iof);
3978 		return;
3979 	}
3980 
3981 	fct_cmd_fca_aborted(cmd, fc_st, iof);
3982 }
3983 
3984 static void
3985 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
3986 {
3987 	char		info[80];
3988 	fct_cmd_t	*cmd;
3989 	qlt_cmd_t	*qcmd;
3990 	uint32_t	h;
3991 	uint16_t	status;
3992 
3993 	h = QMEM_RD32(qlt, rsp+4);
3994 	status = QMEM_RD16(qlt, rsp+8);
3995 
3996 	if (!CMD_HANDLE_VALID(h)) {
3997 		/*
3998 		 * Solicited commands always have a valid handle.
3999 		 */
4000 		(void) snprintf(info, 80,
4001 		    "qlt_handle_sol_abort_completion: hndl-"
4002 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
4003 		info[79] = 0;
4004 		(void) fct_port_shutdown(qlt->qlt_port,
4005 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4006 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4007 		return;
4008 	}
4009 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4010 	if (cmd == NULL) {
4011 		/*
4012 		 * What happened to the cmd ??
4013 		 */
4014 		(void) snprintf(info, 80,
4015 		    "qlt_handle_sol_abort_completion: cannot "
4016 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4017 		    (void *)rsp);
4018 		info[79] = 0;
4019 		(void) fct_port_shutdown(qlt->qlt_port,
4020 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4021 
4022 		return;
4023 	}
4024 
4025 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4026 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
4027 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4028 	if (qcmd->dbuf != NULL) {
4029 		qlt_dmem_free(NULL, qcmd->dbuf);
4030 		qcmd->dbuf = NULL;
4031 	}
4032 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4033 	if (status == 0) {
4034 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4035 	} else if (status == 0x31) {
4036 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4037 	} else {
4038 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4039 	}
4040 }
4041 
4042 static void
4043 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
4044 {
4045 	qlt_abts_cmd_t	*qcmd;
4046 	fct_cmd_t	*cmd;
4047 	uint32_t	remote_portid;
4048 	char		info[160];
4049 
4050 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4051 	    ((uint32_t)(resp[0x1A])) << 16;
4052 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4053 	    sizeof (qlt_abts_cmd_t), 0);
4054 	if (cmd == NULL) {
4055 		(void) snprintf(info, 160,
4056 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
4057 		    "allocate space for fct_cmd", (void *)qlt);
4058 		info[159] = 0;
4059 		(void) fct_port_shutdown(qlt->qlt_port,
4060 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4061 		return;
4062 	}
4063 
4064 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
4065 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
4066 	bcopy(resp, qcmd->buf, IOCB_SIZE);
4067 	cmd->cmd_port = qlt->qlt_port;
4068 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4069 	if (cmd->cmd_rp_handle == 0xFFFF)
4070 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4071 
4072 	cmd->cmd_rportid = remote_portid;
4073 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4074 	    ((uint32_t)(resp[0x16])) << 16;
4075 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4076 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
4077 	fct_post_rcvd_cmd(cmd, 0);
4078 }
4079 
4080 static void
4081 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4082 {
4083 	uint16_t status;
4084 	char	info[80];
4085 
4086 	status = QMEM_RD16(qlt, resp+8);
4087 
4088 	if ((status == 0) || (status == 5)) {
4089 		return;
4090 	}
4091 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4092 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4093 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4094 	info[79] = 0;
4095 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4096 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4097 }
4098 
4099 #ifdef	DEBUG
4100 uint32_t qlt_drop_abort_counter = 0;
4101 #endif
4102 
4103 fct_status_t
4104 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4105 {
4106 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4107 
4108 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4109 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4110 		return (FCT_NOT_FOUND);
4111 	}
4112 
4113 #ifdef DEBUG
4114 	if (qlt_drop_abort_counter > 0) {
4115 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4116 			return (FCT_SUCCESS);
4117 	}
4118 #endif
4119 
4120 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4121 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4122 	}
4123 
4124 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4125 		cmd->cmd_handle = 0;
4126 	}
4127 
4128 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4129 		return (qlt_send_abts_response(qlt, cmd, 1));
4130 	}
4131 
4132 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4133 		return (qlt_abort_purex(qlt, cmd));
4134 	}
4135 
4136 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4137 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4138 		return (qlt_abort_sol_cmd(qlt, cmd));
4139 	}
4140 
4141 	ASSERT(0);
4142 	return (FCT_FAILURE);
4143 }
4144 
4145 fct_status_t
4146 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4147 {
4148 	uint8_t *req;
4149 	qlt_cmd_t *qcmd;
4150 
4151 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4152 	qcmd->flags |= QLT_CMD_ABORTING;
4153 	QLT_LOG(qlt->qlt_port_alias, "qlt_abort_sol_cmd: fctcmd-%p, "
4154 	    "cmd_handle-%x", cmd, cmd->cmd_handle);
4155 
4156 	mutex_enter(&qlt->req_lock);
4157 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4158 	if (req == NULL) {
4159 		mutex_exit(&qlt->req_lock);
4160 
4161 		return (FCT_BUSY);
4162 	}
4163 	bzero(req, IOCB_SIZE);
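	/*
	 * Entry type 0x33 appears to be the abort-I/O-exchange IOCB: the
	 * handle of the command being aborted goes in at offsets 4 and 0xc,
	 * along with the remote port handle (0xFFFF when unknown) and the
	 * remote port id.
	 */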
4164 	req[0] = 0x33; req[1] = 1;
4165 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4166 	if (cmd->cmd_rp) {
4167 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4168 	} else {
4169 		QMEM_WR16(qlt, req+8, 0xFFFF);
4170 	}
4171 
4172 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4173 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4174 	qlt_submit_req_entries(qlt, 1);
4175 	mutex_exit(&qlt->req_lock);
4176 
4177 	return (FCT_SUCCESS);
4178 }
4179 
4180 fct_status_t
4181 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4182 {
4183 	uint8_t *req;
4184 	qlt_cmd_t *qcmd;
4185 	fct_els_t *els;
4186 	uint8_t elsop, req1f;
4187 
4188 	els = (fct_els_t *)cmd->cmd_specific;
4189 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4190 	elsop = els->els_req_payload[0];
4191 	QLT_LOG(qlt->qlt_port_alias,
4192 	    "qlt_abort_purex: fctcmd-%p, cmd_handle-%x, "
4193 	    "elsop-%x", cmd, cmd->cmd_handle, elsop);
4194 	req1f = 0x60;	/* Terminate xchg */
4195 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4196 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4197 		req1f |= BIT_4;
4198 	}
4199 
4200 	mutex_enter(&qlt->req_lock);
4201 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4202 	if (req == NULL) {
4203 		mutex_exit(&qlt->req_lock);
4204 
4205 		return (FCT_BUSY);
4206 	}
4207 
4208 	qcmd->flags |= QLT_CMD_ABORTING;
4209 	bzero(req, IOCB_SIZE);
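	/*
	 * Reuse the ELS pass-through (ELSPT, entry type 0x53) IOCB with the
	 * abort/terminate control byte computed above (req1f), targeting
	 * the firmware exchange address of the received ELS.
	 */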
4210 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4211 	req[0x16] = elsop; req[0x1f] = req1f;
4212 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4213 	if (cmd->cmd_rp) {
4214 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4215 	} else {
4216 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4217 	}
4218 
4219 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4220 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4221 	qlt_submit_req_entries(qlt, 1);
4222 	mutex_exit(&qlt->req_lock);
4223 
4224 	return (FCT_SUCCESS);
4225 }
4226 
4227 fct_status_t
4228 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4229 {
4230 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4231 	uint8_t *req;
4232 	uint16_t flags;
4233 
4234 	flags = BIT_14 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
4235 	QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_abort_unsol_scsi_cmd: fctcmd-%p, "
4236 	    "cmd_handle-%x", cmd, cmd->cmd_handle);
4237 
4238 	mutex_enter(&qlt->req_lock);
4239 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4240 	if (req == NULL) {
4241 		mutex_exit(&qlt->req_lock);
4242 
4243 		return (FCT_BUSY);
4244 	}
4245 
4246 	qcmd->flags |= QLT_CMD_ABORTING;
4247 	bzero(req, IOCB_SIZE);
4248 	req[0] = 0x12; req[1] = 0x1;
4249 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4250 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4251 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4252 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4253 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4254 	QMEM_WR16(qlt, req+0x1A, flags);
4255 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4256 	qlt_submit_req_entries(qlt, 1);
4257 	mutex_exit(&qlt->req_lock);
4258 
4259 	return (FCT_SUCCESS);
4260 }
4261 
4262 fct_status_t
4263 qlt_send_cmd(fct_cmd_t *cmd)
4264 {
4265 	qlt_state_t *qlt;
4266 
4267 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4268 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4269 		return (qlt_send_els(qlt, cmd));
4270 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4271 		return (qlt_send_ct(qlt, cmd));
4272 	}
4273 
4274 	ASSERT(0);
4275 	return (FCT_FAILURE);
4276 }
4277 
4278 fct_status_t
4279 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4280 {
4281 	uint8_t *req;
4282 	fct_els_t *els;
4283 	qlt_cmd_t *qcmd;
4284 	stmf_data_buf_t *buf;
4285 	qlt_dmem_bctl_t *bctl;
4286 	uint32_t sz, minsz;
4287 
4288 	els = (fct_els_t *)cmd->cmd_specific;
4289 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4290 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4291 	qcmd->param.resp_offset = (els->els_req_size + 7) & ~7;
4292 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
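	/*
	 * A single DMA buffer holds both the outbound request payload and,
	 * starting at the 8-byte-aligned resp_offset, the area into which
	 * the ELS response will be copied on completion.
	 */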
4293 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4294 	if (buf == NULL) {
4295 		return (FCT_BUSY);
4296 	}
4297 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4298 
4299 	qcmd->dbuf = buf;
4300 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4301 	    els->els_req_size);
4302 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4303 
4304 	mutex_enter(&qlt->req_lock);
4305 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4306 	if (req == NULL) {
4307 		qlt_dmem_free(NULL, buf);
4308 		mutex_exit(&qlt->req_lock);
4309 		return (FCT_BUSY);
4310 	}
4311 	bzero(req, IOCB_SIZE);
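	/*
	 * ELS pass-through IOCB (entry type 0x53): a single command DSD
	 * points at the request payload and a single response DSD at the
	 * response area within the same dbuf.
	 */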
4312 	req[0] = 0x53; req[1] = 1;
4313 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4314 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4315 	QMEM_WR16(qlt, (&req[0xC]), 1);
4316 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4317 	QMEM_WR16(qlt, (&req[0x14]), 1);
4318 	req[0x16] = els->els_req_payload[0];
4319 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4320 		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
4321 		req[0x1c] = cmd->cmd_lportid & 0xff;
4322 		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
4323 	}
4324 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4325 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4326 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4327 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4328 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4329 	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
4330 	    qcmd->param.resp_offset);
4331 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4332 	qlt_submit_req_entries(qlt, 1);
4333 	mutex_exit(&qlt->req_lock);
4334 
4335 	return (FCT_SUCCESS);
4336 }
4337 
4338 fct_status_t
4339 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4340 {
4341 	uint8_t *req;
4342 	fct_sol_ct_t *ct;
4343 	qlt_cmd_t *qcmd;
4344 	stmf_data_buf_t *buf;
4345 	qlt_dmem_bctl_t *bctl;
4346 	uint32_t sz, minsz;
4347 
4348 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4349 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4350 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4351 	qcmd->param.resp_offset = (ct->ct_req_size + 7) & ~7;
4352 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4353 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4354 	if (buf == NULL) {
4355 		return (FCT_BUSY);
4356 	}
4357 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4358 
4359 	qcmd->dbuf = buf;
4360 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4361 	    ct->ct_req_size);
4362 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4363 
4364 	mutex_enter(&qlt->req_lock);
4365 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4366 	if (req == NULL) {
4367 		qlt_dmem_free(NULL, buf);
4368 		mutex_exit(&qlt->req_lock);
4369 		return (FCT_BUSY);
4370 	}
4371 	bzero(req, IOCB_SIZE);
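	/*
	 * Entry type 0x29 looks like the CT pass-through IOCB; its layout
	 * mirrors the ELS pass-through above, with the command and response
	 * DSDs pointing into the same dbuf.
	 */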
4372 	req[0] = 0x29; req[1] = 1;
4373 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4374 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4375 	QMEM_WR16(qlt, (&req[0xC]), 1);
4376 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4377 	QMEM_WR16(qlt, (&req[0x14]), 1);
4378 
4379 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4380 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4381 
4382 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4383 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4384 	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
4385 	    qcmd->param.resp_offset);		/* RESPONSE DSD */
4386 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4387 
4388 	qlt_submit_req_entries(qlt, 1);
4389 	mutex_exit(&qlt->req_lock);
4390 
4391 	return (FCT_SUCCESS);
4392 }
4393 
4394 
4395 /*
4396  * All QLT_FIRMWARE_* errors are mainly handled in this function.
4397  * It cannot be called in interrupt context.
4398  *
4399  * FWDUMP's purpose is to serve ioctl, so we use qlt_ioctl_flags
4400  * and qlt_ioctl_lock.
4401  */
4402 static fct_status_t
4403 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4404 {
4405 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4406 	int		i;
4407 	int		retries;
4408 	int		n, size_left;
4409 	char		c = ' ';
4410 	uint32_t	addr, endaddr, words_to_read;
4411 	caddr_t		buf;
4412 
4413 	mutex_enter(&qlt->qlt_ioctl_lock);
4414 	/*
4415 	 * To make sure that there's no outstanding dumping task
4416 	 */
4417 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4418 		mutex_exit(&qlt->qlt_ioctl_lock);
4419 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: outstanding");
4420 		return (FCT_FAILURE);
4421 	}
4422 
4423 	/*
4424 	 * Make sure not to overwrite an existing dump
4425 	 */
4426 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4427 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4428 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4429 		/*
4430 		 * If we already have a dump that wasn't triggered by the user
4431 		 * and the user hasn't fetched it, we shouldn't dump again.
4432 		 */
4433 		mutex_exit(&qlt->qlt_ioctl_lock);
4434 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: There's one "
4435 		    "dump, please fetch it");
4436 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
4437 		    "is one already outstanding.", qlt->instance);
4438 		return (FCT_FAILURE);
4439 	}
4440 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
4441 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
4442 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
4443 	} else {
4444 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
4445 	}
4446 	mutex_exit(&qlt->qlt_ioctl_lock);
4447 
4448 	size_left = QLT_FWDUMP_BUFSIZE;
4449 	if (!qlt->qlt_fwdump_buf) {
4450 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
4451 		/*
4452 		 * This is the only place where we allocate the dump buffer.
4453 		 * Once allocated, it is used until the port is detached.
4454 		 */
4455 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
4456 	}
4457 
4458 	/*
4459 	 * Start to dump firmware
4460 	 */
4461 	buf = (caddr_t)qlt->qlt_fwdump_buf;
4462 
4463 	/*
4464 	 * Print the ISP firmware revision number and attributes information
4465 	 * Read the RISC to Host Status register
4466 	 */
4467 	n = snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
4468 	    "Attributes %04x\n\nR2H Status Register\n%08x",
4469 	    qlt->fw_major, qlt->fw_minor,
4470 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, 0x44));
4471 	buf += n; size_left -= n;
4472 
4473 	/*
4474 	 * Before pausing the RISC, make sure no mailbox can execute
4475 	 */
4476 	mutex_enter(&qlt->mbox_lock);
4477 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
4478 		/*
4479 		 * Wait to grab the mailboxes
4480 		 */
4481 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
4482 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
4483 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
4484 			    ddi_get_lbolt() + drv_usectohz(1000000));
4485 			if (retries > 5) {
4486 				mutex_exit(&qlt->mbox_lock);
4487 				QLT_LOG(qlt->qlt_port_alias,
4488 				    "qlt_firmware_dump: "
4489 				    "can't drain out mailbox commands");
4490 				goto dump_fail;
4491 			}
4492 		}
4493 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
4494 		cv_broadcast(&qlt->mbox_cv);
4495 	}
4496 	mutex_exit(&qlt->mbox_lock);
4497 
4498 	/*
4499 	 * Pause the RISC processor
4500 	 */
4501 	REG_WR32(qlt, REG_HCCR, 0x30000000);
4502 
4503 	/*
4504 	 * Wait for the RISC processor to pause
4505 	 */
4506 	for (i = 0; i < 200; i++) {
4507 		if (REG_RD32(qlt, 0x44) & 0x100) {
4508 			break;
4509 		}
4510 		drv_usecwait(1000);
4511 	}
4512 	if (i == 200) {
4513 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: can't pause");
4514 		return (FCT_FAILURE);
4515 	}
4516 
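	/*
	 * From here on, the dump repeatedly selects a register window by
	 * writing to the register at offset 0x54 and then reads the
	 * windowed registers (typically 16 of them starting at offset 0xc0)
	 * through qlt_fwdump_dump_regs().
	 */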
4517 	if (!qlt->qlt_25xx_chip) {
4518 		goto over_25xx_specific_dump;
4519 	}
4520 	n = snprintf(buf, size_left, "\n\nHostRisc registers\n");
4521 	buf += n; size_left -= n;
4522 	REG_WR32(qlt, 0x54, 0x7000);
4523 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4524 	buf += n; size_left -= n;
4525 	REG_WR32(qlt, 0x54, 0x7010);
4526 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4527 	buf += n; size_left -= n;
4528 	REG_WR32(qlt, 0x54, 0x7C00);
4529 
4530 	n = snprintf(buf, size_left, "\nPCIe registers\n");
4531 	buf += n; size_left -= n;
4532 	REG_WR32(qlt, 0xC0, 0x1);
4533 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
4534 	buf += n; size_left -= n;
4535 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
4536 	buf += n; size_left -= n;
4537 	REG_WR32(qlt, 0xC0, 0x0);
4538 
4539 over_25xx_specific_dump:;
4540 	n = snprintf(buf, size_left, "\n\nHost Interface Registers\n");
4541 	buf += n; size_left -= n;
4542 	/*
4543 	 * Capture data from 32 registers
4544 	 */
4545 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
4546 	buf += n; size_left -= n;
4547 
4548 	/*
4549 	 * Disable interrupts
4550 	 */
4551 	REG_WR32(qlt, 0xc, 0);
4552 
4553 	/*
4554 	 * Shadow registers
4555 	 */
4556 	n = snprintf(buf, size_left, "\nShadow Registers\n");
4557 	buf += n; size_left -= n;
4558 
4559 	REG_WR32(qlt, 0x54, 0xF70);
4560 	addr = 0xb0000000;
4561 	for (i = 0; i < 0xb; i++) {
4562 		if ((!qlt->qlt_25xx_chip) && (i >= 7)) {
4563 			break;
4564 		}
4565 		if (i && ((i & 7) == 0)) {
4566 			n = snprintf(buf, size_left, "\n");
4567 			buf += n; size_left -= n;
4568 		}
4569 		REG_WR32(qlt, 0xF0, addr);
4570 		n = snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
4571 		buf += n; size_left -= n;
4572 		addr += 0x100000;
4573 	}
4574 
4575 	if (qlt->qlt_25xx_chip) {
4576 		REG_WR32(qlt, 0x54, 0x10);
4577 		n = snprintf(buf, size_left, "\n\nRISC IO Register\n%08x",
4578 		    REG_RD32(qlt, 0xC0));
4579 		buf += n; size_left -= n;
4580 	}
4581 
4582 	/*
4583 	 * Mailbox registers
4584 	 */
4585 	n = snprintf(buf, size_left, "\n\nMailbox Registers\n");
4586 	buf += n; size_left -= n;
4587 	for (i = 0; i < 32; i += 2) {
4588 		if ((i + 2) & 15) {
4589 			c = ' ';
4590 		} else {
4591 			c = '\n';
4592 		}
4593 		n = snprintf(buf, size_left, "%04x %04x%c",
4594 		    REG_RD16(qlt, 0x80 + (i << 1)),
4595 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
4596 		buf += n; size_left -= n;
4597 	}
4598 
4599 	/*
4600 	 * Transfer sequence registers
4601 	 */
4602 	n = snprintf(buf, size_left, "\nXSEQ GP Registers\n");
4603 	buf += n; size_left -= n;
4604 
4605 	REG_WR32(qlt, 0x54, 0xBF00);
4606 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4607 	buf += n; size_left -= n;
4608 	REG_WR32(qlt, 0x54, 0xBF10);
4609 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4610 	buf += n; size_left -= n;
4611 	REG_WR32(qlt, 0x54, 0xBF20);
4612 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4613 	buf += n; size_left -= n;
4614 	REG_WR32(qlt, 0x54, 0xBF30);
4615 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4616 	buf += n; size_left -= n;
4617 	REG_WR32(qlt, 0x54, 0xBF40);
4618 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4619 	buf += n; size_left -= n;
4620 	REG_WR32(qlt, 0x54, 0xBF50);
4621 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4622 	buf += n; size_left -= n;
4623 	REG_WR32(qlt, 0x54, 0xBF60);
4624 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4625 	buf += n; size_left -= n;
4626 	REG_WR32(qlt, 0x54, 0xBF70);
4627 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4628 	buf += n; size_left -= n;
4629 	n = snprintf(buf, size_left, "\nXSEQ-0 registers\n");
4630 	buf += n; size_left -= n;
4631 	REG_WR32(qlt, 0x54, 0xBFE0);
4632 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4633 	buf += n; size_left -= n;
4634 	n = snprintf(buf, size_left, "\nXSEQ-1 registers\n");
4635 	buf += n; size_left -= n;
4636 	REG_WR32(qlt, 0x54, 0xBFF0);
4637 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4638 	buf += n; size_left -= n;
4639 
4640 	/*
4641 	 * Receive sequence registers
4642 	 */
4643 	n = snprintf(buf, size_left, "\nRSEQ GP Registers\n");
4644 	buf += n; size_left -= n;
4645 	REG_WR32(qlt, 0x54, 0xFF00);
4646 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4647 	buf += n; size_left -= n;
4648 	REG_WR32(qlt, 0x54, 0xFF10);
4649 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4650 	buf += n; size_left -= n;
4651 	REG_WR32(qlt, 0x54, 0xFF20);
4652 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4653 	buf += n; size_left -= n;
4654 	REG_WR32(qlt, 0x54, 0xFF30);
4655 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4656 	buf += n; size_left -= n;
4657 	REG_WR32(qlt, 0x54, 0xFF40);
4658 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4659 	buf += n; size_left -= n;
4660 	REG_WR32(qlt, 0x54, 0xFF50);
4661 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4662 	buf += n; size_left -= n;
4663 	REG_WR32(qlt, 0x54, 0xFF60);
4664 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4665 	buf += n; size_left -= n;
4666 	REG_WR32(qlt, 0x54, 0xFF70);
4667 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4668 	buf += n; size_left -= n;
4669 	n = snprintf(buf, size_left, "\nRSEQ-0 registers\n");
4670 	buf += n; size_left -= n;
4671 	REG_WR32(qlt, 0x54, 0xFFD0);
4672 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4673 	buf += n; size_left -= n;
4674 	n = snprintf(buf, size_left, "\nRSEQ-1 registers\n");
4675 	buf += n; size_left -= n;
4676 	REG_WR32(qlt, 0x54, 0xFFE0);
4677 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4678 	buf += n; size_left -= n;
4679 	n = snprintf(buf, size_left, "\nRSEQ-2 registers\n");
4680 	buf += n; size_left -= n;
4681 	REG_WR32(qlt, 0x54, 0xFFF0);
4682 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4683 	buf += n; size_left -= n;
4684 
4685 	if (!qlt->qlt_25xx_chip)
4686 		goto over_aseq_regs;
4687 
4688 	/*
4689 	 * Auxiliary sequencer registers
4690 	 */
4691 	n = snprintf(buf, size_left, "\nASEQ GP Registers\n");
4692 	buf += n; size_left -= n;
4693 	REG_WR32(qlt, 0x54, 0xB000);
4694 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4695 	buf += n; size_left -= n;
4696 	REG_WR32(qlt, 0x54, 0xB010);
4697 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4698 	buf += n; size_left -= n;
4699 	REG_WR32(qlt, 0x54, 0xB020);
4700 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4701 	buf += n; size_left -= n;
4702 	REG_WR32(qlt, 0x54, 0xB030);
4703 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4704 	buf += n; size_left -= n;
4705 	REG_WR32(qlt, 0x54, 0xB040);
4706 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4707 	buf += n; size_left -= n;
4708 	REG_WR32(qlt, 0x54, 0xB050);
4709 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4710 	buf += n; size_left -= n;
4711 	REG_WR32(qlt, 0x54, 0xB060);
4712 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4713 	buf += n; size_left -= n;
4714 	REG_WR32(qlt, 0x54, 0xB070);
4715 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4716 	buf += n; size_left -= n;
4717 	n = snprintf(buf, size_left, "\nASEQ-0 registers\n");
4718 	buf += n; size_left -= n;
4719 	REG_WR32(qlt, 0x54, 0xB0C0);
4720 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4721 	buf += n; size_left -= n;
4722 	REG_WR32(qlt, 0x54, 0xB0D0);
4723 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4724 	buf += n; size_left -= n;
4725 	n = snprintf(buf, size_left, "\nASEQ-1 registers\n");
4726 	buf += n; size_left -= n;
4727 	REG_WR32(qlt, 0x54, 0xB0E0);
4728 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4729 	buf += n; size_left -= n;
4730 	n = snprintf(buf, size_left, "\nASEQ-2 registers\n");
4731 	buf += n; size_left -= n;
4732 	REG_WR32(qlt, 0x54, 0xB0F0);
4733 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4734 	buf += n; size_left -= n;
4735 
4736 over_aseq_regs:;
4737 
4738 	/*
4739 	 * Command DMA registers
4740 	 */
4741 	n = snprintf(buf, size_left, "\nCommand DMA registers\n");
4742 	buf += n; size_left -= n;
4743 	REG_WR32(qlt, 0x54, 0x7100);
4744 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4745 	buf += n; size_left -= n;
4746 
4747 	/*
4748 	 * Queues
4749 	 */
4750 	n = snprintf(buf, size_left,
4751 	    "\nRequest0 Queue DMA Channel registers\n");
4752 	buf += n; size_left -= n;
4753 	REG_WR32(qlt, 0x54, 0x7200);
4754 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4755 	buf += n; size_left -= n;
4756 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4757 	buf += n; size_left -= n;
4758 
4759 	n = snprintf(buf, size_left,
4760 	    "\n\nResponse0 Queue DMA Channel registers\n");
4761 	buf += n; size_left -= n;
4762 	REG_WR32(qlt, 0x54, 0x7300);
4763 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4764 	buf += n; size_left -= n;
4765 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4766 	buf += n; size_left -= n;
4767 
4768 	n = snprintf(buf, size_left,
4769 	    "\n\nRequest1 Queue DMA Channel registers\n");
4770 	buf += n; size_left -= n;
4771 	REG_WR32(qlt, 0x54, 0x7400);
4772 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4773 	buf += n; size_left -= n;
4774 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4775 	buf += n; size_left -= n;
4776 
4777 	/*
4778 	 * Transmit DMA registers
4779 	 */
4780 	n = snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
4781 	buf += n; size_left -= n;
4782 	REG_WR32(qlt, 0x54, 0x7600);
4783 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4784 	buf += n; size_left -= n;
4785 	REG_WR32(qlt, 0x54, 0x7610);
4786 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4787 	buf += n; size_left -= n;
4788 	n = snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
4789 	buf += n; size_left -= n;
4790 	REG_WR32(qlt, 0x54, 0x7620);
4791 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4792 	buf += n; size_left -= n;
4793 	REG_WR32(qlt, 0x54, 0x7630);
4794 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4795 	buf += n; size_left -= n;
4796 	n = snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
4797 	buf += n; size_left -= n;
4798 	REG_WR32(qlt, 0x54, 0x7640);
4799 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4800 	buf += n; size_left -= n;
4801 	REG_WR32(qlt, 0x54, 0x7650);
4802 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4803 	buf += n; size_left -= n;
4804 	n = snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
4805 	buf += n; size_left -= n;
4806 	REG_WR32(qlt, 0x54, 0x7660);
4807 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4808 	buf += n; size_left -= n;
4809 	REG_WR32(qlt, 0x54, 0x7670);
4810 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4811 	buf += n; size_left -= n;
4812 	n = snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
4813 	buf += n; size_left -= n;
4814 	REG_WR32(qlt, 0x54, 0x7680);
4815 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4816 	buf += n; size_left -= n;
4817 	REG_WR32(qlt, 0x54, 0x7690);
4818 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4819 	buf += n; size_left -= n;
4820 	n = snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
4821 	buf += n; size_left -= n;
4822 	REG_WR32(qlt, 0x54, 0x76A0);
4823 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4824 	buf += n; size_left -= n;
4825 
4826 	/*
4827 	 * Receive DMA registers
4828 	 */
4829 	n = snprintf(buf, size_left, "\nRCV Thread 0 Data DMA registers\n");
4830 	buf += n; size_left -= n;
4831 	REG_WR32(qlt, 0x54, 0x7700);
4832 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4833 	buf += n; size_left -= n;
4834 	REG_WR32(qlt, 0x54, 0x7710);
4835 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4836 	buf += n; size_left -= n;
4837 	n = snprintf(buf, size_left, "\nRCV Thread 1 Data DMA registers\n");
4838 	buf += n; size_left -= n;
4839 	REG_WR32(qlt, 0x54, 0x7720);
4840 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4841 	buf += n; size_left -= n;
4842 	REG_WR32(qlt, 0x54, 0x7730);
4843 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4844 	buf += n; size_left -= n;
4845 
4846 	/*
4847 	 * RISC registers
4848 	 */
4849 	n = snprintf(buf, size_left, "\nRISC GP registers\n");
4850 	buf += n; size_left -= n;
4851 	REG_WR32(qlt, 0x54, 0x0F00);
4852 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4853 	buf += n; size_left -= n;
4854 	REG_WR32(qlt, 0x54, 0x0F10);
4855 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4856 	buf += n; size_left -= n;
4857 	REG_WR32(qlt, 0x54, 0x0F20);
4858 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4859 	buf += n; size_left -= n;
4860 	REG_WR32(qlt, 0x54, 0x0F30);
4861 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4862 	buf += n; size_left -= n;
4863 	REG_WR32(qlt, 0x54, 0x0F40);
4864 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4865 	buf += n; size_left -= n;
4866 	REG_WR32(qlt, 0x54, 0x0F50);
4867 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4868 	buf += n; size_left -= n;
4869 	REG_WR32(qlt, 0x54, 0x0F60);
4870 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4871 	buf += n; size_left -= n;
4872 	REG_WR32(qlt, 0x54, 0x0F70);
4873 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4874 	buf += n; size_left -= n;
4875 
4876 	/*
4877 	 * Local memory controller registers
4878 	 */
4879 	n = snprintf(buf, size_left, "\nLMC registers\n");
4880 	buf += n; size_left -= n;
4881 	REG_WR32(qlt, 0x54, 0x3000);
4882 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4883 	buf += n; size_left -= n;
4884 	REG_WR32(qlt, 0x54, 0x3010);
4885 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4886 	buf += n; size_left -= n;
4887 	REG_WR32(qlt, 0x54, 0x3020);
4888 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4889 	buf += n; size_left -= n;
4890 	REG_WR32(qlt, 0x54, 0x3030);
4891 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4892 	buf += n; size_left -= n;
4893 	REG_WR32(qlt, 0x54, 0x3040);
4894 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4895 	buf += n; size_left -= n;
4896 	REG_WR32(qlt, 0x54, 0x3050);
4897 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4898 	buf += n; size_left -= n;
4899 	REG_WR32(qlt, 0x54, 0x3060);
4900 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4901 	buf += n; size_left -= n;
4902 
4903 	if (qlt->qlt_25xx_chip) {
4904 		REG_WR32(qlt, 0x54, 0x3070);
4905 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4906 		buf += n; size_left -= n;
4907 	}
4908 
4909 	/*
4910 	 * Fibre protocol module registers
4911 	 */
4912 	n = snprintf(buf, size_left, "\nFPM hardware registers\n");
4913 	buf += n; size_left -= n;
4914 	REG_WR32(qlt, 0x54, 0x4000);
4915 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4916 	buf += n; size_left -= n;
4917 	REG_WR32(qlt, 0x54, 0x4010);
4918 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4919 	buf += n; size_left -= n;
4920 	REG_WR32(qlt, 0x54, 0x4020);
4921 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4922 	buf += n; size_left -= n;
4923 	REG_WR32(qlt, 0x54, 0x4030);
4924 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4925 	buf += n; size_left -= n;
4926 	REG_WR32(qlt, 0x54, 0x4040);
4927 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4928 	buf += n; size_left -= n;
4929 	REG_WR32(qlt, 0x54, 0x4050);
4930 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4931 	buf += n; size_left -= n;
4932 	REG_WR32(qlt, 0x54, 0x4060);
4933 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4934 	buf += n; size_left -= n;
4935 	REG_WR32(qlt, 0x54, 0x4070);
4936 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4937 	buf += n; size_left -= n;
4938 	REG_WR32(qlt, 0x54, 0x4080);
4939 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4940 	buf += n; size_left -= n;
4941 	REG_WR32(qlt, 0x54, 0x4090);
4942 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4943 	buf += n; size_left -= n;
4944 	REG_WR32(qlt, 0x54, 0x40A0);
4945 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4946 	buf += n; size_left -= n;
4947 	REG_WR32(qlt, 0x54, 0x40B0);
4948 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4949 	buf += n; size_left -= n;
4950 
4951 	/*
4952 	 * Fibre buffer registers
4953 	 */
4954 	n = snprintf(buf, size_left, "\nFB hardware registers\n");
4955 	buf += n; size_left -= n;
4956 	REG_WR32(qlt, 0x54, 0x6000);
4957 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4958 	buf += n; size_left -= n;
4959 	REG_WR32(qlt, 0x54, 0x6010);
4960 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4961 	buf += n; size_left -= n;
4962 	REG_WR32(qlt, 0x54, 0x6020);
4963 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4964 	buf += n; size_left -= n;
4965 	REG_WR32(qlt, 0x54, 0x6030);
4966 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4967 	buf += n; size_left -= n;
4968 	REG_WR32(qlt, 0x54, 0x6040);
4969 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4970 	buf += n; size_left -= n;
4971 	REG_WR32(qlt, 0x54, 0x6100);
4972 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4973 	buf += n; size_left -= n;
4974 	REG_WR32(qlt, 0x54, 0x6130);
4975 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4976 	buf += n; size_left -= n;
4977 	REG_WR32(qlt, 0x54, 0x6150);
4978 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4979 	buf += n; size_left -= n;
4980 	REG_WR32(qlt, 0x54, 0x6170);
4981 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4982 	buf += n; size_left -= n;
4983 	REG_WR32(qlt, 0x54, 0x6190);
4984 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4985 	buf += n; size_left -= n;
4986 	REG_WR32(qlt, 0x54, 0x61B0);
4987 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4988 	buf += n; size_left -= n;
4989 
4990 	if (qlt->qlt_25xx_chip) {
4991 		REG_WR32(qlt, 0x54, 0x6F00);
4992 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4993 		buf += n; size_left -= n;
4994 	}
4995 
4996 	qlt->intr_sneak_counter = 10;
4997 	qlt_disable_intr(qlt);
4998 	mutex_enter(&qlt->intr_lock);
4999 	qlt->qlt_intr_enabled = 0;
5000 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
5001 	drv_usecwait(20);
5002 	qlt->intr_sneak_counter = 0;
5003 	mutex_exit(&qlt->intr_lock);
5004 
5005 	/*
5006 	 * Memory
5007 	 */
5008 	n = snprintf(buf, size_left, "\nCode RAM\n");
5009 	buf += n; size_left -= n;
5010 
5011 	addr = 0x20000;
5012 	endaddr = 0x22000;
5013 	words_to_read = 0;
5014 	while (addr < endaddr) {
5015 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5016 		if ((words_to_read + addr) > endaddr) {
5017 			words_to_read = endaddr - addr;
5018 		}
5019 		if (qlt_read_risc_ram(qlt, addr, words_to_read) !=
5020 		    QLT_SUCCESS) {
5021 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error "
5022 			    "reading risc ram - CODE RAM");
5023 			goto dump_fail;
5024 		}
5025 
5026 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5027 		buf += n; size_left -= n;
5028 
5029 		if (size_left < 100000) {
5030 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run "
5031 			    "out of space - CODE RAM");
5032 			goto dump_ok;
5033 		}
5034 		addr += words_to_read;
5035 	}
5036 
5037 	n = snprintf(buf, size_left, "\nExternal Memory\n");
5038 	buf += n; size_left -= n;
5039 
5040 	addr = 0x100000;
5041 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5042 	endaddr++;
5043 	if (endaddr & 7) {
5044 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
5045 	}
5046 
5047 	words_to_read = 0;
5048 	while (addr < endaddr) {
5049 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5050 		if ((words_to_read + addr) > endaddr) {
5051 			words_to_read = endaddr - addr;
5052 		}
5053 		if (qlt_read_risc_ram(qlt, addr, words_to_read) !=
5054 		    QLT_SUCCESS) {
5055 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error "
5056 			    "reading risc ram - EXT RAM");
5057 			goto dump_fail;
5058 		}
5059 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5060 		buf += n; size_left -= n;
5061 		if (size_left < 100000) {
5062 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run "
5063 			    "out of space - EXT RAM");
5064 			goto dump_ok;
5065 		}
5066 		addr += words_to_read;
5067 	}
5068 
5069 	/*
5070 	 * Label the end tag
5071 	 */
5072 	n = snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5073 	buf += n; size_left -= n;
5074 
5075 	/*
5076 	 * Queue dumping
5077 	 */
5078 	n = snprintf(buf, size_left, "\nRequest Queue\n");
5079 	buf += n; size_left -= n;
5080 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5081 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
5082 	buf += n; size_left -= n;
5083 
5084 	n = snprintf(buf, size_left, "\nPriority Queue\n");
5085 	buf += n; size_left -= n;
5086 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5087 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5088 	buf += n; size_left -= n;
5089 
5090 	n = snprintf(buf, size_left, "\nResponse Queue\n");
5091 	buf += n; size_left -= n;
5092 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5093 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5094 	buf += n; size_left -= n;
5095 
5096 	n = snprintf(buf, size_left, "\nATIO queue\n");
5097 	buf += n; size_left -= n;
5098 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5099 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5100 	buf += n; size_left -= n;
5101 
5102 	/*
5103 	 * Label the dump reason
5104 	 */
5105 	n = snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5106 	    qlt->qlt_port_alias, ssci->st_additional_info);
5107 	buf += n; size_left -= n;
5108 
5109 dump_ok:
5110 	QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: left-%d", size_left);
5111 
5112 	mutex_enter(&qlt->qlt_ioctl_lock);
5113 	qlt->qlt_ioctl_flags &=
5114 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5115 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5116 	mutex_exit(&qlt->qlt_ioctl_lock);
5117 	return (FCT_SUCCESS);
5118 
5119 dump_fail:
5120 	mutex_enter(&qlt->qlt_ioctl_lock);
5121 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5122 	mutex_exit(&qlt->qlt_ioctl_lock);
5123 	return (FCT_FAILURE);
5124 }
5125 
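/*
 * Read 'count' consecutive 32-bit registers starting at register offset
 * 'startaddr' and format them into 'buf', eight values per line. Returns
 * the number of characters written.
 */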
5126 static int
5127 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5128     int size_left)
5129 {
5130 	int		i;
5131 	int		n;
5132 	char		c = ' ';
5133 
5134 	for (i = 0, n = 0; i < count; i++) {
5135 		if ((i + 1) & 7) {
5136 			c = ' ';
5137 		} else {
5138 			c = '\n';
5139 		}
5140 		n += snprintf(&buf[n], (size_left - n), "%08x%c",
5141 		    REG_RD32(qlt, startaddr + (i << 2)), c);
5142 	}
5143 	return (n);
5144 }
5145 
5146 static int
5147 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5148     caddr_t buf, int size_left)
5149 {
5150 	int		i;
5151 	int		n;
5152 	char		c = ' ';
5153 	uint32_t	*ptr;
5154 
5155 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5156 	for (i = 0, n = 0; i < words; i++) {
5157 		if ((i & 7) == 0) {
5158 			n += snprintf(&buf[n], (size_left - n), "%08x: ",
5159 			    addr + i);
5160 		}
5161 		if ((i + 1) & 7) {
5162 			c = ' ';
5163 		} else {
5164 			c = '\n';
5165 		}
5166 		n += snprintf(&buf[n], (size_left - n), "%08x%c", ptr[i], c);
5167 	}
5168 	return (n);
5169 }
5170 
5171 static int
5172 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5173     int size_left)
5174 {
5175 	int		i;
5176 	int		n;
5177 	char		c = ' ';
5178 	int		words;
5179 	uint16_t	*ptr;
5180 	uint16_t	w;
5181 
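	/* Each queue entry is a 64-byte IOCB, i.e. 32 16-bit words. */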
5182 	words = entries * 32;
5183 	ptr = (uint16_t *)qadr;
5184 	for (i = 0, n = 0; i < words; i++) {
5185 		if ((i & 7) == 0) {
5186 			n += snprintf(&buf[n], (size_left - n), "%05x: ", i);
5187 		}
5188 		if ((i + 1) & 7) {
5189 			c = ' ';
5190 		} else {
5191 			c = '\n';
5192 		}
5193 		w = QMEM_RD16(qlt, &ptr[i]);
5194 		n += snprintf(&buf[n], (size_left - n), "%04x%c", w, c);
5195 	}
5196 	return (n);
5197 }
5198 
5199 /*
5200  * Only called by the debug dump. Interrupts are disabled and the mailboxes,
5201  * along with the mailbox RAM, are available.
5202  * Copies data from RISC RAM to system memory.
5203  */
5204 static fct_status_t
5205 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5206 {
5207 	uint64_t	da;
5208 	fct_status_t	ret;
5209 
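	/*
	 * Mailbox command 0xc copies a block of RISC RAM to host memory:
	 * mailboxes 1/8 take the RISC source address, 2/3/6/7 the 64-bit
	 * system destination address, and 4/5 the 32-bit word count, as
	 * loaded below.
	 */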
5210 	REG_WR16(qlt, REG_MBOX(0), 0xc);
5211 	da = qlt->queue_mem_cookie.dmac_laddress;
5212 	da += MBOX_DMA_MEM_OFFSET;
5213 
5214 	/*
5215 	 * System destination address
5216 	 */
5217 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
5218 	da >>= 16;
5219 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
5220 	da >>= 16;
5221 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
5222 	da >>= 16;
5223 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
5224 
5225 	/*
5226 	 * Length
5227 	 */
5228 	REG_WR16(qlt, REG_MBOX(5), words & 0xffff);
5229 	REG_WR16(qlt, REG_MBOX(4), ((words >> 16) & 0xffff));
5230 
5231 	/*
5232 	 * RISC source address
5233 	 */
5234 	REG_WR16(qlt, REG_MBOX(1), addr & 0xffff);
5235 	REG_WR16(qlt, REG_MBOX(8), ((addr >> 16) & 0xffff));
5236 
5237 	ret = qlt_raw_mailbox_command(qlt);
5238 	REG_WR32(qlt, REG_HCCR, 0xA0000000);
5239 	if (ret == QLT_SUCCESS) {
5240 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5241 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5242 	} else {
5243 		QLT_LOG(qlt->qlt_port_alias, "qlt_read_risc_ram: qlt raw_mbox "
5244 		    "failed 0x%llX", ret);
5245 	}
5246 	return (ret);
5247 }
5248