xref: /illumos-gate/usr/src/uts/common/io/comstar/port/qlt/qlt.c (revision fcf3ce441efd61da9bb2884968af01cb7c1452cc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/ddi.h>
28 #include <sys/stat.h>
29 #include <sys/pci.h>
30 #include <sys/sunddi.h>
31 #include <sys/modctl.h>
32 #include <sys/file.h>
33 #include <sys/cred.h>
34 #include <sys/byteorder.h>
35 #include <sys/atomic.h>
36 #include <sys/scsi/scsi.h>
37 
38 #include <stmf_defines.h>
39 #include <fct_defines.h>
40 #include <stmf.h>
41 #include <portif.h>
42 #include <fct.h>
43 #include <qlt.h>
44 #include <qlt_dma.h>
45 #include <qlt_ioctl.h>
46 #include <stmf_ioctl.h>
47 
48 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
49 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
50 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
51     int reset_only);
52 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
53     uint32_t word_count, uint32_t risc_addr);
54 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
55 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
56 					uint32_t dma_size);
57 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
58 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
59 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
60 static fct_status_t qlt_initialize_adapter(fct_local_port_t *port);
61 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
62     stmf_state_change_info_t *ssci);
63 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
64 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
65 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
66 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
67 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
68 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
69 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
70 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
71     uint8_t *rsp);
72 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
73 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
74 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
75 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
76     int reset_only);
77 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
78     uint32_t word_count, uint32_t risc_addr);
79 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
80 fct_status_t qlt_port_start(caddr_t arg);
81 fct_status_t qlt_port_stop(caddr_t arg);
82 fct_status_t qlt_port_online(qlt_state_t *qlt);
83 fct_status_t qlt_port_offline(qlt_state_t *qlt);
84 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
85     fct_link_info_t *li);
86 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
87 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
88 						fct_flogi_xchg_t *fx);
89 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
90 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
91 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
92     fct_remote_port_t *rp, fct_cmd_t *login);
93 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
94     fct_remote_port_t *rp);
95 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
96 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
97 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
98     fct_cmd_t *cmd, int terminate);
99 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
100 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
101 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
102     fct_cmd_t *cmd, uint32_t flags);
103 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
104 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
105 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
106 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
107 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
108 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
109 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
110     stmf_data_buf_t *dbuf, uint32_t ioflags);
111 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
112 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
113 static void qlt_release_intr(qlt_state_t *qlt);
114 static int qlt_setup_interrupts(qlt_state_t *qlt);
115 static void qlt_destroy_mutex(qlt_state_t *qlt);
116 
117 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
118     uint32_t words);
119 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
120     caddr_t buf, int size_left);
121 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
122     caddr_t buf, int size_left);
123 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
124     int count, int size_left);
125 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
126     cred_t *credp, int *rval);
127 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
128 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
129 
130 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] |= \
131 				    ((uint8_t)1) << ((els) & 7)
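/*
 * Note: the ELS bitmap handed to the firmware (see qlt_port_online()) is a
 * 32-byte (256-bit) array with one bit per ELS opcode. ((els) >> 3) & 0x1F
 * selects the byte within the map and (els) & 7 selects the bit in that byte.
 */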
132 
133 int qlt_enable_msix = 0;
134 
135 /* Array to quickly calculate next free buf index to use */
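/*
 * Indexed by a 4-bit "in use" mask, qlt_nfb[mask] is the index of the lowest
 * clear bit (i.e. the next free buffer); 0xff means all four are in use.
 */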
136 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
137 
138 static struct cb_ops qlt_cb_ops = {
139 	qlt_open,
140 	qlt_close,
141 	nodev,
142 	nodev,
143 	nodev,
144 	nodev,
145 	nodev,
146 	qlt_ioctl,
147 	nodev,
148 	nodev,
149 	nodev,
150 	nochpoll,
151 	ddi_prop_op,
152 	0,
153 	D_MP | D_NEW
154 };
155 
156 static struct dev_ops qlt_ops = {
157 	DEVO_REV,
158 	0,
159 	nodev,
160 	nulldev,
161 	nulldev,
162 	qlt_attach,
163 	qlt_detach,
164 	nodev,
165 	&qlt_cb_ops,
166 	NULL,
167 	ddi_power
168 };
169 
170 #define	QLT_NAME    "COMSTAR QLT"
171 #define	QLT_VERSION "1.0"
172 
173 static struct modldrv modldrv = {
174 	&mod_driverops,
175 	QLT_NAME,
176 	&qlt_ops,
177 };
178 
179 static struct modlinkage modlinkage = {
180 	MODREV_1, &modldrv, NULL
181 };
182 
183 void *qlt_state = NULL;
184 kmutex_t qlt_global_lock;
185 static uint32_t qlt_loaded_counter = 0;
186 
187 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
188 			"-X Mode 1 133", "--Invalid--",
189 			"-X Mode 2 66", "-X Mode 2 100",
190 			"-X Mode 2 133", " 66" };
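/*
 * Indexed by bits 8-11 of REG_CTRL_STATUS on the 2422 (see qlt_attach());
 * index 0 is a conventional 33MHz bus and index 8 a conventional 66MHz bus.
 */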
191 
192 /* Always use 64 bit DMA. */
193 static ddi_dma_attr_t qlt_queue_dma_attr = {
194 	DMA_ATTR_V0,		/* dma_attr_version */
195 	0,			/* low DMA address range */
196 	0xffffffffffffffff,	/* high DMA address range */
197 	0xffffffff,		/* DMA counter register */
198 	64,			/* DMA address alignment */
199 	0xff,			/* DMA burstsizes */
200 	1,			/* min effective DMA size */
201 	0xffffffff,		/* max DMA xfer size */
202 	0xffffffff,		/* segment boundary */
203 	1,			/* s/g list length */
204 	1,			/* granularity of device */
205 	0			/* DMA transfer flags */
206 };
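/*
 * With a scatter/gather list length of 1, the queue memory must bind to a
 * single DMA cookie; qlt_attach() fails the attach if more than one cookie
 * is returned for this allocation.
 */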
207 
208 /* qlogic logging */
209 int enable_extended_logging = 0;
210 
211 static char qlt_provider_name[] = "qlt";
212 static struct stmf_port_provider *qlt_pp;
213 
214 int
215 _init(void)
216 {
217 	int ret;
218 
219 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
220 	if (ret == 0) {
221 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
222 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
223 			    STMF_STRUCT_PORT_PROVIDER, 0, 0);
224 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
225 		qlt_pp->pp_name = qlt_provider_name;
226 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
227 			stmf_free(qlt_pp);
228 			mutex_destroy(&qlt_global_lock);
229 			ddi_soft_state_fini(&qlt_state);
230 			return (EIO);
231 		}
232 		ret = mod_install(&modlinkage);
233 		if (ret != 0) {
234 			(void) stmf_deregister_port_provider(qlt_pp);
235 			stmf_free(qlt_pp);
236 			mutex_destroy(&qlt_global_lock);
237 			ddi_soft_state_fini(&qlt_state);
238 		}
239 	}
240 	return (ret);
241 }
242 
243 int
244 _fini(void)
245 {
246 	int ret;
247 
248 	if (qlt_loaded_counter)
249 		return (EBUSY);
250 	ret = mod_remove(&modlinkage);
251 	if (ret == 0) {
252 		(void) stmf_deregister_port_provider(qlt_pp);
253 		stmf_free(qlt_pp);
254 		mutex_destroy(&qlt_global_lock);
255 		ddi_soft_state_fini(&qlt_state);
256 	}
257 	return (ret);
258 }
259 
260 int
261 _info(struct modinfo *modinfop)
262 {
263 	return (mod_info(&modlinkage, modinfop));
264 }
265 
266 int
267 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
268 {
269 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
270 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
271 }
272 
273 static int
274 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
275 {
276 	int		instance;
277 	qlt_state_t	*qlt;
278 	ddi_device_acc_attr_t	dev_acc_attr;
279 	uint16_t	did;
280 	uint16_t	val;
281 	uint16_t	mr;
282 	size_t		discard;
283 	uint_t		ncookies;
284 	int		max_read_size;
285 	int		max_payload_size;
286 	fct_status_t	ret;
287 
288 	/* No support for suspend resume yet */
289 	if (cmd != DDI_ATTACH)
290 		return (DDI_FAILURE);
291 	instance = ddi_get_instance(dip);
292 
293 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
294 		return (DDI_FAILURE);
295 	}
296 
297 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance))
298 		== NULL) {
299 		goto attach_fail_1;
300 	}
301 	qlt->instance = instance;
302 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
303 	qlt->dip = dip;
304 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
305 		goto attach_fail_2;
306 	}
307 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
308 	if ((did != 0x2422) && (did != 0x2432) &&
309 	    (did != 0x2522) && (did != 0x2532)) {
310 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
311 		    instance, did);
312 		goto attach_fail_4;
313 	}
314 	if ((did & 0xFF00) == 0x2500)
315 		qlt->qlt_25xx_chip = 1;
316 
317 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
318 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
319 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
320 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
321 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
322 		goto attach_fail_4;
323 	}
324 	if (did == 0x2422) {
325 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
326 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
327 		pci_bits >>= 8;
328 		pci_bits &= 0xf;
329 		if ((pci_bits == 3) || (pci_bits == 7)) {
330 			cmn_err(CE_NOTE,
331 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
332 			    instance, pci_speeds[pci_bits], pci_bits);
333 		} else {
334 			cmn_err(CE_WARN,
335 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
336 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
337 			    "(Invalid)", ((pci_bits == 0) ||
338 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
339 			    "32 bit slot ") : "", pci_bits);
340 		}
341 	}
342 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
343 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
344 		    (unsigned long long)ret);
345 		goto attach_fail_5;
346 	}
347 
348 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
349 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
350 		goto attach_fail_5;
351 	}
352 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
353 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
354 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
355 	    DDI_SUCCESS) {
356 		goto attach_fail_6;
357 	}
358 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
359 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
360 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
361 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
362 		goto attach_fail_7;
363 	}
364 	if (ncookies != 1)
365 		goto attach_fail_8;
366 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
367 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
368 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
369 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
370 
371 	/* mutexes are initialized in this function */
372 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
373 		goto attach_fail_8;
374 
375 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
376 				"qlt%d", instance);
377 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
378 	    "%s,0", qlt->qlt_minor_name);
379 
380 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
381 				instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
382 		goto attach_fail_9;
383 	}
384 
385 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
386 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
387 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
388 
389 	/* Setup PCI cfg space registers */
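	/*
	 * The two chip families keep these settings at different config-space
	 * offsets: 0x4E (bits 2-3) on the 2422 versus 0x54 (bits 12-14 for
	 * max read request, bits 5-7 for the PCIe max payload size) on the
	 * 2432/2532; presumably the PCI-X command register and the PCIe
	 * device control register, respectively.
	 */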
390 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
391 	if (max_read_size == 11)
392 		goto over_max_read_xfer_setting;
393 	if (did == 0x2422) {
394 		if (max_read_size == 512)
395 			val = 0;
396 		else if (max_read_size == 1024)
397 			val = 1;
398 		else if (max_read_size == 2048)
399 			val = 2;
400 		else if (max_read_size == 4096)
401 			val = 3;
402 		else {
403 			cmn_err(CE_WARN, "qlt(%d) malformed "
404 			    "pci-max-read-request in qlt.conf. Valid values "
405 			    "for this HBA are 512/1024/2048/4096", instance);
406 			goto over_max_read_xfer_setting;
407 		}
408 		mr = PCICFG_RD16(qlt, 0x4E);
409 		mr &= 0xfff3;
410 		mr |= (val << 2);
411 		PCICFG_WR16(qlt, 0x4E, mr);
412 	} else if ((did == 0x2432) || (did == 0x2532)) {
413 		if (max_read_size == 128)
414 			val = 0;
415 		else if (max_read_size == 256)
416 			val = 1;
417 		else if (max_read_size == 512)
418 			val = 2;
419 		else if (max_read_size == 1024)
420 			val = 3;
421 		else if (max_read_size == 2048)
422 			val = 4;
423 		else if (max_read_size == 4096)
424 			val = 5;
425 		else {
426 			cmn_err(CE_WARN, "qlt(%d) malformed "
427 			    "pci-max-read-request in qlt.conf. Valid values "
428 			    "for this HBA are 128/256/512/1024/2048/4096",
429 				instance);
430 			goto over_max_read_xfer_setting;
431 		}
432 		mr = PCICFG_RD16(qlt, 0x54);
433 		mr &= 0x8fff;
434 		mr |= (val << 12);
435 		PCICFG_WR16(qlt, 0x54, mr);
436 	} else {
437 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
438 		    "pci-max-read-request for this device (%x)",
439 		    instance, did);
440 	}
441 over_max_read_xfer_setting:;
442 
443 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
444 	if (max_payload_size == 11)
445 		goto over_max_payload_setting;
446 	if ((did == 0x2432) || (did == 0x2532)) {
447 		if (max_payload_size == 128)
448 			val = 0;
449 		else if (max_payload_size == 256)
450 			val = 1;
451 		else if (max_payload_size == 512)
452 			val = 2;
453 		else if (max_payload_size == 1024)
454 			val = 3;
455 		else {
456 			cmn_err(CE_WARN, "qlt(%d) malformed "
457 			    "pcie-max-payload-size in qlt.conf. Valid values "
458 			    "for this HBA are 128/256/512/1024",
459 				instance);
460 			goto over_max_payload_setting;
461 		}
462 		mr = PCICFG_RD16(qlt, 0x54);
463 		mr &= 0xff1f;
464 		mr |= (val << 5);
465 		PCICFG_WR16(qlt, 0x54, mr);
466 	} else {
467 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
468 		    "pcie-max-payload-size for this device (%x)",
469 		    instance, did);
470 	}
471 
472 over_max_payload_setting:;
473 
474 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
475 		goto attach_fail_10;
476 
477 	ddi_report_dev(dip);
478 	return (DDI_SUCCESS);
479 
480 attach_fail_10:;
481 	mutex_destroy(&qlt->qlt_ioctl_lock);
482 	cv_destroy(&qlt->mbox_cv);
483 	cv_destroy(&qlt->rp_dereg_cv);
484 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
485 attach_fail_9:;
486 	qlt_destroy_mutex(qlt);
487 	qlt_release_intr(qlt);
488 attach_fail_8:;
489 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
490 attach_fail_7:;
491 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
492 attach_fail_6:;
493 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
494 attach_fail_5:;
495 	ddi_regs_map_free(&qlt->regs_acc_handle);
496 attach_fail_4:;
497 	pci_config_teardown(&qlt->pcicfg_acc_handle);
498 attach_fail_2:;
499 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
500 attach_fail_1:;
501 	ddi_soft_state_free(qlt_state, instance);
502 	return (DDI_FAILURE);
503 }
504 
505 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
506 
507 /* ARGSUSED */
508 static int
509 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
510 {
511 	qlt_state_t *qlt;
512 
513 	int instance;
514 
515 	instance = ddi_get_instance(dip);
516 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance))
517 					== NULL) {
518 		return (DDI_FAILURE);
519 	}
520 
521 	if (qlt->fw_code01) {
522 		return (DDI_FAILURE);
523 	}
524 
525 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
526 				qlt->qlt_state_not_acked) {
527 		return (DDI_FAILURE);
528 	}
529 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS)
530 		return (DDI_FAILURE);
531 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
532 	qlt_destroy_mutex(qlt);
533 	qlt_release_intr(qlt);
534 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
535 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
536 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
537 	ddi_regs_map_free(&qlt->regs_acc_handle);
538 	pci_config_teardown(&qlt->pcicfg_acc_handle);
539 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
540 	cv_destroy(&qlt->mbox_cv);
541 	cv_destroy(&qlt->rp_dereg_cv);
542 	ddi_soft_state_free(qlt_state, instance);
543 
544 	return (DDI_SUCCESS);
545 }
546 
547 static void
548 qlt_enable_intr(qlt_state_t *qlt)
549 {
550 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
551 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
552 	} else {
553 		int i;
554 		for (i = 0; i < qlt->intr_cnt; i++)
555 			(void) ddi_intr_enable(qlt->htable[i]);
556 	}
557 }
558 
559 static void
560 qlt_disable_intr(qlt_state_t *qlt)
561 {
562 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
563 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
564 	} else {
565 		int i;
566 		for (i = 0; i < qlt->intr_cnt; i++)
567 			(void) ddi_intr_disable(qlt->htable[i]);
568 	}
569 }
570 
571 static void
572 qlt_release_intr(qlt_state_t *qlt)
573 {
574 	if (qlt->htable) {
575 		int i;
576 		for (i = 0; i < qlt->intr_cnt; i++) {
577 			(void) ddi_intr_remove_handler(qlt->htable[i]);
578 			(void) ddi_intr_free(qlt->htable[i]);
579 		}
580 		kmem_free(qlt->htable, qlt->intr_size);
581 	}
582 	qlt->htable = NULL;
583 	qlt->intr_pri = 0;
584 	qlt->intr_cnt = 0;
585 	qlt->intr_size = 0;
586 	qlt->intr_cap = 0;
587 }
588 
589 
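/*
 * The driver mutexes are created at the interrupt priority obtained from
 * ddi_intr_get_pri() so that they can safely be acquired from qlt_isr().
 */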
590 static void
591 qlt_init_mutex(qlt_state_t *qlt)
592 {
593 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
594 	    INT2PTR(qlt->intr_pri, void *));
595 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
596 	    INT2PTR(qlt->intr_pri, void *));
597 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
598 	    INT2PTR(qlt->intr_pri, void *));
599 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
600 	    INT2PTR(qlt->intr_pri, void *));
601 }
602 
603 static void
604 qlt_destroy_mutex(qlt_state_t *qlt)
605 {
606 	mutex_destroy(&qlt->req_lock);
607 	mutex_destroy(&qlt->preq_lock);
608 	mutex_destroy(&qlt->mbox_lock);
609 	mutex_destroy(&qlt->intr_lock);
610 }
611 
612 
613 static int
614 qlt_setup_msix(qlt_state_t *qlt)
615 {
616 	int count, avail, actual;
617 	int ret;
618 	int itype = DDI_INTR_TYPE_MSIX;
619 	int i;
620 
621 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
622 	if (ret != DDI_SUCCESS || count == 0) {
623 		return (DDI_FAILURE);
624 	}
625 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
626 	if (ret != DDI_SUCCESS || avail == 0) {
627 		return (DDI_FAILURE);
628 	}
629 	if (avail < count) {
630 		stmf_trace(qlt->qlt_port_alias,
631 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
632 	}
633 
634 	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
635 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
636 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
637 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
638 	/* we need at least 2 interrupt vectors */
639 	if (ret != DDI_SUCCESS || actual < 2) {
640 		ret = DDI_FAILURE;
641 		goto release_intr;
642 	}
643 	if (actual < count) {
644 		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msix: "
645 		    "requested: %d, received: %d\n",
646 		    count, actual);
647 	}
648 
649 	qlt->intr_cnt = actual;
650 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
651 	if (ret != DDI_SUCCESS) {
652 		ret = DDI_FAILURE;
653 		goto release_intr;
654 	}
655 	qlt_init_mutex(qlt);
656 	for (i = 0; i < actual; i++) {
657 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
658 		    qlt, INT2PTR(i, void *));
659 		if (ret != DDI_SUCCESS)
660 			goto release_mutex;
661 	}
662 
663 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
664 	qlt->intr_flags |= QLT_INTR_MSIX;
665 	return (DDI_SUCCESS);
666 
667 release_mutex:
668 	qlt_destroy_mutex(qlt);
669 release_intr:
670 	for (i = 0; i < actual; i++)
671 		(void) ddi_intr_free(qlt->htable[i]);
672 free_mem:
673 	kmem_free(qlt->htable, qlt->intr_size);
674 	qlt->htable = NULL;
675 	qlt_release_intr(qlt);
676 	return (ret);
677 }
678 
679 
680 static int
681 qlt_setup_msi(qlt_state_t *qlt)
682 {
683 	int count, avail, actual;
684 	int itype = DDI_INTR_TYPE_MSI;
685 	int ret;
686 	int i;
687 
688 	/* get the # of interrupts */
689 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
690 	if (ret != DDI_SUCCESS || count == 0) {
691 		return (DDI_FAILURE);
692 	}
693 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
694 	if (ret != DDI_SUCCESS || avail == 0) {
695 		return (DDI_FAILURE);
696 	}
697 	if (avail < count) {
698 		QLT_LOG(qlt->qlt_port_alias,
699 		    "qlt_setup_msi: nintrs=%d, avail=%d", count, avail);
700 	}
701 	/* We request only 1 MSI vector. */
702 	count = 1;
703 
704 	/* allocate interrupt */
705 	qlt->intr_size = count * sizeof (ddi_intr_handle_t);
706 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
707 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
708 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
709 	if (ret != DDI_SUCCESS || actual == 0) {
710 		ret = DDI_FAILURE;
711 		goto free_mem;
712 	}
713 	if (actual < count) {
714 		QLT_LOG(qlt->qlt_port_alias, "qlt_setup_msi: "
715 		    "requested: %d, received:%d",
716 		    count, actual);
717 	}
718 	qlt->intr_cnt = actual;
719 
720 	/*
721 	 * Get priority for first msi, assume remaining are all the same.
722 	 */
723 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
724 	if (ret != DDI_SUCCESS) {
725 		ret = DDI_FAILURE;
726 		goto release_intr;
727 	}
728 	qlt_init_mutex(qlt);
729 
730 	/* add handler */
731 	for (i = 0; i < actual; i++) {
732 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
733 		    qlt, INT2PTR(i, void *));
734 		if (ret != DDI_SUCCESS)
735 			goto release_mutex;
736 	}
737 
738 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
739 	qlt->intr_flags |= QLT_INTR_MSI;
740 	return (DDI_SUCCESS);
741 
742 release_mutex:
743 	qlt_destroy_mutex(qlt);
744 release_intr:
745 	for (i = 0; i < actual; i++)
746 		(void) ddi_intr_free(qlt->htable[i]);
747 free_mem:
748 	kmem_free(qlt->htable, qlt->intr_size);
749 	qlt->htable = NULL;
750 	qlt_release_intr(qlt);
751 	return (ret);
752 }
753 
754 static int
755 qlt_setup_fixed(qlt_state_t *qlt)
756 {
757 	int count;
758 	int actual;
759 	int ret;
760 	int itype = DDI_INTR_TYPE_FIXED;
761 
762 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
763 	/* Fixed interrupts provide exactly one vector. */
764 	if (ret != DDI_SUCCESS || count != 1) {
765 		return (DDI_FAILURE);
766 	}
767 
768 	qlt->intr_size = sizeof (ddi_intr_handle_t);
769 	qlt->htable = kmem_zalloc(qlt->intr_size, KM_SLEEP);
770 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
771 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
772 	if (ret != DDI_SUCCESS || actual != 1) {
773 		ret = DDI_FAILURE;
774 		goto free_mem;
775 	}
776 
777 	qlt->intr_cnt = actual;
778 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
779 	if (ret != DDI_SUCCESS) {
780 		ret = DDI_FAILURE;
781 		goto release_intr;
782 	}
783 	qlt_init_mutex(qlt);
784 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
785 	if (ret != DDI_SUCCESS)
786 		goto release_mutex;
787 
788 	qlt->intr_flags |= QLT_INTR_FIXED;
789 	return (DDI_SUCCESS);
790 
791 release_mutex:
792 	qlt_destroy_mutex(qlt);
793 release_intr:
794 	(void) ddi_intr_free(qlt->htable[0]);
795 free_mem:
796 	kmem_free(qlt->htable, qlt->intr_size);
797 	qlt->htable = NULL;
798 	qlt_release_intr(qlt);
799 	return (ret);
800 }
801 
802 
803 static int
804 qlt_setup_interrupts(qlt_state_t *qlt)
805 {
806 #if defined(__sparc)
807 	int itypes = 0;
808 #endif
809 
810 /*
811  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198). So use
812  * MSI for sparc only for now.
813  */
814 #if defined(__sparc)
815 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
816 		itypes = DDI_INTR_TYPE_FIXED;
817 	}
818 
819 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
820 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
821 			return (DDI_SUCCESS);
822 	}
823 	if (itypes & DDI_INTR_TYPE_MSI) {
824 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
825 			return (DDI_SUCCESS);
826 	}
827 #endif
828 	return (qlt_setup_fixed(qlt));
829 }
830 
831 /*
832  * Fill in the HBA attributes.
833  */
834 void
835 qlt_populate_hba_fru_details(struct fct_local_port *port,
836     struct fct_port_attrs *port_attrs)
837 {
838 	caddr_t	bufp;
839 	int len;
840 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
841 
842 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
843 	    "QLogic Corp.");
844 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
845 	    "%s", QLT_NAME);
846 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
847 	    "%s", QLT_VERSION);
848 	port_attrs->serial_number[0] = '\0';
849 	port_attrs->hardware_version[0] = '\0';
850 
851 	(void) snprintf(port_attrs->firmware_version,
852 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
853 	    qlt->fw_minor, qlt->fw_subminor);
854 
855 	/* Get FCode version */
856 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
857 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
858 	    (int *)&len) == DDI_PROP_SUCCESS) {
859 		(void) snprintf(port_attrs->option_rom_version,
860 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
861 		kmem_free(bufp, len);
862 		bufp = NULL;
863 	} else {
864 		(void) snprintf(port_attrs->option_rom_version,
865 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s",
866 #ifdef __sparc
867 		    "No Fcode found");
868 #else
869 		    "N/A");
870 #endif
871 	}
872 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
873 	    qlt->nvram->subsystem_vendor_id[1] << 8;
874 
875 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
876 	    qlt->nvram->max_frame_length[0];
877 
878 	port_attrs->supported_cos = 0x10000000;
879 	port_attrs->supported_speed = PORT_SPEED_1G |
880 	    PORT_SPEED_2G | PORT_SPEED_4G;
881 	if (qlt->qlt_25xx_chip)
882 		port_attrs->supported_speed |= PORT_SPEED_8G;
883 
884 	(void) snprintf(port_attrs->model, FCHBA_MODEL_LEN, "%s",
885 	    qlt->nvram->model_name);
886 	(void) snprintf(port_attrs->model_description,
887 	    FCHBA_MODEL_DESCRIPTION_LEN, "%s", qlt->nvram->model_name);
888 }
889 
890 fct_status_t
891 qlt_port_start(caddr_t arg)
892 {
893 	qlt_state_t *qlt = (qlt_state_t *)arg;
894 	fct_local_port_t *port;
895 	fct_dbuf_store_t *fds;
896 
897 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
898 		return (FCT_FAILURE);
899 	}
900 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
901 	if (port == NULL) {
902 		goto qlt_pstart_fail_1;
903 	}
904 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
905 	if (fds == NULL) {
906 		goto qlt_pstart_fail_2;
907 	}
908 	qlt->qlt_port = port;
909 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
910 	fds->fds_free_data_buf = qlt_dmem_free;
911 	fds->fds_fca_private = (void *)qlt;
912 	/*
913 	 * Since we keep everything in the state struct and don't allocate any
914 	 * port private area, just use that pointer to point to the
915 	 * state struct.
916 	 */
917 	port->port_fca_private = qlt;
918 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
919 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
920 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
921 	port->port_default_alias = qlt->qlt_port_alias;
922 	port->port_pp = qlt_pp;
923 	port->port_fds = fds;
924 	port->port_max_logins = QLT_MAX_LOGINS;
925 	port->port_max_xchges = QLT_MAX_XCHGES;
926 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
927 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
928 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
929 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
930 	port->port_get_link_info = qlt_get_link_info;
931 	port->port_register_remote_port = qlt_register_remote_port;
932 	port->port_deregister_remote_port = qlt_deregister_remote_port;
933 	port->port_send_cmd = qlt_send_cmd;
934 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
935 	port->port_send_cmd_response = qlt_send_cmd_response;
936 	port->port_abort_cmd = qlt_abort_cmd;
937 	port->port_ctl = qlt_ctl;
938 	port->port_flogi_xchg = qlt_do_flogi;
939 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
940 
941 	if (fct_register_local_port(port) != FCT_SUCCESS) {
942 		goto qlt_pstart_fail_2_5;
943 	}
944 
945 	return (QLT_SUCCESS);
946 
947 qlt_pstart_fail_3:
948 	(void) fct_deregister_local_port(port);
949 qlt_pstart_fail_2_5:
950 	fct_free(fds);
951 qlt_pstart_fail_2:
952 	fct_free(port);
953 	qlt->qlt_port = NULL;
954 qlt_pstart_fail_1:
955 	qlt_dmem_fini(qlt);
956 	return (QLT_FAILURE);
957 }
958 
959 fct_status_t
960 qlt_port_stop(caddr_t arg)
961 {
962 	qlt_state_t *qlt = (qlt_state_t *)arg;
963 
964 	if (fct_deregister_local_port(qlt->qlt_port) != FCT_SUCCESS)
965 		return (QLT_FAILURE);
966 	fct_free(qlt->qlt_port->port_fds);
967 	fct_free(qlt->qlt_port);
968 	qlt->qlt_port = NULL;
969 	qlt_dmem_fini(qlt);
970 	return (QLT_SUCCESS);
971 }
972 
973 /*
974  * Called by the framework to initialize the HBA.
975  * Can be called in the middle of I/O. (Why ??)
976  * Should ensure a sane state both before and after the initialization.
977  */
978 fct_status_t
979 qlt_port_online(qlt_state_t *qlt)
980 {
981 	uint64_t	da;
982 	int		instance;
983 	fct_status_t	ret;
984 	uint16_t	rcount;
985 	caddr_t		icb;
986 	mbox_cmd_t	*mcp;
987 	uint8_t		*elsbmp;
988 
989 	instance = ddi_get_instance(qlt->dip);
990 
991 	/* XXX Make sure a sane state */
992 
993 	if ((ret = qlt_reset_chip_and_download_fw(qlt, 0)) != QLT_SUCCESS) {
994 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
995 		return (ret);
996 	}
997 
998 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
999 
1000 	/* Get resource count */
1001 	REG_WR16(qlt, REG_MBOX(0), 0x42);
1002 	ret = qlt_raw_mailbox_command(qlt);
1003 	rcount = REG_RD16(qlt, REG_MBOX(3));
1004 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1005 	if (ret != QLT_SUCCESS)
1006 		return (ret);
1007 
1008 	/* Enable PUREX */
1009 	REG_WR16(qlt, REG_MBOX(0), 0x38);
1010 	REG_WR16(qlt, REG_MBOX(1), 0x0400);
1011 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1012 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1013 	ret = qlt_raw_mailbox_command(qlt);
1014 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1015 	if (ret != QLT_SUCCESS) {
1016 		cmn_err(CE_NOTE, "Enable PUREX failed");
1017 		return (ret);
1018 	}
1019 
1020 	/* Pass ELS bitmap to fw */
1021 	REG_WR16(qlt, REG_MBOX(0), 0x59);
1022 	REG_WR16(qlt, REG_MBOX(1), 0x0500);
1023 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1024 	bzero(elsbmp, 32);
1025 	da = qlt->queue_mem_cookie.dmac_laddress;
1026 	da += MBOX_DMA_MEM_OFFSET;
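	/*
	 * Hand the 64-bit DMA address of the bitmap to the firmware in
	 * mailboxes 3, 2, 7 and 6 (lowest 16-bit word to highest).
	 */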
1027 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1028 	da >>= 16;
1029 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1030 	da >>= 16;
1031 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1032 	da >>= 16;
1033 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1034 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1035 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1036 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1037 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1038 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1039 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1040 	SETELSBIT(elsbmp, ELS_OP_SCN);
1041 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1042 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1043 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1044 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1045 	SETELSBIT(elsbmp, ELS_OP_RNID);
1046 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1047 		DDI_DMA_SYNC_FORDEV);
1048 	ret = qlt_raw_mailbox_command(qlt);
1049 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1050 	if (ret != QLT_SUCCESS) {
1051 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1052 		    "elsbmp0=%x elsbmp1=%x", (long long)ret, elsbmp[0],
1053 		    elsbmp[1]);
1054 		return (ret);
1055 	}
1056 
1057 	/* Init queue pointers */
1058 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1059 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1060 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1061 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1062 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1063 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1064 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1065 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1066 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1067 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1068 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1069 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1070 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1071 
1072 	/*
1073 	 * XXX support for tunables. Also should we cache icb ?
1074 	 */
1075 	mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1076 	if (mcp == NULL) {
1077 		return (STMF_ALLOC_FAILURE);
1078 	}
1079 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1080 	bzero(icb, 0x80);
1081 	da = qlt->queue_mem_cookie.dmac_laddress;
1082 	DMEM_WR16(qlt, icb, 1);		/* Version */
1083 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1084 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1085 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1086 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1087 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1088 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1089 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1090 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1091 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1092 	DMEM_WR16(qlt, icb+0x28, 100);	/* ms of NOS/OLS for Link down */
1093 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1094 	DMEM_WR64(qlt, icb+0x2c, da+REQUEST_QUEUE_OFFSET);
1095 	DMEM_WR64(qlt, icb+0x34, da+RESPONSE_QUEUE_OFFSET);
1096 	DMEM_WR64(qlt, icb+0x3c, da+PRIORITY_QUEUE_OFFSET);
1097 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1098 	DMEM_WR64(qlt, icb+0x50, da+ATIO_QUEUE_OFFSET);
1099 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1100 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1101 	DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1102 				BIT_2 | BIT_1 | BIT_0);
1103 	DMEM_WR32(qlt, icb+0x60, BIT_5);
1104 	DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 | BIT_4);
1105 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1106 	mcp->to_fw[0] = 0x60;
1107 
1108 	/*
1109 	 * This is the 1st command after adapter initialization which will
1110 	 * use interrupts and the regular mailbox interface.
1111 	 */
1112 	qlt->mbox_io_state = MBOX_STATE_READY;
1113 	qlt_enable_intr(qlt);
1114 	qlt->qlt_intr_enabled = 1;
1115 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1116 	/* Issue mailbox to firmware */
1117 	ret = qlt_mailbox_command(qlt, mcp);
1118 	if (ret != QLT_SUCCESS) {
1119 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1120 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1121 	}
1122 
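	/* Get firmware options (mailbox command 0x28) */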
1123 	mcp->to_fw_mask = BIT_0;
1124 	mcp->from_fw_mask = BIT_0 | BIT_1;
1125 	mcp->to_fw[0] = 0x28;
1126 	ret = qlt_mailbox_command(qlt, mcp);
1127 	if (ret != QLT_SUCCESS) {
1128 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1129 		    (long long)ret);
1130 	}
1131 
1132 	qlt_free_mailbox_command(qlt, mcp);
1133 	if (ret != QLT_SUCCESS)
1134 		return (ret);
1135 	return (FCT_SUCCESS);
1136 }
1137 
1138 fct_status_t
1139 qlt_port_offline(qlt_state_t *qlt)
1140 {
1141 	int		retries;
1142 
1143 	mutex_enter(&qlt->mbox_lock);
1144 
1145 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1146 		mutex_exit(&qlt->mbox_lock);
1147 		goto poff_mbox_done;
1148 	}
1149 
1150 	/* Wait to grab the mailboxes */
1151 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1152 				retries++) {
1153 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1154 		if ((retries > 5) ||
1155 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1156 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1157 			mutex_exit(&qlt->mbox_lock);
1158 			goto poff_mbox_done;
1159 		}
1160 	}
1161 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1162 	mutex_exit(&qlt->mbox_lock);
1163 poff_mbox_done:;
1164 	qlt->intr_sneak_counter = 10;
1165 	qlt_disable_intr(qlt);
1166 	mutex_enter(&qlt->intr_lock);
1167 	qlt->qlt_intr_enabled = 0;
1168 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
1169 	drv_usecwait(20);
1170 	qlt->intr_sneak_counter = 0;
1171 	mutex_exit(&qlt->intr_lock);
1172 
1173 	return (FCT_SUCCESS);
1174 }
1175 
1176 static fct_status_t
1177 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1178 {
1179 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1180 	mbox_cmd_t *mcp;
1181 	fct_status_t fc_ret;
1182 	fct_status_t ret;
1183 	clock_t et;
1184 
1185 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1186 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1187 link_info_retry:
1188 	mcp->to_fw[0] = 0x20;
1189 	mcp->to_fw_mask |= BIT_0;
1190 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1191 	/* Issue mailbox to firmware */
1192 	ret = qlt_mailbox_command(qlt, mcp);
1193 	if (ret != QLT_SUCCESS) {
1194 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1195 			/* Firmware is not ready */
1196 			if (ddi_get_lbolt() < et) {
1197 				delay(drv_usectohz(50000));
1198 				goto link_info_retry;
1199 			}
1200 		}
1201 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1202 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1203 		fc_ret = FCT_FAILURE;
1204 	} else {
1205 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1206 			(((uint32_t)(mcp->from_fw[3])) << 16);
1207 
1208 		li->port_speed = qlt->link_speed;
1209 		switch (mcp->from_fw[6]) {
1210 		case 1:
1211 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1212 			li->port_fca_flogi_done = 1;
1213 			break;
1214 		case 0:
1215 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1216 			li->port_no_fct_flogi = 1;
1217 			break;
1218 		case 3:
1219 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1220 			li->port_fca_flogi_done = 1;
1221 			break;
1222 		case 2: /*FALLTHROUGH*/
1223 		case 4:
1224 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1225 			li->port_fca_flogi_done = 1;
1226 			break;
1227 		default:
1228 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1229 			QLT_LOG(qlt->qlt_port_alias, "Unknown topology "
1230 			    "reported by fw %x", mcp->from_fw[6]);
1231 		}
1232 		qlt->cur_topology = li->port_topology;
1233 		fc_ret = FCT_SUCCESS;
1234 	}
1235 	qlt_free_mailbox_command(qlt, mcp);
1236 
1237 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1238 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1239 		mcp->to_fw[0] = 0x64;
1240 		mcp->to_fw[1] = 0x7FE;
1241 		mcp->to_fw[10] = 0;
1242 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_10;
1243 		fc_ret = qlt_mailbox_command(qlt, mcp);
1244 		if (fc_ret != QLT_SUCCESS) {
1245 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1246 			    "database for F_port failed, ret = %llx", fc_ret);
1247 		} else {
1248 			uint8_t *p;
1249 
1250 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1251 			p = mcp->dbuf->db_sglist[0].seg_addr;
1252 			bcopy(p + 0x18, li->port_rpwwn, 8);
1253 			bcopy(p + 0x20, li->port_rnwwn, 8);
1254 		}
1255 		qlt_free_mailbox_command(qlt, mcp);
1256 	}
1257 	return (fc_ret);
1258 }
1259 
1260 static int
1261 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1262 {
1263 	int		instance;
1264 	qlt_state_t	*qlt;
1265 
1266 	if (otype != OTYP_CHR) {
1267 		return (EINVAL);
1268 	}
1269 
1270 	/*
1271 	 * Since this is for debugging only, allow only root to issue ioctls for now.
1272 	 */
1273 	if (drv_priv(credp)) {
1274 		return (EPERM);
1275 	}
1276 
1277 	instance = (int)getminor(*devp);
1278 	qlt = ddi_get_soft_state(qlt_state, instance);
1279 	if (qlt == NULL) {
1280 		return (ENXIO);
1281 	}
1282 
1283 	mutex_enter(&qlt->qlt_ioctl_lock);
1284 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1285 		/*
1286 		 * It is already open for exclusive access.
1287 		 * So shut the door on this caller.
1288 		 */
1289 		mutex_exit(&qlt->qlt_ioctl_lock);
1290 		return (EBUSY);
1291 	}
1292 
1293 	if (flag & FEXCL) {
1294 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1295 			/*
1296 			 * Exclusive operation not possible
1297 			 * as it is already opened
1298 			 */
1299 			mutex_exit(&qlt->qlt_ioctl_lock);
1300 			return (EBUSY);
1301 		}
1302 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1303 	}
1304 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1305 	mutex_exit(&qlt->qlt_ioctl_lock);
1306 
1307 	return (0);
1308 }
1309 
1310 /* ARGSUSED */
1311 static int
1312 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1313 {
1314 	int		instance;
1315 	qlt_state_t	*qlt;
1316 
1317 	if (otype != OTYP_CHR) {
1318 		return (EINVAL);
1319 	}
1320 
1321 	instance = (int)getminor(dev);
1322 	qlt = ddi_get_soft_state(qlt_state, instance);
1323 	if (qlt == NULL) {
1324 		return (ENXIO);
1325 	}
1326 
1327 	mutex_enter(&qlt->qlt_ioctl_lock);
1328 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1329 		mutex_exit(&qlt->qlt_ioctl_lock);
1330 		return (ENODEV);
1331 	}
1332 
1333 	/*
1334 	 * There is one hole here: there could be several concurrent shared
1335 	 * open sessions, but we never check for that case.
1336 	 * It will not hurt too much, so disregard it for now.
1337 	 */
1338 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1339 	mutex_exit(&qlt->qlt_ioctl_lock);
1340 
1341 	return (0);
1342 }
1343 
1344 /*
1345  * All of these ioctls are unstable interfaces which are meant to be used
1346  * in a controlled lab environment. No formal testing will be (or needs to
1347  * be) done for these ioctls. In particular, note that running with an
1348  * additional uploaded firmware is not supported and is provided here for
1349  * test purposes only.
1350  */
1351 /* ARGSUSED */
1352 static int
1353 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1354     cred_t *credp, int *rval)
1355 {
1356 	qlt_state_t	*qlt;
1357 	int		ret = 0;
1358 #ifdef _LITTLE_ENDIAN
1359 	int		i;
1360 #endif
1361 	stmf_iocdata_t	*iocd;
1362 	void		*ibuf = NULL;
1363 	void		*obuf = NULL;
1364 	uint32_t	*intp;
1365 	qlt_fw_info_t	*fwi;
1366 	mbox_cmd_t	*mcp;
1367 	fct_status_t	st;
1368 	char		info[80];
1369 
1370 	if (drv_priv(credp) != 0)
1371 		return (EPERM);
1372 
1373 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1374 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1375 	if (ret)
1376 		return (ret);
1377 	iocd->stmf_error = 0;
1378 
1379 	switch (cmd) {
1380 	case QLT_IOCTL_FETCH_FWDUMP:
1381 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1382 			ret = EINVAL;
1383 			break;
1384 		}
1385 		mutex_enter(&qlt->qlt_ioctl_lock);
1386 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1387 			mutex_exit(&qlt->qlt_ioctl_lock);
1388 			ret = ENODATA;
1389 			iocd->stmf_error = QLTIO_NO_DUMP;
1390 			break;
1391 		}
1392 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1393 			mutex_exit(&qlt->qlt_ioctl_lock);
1394 			ret = EBUSY;
1395 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1396 			break;
1397 		}
1398 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1399 			mutex_exit(&qlt->qlt_ioctl_lock);
1400 			ret = EEXIST;
1401 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1402 			break;
1403 		}
1404 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1405 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1406 		mutex_exit(&qlt->qlt_ioctl_lock);
1407 
1408 		break;
1409 
1410 	case QLT_IOCTL_TRIGGER_FWDUMP:
1411 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1412 			ret = EACCES;
1413 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1414 			break;
1415 		}
1416 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1417 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1418 		info[79] = 0;
1419 		if (fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_USER_REQUEST |
1420 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP,
1421 		    info) != FCT_SUCCESS) {
1422 			ret = EIO;
1423 		}
1424 		break;
1425 	case QLT_IOCTL_UPLOAD_FW:
1426 		if ((iocd->stmf_ibuf_size < 1024) ||
1427 		    (iocd->stmf_ibuf_size & 3)) {
1428 			ret = EINVAL;
1429 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1430 			break;
1431 		}
1432 		intp = (uint32_t *)ibuf;
1433 #ifdef _LITTLE_ENDIAN
1434 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1435 			intp[i] = BSWAP_32(intp[i]);
1436 		}
1437 #endif
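		/*
		 * Sanity-check the image layout: intp[2]/intp[3] appear to be
		 * the load address and word count of the first segment, with
		 * the second segment header following it (see the fw_code02
		 * setup below).
		 */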
1438 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1439 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1440 		    iocd->stmf_ibuf_size)) {
1441 			ret = EINVAL;
1442 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1443 			break;
1444 		}
1445 		if ((qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1446 		    (!qlt->qlt_25xx_chip && ((intp[8] & 3) == 0))) {
1447 			ret = EACCES;
1448 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1449 			break;
1450 		}
1451 
1452 		/* Everything looks ok, lets copy this firmware */
1453 		if (qlt->fw_code01) {
1454 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1455 			    qlt->fw_length02) << 2);
1456 			qlt->fw_code01 = NULL;
1457 		} else {
1458 			atomic_add_32(&qlt_loaded_counter, 1);
1459 		}
1460 		qlt->fw_length01 = intp[3];
1461 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1462 								KM_SLEEP);
1463 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1464 		qlt->fw_addr01 = intp[2];
1465 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1466 		qlt->fw_addr02 = qlt->fw_code02[2];
1467 		qlt->fw_length02 = qlt->fw_code02[3];
1468 		break;
1469 
1470 	case QLT_IOCTL_CLEAR_FW:
1471 		if (qlt->fw_code01) {
1472 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1473 			    qlt->fw_length02) << 2);
1474 			qlt->fw_code01 = NULL;
1475 			atomic_add_32(&qlt_loaded_counter, -1);
1476 		}
1477 		break;
1478 
1479 	case QLT_IOCTL_GET_FW_INFO:
1480 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1481 			ret = EINVAL;
1482 			break;
1483 		}
1484 		fwi = (qlt_fw_info_t *)obuf;
1485 		if (qlt->qlt_stay_offline) {
1486 			fwi->fwi_stay_offline = 1;
1487 		}
1488 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1489 			fwi->fwi_port_active = 1;
1490 		}
1491 		fwi->fwi_active_major = qlt->fw_major;
1492 		fwi->fwi_active_minor = qlt->fw_minor;
1493 		fwi->fwi_active_subminor = qlt->fw_subminor;
1494 		fwi->fwi_active_attr = qlt->fw_attr;
1495 		if (qlt->fw_code01) {
1496 			fwi->fwi_fw_uploaded = 1;
1497 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1498 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1499 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1500 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1501 		}
1502 		if (qlt->qlt_25xx_chip) {
1503 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1504 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1505 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1506 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1507 		} else {
1508 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1509 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1510 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1511 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1512 		}
1513 		break;
1514 
1515 	case QLT_IOCTL_STAY_OFFLINE:
1516 		if (!iocd->stmf_ibuf_size) {
1517 			ret = EINVAL;
1518 			break;
1519 		}
1520 		if (*((char *)ibuf)) {
1521 			qlt->qlt_stay_offline = 1;
1522 		} else {
1523 			qlt->qlt_stay_offline = 0;
1524 		}
1525 		break;
1526 
1527 	case QLT_IOCTL_MBOX:
1528 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1529 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1530 			ret = EINVAL;
1531 			break;
1532 		}
1533 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1534 		if (mcp == NULL) {
1535 			ret = ENOMEM;
1536 			break;
1537 		}
1538 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1539 		st = qlt_mailbox_command(qlt, mcp);
1540 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1541 		qlt_free_mailbox_command(qlt, mcp);
1542 		if (st != QLT_SUCCESS) {
1543 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1544 				st = QLT_SUCCESS;
1545 		}
1546 		if (st != QLT_SUCCESS) {
1547 			ret = EIO;
1548 			switch (st) {
1549 			case QLT_MBOX_NOT_INITIALIZED:
1550 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1551 				break;
1552 			case QLT_MBOX_BUSY:
1553 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1554 				break;
1555 			case QLT_MBOX_TIMEOUT:
1556 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1557 				break;
1558 			case QLT_MBOX_ABORTED:
1559 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1560 				break;
1561 			}
1562 		}
1563 		break;
1564 
1565 	default:
1566 		QLT_LOG(qlt->qlt_port_alias, "qlt_ioctl: ioctl-0x%02X", cmd);
1567 		ret = ENOTTY;
1568 	}
1569 
1570 	if (ret == 0) {
1571 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1572 	} else if (iocd->stmf_error) {
1573 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1574 	}
1575 	if (obuf) {
1576 		kmem_free(obuf, iocd->stmf_obuf_size);
1577 		obuf = NULL;
1578 	}
1579 	if (ibuf) {
1580 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1581 		ibuf = NULL;
1582 	}
1583 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1584 	return (ret);
1585 }
1586 
1587 static void
1588 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1589 {
1590 	stmf_change_status_t		st;
1591 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1592 	qlt_state_t			*qlt;
1593 
1594 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1595 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1596 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1597 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1598 
1599 	qlt = (qlt_state_t *)port->port_fca_private;
1600 	st.st_completion_status = FCT_SUCCESS;
1601 	st.st_additional_info = NULL;
1602 
1603 	switch (cmd) {
1604 	case FCT_CMD_PORT_ONLINE:
1605 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1606 			st.st_completion_status = STMF_ALREADY;
1607 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1608 			st.st_completion_status = FCT_FAILURE;
1609 		if (st.st_completion_status == FCT_SUCCESS) {
1610 			qlt->qlt_state = FCT_STATE_ONLINING;
1611 			qlt->qlt_state_not_acked = 1;
1612 			st.st_completion_status = qlt_port_online(qlt);
1613 			if (st.st_completion_status != STMF_SUCCESS) {
1614 				qlt->qlt_state = FCT_STATE_OFFLINE;
1615 				qlt->qlt_state_not_acked = 0;
1616 			} else {
1617 				qlt->qlt_state = FCT_STATE_ONLINE;
1618 			}
1619 		}
1620 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
1621 		qlt->qlt_change_state_flags = 0;
1622 		break;
1623 
1624 	case FCT_CMD_PORT_OFFLINE:
1625 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
1626 			st.st_completion_status = STMF_ALREADY;
1627 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
1628 			st.st_completion_status = FCT_FAILURE;
1629 		}
1630 		if (st.st_completion_status == FCT_SUCCESS) {
1631 			qlt->qlt_state = FCT_STATE_OFFLINING;
1632 			qlt->qlt_state_not_acked = 1;
1633 
1634 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
1635 				(void) qlt_firmware_dump(port, ssci);
1636 			}
1637 			qlt->qlt_change_state_flags = ssci->st_rflags;
1638 			st.st_completion_status = qlt_port_offline(qlt);
1639 			if (st.st_completion_status != STMF_SUCCESS) {
1640 				qlt->qlt_state = FCT_STATE_ONLINE;
1641 				qlt->qlt_state_not_acked = 0;
1642 			} else {
1643 				qlt->qlt_state = FCT_STATE_OFFLINE;
1644 			}
1645 		}
1646 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
1647 		break;
1648 
1649 	case FCT_ACK_PORT_ONLINE_COMPLETE:
1650 		qlt->qlt_state_not_acked = 0;
1651 		break;
1652 
1653 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
1654 		qlt->qlt_state_not_acked = 0;
1655 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
1656 		    (qlt->qlt_stay_offline == 0)) {
1657 			if (fct_port_initialize(port,
1658 			    qlt->qlt_change_state_flags,
1659 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
1660 			    "with RLFLAG_RESET") != FCT_SUCCESS) {
1661 				cmn_err(CE_WARN, "qlt_ctl: "
1662 				    "fct_port_initialize failed, please use "
1663 				    "stmfstate to start the port-%s manually",
1664 				    qlt->qlt_port_alias);
1665 			}
1666 		}
1667 		break;
1668 	}
1669 }
1670 
1671 /* ARGSUSED */
1672 static fct_status_t
1673 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
1674 {
1675 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
1676 	return (FCT_FAILURE);
1677 }
1678 
1679 /*
1680  * Return a pointer to n entries in the request queue. Assumes that the
1681  * request queue lock is held. Does a very short busy wait if fewer than
1682  * the requested entries are available. Returns NULL if it still cannot
1683  * fulfill the request.
1684  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
1685  */
1686 caddr_t
1687 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
1688 {
1689 	int try = 0;
1690 
1691 	while (qlt->req_available < n) {
1692 		uint32_t val1, val2, val3;
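		/*
		 * Read the out-pointer three times and require all reads to
		 * agree, to avoid acting on a transiently inconsistent value
		 * while the firmware updates it.
		 */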
1693 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1694 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1695 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1696 		if ((val1 != val2) || (val2 != val3))
1697 			continue;
1698 
1699 		qlt->req_ndx_from_fw = val1;
1700 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
1701 			((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
1702 			    (REQUEST_QUEUE_ENTRIES - 1));
1703 		if (qlt->req_available < n) {
1704 			if (try < 2) {
1705 				drv_usecwait(100);
1706 				try++;
1707 				continue;
1708 			} else {
1709 				stmf_trace(qlt->qlt_port_alias,
1710 				    "Req Q is full");
1711 				return (NULL);
1712 			}
1713 		}
1714 		break;
1715 	}
1716 	/* We don't change anything until the entries are submitted */
1717 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
1718 }
1719 
1720 /*
1721  * Updates the req in-pointer to the fw. Assumes that the req lock is held.
1722  */
1723 void
1724 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
1725 {
1726 	ASSERT(n >= 1);
1727 	qlt->req_ndx_to_fw += n;
1728 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
1729 	qlt->req_available -= n;
1730 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
1731 }
1732 
1733 
1734 /*
1735  * Return a pointer to n entries in the priority request queue. Assumes that
1736  * the priority request queue lock is held. Does a very short busy wait if
1737  * fewer than the requested entries are available. Returns NULL if it still
1738  * cannot fulfill the request.
1739  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
1740  */
1741 caddr_t
1742 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
1743 {
1744 	int try = 0;
1745 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
1746 		((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
1747 		    (PRIORITY_QUEUE_ENTRIES - 1));
1748 
1749 	while (req_available < n) {
1750 		uint32_t val1, val2, val3;
1751 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1752 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1753 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
1754 		if ((val1 != val2) || (val2 != val3))
1755 			continue;
1756 
1757 		qlt->preq_ndx_from_fw = val1;
1758 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
1759 			((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
1760 			(PRIORITY_QUEUE_ENTRIES - 1));
1761 		if (req_available < n) {
1762 			if (try < 2) {
1763 				drv_usecwait(100);
1764 				try++;
1765 				continue;
1766 			} else {
1767 				return (NULL);
1768 			}
1769 		}
1770 		break;
1771 	}
1772 	/* We don't change anything until the entries are submitted */
1773 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
1774 }
1775 
1776 /*
1777  * Updates the priority request-in pointer to the firmware. Assumes the preq lock is held.
1778  */
1779 void
1780 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
1781 {
1782 	ASSERT(n >= 1);
1783 	qlt->preq_ndx_to_fw += n;
1784 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
1785 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
1786 }
1787 
1788 /*
1789  * - Should not be called from Interrupt.
1790  * - A very hardware specific function. Does not touch driver state.
1791  * - Assumes that interrupts are disabled or not there.
1792  * - Expects that the caller makes sure that all activity has stopped
1793  *   and it's OK now to go ahead and reset the chip. Also the caller
1794  *   takes care of post-reset damage control.
1795  * - Called by initialize_adapter() and dump_fw() (for reset only).
1796  * - During attach() nothing much is happening and during initialize_adapter()
1797  *   the function (caller) does all the housekeeping so that this function
1798  *   can execute in peace.
1799  * - Returns 0 on success.
1800  */
1801 static fct_status_t
1802 qlt_reset_chip_and_download_fw(qlt_state_t *qlt, int reset_only)
1803 {
1804 	int cntr;
1805 	uint32_t start_addr;
1806 	fct_status_t ret;
1807 
1808 	/* XXX: Switch off LEDs */
1809 
1810 	/* Disable Interrupts */
1811 	REG_WR32(qlt, REG_INTR_CTRL, 0);
1812 	(void) REG_RD32(qlt, REG_INTR_CTRL);
1813 	/* Stop DMA */
1814 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
1815 
1816 	/* Wait for DMA to be stopped */
1817 	cntr = 0;
1818 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
1819 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
1820 		cntr++;
1821 		/* 3 sec should be more than enough */
1822 		if (cntr == 300)
1823 			return (QLT_DMA_STUCK);
1824 	}
1825 
1826 	/* Reset the Chip */
1827 	REG_WR32(qlt, REG_CTRL_STATUS,
1828 		DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
1829 
1830 	qlt->qlt_link_up = 0;
1831 
1832 	drv_usecwait(100);
1833 
1834 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
1835 	cntr = 0;
1836 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
1837 		delay(drv_usectohz(10000));
1838 		cntr++;
1839 		/* 3 sec should be more than enough */
1840 		if (cntr == 300)
1841 			return (QLT_ROM_STUCK);
1842 	}
1843 	/* Disable Interrupts (Probably not needed) */
1844 	REG_WR32(qlt, REG_INTR_CTRL, 0);
1845 	if (reset_only)
1846 		return (QLT_SUCCESS);
1847 
1848 	/* Load the two segments */
1849 	if (qlt->fw_code01 != NULL) {
1850 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
1851 						qlt->fw_addr01);
1852 		if (ret == QLT_SUCCESS) {
1853 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
1854 			    qlt->fw_length02, qlt->fw_addr02);
1855 		}
1856 		start_addr = qlt->fw_addr01;
1857 	} else if (qlt->qlt_25xx_chip) {
1858 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
1859 						fw2500_addr01);
1860 		if (ret == QLT_SUCCESS) {
1861 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
1862 					fw2500_length02, fw2500_addr02);
1863 		}
1864 		start_addr = fw2500_addr01;
1865 	} else {
1866 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
1867 						fw2400_addr01);
1868 		if (ret == QLT_SUCCESS) {
1869 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
1870 					fw2400_length02, fw2400_addr02);
1871 		}
1872 		start_addr = fw2400_addr01;
1873 	}
1874 	if (ret != QLT_SUCCESS)
1875 		return (ret);
1876 
1877 	/* Verify Checksum */
1878 	REG_WR16(qlt, REG_MBOX(0), 7);
1879 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
1880 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
1881 	ret = qlt_raw_mailbox_command(qlt);
1882 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1883 	if (ret != QLT_SUCCESS)
1884 		return (ret);
1885 
1886 	/* Execute firmware */
1887 	REG_WR16(qlt, REG_MBOX(0), 2);
1888 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
1889 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
1890 	REG_WR16(qlt, REG_MBOX(3), 0);
1891 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
1892 	ret = qlt_raw_mailbox_command(qlt);
1893 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1894 	if (ret != QLT_SUCCESS)
1895 		return (ret);
1896 
1897 	/* Get revisions (About Firmware) */
1898 	REG_WR16(qlt, REG_MBOX(0), 8);
1899 	ret = qlt_raw_mailbox_command(qlt);
1900 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
1901 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
1902 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
1903 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
1904 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
1905 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
1906 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1907 	if (ret != QLT_SUCCESS)
1908 		return (ret);
1909 
1910 	return (QLT_SUCCESS);
1911 }
1912 
1913 /*
1914  * Used only from qlt_reset_chip_and_download_fw().
1915  */
1916 static fct_status_t
1917 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
1918 				uint32_t word_count, uint32_t risc_addr)
1919 {
1920 	uint32_t words_sent = 0;
1921 	uint32_t words_being_sent;
1922 	uint32_t *cur_host_addr;
1923 	uint32_t cur_risc_addr;
1924 	uint64_t da;
1925 	fct_status_t ret;
1926 
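	/*
	 * The firmware image is copied into the driver's queue DMA buffer
	 * in chunks of at most TOTAL_DMA_MEM_SIZE bytes and handed to the
	 * chip via mailbox command 0x0B (presumably LOAD RISC RAM EXTENDED
	 * in QLogic terms). The 64-bit DMA address is split across
	 * mailboxes 3, 2, 7 and 6 (lowest 16 bits first), the destination
	 * RISC address across mailboxes 1 (low) and 8 (high), and the
	 * word count across mailboxes 5 (low) and 4 (high).
	 */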
1927 	while (words_sent < word_count) {
1928 		cur_host_addr = &(host_addr[words_sent]);
1929 		cur_risc_addr = risc_addr + (words_sent << 2);
1930 		words_being_sent = min(word_count - words_sent,
1931 			TOTAL_DMA_MEM_SIZE >> 2);
1932 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
1933 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
1934 		    DDI_DEV_AUTOINCR);
1935 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
1936 				words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
1937 		da = qlt->queue_mem_cookie.dmac_laddress;
1938 		REG_WR16(qlt, REG_MBOX(0), 0x0B);
1939 		REG_WR16(qlt, REG_MBOX(1), risc_addr & 0xffff);
1940 		REG_WR16(qlt, REG_MBOX(8), ((cur_risc_addr >> 16) & 0xffff));
1941 		REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1942 		da >>= 16;
1943 		REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1944 		da >>= 16;
1945 		REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1946 		da >>= 16;
1947 		REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1948 		REG_WR16(qlt, REG_MBOX(5), words_being_sent & 0xffff);
1949 		REG_WR16(qlt, REG_MBOX(4), (words_being_sent >> 16) & 0xffff);
1950 		ret = qlt_raw_mailbox_command(qlt);
1951 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1952 		if (ret != QLT_SUCCESS)
1953 			return (ret);
1954 		words_sent += words_being_sent;
1955 	}
1956 	return (QLT_SUCCESS);
1957 }
1958 
1959 /*
1960  * Not used during normal operation. Only during driver init.
1961  * Assumes that interrupts are disabled and mailboxes are loaded.
1962  * Just triggers the mailbox command and waits for the completion.
1963  * Also expects that there is nothing else going on and we will only
1964  * get back a mailbox completion from firmware.
1965  * ---DOES NOT CLEAR INTERRUPT---
1966  * Used only from the code path originating from
1967  * qlt_reset_chip_and_download_fw()
1968  */
1969 static fct_status_t
1970 qlt_raw_mailbox_command(qlt_state_t *qlt)
1971 {
1972 	int cntr = 0;
1973 	uint32_t status;
1974 
1975 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
1976 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_INTR_REQUEST) == 0) {
1977 		cntr++;
1978 		if (cntr == 100)
1979 			return (QLT_MAILBOX_STUCK);
1980 		delay(drv_usectohz(10000));
1981 	}
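	/*
	 * The low byte of the RISC status register identifies the
	 * interrupt source. Values 1, 2, 0x10 and 0x11 are the mailbox
	 * completion types (see the dispatch in qlt_isr()); anything else
	 * here is unexpected since only a mailbox command is outstanding
	 * at this point.
	 */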
1982 	status = (REG_RD32(qlt, REG_RISC_STATUS) & 0xff);
1983 	if ((status == 1) || (status == 2) ||
1984 	    (status == 0x10) || (status == 0x11)) {
1985 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
1986 		if (mbox0 == 0x4000)
1987 			return (QLT_SUCCESS);
1988 		else
1989 			return (QLT_MBOX_FAILED | mbox0);
1990 	}
1991 	/* This is unexpected, dump a message */
1992 	cmn_err(CE_WARN, "qlt(%d): Unexpected intr status %llx",
1993 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
1994 	return (QLT_UNEXPECTED_RESPONSE);
1995 }
1996 
1997 static mbox_cmd_t *
1998 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
1999 {
2000 	mbox_cmd_t *mcp;
2001 
2002 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2003 	if (dma_size) {
2004 		qlt_dmem_bctl_t *bctl;
2005 		uint64_t da;
2006 
2007 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2008 		if (mcp->dbuf == NULL) {
2009 			kmem_free(mcp, sizeof (*mcp));
2010 			return (NULL);
2011 		}
2012 		mcp->dbuf->db_data_size = dma_size;
2013 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2014 
2015 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2016 		da = bctl->bctl_dev_addr;
2017 		/* This is the most common initialization of dma ptrs */
2018 		mcp->to_fw[3] = da & 0xffff;
2019 		da >>= 16;
2020 		mcp->to_fw[2] = da & 0xffff;
2021 		da >>= 16;
2022 		mcp->to_fw[7] = da & 0xffff;
2023 		da >>= 16;
2024 		mcp->to_fw[6] = da & 0xffff;
2025 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2026 	}
2027 	mcp->to_fw_mask |= BIT_0;
2028 	mcp->from_fw_mask |= BIT_0;
2029 	return (mcp);
2030 }
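
/*
 * Typical mailbox command sequence (a sketch based on callers such as
 * qlt_portid_to_handle()):
 *
 *	mcp = qlt_alloc_mailbox_command(qlt, dma_size);
 *	mcp->to_fw[0] = <command code>;
 *	mcp->to_fw_mask |= <extra outgoing mailboxes>;
 *	mcp->from_fw_mask |= <mailboxes to read back>;
 *	ret = qlt_mailbox_command(qlt, mcp);
 *	... inspect mcp->from_fw[] and/or mcp->dbuf ...
 *	qlt_free_mailbox_command(qlt, mcp);
 */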
2031 
2032 void
2033 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2034 {
2035 	if (mcp->dbuf)
2036 		qlt_i_dmem_free(qlt, mcp->dbuf);
2037 	kmem_free(mcp, sizeof (*mcp));
2038 }
2039 
2040 /*
2041  * This can sleep. Should never be called from interrupt context.
2042  */
2043 static fct_status_t
2044 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2045 {
2046 	int	retries;
2047 	int	i;
2048 	char	info[80];
2049 
2050 	if (curthread->t_flag & T_INTR_THREAD) {
2051 		ASSERT(0);
2052 		return (QLT_MBOX_FAILED);
2053 	}
2054 
2055 	mutex_enter(&qlt->mbox_lock);
2056 	/* See if mailboxes are still uninitialized */
2057 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2058 		mutex_exit(&qlt->mbox_lock);
2059 		return (QLT_MBOX_NOT_INITIALIZED);
2060 	}
2061 
2062 	/* Wait to grab the mailboxes */
2063 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2064 				retries++) {
2065 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2066 		if ((retries > 5) ||
2067 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2068 			mutex_exit(&qlt->mbox_lock);
2069 			return (QLT_MBOX_BUSY);
2070 		}
2071 	}
2072 	/* Make sure we always ask for mailbox 0 */
2073 	mcp->from_fw_mask |= BIT_0;
2074 
2075 	/* Load mailboxes, set state and generate RISC interrupt */
2076 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2077 	qlt->mcp = mcp;
2078 	for (i = 0; i < MAX_MBOXES; i++) {
2079 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2080 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2081 	}
2082 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
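	/*
	 * From here on the interrupt handler completes the command:
	 * qlt_isr() copies the requested mailboxes into mcp->from_fw[],
	 * moves mbox_io_state to MBOX_STATE_CMD_DONE and broadcasts
	 * mbox_cv, which wakes the cv_timedwait() below.
	 */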
2083 
2084 qlt_mbox_wait_loop:;
2085 	/* Wait for mailbox command completion */
2086 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2087 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2088 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2089 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2090 		info[79] = 0;
2091 		qlt->mcp = NULL;
2092 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2093 		mutex_exit(&qlt->mbox_lock);
2094 
2095 		/*
2096 		 * XXX Throw HBA fatal error event
2097 		 */
2098 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2099 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2100 		return (QLT_MBOX_TIMEOUT);
2101 	}
2102 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2103 		goto qlt_mbox_wait_loop;
2104 
2105 	qlt->mcp = NULL;
2106 
2107 	/* Make sure it's a completion */
2108 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2109 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2110 		mutex_exit(&qlt->mbox_lock);
2111 		return (QLT_MBOX_ABORTED);
2112 	}
2113 
2114 	/* Mailbox command completed. Clear state, return based on mbox 0 */
2115 	/* Mailboxes are already loaded by interrupt routine */
2116 	qlt->mbox_io_state = MBOX_STATE_READY;
2117 	mutex_exit(&qlt->mbox_lock);
2118 	if (mcp->from_fw[0] != 0x4000)
2119 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2120 
2121 	return (QLT_SUCCESS);
2122 }
2123 
2124 /*
2125  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2126  */
2127 /* ARGSUSED */
2128 static uint_t
2129 qlt_isr(caddr_t arg, caddr_t arg2)
2130 {
2131 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2132 	int		instance;
2133 	uint32_t	risc_status, intr_type;
2134 	int		i;
2135 	int		intr_loop_count;
2136 	char		info[80];
2137 
2138 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2139 	if (!mutex_tryenter(&qlt->intr_lock)) {
2140 		/*
2141 		 * Normally we will always get this lock. If tryenter is
2142 		 * failing then it means that the driver is trying to do
2143 		 * some cleanup and is masking the intr but some intr
2144 		 * has sneaked in between. See if our device has generated
2145 		 * this intr. If so then wait a bit and return claimed.
2146 		 * If not then return claimed if this is the 1st instance
2147 		 * of an interrupt after the driver has grabbed the lock.
2148 		 */
2149 		if (risc_status & BIT_15) {
2150 			drv_usecwait(10);
2151 			return (DDI_INTR_CLAIMED);
2152 		} else if (qlt->intr_sneak_counter) {
2153 			qlt->intr_sneak_counter--;
2154 			return (DDI_INTR_CLAIMED);
2155 		} else {
2156 			return (DDI_INTR_UNCLAIMED);
2157 		}
2158 	}
2159 	if (((risc_status & BIT_15) == 0) ||
2160 	    (qlt->qlt_intr_enabled == 0)) {
2161 		/*
2162 		 * This might be a pure coincidence that we are operating
2163 		 * in an interrupt-disabled mode and another device
2164 		 * sharing the interrupt line has generated an interrupt
2165 		 * while an interrupt from our device might be pending. Just
2166 		 * ignore it and let the code handling the interrupt
2167 		 * disabled mode handle it.
2168 		 */
2169 		mutex_exit(&qlt->intr_lock);
2170 		return (DDI_INTR_UNCLAIMED);
2171 	}
2172 
2173 	/*
2174 	 * XXX take care of the MSI case; disable intrs.
2175 	 * It's going to be complicated because of the max iterations,
2176 	 * as the HBA will have posted the intr which did not go out on PCI,
2177 	 * but we did not service it either because of the max iterations.
2178 	 * Maybe offload the intr to a different thread.
2179 	 */
2180 	instance = ddi_get_instance(qlt->dip);
2181 	intr_loop_count = 0;
2182 
2183 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2184 
2185 intr_again:;
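	/*
	 * The low byte of the RISC status register selects the handler:
	 * 0x1C = ATIO queue update, 0x13 = response queue update,
	 * 0x1D = both queues, 0x12 = asynchronous event (mailboxes hold
	 * the details), 0x10/0x11 = mailbox command completion.
	 */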
2186 	/* First check for high performance path */
2187 	intr_type = risc_status & 0xff;
2188 	if (intr_type == 0x1C) {
2189 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2190 		qlt->atio_ndx_from_fw = risc_status >> 16;
2191 		qlt_handle_atio_queue_update(qlt);
2192 	} else if (intr_type == 0x13) {
2193 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2194 		qlt->resp_ndx_from_fw = risc_status >> 16;
2195 		qlt_handle_resp_queue_update(qlt);
2196 		/* XXX what about priority queue */
2197 	} else if (intr_type == 0x1D) {
2198 		qlt->atio_ndx_from_fw = REG_RD32(qlt, REG_ATIO_IN_PTR);
2199 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2200 		qlt->resp_ndx_from_fw = risc_status >> 16;
2201 		qlt_handle_atio_queue_update(qlt);
2202 		qlt_handle_resp_queue_update(qlt);
2203 	} else if (intr_type == 0x12) {
2204 		uint16_t code = risc_status >> 16;
2205 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2206 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2207 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2208 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2209 
2210 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2211 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2212 		    " mb5=%x, mb6=%x", code, mbox1, mbox2, mbox5, mbox6);
2213 		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
2214 		    " mb5=%x, mb6=%x", instance, code, mbox1, mbox2, mbox5,
2215 		    mbox6);
2216 
2217 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2218 			if (qlt->qlt_link_up) {
2219 				fct_handle_event(qlt->qlt_port,
2220 				    FCT_EVENT_LINK_RESET, 0, 0);
2221 			}
2222 		} else if (code == 0x8012) {
2223 			qlt->qlt_link_up = 0;
2224 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2225 						0, 0);
2226 		} else if (code == 0x8011) {
2227 			switch (mbox1) {
2228 			case 0: qlt->link_speed = PORT_SPEED_1G;
2229 				break;
2230 			case 1: qlt->link_speed = PORT_SPEED_2G;
2231 				break;
2232 			case 3: qlt->link_speed = PORT_SPEED_4G;
2233 				break;
2234 			case 4: qlt->link_speed = PORT_SPEED_8G;
2235 				break;
2236 			default:
2237 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2238 			}
2239 			qlt->qlt_link_up = 1;
2240 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2241 						0, 0);
2242 		} else if (code == 0x8002) {
2243 			(void) snprintf(info, 80,
2244 			    "Got 8002, mb1=%x mb2=%x mb5=%x mb6=%x",
2245 			    mbox1, mbox2, mbox5, mbox6);
2246 			info[79] = 0;
2247 			(void) fct_port_shutdown(qlt->qlt_port,
2248 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2249 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2250 		}
2251 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2252 		/* Handle mailbox completion */
2253 		mutex_enter(&qlt->mbox_lock);
2254 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2255 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2256 			    " when driver wasn't waiting for it %d",
2257 				instance, qlt->mbox_io_state);
2258 		} else {
2259 			for (i = 0; i < MAX_MBOXES; i++) {
2260 				if (qlt->mcp->from_fw_mask &
2261 				    (((uint32_t)1) << i)) {
2262 					qlt->mcp->from_fw[i] =
2263 						REG_RD16(qlt, REG_MBOX(i));
2264 				}
2265 			}
2266 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2267 		}
2268 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2269 		cv_broadcast(&qlt->mbox_cv);
2270 		mutex_exit(&qlt->mbox_lock);
2271 	} else {
2272 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2273 		    instance, intr_type);
2274 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2275 	}
2276 
2277 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2278 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2279 	if ((risc_status & BIT_15) &&
2280 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2281 		goto intr_again;
2282 	}
2283 
2284 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2285 
2286 	mutex_exit(&qlt->intr_lock);
2287 	return (DDI_INTR_CLAIMED);
2288 }
2289 
2290 /* **************** NVRAM Functions ********************** */
2291 
2292 fct_status_t
2293 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2294 {
2295 	uint32_t	timer;
2296 
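	/*
	 * Flash read protocol: clear any stale FLASH_ERROR indication,
	 * write the flash address with BIT_31 clear to start the read,
	 * poll until the chip sets BIT_31 to signal completion, then
	 * pick up the word from the flash data register.
	 */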
2297 	/* Clear access error flag */
2298 	REG_WR32(qlt, REG_CTRL_STATUS,
2299 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2300 
2301 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2302 
2303 	/* Wait for READ cycle to complete. */
2304 	for (timer = 3000; timer; timer--) {
2305 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2306 			break;
2307 		}
2308 		drv_usecwait(10);
2309 	}
2310 	if (timer == 0) {
2311 		return (QLT_FLASH_TIMEOUT);
2312 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2313 		return (QLT_FLASH_ACCESS_ERROR);
2314 	}
2315 
2316 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2317 
2318 	return (QLT_SUCCESS);
2319 }
2320 
2321 fct_status_t
2322 qlt_read_nvram(qlt_state_t *qlt)
2323 {
2324 	uint32_t		index, addr, chksum;
2325 	uint32_t		val, *ptr;
2326 	fct_status_t		ret;
2327 	qlt_nvram_t		*nv;
2328 	uint64_t		empty_node_name = 0;
2329 
2330 	if (qlt->qlt_25xx_chip) {
2331 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2332 			QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2333 	} else {
2334 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2335 				NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2336 	}
2337 	mutex_enter(&qlt_global_lock);
2338 
2339 	/* Pause RISC. */
2340 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
2341 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2342 
2343 	/* Get NVRAM data and calculate checksum. */
2344 	ptr = (uint32_t *)qlt->nvram;
2345 	chksum = 0;
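	/*
	 * A valid NVRAM image sums to zero when all of its 32-bit words
	 * (including the checksum word) are added together; the header
	 * must also start with "ISP ". That is what the sanity check
	 * further below verifies.
	 */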
2346 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2347 		ret = qlt_read_flash_word(qlt, addr++, &val);
2348 		if (ret != QLT_SUCCESS) {
2349 			mutex_exit(&qlt_global_lock);
2350 			return (ret);
2351 		}
2352 		chksum += val;
2353 		*ptr = LE_32(val);
2354 		ptr++;
2355 	}
2356 
2357 	/* Release RISC Pause */
2358 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_PAUSE);
2359 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2360 
2361 	mutex_exit(&qlt_global_lock);
2362 
2363 	/* Sanity check NVRAM Data */
2364 	nv = qlt->nvram;
2365 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2366 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2367 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2368 		return (QLT_BAD_NVRAM_DATA);
2369 	}
2370 
2371 	/* If node name is zero, hand craft it from port name */
2372 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2373 		bcopy(nv->port_name, nv->node_name, 8);
2374 		nv->node_name[0] = nv->node_name[0] & ~BIT_0;
2375 		nv->port_name[0] = nv->node_name[0] | BIT_0;
2376 	}
2377 
2378 	return (QLT_SUCCESS);
2379 }
2380 
2381 uint32_t
2382 qlt_sync_atio_queue(qlt_state_t *qlt)
2383 {
2384 	uint32_t total_ent;
2385 
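	/*
	 * Sync only the portion of the ATIO ring that the firmware has
	 * produced since we last looked. If the producer index has
	 * wrapped past the end of the ring, two dma syncs are needed:
	 * one for the tail of the ring and one for the beginning.
	 */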
2386 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2387 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2388 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2389 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2390 		    DDI_DMA_SYNC_FORCPU);
2391 	} else {
2392 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2393 			qlt->atio_ndx_from_fw;
2394 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2395 		    + (qlt->atio_ndx_to_fw << 6), (ATIO_QUEUE_ENTRIES -
2396 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2397 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2398 		    ATIO_QUEUE_OFFSET,
2399 		    qlt->atio_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2400 	}
2401 	return (total_ent);
2402 }
2403 
2404 void
2405 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2406 {
2407 	uint32_t total_ent;
2408 
2409 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2410 		return;
2411 
2412 	total_ent = qlt_sync_atio_queue(qlt);
2413 
2414 	do {
2415 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2416 					qlt->atio_ndx_to_fw << 6];
2417 		uint32_t ent_cnt;
2418 
2419 		ent_cnt = (uint32_t)(atio[1]);
2420 		if (ent_cnt > total_ent) {
2421 			break;
2422 		}
2423 		switch ((uint8_t)(atio[0])) {
2424 		case 0x0d:	/* INOT */
2425 			qlt_handle_inot(qlt, atio);
2426 			break;
2427 		case 0x06:	/* ATIO */
2428 			qlt_handle_atio(qlt, atio);
2429 			break;
2430 		default:
2431 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2432 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2433 			break;
2434 		}
2435 		qlt->atio_ndx_to_fw = (qlt->atio_ndx_to_fw + ent_cnt) &
2436 					(ATIO_QUEUE_ENTRIES - 1);
2437 		total_ent -= ent_cnt;
2438 	} while (total_ent > 0);
2439 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2440 }
2441 
2442 uint32_t
2443 qlt_sync_resp_queue(qlt_state_t *qlt)
2444 {
2445 	uint32_t total_ent;
2446 
2447 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2448 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2449 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2450 		    RESPONSE_QUEUE_OFFSET
2451 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2452 		    DDI_DMA_SYNC_FORCPU);
2453 	} else {
2454 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2455 			qlt->resp_ndx_from_fw;
2456 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2457 		    RESPONSE_QUEUE_OFFSET
2458 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2459 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2460 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2461 		    RESPONSE_QUEUE_OFFSET,
2462 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2463 	}
2464 	return (total_ent);
2465 }
2466 
2467 void
2468 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2469 {
2470 	uint32_t total_ent;
2471 	uint8_t c;
2472 
2473 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2474 		return;
2475 
2476 	total_ent = qlt_sync_resp_queue(qlt);
2477 
2478 	do {
2479 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
2480 		uint32_t ent_cnt;
2481 
2482 		ent_cnt = (uint32_t)(resp[1]);
2483 		if (ent_cnt > total_ent) {
2484 			break;
2485 		}
2486 		switch ((uint8_t)(resp[0])) {
2487 		case 0x12:	/* CTIO completion */
2488 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
2489 			break;
2490 		case 0x0e:	/* NACK */
2491 			/* Do Nothing */
2492 			break;
2493 		case 0x29:	/* CT PassThrough */
2494 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
2495 			break;
2496 		case 0x33:	/* Abort IO IOCB completion */
2497 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
2498 			break;
2499 		case 0x51:	/* PUREX */
2500 			qlt_handle_purex(qlt, (uint8_t *)resp);
2501 			break;
2502 		case 0x52:
2503 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
2504 			break;
2505 		case 0x53:	/* ELS passthrough */
2506 			c = ((uint8_t)resp[0x1f]) >> 5;
2507 			if (c == 0) {
2508 				qlt_handle_sol_els_completion(qlt,
2509 				    (uint8_t *)resp);
2510 			} else if (c == 3) {
2511 				qlt_handle_unsol_els_abort_completion(qlt,
2512 				    (uint8_t *)resp);
2513 			} else {
2514 				qlt_handle_unsol_els_completion(qlt,
2515 				    (uint8_t *)resp);
2516 			}
2517 			break;
2518 		case 0x54:	/* ABTS received */
2519 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
2520 			break;
2521 		case 0x55:	/* ABTS completion */
2522 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
2523 			break;
2524 		}
2525 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
2526 					(RESPONSE_QUEUE_ENTRIES - 1);
2527 		total_ent -= ent_cnt;
2528 	} while (total_ent > 0);
2529 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
2530 }
2531 
2532 fct_status_t
2533 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
2534 				uint16_t *ret_handle)
2535 {
2536 	fct_status_t ret;
2537 	mbox_cmd_t *mcp;
2538 	uint16_t n;
2539 	uint16_t h;
2540 	uint32_t ent_id;
2541 	uint8_t *p;
2542 	int found = 0;
2543 
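	/*
	 * Mailbox command 0x7C (GET ID LIST) returns an array of 8-byte
	 * entries in the DMA buffer: a 24-bit port ID in the first four
	 * bytes (little-endian) followed by the 16-bit firmware handle
	 * at bytes 4-5. from_fw[1] holds the number of entries returned.
	 */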
2544 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
2545 	if (mcp == NULL) {
2546 		return (STMF_ALLOC_FAILURE);
2547 	}
2548 	mcp->to_fw[0] = 0x7C;	/* GET ID LIST */
2549 	mcp->to_fw[8] = 2048 * 8;
2550 	mcp->to_fw_mask |= BIT_8;
2551 	mcp->from_fw_mask |= BIT_1 | BIT_2;
2552 
2553 	ret = qlt_mailbox_command(qlt, mcp);
2554 	if (ret != QLT_SUCCESS) {
2555 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
2556 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
2557 		    mcp->from_fw[1], mcp->from_fw[2]);
2558 		qlt_free_mailbox_command(qlt, mcp);
2559 		return (ret);
2560 	}
2561 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
2562 	p = mcp->dbuf->db_sglist[0].seg_addr;
2563 	for (n = 0; n < mcp->from_fw[1]; n++) {
2564 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
2565 		h = (uint16_t)p[4] | (((uint16_t)p[5]) << 8);
2566 		if (ent_id == id) {
2567 			found = 1;
2568 			*ret_handle = h;
2569 			if ((cmd_handle != FCT_HANDLE_NONE) &&
2570 			    (cmd_handle != h)) {
2571 				cmn_err(CE_WARN, "login for portid %x came in "
2572 				    "with handle %x, while the portid was "
2573 				    "already using a different handle %x",
2574 					id, cmd_handle, h);
2575 				qlt_free_mailbox_command(qlt, mcp);
2576 				return (QLT_FAILURE);
2577 			}
2578 			break;
2579 		}
2580 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
2581 			cmn_err(CE_WARN, "login for portid %x came in with "
2582 			    "handle %x, while the handle was already in use "
2583 			    "for portid %x", id, cmd_handle, ent_id);
2584 			qlt_free_mailbox_command(qlt, mcp);
2585 			return (QLT_FAILURE);
2586 		}
2587 		p += 8;
2588 	}
2589 	if (!found) {
2590 		*ret_handle = cmd_handle;
2591 	}
2592 	qlt_free_mailbox_command(qlt, mcp);
2593 	return (FCT_SUCCESS);
2594 }
2595 
2596 /* ARGSUSED */
2597 fct_status_t
2598 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
2599 				fct_cmd_t *login)
2600 {
2601 	uint8_t *p;
2602 
2603 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
2604 	p[0] = ELS_OP_PLOGI;
2605 	*((uint16_t *)(&p[4])) = 0x2020;
2606 	p[7] = 3;
2607 	p[8] = 0x88;
2608 	p[10] = 8;
2609 	p[13] = 0xff; p[15] = 0x1f;
2610 	p[18] = 7; p[19] = 0xd0;
2611 
2612 	bcopy(port->port_pwwn, p + 20, 8);
2613 	bcopy(port->port_nwwn, p + 28, 8);
2614 
2615 	p[68] = 0x80;
2616 	p[74] = 8;
2617 	p[77] = 0xff;
2618 	p[81] = 1;
2619 
2620 	return (FCT_SUCCESS);
2621 }
2622 
2623 /* ARGSUSED */
2624 fct_status_t
2625 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
2626 				fct_cmd_t *login)
2627 {
2628 	return (FCT_SUCCESS);
2629 }
2630 
2631 fct_status_t
2632 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
2633 				fct_cmd_t *login)
2634 {
2635 	uint16_t h;
2636 	fct_status_t ret;
2637 
2638 	switch (rp->rp_id) {
2639 	case 0xFFFFFC:	h = 0x7FC; break;
2640 	case 0xFFFFFD:	h = 0x7FD; break;
2641 	case 0xFFFFFE:	h = 0x7FE; break;
2642 	case 0xFFFFFF:	h = 0x7FF; break;
2643 	default:
2644 		ret = qlt_portid_to_handle(
2645 	    (qlt_state_t *)port->port_fca_private, rp->rp_id,
2646 		login->cmd_rp_handle, &h);
2647 		if (ret != FCT_SUCCESS)
2648 			return (ret);
2649 	}
2650 
2651 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
2652 		ret = qlt_fill_plogi_req(port, rp, login);
2653 	} else {
2654 		ret = qlt_fill_plogi_resp(port, rp, login);
2655 	}
2656 
2657 	if (ret != FCT_SUCCESS)
2658 		return (ret);
2659 
2660 	if (h == FCT_HANDLE_NONE)
2661 		return (FCT_SUCCESS);
2662 
2663 	if (rp->rp_handle == FCT_HANDLE_NONE) {
2664 		rp->rp_handle = h;
2665 		return (FCT_SUCCESS);
2666 	}
2667 
2668 	if (rp->rp_handle == h)
2669 		return (FCT_SUCCESS);
2670 
2671 	return (FCT_FAILURE);
2672 }
2673 /* Invoked in a single thread */
2674 fct_status_t
2675 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
2676 {
2677 	uint8_t *req;
2678 	qlt_state_t *qlt;
2679 	clock_t	dereg_req_timer;
2680 	fct_status_t ret;
2681 
2682 	qlt = (qlt_state_t *)port->port_fca_private;
2683 
2684 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
2685 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
2686 		return (FCT_SUCCESS);
2687 	ASSERT(qlt->rp_id_in_dereg == 0);
2688 
2689 	mutex_enter(&qlt->preq_lock);
2690 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
2691 	if (req == NULL) {
2692 		mutex_exit(&qlt->preq_lock);
2693 		return (FCT_BUSY);
2694 	}
2695 	bzero(req, IOCB_SIZE);
2696 	req[0] = 0x52; req[1] = 1;
2697 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
2698 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
2699 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
2700 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
2701 	qlt->rp_id_in_dereg = rp->rp_id;
2702 	qlt_submit_preq_entries(qlt, 1);
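	/*
	 * The implicit-logout IOCB completes on the response queue;
	 * qlt_handle_dereg_completion() matches it against
	 * rp_id_in_dereg, records rp_dereg_status and signals
	 * rp_dereg_cv, which is what the timed wait below is for.
	 */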
2703 
2704 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
2705 	if (cv_timedwait(&qlt->rp_dereg_cv,
2706 	    &qlt->preq_lock, dereg_req_timer) > 0) {
2707 		ret = qlt->rp_dereg_status;
2708 	} else {
2709 		ret = FCT_BUSY;
2710 	}
2711 	qlt->rp_dereg_status = 0;
2712 	qlt->rp_id_in_dereg = 0;
2713 	mutex_exit(&qlt->preq_lock);
2714 	return (ret);
2715 }
2716 
2717 /*
2718  * Pass received ELS up to framework.
2719  */
2720 static void
2721 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
2722 {
2723 	fct_cmd_t		*cmd;
2724 	fct_els_t		*els;
2725 	qlt_cmd_t		*qcmd;
2726 	uint32_t		payload_size;
2727 	uint32_t		remote_portid;
2728 	uint8_t			*pldptr, *bndrptr;
2729 	int			i, off;
2730 	uint16_t		iocb_flags;
2731 	char			info[160];
2732 
2733 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
2734 	    ((uint32_t)(resp[0x1A])) << 16;
2735 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
2736 	if (iocb_flags & BIT_15) {
2737 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
2738 	} else {
2739 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
2740 	}
2741 
2742 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
2743 		cmn_err(CE_WARN, "handle_purex: payload is too large");
2744 		goto cmd_null;
2745 	}
2746 
2747 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS, payload_size +
2748 	    GET_STRUCT_SIZE(qlt_cmd_t), 0);
2749 	if (cmd == NULL) {
2750 cmd_null:;
2751 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
2752 		    "allocate space for fct_cmd", (void *)qlt);
2753 		info[159] = 0;
2754 		(void) fct_port_shutdown(qlt->qlt_port,
2755 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
2756 		return;
2757 	}
2758 
2759 	cmd->cmd_port = qlt->qlt_port;
2760 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
2761 	if (cmd->cmd_rp_handle == 0xFFFF) {
2762 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
2763 	}
2764 
2765 	els = (fct_els_t *)cmd->cmd_specific;
2766 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
2767 	els->els_req_size = payload_size;
2768 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
2769 	    GET_STRUCT_SIZE(qlt_cmd_t));
2770 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
2771 	cmd->cmd_rportid = remote_portid;
2772 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
2773 	    ((uint32_t)(resp[0x16])) << 16;
2774 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
2775 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
2776 	pldptr = &resp[0x2C];
2777 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
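	/*
	 * The firmware delivers the ELS payload as little-endian 32-bit
	 * words, so each 4-byte group is byte-reversed on the copy. The
	 * payload can also wrap around the end of the response ring and
	 * spill into continuation entries, in which case the first four
	 * bytes of each following IOCB (presumably its header) are
	 * skipped.
	 */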
2778 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
2779 		/* Take care of fw's swapping of payload */
2780 		els->els_req_payload[i] = pldptr[3];
2781 		els->els_req_payload[i+1] = pldptr[2];
2782 		els->els_req_payload[i+2] = pldptr[1];
2783 		els->els_req_payload[i+3] = pldptr[0];
2784 		pldptr += 4;
2785 		if (pldptr == bndrptr)
2786 			pldptr = (uint8_t *)qlt->resp_ptr;
2787 		off += 4;
2788 		if (off >= IOCB_SIZE) {
2789 			off = 4;
2790 			pldptr += 4;
2791 		}
2792 	}
2793 	fct_post_rcvd_cmd(cmd, 0);
2794 }
2795 
2796 fct_status_t
2797 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
2798 {
2799 	qlt_state_t	*qlt;
2800 	char		info[160];
2801 
2802 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
2803 
2804 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
2805 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2806 			goto fatal_panic;
2807 		} else {
2808 			return (qlt_send_status(qlt, cmd));
2809 		}
2810 	}
2811 
2812 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
2813 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2814 			goto fatal_panic;
2815 		} else {
2816 			return (qlt_send_els_response(qlt, cmd));
2817 		}
2818 	}
2819 
2820 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
2821 		cmd->cmd_handle = 0;
2822 	}
2823 
2824 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
2825 		return (qlt_send_abts_response(qlt, cmd, 0));
2826 	} else {
2827 		ASSERT(0);
2828 		return (FCT_FAILURE);
2829 	}
2830 
2831 fatal_panic:;
2832 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
2833 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
2834 	    ioflags);
2835 	info[159] = 0;
2836 	(void) fct_port_shutdown(qlt->qlt_port,
2837 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
2838 	return (FCT_FAILURE);
2839 }
2840 
2841 /* ARGSUSED */
2842 fct_status_t
2843 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
2844 {
2845 	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
2846 	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
2847 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
2848 	uint8_t *req;
2849 	uint16_t flags;
2850 
2851 	if (dbuf->db_handle == 0)
2852 		qcmd->dbuf = dbuf;
2853 	flags = ((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5;
2854 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
2855 		flags |= 2;
2856 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
2857 	} else {
2858 		flags |= 1;
2859 	}
2860 
2861 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
2862 		flags |= BIT_15;
2863 
2864 	mutex_enter(&qlt->req_lock);
2865 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
2866 	if (req == NULL) {
2867 		mutex_exit(&qlt->req_lock);
2868 		return (FCT_BUSY);
2869 	}
2870 	bzero(req, IOCB_SIZE);
2871 	req[0] = 0x12; req[1] = 0x1;
2872 	req[2] = dbuf->db_handle;
2873 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
2874 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
2875 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
2876 	req[12] = 1;
2877 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
2878 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
2879 	QMEM_WR16(qlt, req+0x1A, flags);
2880 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
2881 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
2882 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
2883 	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
2884 	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
2885 	qlt_submit_req_entries(qlt, 1);
2886 	mutex_exit(&qlt->req_lock);
2887 
2888 	return (STMF_SUCCESS);
2889 }
2890 
2891 /*
2892  * We must construct proper FCP_RSP_IU now. Here we only focus on
2893  * the handling of FCP_SNS_INFO. If there are protocol failures (FCP_RSP_INFO),
2894  * they would have been caught before we got here.
2895  */
2896 fct_status_t
2897 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
2898 {
2899 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
2900 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
2901 	qlt_dmem_bctl_t *bctl;
2902 	uint32_t size;
2903 	uint8_t *req, *fcp_rsp_iu;
2904 	uint8_t *psd, sensbuf[24];		/* sense data */
2905 	uint16_t flags;
2906 	uint16_t scsi_status;
2907 	int use_mode2;
2908 	int ndx;
2909 
2910 	/*
2911 	 * Enter the fast path for non-CHECK-CONDITION status
2912 	 */
2913 	if (task->task_scsi_status != STATUS_CHECK) {
2914 		/*
2915 		 * We will use mode1
2916 		 */
2917 		flags = BIT_6 | BIT_15 |
2918 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
2919 		scsi_status = (uint16_t)task->task_scsi_status;
2920 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
2921 			scsi_status |= BIT_10;
2922 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
2923 			scsi_status |= BIT_11;
2924 		}
2925 		qcmd->dbuf_rsp_iu = NULL;
2926 
2927 		/*
2928 	 * Fill out the CTIO type 7 IOCB
2929 		 */
2930 		mutex_enter(&qlt->req_lock);
2931 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
2932 		if (req == NULL) {
2933 			mutex_exit(&qlt->req_lock);
2934 			return (FCT_BUSY);
2935 		}
2936 
2937 		/*
2938 		 * Common fields
2939 		 */
2940 		bzero(req, IOCB_SIZE);
2941 		req[0x00] = 0x12;
2942 		req[0x01] = 0x1;
2943 		req[0x02] = BIT_7;	/* indicate that it's a pure status req */
2944 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
2945 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
2946 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
2947 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
2948 
2949 		/*
2950 		 * Mode-specific fields
2951 		 */
2952 		QMEM_WR16(qlt, req + 0x1A, flags);
2953 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
2954 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
2955 		QMEM_WR16(qlt, req + 0x22, scsi_status);
2956 
2957 		/*
2958 		 * Trigger FW to send SCSI status out
2959 		 */
2960 		qlt_submit_req_entries(qlt, 1);
2961 		mutex_exit(&qlt->req_lock);
2962 		return (STMF_SUCCESS);
2963 	}
2964 
2965 	ASSERT(task->task_scsi_status == STATUS_CHECK);
2966 	/*
2967 	 * Decide the SCSI status mode that should be used
2968 	 */
2969 	use_mode2 = (task->task_sense_length > 24);
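	/*
	 * Mode 1 (sense data of 24 bytes or less) embeds the sense bytes
	 * directly in the CTIO IOCB. Mode 2 builds a complete FCP_RSP IU
	 * in a separate DMA buffer and points the CTIO at it.
	 */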
2970 
2971 	/*
2972 	 * Prepare required information per the SCSI status mode
2973 	 */
2974 	flags = BIT_15 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
2975 	if (use_mode2) {
2976 		flags |= BIT_7;
2977 
2978 		size = task->task_sense_length;
2979 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
2980 		    task->task_sense_length, &size, 0);
2981 		if (!qcmd->dbuf_rsp_iu) {
2982 			return (FCT_ALLOC_FAILURE);
2983 		}
2984 
2985 		/*
2986 		 * Start to construct FCP_RSP IU
2987 		 */
2988 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
2989 		bzero(fcp_rsp_iu, 24);
2990 
2991 		/*
2992 		 * FCP_RSP IU flags, byte10
2993 		 */
2994 		fcp_rsp_iu[10] |= BIT_1;
2995 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
2996 			fcp_rsp_iu[10] |= BIT_2;
2997 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
2998 			fcp_rsp_iu[10] |= BIT_3;
2999 		}
3000 
3001 		/*
3002 		 * SCSI status code, byte11
3003 		 */
3004 		fcp_rsp_iu[11] = task->task_scsi_status;
3005 
3006 		/*
3007 		 * FCP_RESID (Overrun or underrun)
3008 		 */
3009 		fcp_rsp_iu[12] = (task->task_resid >> 24) & 0xFF;
3010 		fcp_rsp_iu[13] = (task->task_resid >> 16) & 0xFF;
3011 		fcp_rsp_iu[14] = (task->task_resid >>  8) & 0xFF;
3012 		fcp_rsp_iu[15] = (task->task_resid >>  0) & 0xFF;
3013 
3014 		/*
3015 		 * FCP_SNS_LEN
3016 		 */
3017 		fcp_rsp_iu[18] = (task->task_sense_length >> 8) & 0xFF;
3018 		fcp_rsp_iu[19] = (task->task_sense_length >> 0) & 0xFF;
3019 
3020 		/*
3021 		 * FCP_RSP_LEN
3022 		 */
3023 		/*
3024 		 * no FCP_RSP_INFO
3025 		 */
3026 		/*
3027 		 * FCP_SNS_INFO
3028 		 */
3029 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3030 		    task->task_sense_length);
3031 
3032 		/*
3033 		 * Ensure dma data consistency
3034 		 */
3035 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3036 	} else {
3037 		flags |= BIT_6;
3038 
3039 		scsi_status = (uint16_t)task->task_scsi_status;
3040 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3041 			scsi_status |= BIT_10;
3042 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3043 			scsi_status |= BIT_11;
3044 		}
3045 		if (task->task_sense_length) {
3046 			scsi_status |= BIT_9;
3047 		}
3048 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3049 		qcmd->dbuf_rsp_iu = NULL;
3050 	}
3051 
3052 	/*
3053 	 * Fill out the CTIO type 7 IOCB
3054 	 */
3055 	mutex_enter(&qlt->req_lock);
3056 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3057 	if (req == NULL) {
3058 		mutex_exit(&qlt->req_lock);
3059 		if (use_mode2) {
3060 			qlt_dmem_free(cmd->cmd_port->port_fds,
3061 						qcmd->dbuf_rsp_iu);
3062 			qcmd->dbuf_rsp_iu = NULL;
3063 		}
3064 		return (FCT_BUSY);
3065 	}
3066 
3067 	/*
3068 	 * Common fields
3069 	 */
3070 	bzero(req, IOCB_SIZE);
3071 	req[0x00] = 0x12;
3072 	req[0x01] = 0x1;
3073 	req[0x02] = BIT_7;	/* to indicate that it's a pure status req */
3074 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3075 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3076 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3077 	if (use_mode2) {
3078 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3079 	}
3080 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3081 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3082 
3083 	/*
3084 	 * Mode-specific fields
3085 	 */
3086 	if (!use_mode2) {
3087 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3088 	}
3089 	QMEM_WR16(qlt, req + 0x1A, flags);
3090 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3091 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3092 	if (use_mode2) {
3093 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3094 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3095 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3096 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3097 	} else {
3098 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3099 		psd = req+0x28;
3100 
3101 		/*
3102 		 * Data in sense buf is always big-endian, data in IOCB
3103 		 * should always be little-endian, so we must do swapping.
3104 		 */
3105 		size = ((task->task_sense_length + 3) & (~3));
3106 		for (ndx = 0; ndx < size; ndx += 4) {
3107 			psd[ndx + 0] = sensbuf[ndx + 3];
3108 			psd[ndx + 1] = sensbuf[ndx + 2];
3109 			psd[ndx + 2] = sensbuf[ndx + 1];
3110 			psd[ndx + 3] = sensbuf[ndx + 0];
3111 		}
3112 	}
3113 
3114 	/*
3115 	 * Trigger FW to send SCSI status out
3116 	 */
3117 	qlt_submit_req_entries(qlt, 1);
3118 	mutex_exit(&qlt->req_lock);
3119 
3120 	return (STMF_SUCCESS);
3121 }
3122 
3123 fct_status_t
3124 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3125 {
3126 	qlt_cmd_t	*qcmd;
3127 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3128 	uint8_t *req, *addr;
3129 	qlt_dmem_bctl_t *bctl;
3130 	uint32_t minsize;
3131 	uint8_t elsop, req1f;
3132 
3133 	addr = els->els_resp_payload;
3134 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3135 
3136 	minsize = els->els_resp_size;
3137 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3138 	if (qcmd->dbuf == NULL)
3139 		return (FCT_BUSY);
3140 
3141 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3142 
3143 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3144 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3145 
3146 	if (addr[0] == 0x02) {	/* ACC */
3147 		req1f = BIT_5;
3148 	} else {
3149 		req1f = BIT_6;
3150 	}
3151 	elsop = els->els_req_payload[0];
3152 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3153 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3154 		req1f |= BIT_4;
3155 	}
3156 
3157 	mutex_enter(&qlt->req_lock);
3158 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3159 	if (req == NULL) {
3160 		mutex_exit(&qlt->req_lock);
3161 		qlt_dmem_free(NULL, qcmd->dbuf);
3162 		qcmd->dbuf = NULL;
3163 		return (FCT_BUSY);
3164 	}
3165 	bzero(req, IOCB_SIZE);
3166 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3167 	req[0x16] = elsop; req[0x1f] = req1f;
3168 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3169 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3170 	QMEM_WR16(qlt, (&req[0xC]), 1);
3171 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3172 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3173 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3174 		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
3175 		req[0x1c] = cmd->cmd_lportid & 0xff;
3176 		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
3177 	}
3178 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3179 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3180 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3181 	qlt_submit_req_entries(qlt, 1);
3182 	mutex_exit(&qlt->req_lock);
3183 
3184 	return (FCT_SUCCESS);
3185 }
3186 
3187 fct_status_t
3188 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3189 {
3190 	qlt_abts_cmd_t *qcmd;
3191 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3192 	uint8_t *req;
3193 	uint32_t lportid;
3194 	uint32_t fctl;
3195 	int i;
3196 
3197 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3198 
3199 	mutex_enter(&qlt->req_lock);
3200 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3201 	if (req == NULL) {
3202 		mutex_exit(&qlt->req_lock);
3203 		return (FCT_BUSY);
3204 	}
3205 	bcopy(qcmd->buf, req, IOCB_SIZE);
3206 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3207 	fctl = QMEM_RD32(qlt, req+0x1C);
3208 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3209 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3210 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3211 	if (cmd->cmd_rp)
3212 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3213 	else
3214 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3215 	if (terminate) {
3216 		QMEM_WR16(qlt, (&req[0xC]), 1);
3217 	}
3218 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3219 	req[0x17] = abts->abts_resp_rctl;
3220 	QMEM_WR32(qlt, req+0x18, lportid);
3221 	QMEM_WR32(qlt, req+0x1C, fctl);
3222 	req[0x23]++;
3223 	for (i = 0; i < 12; i += 4) {
3224 		/* Take care of firmware's LE requirement */
3225 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3226 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3227 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3228 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3229 	}
3230 	qlt_submit_req_entries(qlt, 1);
3231 	mutex_exit(&qlt->req_lock);
3232 
3233 	return (FCT_SUCCESS);
3234 }
3235 
3236 static void
3237 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3238 {
3239 	int i;
3240 	uint32_t d;
3241 	caddr_t req;
3242 	/* Just put it on the request queue */
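	/*
	 * The immediate notify is acknowledged by copying the 64-byte
	 * IOCB back onto the request queue with its entry type changed
	 * to 0x0e (notify acknowledge).
	 */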
3243 	mutex_enter(&qlt->req_lock);
3244 	req = qlt_get_req_entries(qlt, 1);
3245 	if (req == NULL) {
3246 		mutex_exit(&qlt->req_lock);
3247 		/* XXX handle this */
3248 		return;
3249 	}
3250 	for (i = 0; i < 16; i++) {
3251 		d = QMEM_RD32(qlt, inot);
3252 		inot += 4;
3253 		QMEM_WR32(qlt, req, d);
3254 		req += 4;
3255 	}
3256 	req -= 64;
3257 	req[0] = 0x0e;
3258 	qlt_submit_req_entries(qlt, 1);
3259 	mutex_exit(&qlt->req_lock);
3260 }
3261 
3262 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3263 static void
3264 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3265 {
3266 	fct_cmd_t	*cmd;
3267 	scsi_task_t	*task;
3268 	qlt_cmd_t	*qcmd;
3269 	uint32_t	rportid, fw_xchg_addr;
3270 	uint8_t		*p, *q, *req, tm;
3271 	uint16_t	cdb_size, flags, oxid;
3272 	char		info[160];
3273 
3274 	/*
3275 	 * If either a bidirectional xfer is requested or there is an extended
3276 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3277 	 */
3278 	cdb_size = 16;
3279 	if (atio[0x20 + 11] >= 3) {
3280 		uint8_t b = atio[0x20 + 11];
3281 		uint16_t b1;
3282 		if ((b & 3) == 3) {
3283 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3284 			    "received, dropping the cmd as bidirectional "
3285 			    "transfers are not yet supported", qlt->instance);
3286 			/* XXX abort the I/O */
3287 			return;
3288 		}
3289 		cdb_size += b & 0xfc;
3290 		/*
3291 		 * Verify that we have enough entries. Without an additional
3292 		 * CDB, everything will fit nicely within the same 64 bytes. So
3293 		 * the additional CDB size is essentially the # of additional
3294 		 * bytes we need.
3295 		 */
3296 		b1 = (uint16_t)b;
3297 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3298 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3299 			    "cdb (cdb size = %d bytes), however the firmware "
3300 			    "did not DMA the entire FCP_CMD IU, entry count "
3301 			    "is %d while it should be %d", qlt->instance,
3302 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3303 			/* XXX abort the I/O */
3304 			return;
3305 		}
3306 	}
3307 
3308 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3309 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3310 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3311 	oxid = (((uint16_t)atio[8 + 16]) << 8) | atio[8+17];
3312 
3313 	if (fw_xchg_addr == 0xFFFFFFFF) {
3314 		cmd = NULL;
3315 	} else {
3316 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3317 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3318 	}
3319 	if (cmd == NULL) {
3320 		/* Abort this IO */
3321 		flags = BIT_14 | ((atio[3] & 0xF0) << 5);
3322 
3323 		mutex_enter(&qlt->req_lock);
3324 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3325 		if (req == NULL) {
3326 			mutex_exit(&qlt->req_lock);
3327 
3328 			(void) snprintf(info, 160,
3329 			    "qlt_handle_atio: qlt-%p, can't "
3330 			    "allocate space for scsi_task", (void *)qlt);
3331 			info[159] = 0;
3332 			(void) fct_port_shutdown(qlt->qlt_port,
3333 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3334 			return;
3335 		}
3336 		bzero(req, IOCB_SIZE);
3337 		req[0] = 0x12; req[1] = 0x1;
3338 		QMEM_WR32(qlt, req+4, 0);
3339 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3340 		    rportid));
3341 		QMEM_WR16(qlt, req+10, 60);
3342 		QMEM_WR32(qlt, req+0x10, rportid);
3343 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3344 		QMEM_WR16(qlt, req+0x1A, flags);
3345 		QMEM_WR16(qlt, req+0x20, oxid);
3346 		qlt_submit_req_entries(qlt, 1);
3347 		mutex_exit(&qlt->req_lock);
3348 
3349 		return;
3350 	}
3351 
3352 	task = (scsi_task_t *)cmd->cmd_specific;
3353 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3354 	qcmd->fw_xchg_addr = fw_xchg_addr;
3355 	qcmd->param.atio_byte3 = atio[3];
3356 	cmd->cmd_oxid = oxid;
3357 	cmd->cmd_rxid = (((uint16_t)atio[8 + 18]) << 8) | atio[8+19];
3358 	cmd->cmd_rportid = rportid;
3359 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3360 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3361 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3362 	/* Don't do a 64 byte read as this is IOMMU */
3363 	q = atio+0x28;
3364 	/* XXX Handle fcp_cntl */
3365 	task->task_cmd_seq_no = (uint32_t)(*q++);
3366 	task->task_csn_size = 8;
3367 	task->task_flags = qlt_task_flags[(*q++) & 7];
3368 	tm = *q++;
3369 	if (tm) {
3370 		if (tm & BIT_1)
3371 			task->task_mgmt_function = TM_ABORT_TASK_SET;
3372 		else if (tm & BIT_2)
3373 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
3374 		else if (tm & BIT_4)
3375 			task->task_mgmt_function = TM_LUN_RESET;
3376 		else if (tm & BIT_5)
3377 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
3378 		else if (tm & BIT_6)
3379 			task->task_mgmt_function = TM_CLEAR_ACA;
3380 		else
3381 			task->task_mgmt_function = TM_ABORT_TASK;
3382 	}
3383 	task->task_max_nbufs = STMF_BUFS_MAX;
3384 	task->task_csn_size = 8;
3385 	task->task_flags |= ((*q++) & 3) << 5;
3386 	p = task->task_cdb;
3387 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3388 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3389 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3390 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3391 	if (cdb_size > 16) {
3392 		uint16_t xtra = cdb_size - 16;
3393 		uint16_t i;
3394 		uint8_t cb[4];
3395 
3396 		while (xtra) {
3397 			*p++ = *q++;
3398 			xtra--;
3399 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3400 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3401 				q = (uint8_t *)qlt->queue_mem_ptr +
3402 						ATIO_QUEUE_OFFSET;
3403 			}
3404 		}
3405 		for (i = 0; i < 4; i++) {
3406 			cb[i] = *q++;
3407 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3408 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3409 				q = (uint8_t *)qlt->queue_mem_ptr +
3410 						ATIO_QUEUE_OFFSET;
3411 			}
3412 		}
3413 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
3414 				(((uint32_t)cb[1]) << 16) |
3415 				(((uint32_t)cb[2]) << 8) | cb[3];
3416 	} else {
3417 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
3418 				(((uint32_t)q[1]) << 16) |
3419 				(((uint32_t)q[2]) << 8) | q[3];
3420 	}
3421 	fct_post_rcvd_cmd(cmd, 0);
3422 }
3423 
3424 static void
3425 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
3426 {
3427 	uint16_t status;
3428 	uint32_t portid;
3429 	uint32_t subcode1, subcode2;
3430 
3431 	status = QMEM_RD16(qlt, rsp+8);
3432 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
3433 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
3434 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
3435 
3436 	mutex_enter(&qlt->preq_lock);
3437 	if (portid != qlt->rp_id_in_dereg) {
3438 		int instance = ddi_get_instance(qlt->dip);
3439 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
3440 		    " received when the driver wasn't waiting for it",
3441 		    instance, portid);
3442 		mutex_exit(&qlt->preq_lock);
3443 		return;
3444 	}
3445 
3446 	if (status != 0) {
3447 		QLT_LOG(qlt->qlt_port_alias, "implicit logout completed "
3448 		    "for 0x%x with status %x, subcode1 %x subcode2 %x",
3449 		    portid, status, subcode1, subcode2);
3450 		if (status == 0x31 && subcode1 == 0x0a)
3451 			qlt->rp_dereg_status = FCT_SUCCESS;
3452 		else
3453 			qlt->rp_dereg_status =
3454 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
3455 	} else {
3456 		qlt->rp_dereg_status = FCT_SUCCESS;
3457 	}
3458 	cv_signal(&qlt->rp_dereg_cv);
3459 	mutex_exit(&qlt->preq_lock);
3460 }
3461 
3462 /*
3463  * Note that when an ELS is aborted, the regular or aborted completion
3464  * (if any) gets posted before the abort IOCB comes back on response queue.
3465  */
3466 static void
3467 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3468 {
3469 	char		info[160];
3470 	fct_cmd_t	*cmd;
3471 	qlt_cmd_t	*qcmd;
3472 	uint32_t	hndl;
3473 	uint32_t	subcode1, subcode2;
3474 	uint16_t	status;
3475 
3476 	hndl = QMEM_RD32(qlt, rsp+4);
3477 	status = QMEM_RD16(qlt, rsp+8);
3478 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3479 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3480 
3481 	if (!CMD_HANDLE_VALID(hndl)) {
3482 		/*
3483 		 * This cannot happen for an unsol ELS completion; it can only
3484 		 * happen when the abort of an unsol ELS completes.
3485 		 * This condition indicates a firmware bug.
3486 		 */
3487 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
3488 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3489 		    hndl, status, subcode1, subcode2, (void *)rsp);
3490 		info[159] = 0;
3491 		(void) fct_port_shutdown(qlt->qlt_port,
3492 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3493 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3494 		return;
3495 	}
3496 
3497 	if (status == 5) {
3498 		/*
3499 		 * When an unsolicited ELS is aborted, the abort is done by an
3500 		 * ELSPT IOCB with abort control. This completion is for the ELS
3501 		 * that was aborted, not for the abort request itself. We will
3502 		 * do the cleanup when the IOCB which caused the abort returns.
3503 		 */
3504 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
3505 		return;
3506 	}
3507 
3508 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3509 	if (cmd == NULL) {
3510 		/*
3511 		 * Now why would this happen ???
3512 		 */
3513 		(void) snprintf(info, 160,
3514 		    "qlt_handle_unsol_els_completion: can not "
3515 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3516 		    (void *)rsp);
3517 		info[159] = 0;
3518 		(void) fct_port_shutdown(qlt->qlt_port,
3519 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3520 
3521 		return;
3522 	}
3523 
3524 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3525 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3526 	if (qcmd->flags & QLT_CMD_ABORTING) {
3527 		/*
3528 		 * This is the same case as "if (status == 5)" above. The
3529 		 * only difference is that in this case the firmware actually
3530 		 * finished sending the response. So the abort attempt will
3531 		 * come back with status ?. We will handle it there.
3532 		 */
3533 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
3534 		    "abort it");
3535 		return;
3536 	}
3537 
3538 	if (qcmd->dbuf != NULL) {
3539 		qlt_dmem_free(NULL, qcmd->dbuf);
3540 		qcmd->dbuf = NULL;
3541 	}
3542 
3543 	if (status == 0) {
3544 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3545 	} else {
3546 		fct_send_response_done(cmd,
3547 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3548 	}
3549 }
3550 
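/*
 * Completion of the abort issued against an unsolicited ELS. A zero
 * (invalid) handle means nobody is waiting for this abort; in that case
 * anything other than status 0 or 8 may leak an exchange, so the port is
 * shut down. With a valid handle the outcome is reported through
 * fct_cmd_fca_aborted(); status 8 maps to FCT_NOT_FOUND.
 */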
3551 static void
3552 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
3553 {
3554 	char		info[160];
3555 	fct_cmd_t	*cmd;
3556 	qlt_cmd_t	*qcmd;
3557 	uint32_t	hndl;
3558 	uint32_t	subcode1, subcode2;
3559 	uint16_t	status;
3560 
3561 	hndl = QMEM_RD32(qlt, rsp+4);
3562 	status = QMEM_RD16(qlt, rsp+8);
3563 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3564 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3565 
3566 	if (!CMD_HANDLE_VALID(hndl)) {
3567 		ASSERT(hndl == 0);
3568 		/*
3569 		 * Someone has requested to abort it, but no one is waiting for
3570 		 * this completion.
3571 		 */
3572 		if ((status != 0) && (status != 8)) {
3573 			/*
3574 			 * There could be exchange resource leakage, so
3575 			 * throw HBA fatal error event now
3576 			 */
3577 			(void) snprintf(info, 160,
3578 			    "qlt_handle_unsol_els_abort_completion: "
3579 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3580 			    hndl, status, subcode1, subcode2, (void *)rsp);
3581 			info[159] = 0;
3582 			(void) fct_port_shutdown(qlt->qlt_port,
3583 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3584 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3585 			return;
3586 		}
3587 
3588 		return;
3589 	}
3590 
3591 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3592 	if (cmd == NULL) {
3593 		/*
3594 		 * Why would this happen ??
3595 		 */
3596 		(void) snprintf(info, 160,
3597 		    "qlt_handle_unsol_els_abort_completion: can not get "
3598 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3599 		    (void *)rsp);
3600 		info[159] = 0;
3601 		(void) fct_port_shutdown(qlt->qlt_port,
3602 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3603 
3604 		return;
3605 	}
3606 
3607 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3608 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3609 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
3610 
3611 	if (qcmd->dbuf != NULL) {
3612 		qlt_dmem_free(NULL, qcmd->dbuf);
3613 		qcmd->dbuf = NULL;
3614 	}
3615 
3616 	if (status == 0) {
3617 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
3618 	} else if (status == 8) {
3619 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
3620 	} else {
3621 		fct_cmd_fca_aborted(cmd,
3622 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3623 	}
3624 }
3625 
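/*
 * Completion of a solicited ELS pass-through. On success the response
 * payload is copied out of the shared DMA buffer (it starts at
 * resp_offset, just past the request) before the buffer is freed. If an
 * abort is pending, cleanup is deferred to the ABORT IO completion.
 */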
3626 static void
3627 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3628 {
3629 	char		info[160];
3630 	fct_cmd_t	*cmd;
3631 	fct_els_t	*els;
3632 	qlt_cmd_t	*qcmd;
3633 	uint32_t	hndl;
3634 	uint32_t	subcode1, subcode2;
3635 	uint16_t	status;
3636 
3637 	hndl = QMEM_RD32(qlt, rsp+4);
3638 	status = QMEM_RD16(qlt, rsp+8);
3639 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3640 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3641 
3642 	if (!CMD_HANDLE_VALID(hndl)) {
3643 		/*
3644 		 * This cannot happen for sol els completion.
3645 		 */
3646 		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
3647 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3648 		    hndl, status, subcode1, subcode2, (void *)rsp);
3649 		info[159] = 0;
3650 		(void) fct_port_shutdown(qlt->qlt_port,
3651 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3652 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3653 		return;
3654 	}
3655 
3656 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3657 	if (cmd == NULL) {
3658 		(void) snprintf(info, 160,
3659 		    "qlt_handle_sol_els_completion: can not "
3660 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3661 		    (void *)rsp);
3662 		info[159] = 0;
3663 		(void) fct_port_shutdown(qlt->qlt_port,
3664 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3665 
3666 		return;
3667 	}
3668 
3669 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
3670 	els = (fct_els_t *)cmd->cmd_specific;
3671 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3672 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
3673 
3674 	if (qcmd->flags & QLT_CMD_ABORTING) {
3675 		/*
3676 		 * We will handle it when the ABORT IO IOCB returns.
3677 		 */
3678 		return;
3679 	}
3680 
3681 	if (qcmd->dbuf != NULL) {
3682 		if (status == 0) {
3683 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
3684 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
3685 			    qcmd->param.resp_offset,
3686 				els->els_resp_payload, els->els_resp_size);
3687 		}
3688 		qlt_dmem_free(NULL, qcmd->dbuf);
3689 		qcmd->dbuf = NULL;
3690 	}
3691 
3692 	if (status == 0) {
3693 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3694 	} else {
3695 		fct_send_cmd_done(cmd,
3696 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3697 	}
3698 }
3699 
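/*
 * Completion of a solicited CT pass-through. Mirrors the solicited ELS
 * completion: copy the CT response out of the shared DMA buffer on
 * success, free the buffer and report the result to the framework.
 */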
3700 static void
3701 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
3702 {
3703 	fct_cmd_t	*cmd;
3704 	fct_sol_ct_t	*ct;
3705 	qlt_cmd_t	*qcmd;
3706 	uint32_t	 hndl;
3707 	uint16_t	 status;
3708 	char		 info[160];
3709 
3710 	hndl = QMEM_RD32(qlt, rsp+4);
3711 	status = QMEM_RD16(qlt, rsp+8);
3712 
3713 	if (!CMD_HANDLE_VALID(hndl)) {
3714 		/*
3715 		 * Solicited commands will always have a valid handle.
3716 		 */
3717 		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
3718 		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3719 		info[159] = 0;
3720 		(void) fct_port_shutdown(qlt->qlt_port,
3721 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3722 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3723 		return;
3724 	}
3725 
3726 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3727 	if (cmd == NULL) {
3728 		(void) snprintf(info, 160,
3729 		    "qlt_handle_ct_completion: cannot find "
3730 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3731 		    (void *)rsp);
3732 		info[159] = 0;
3733 		(void) fct_port_shutdown(qlt->qlt_port,
3734 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3735 
3736 		return;
3737 	}
3738 
3739 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
3740 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3741 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
3742 
3743 	if (qcmd->flags & QLT_CMD_ABORTING) {
3744 		/*
3745 		 * We will handle it when the ABORT IO IOCB returns.
3746 		 */
3747 		return;
3748 	}
3749 
3750 	ASSERT(qcmd->dbuf);
3751 	if (status == 0) {
3752 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
3753 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
3754 		    qcmd->param.resp_offset,
3755 		    ct->ct_resp_payload, ct->ct_resp_size);
3756 	}
3757 	qlt_dmem_free(NULL, qcmd->dbuf);
3758 	qcmd->dbuf = NULL;
3759 
3760 	if (status == 0) {
3761 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3762 	} else {
3763 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
3764 	}
3765 }
3766 
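/*
 * Completion of a CTIO (data and/or status for an unsolicited SCSI
 * command). BIT_14 in the flags means this CTIO was terminating the
 * exchange (an abort we requested); BIT_15 appears to mean status was
 * sent along with the data (DB_STATUS_GOOD_SENT). rsp[2] carries the
 * dbuf handle: if its BIT_7 is clear and no abort was requested, this
 * completes a data transfer, otherwise it is a pure status transfer or
 * the abort itself. Statuses 1 and 2 are treated as success.
 */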
3767 static void
3768 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
3769 {
3770 	fct_cmd_t	*cmd;
3771 	scsi_task_t	*task;
3772 	qlt_cmd_t	*qcmd;
3773 	stmf_data_buf_t	*dbuf;
3774 	fct_status_t	fc_st;
3775 	uint32_t	iof = 0;
3776 	uint32_t	hndl;
3777 	uint16_t	status;
3778 	uint16_t	flags;
3779 	uint8_t		abort_req;
3780 	uint8_t		n;
3781 	char		info[160];
3782 
3783 	/* XXX: Check validity of the IOCB by checking 4th byte. */
3784 	hndl = QMEM_RD32(qlt, rsp+4);
3785 	status = QMEM_RD16(qlt, rsp+8);
3786 	flags = QMEM_RD16(qlt, rsp+0x1a);
3787 	n = rsp[2];
3788 
3789 	if (!CMD_HANDLE_VALID(hndl)) {
3790 		ASSERT(hndl == 0);
3791 		/*
3792 		 * Someone has requested to abort it, but no one is waiting for
3793 		 * this completion.
3794 		 */
3795 		QLT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
3796 		    "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3797 		if ((status != 1) && (status != 2)) {
3798 			/*
3799 			 * There could be exchange resource leakage, so
3800 			 * throw HBA fatal error event now
3801 			 */
3802 			(void) snprintf(info, 160,
3803 			    "qlt_handle_ctio_completion: hndl-"
3804 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
3805 			info[159] = 0;
3806 			(void) fct_port_shutdown(qlt->qlt_port,
3807 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3808 
3809 		}
3810 
3811 		return;
3812 	}
3813 
3814 	if (flags & BIT_14) {
3815 		abort_req = 1;
3816 		QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_handle_ctio_completion: "
3817 		    "abort: hndl-%x, status-%x, rsp-%p", hndl, status,
3818 		    (void *)rsp);
3819 	} else {
3820 		abort_req = 0;
3821 	}
3822 
3823 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3824 	if (cmd == NULL) {
3825 		(void) snprintf(info, 160,
3826 		    "qlt_handle_ctio_completion: cannot find "
3827 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3828 		    (void *)rsp);
3829 		info[159] = 0;
3830 		(void) fct_port_shutdown(qlt->qlt_port,
3831 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3832 
3833 		return;
3834 	}
3835 
3836 	task = (scsi_task_t *)cmd->cmd_specific;
3837 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3838 	if (qcmd->dbuf_rsp_iu) {
3839 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
3840 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
3841 		qcmd->dbuf_rsp_iu = NULL;
3842 	}
3843 
3844 	if ((status == 1) || (status == 2)) {
3845 		if (abort_req) {
3846 			fc_st = FCT_ABORT_SUCCESS;
3847 			iof = FCT_IOF_FCA_DONE;
3848 		} else {
3849 			fc_st = FCT_SUCCESS;
3850 			if (flags & BIT_15) {
3851 				iof = FCT_IOF_FCA_DONE;
3852 			}
3853 		}
3854 	} else {
3855 		if ((status == 8) && abort_req) {
3856 			fc_st = FCT_NOT_FOUND;
3857 			iof = FCT_IOF_FCA_DONE;
3858 		} else {
3859 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
3860 		}
3861 	}
3862 	dbuf = NULL;
3863 	if (((n & BIT_7) == 0) && (!abort_req)) {
3864 		/* A completion of data xfer */
3865 		if (n == 0) {
3866 			dbuf = qcmd->dbuf;
3867 		} else {
3868 			dbuf = stmf_handle_to_buf(task, n);
3869 		}
3870 
3871 		ASSERT(dbuf != NULL);
3872 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
3873 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
3874 		if (flags & BIT_15) {
3875 			dbuf->db_flags |= DB_STATUS_GOOD_SENT;
3876 		}
3877 
3878 		dbuf->db_xfer_status = fc_st;
3879 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
3880 		return;
3881 	}
3882 	if (!abort_req) {
3883 		/*
3884 		 * This was just a pure status xfer.
3885 		 */
3886 		fct_send_response_done(cmd, fc_st, iof);
3887 		return;
3888 	}
3889 
3890 	fct_cmd_fca_aborted(cmd, fc_st, iof);
3891 }
3892 
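/*
 * Completion of an ABORT IO IOCB issued against a solicited ELS or CT
 * command. Status 0x31 is mapped to FCT_NOT_FOUND (the firmware
 * presumably no longer had the command).
 */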
3893 static void
3894 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
3895 {
3896 	char		info[80];
3897 	fct_cmd_t	*cmd;
3898 	qlt_cmd_t	*qcmd;
3899 	uint32_t	h;
3900 	uint16_t	status;
3901 
3902 	h = QMEM_RD32(qlt, rsp+4);
3903 	status = QMEM_RD16(qlt, rsp+8);
3904 
3905 	if (!CMD_HANDLE_VALID(h)) {
3906 		/*
3907 		 * Solicited commands always have a valid handle.
3908 		 */
3909 		(void) snprintf(info, 80,
3910 		    "qlt_handle_sol_abort_completion: hndl-"
3911 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
3912 		info[79] = 0;
3913 		(void) fct_port_shutdown(qlt->qlt_port,
3914 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3915 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3916 		return;
3917 	}
3918 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
3919 	if (cmd == NULL) {
3920 		/*
3921 		 * What happened to the cmd ??
3922 		 */
3923 		(void) snprintf(info, 80,
3924 		    "qlt_handle_sol_abort_completion: cannot "
3925 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
3926 		    (void *)rsp);
3927 		info[79] = 0;
3928 		(void) fct_port_shutdown(qlt->qlt_port,
3929 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3930 
3931 		return;
3932 	}
3933 
3934 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
3935 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
3936 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3937 	if (qcmd->dbuf != NULL) {
3938 		qlt_dmem_free(NULL, qcmd->dbuf);
3939 		qcmd->dbuf = NULL;
3940 	}
3941 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
3942 	if (status == 0) {
3943 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
3944 	} else if (status == 0x31) {
3945 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
3946 	} else {
3947 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
3948 	}
3949 }
3950 
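/*
 * An ABTS arrived from a remote port. Allocate an FCT command, save a
 * copy of the raw IOCB (bytes 0xC-0xE are zeroed first) so that the ABTS
 * response can later be built from it, and post it to the framework.
 */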
3951 static void
3952 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
3953 {
3954 	qlt_abts_cmd_t	*qcmd;
3955 	fct_cmd_t	*cmd;
3956 	uint32_t	remote_portid;
3957 	char		info[160];
3958 
3959 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3960 	    ((uint32_t)(resp[0x1A])) << 16;
3961 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
3962 	    sizeof (qlt_abts_cmd_t), 0);
3963 	if (cmd == NULL) {
3964 		(void) snprintf(info, 160,
3965 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
3966 		    "allocate space for fct_cmd", (void *)qlt);
3967 		info[159] = 0;
3968 		(void) fct_port_shutdown(qlt->qlt_port,
3969 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3970 		return;
3971 	}
3972 
3973 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
3974 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3975 	bcopy(resp, qcmd->buf, IOCB_SIZE);
3976 	cmd->cmd_port = qlt->qlt_port;
3977 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
3978 	if (cmd->cmd_rp_handle == 0xFFFF)
3979 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3980 
3981 	cmd->cmd_rportid = remote_portid;
3982 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3983 	    ((uint32_t)(resp[0x16])) << 16;
3984 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3985 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3986 	fct_post_rcvd_cmd(cmd, 0);
3987 }
3988 
3989 static void
3990 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
3991 {
3992 	uint16_t status;
3993 	char	info[80];
3994 
3995 	status = QMEM_RD16(qlt, resp+8);
3996 
3997 	if ((status == 0) || (status == 5)) {
3998 		return;
3999 	}
4000 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4001 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4002 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4003 	info[79] = 0;
4004 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4005 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4006 }
4007 
4008 #ifdef	DEBUG
4009 uint32_t qlt_drop_abort_counter = 0;
4010 #endif
4011 
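/*
 * Framework entry point for aborting a command; dispatches on the
 * command type. The DEBUG-only qlt_drop_abort_counter knob can be set to
 * make the driver silently claim success for an abort without issuing
 * it, presumably to exercise error recovery paths.
 */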
4012 fct_status_t
4013 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4014 {
4015 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4016 
4017 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4018 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4019 		return (FCT_NOT_FOUND);
4020 	}
4021 
4022 #ifdef DEBUG
4023 	if (qlt_drop_abort_counter > 0) {
4024 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4025 			return (FCT_SUCCESS);
4026 	}
4027 #endif
4028 
4029 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4030 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4031 	}
4032 
4033 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4034 		cmd->cmd_handle = 0;
4035 	}
4036 
4037 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4038 		return (qlt_send_abts_response(qlt, cmd, 1));
4039 	}
4040 
4041 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4042 		return (qlt_abort_purex(qlt, cmd));
4043 	}
4044 
4045 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4046 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4047 		return (qlt_abort_sol_cmd(qlt, cmd));
4048 	}
4049 
4050 	ASSERT(0);
4051 	return (FCT_FAILURE);
4052 }
4053 
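/*
 * Abort a solicited ELS/CT command by queueing an ABORT IO IOCB (entry
 * type 0x33) that identifies the command to abort by its handle and
 * remote port.
 */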
4054 fct_status_t
4055 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4056 {
4057 	uint8_t *req;
4058 	qlt_cmd_t *qcmd;
4059 
4060 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4061 	qcmd->flags |= QLT_CMD_ABORTING;
4062 	QLT_LOG(qlt->qlt_port_alias, "qlt_abort_sol_cmd: fctcmd-%p, "
4063 	    "cmd_handle-%x", cmd, cmd->cmd_handle);
4064 
4065 	mutex_enter(&qlt->req_lock);
4066 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4067 	if (req == NULL) {
4068 		mutex_exit(&qlt->req_lock);
4069 
4070 		return (FCT_BUSY);
4071 	}
4072 	bzero(req, IOCB_SIZE);
4073 	req[0] = 0x33; req[1] = 1;
4074 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4075 	if (cmd->cmd_rp) {
4076 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4077 	} else {
4078 		QMEM_WR16(qlt, req+8, 0xFFFF);
4079 	}
4080 
4081 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4082 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4083 	qlt_submit_req_entries(qlt, 1);
4084 	mutex_exit(&qlt->req_lock);
4085 
4086 	return (FCT_SUCCESS);
4087 }
4088 
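/*
 * Abort an unsolicited ELS that has not been responded to yet. This uses
 * an ELS pass-through IOCB (0x53) with terminate-exchange control flags;
 * for PRLI/PRLO/TPRLO/LOGO an additional control bit (BIT_4) is set.
 */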
4089 fct_status_t
4090 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4091 {
4092 	uint8_t *req;
4093 	qlt_cmd_t *qcmd;
4094 	fct_els_t *els;
4095 	uint8_t elsop, req1f;
4096 
4097 	els = (fct_els_t *)cmd->cmd_specific;
4098 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4099 	elsop = els->els_req_payload[0];
4100 	QLT_LOG(qlt->qlt_port_alias,
4101 	    "qlt_abort_purex: fctcmd-%p, cmd_handle-%x, "
4102 	    "elsop-%x", cmd, cmd->cmd_handle, elsop);
4103 	req1f = 0x60;	/* Terminate xchg */
4104 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4105 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4106 		req1f |= BIT_4;
4107 	}
4108 
4109 	mutex_enter(&qlt->req_lock);
4110 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4111 	if (req == NULL) {
4112 		mutex_exit(&qlt->req_lock);
4113 
4114 		return (FCT_BUSY);
4115 	}
4116 
4117 	qcmd->flags |= QLT_CMD_ABORTING;
4118 	bzero(req, IOCB_SIZE);
4119 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4120 	req[0x16] = elsop; req[0x1f] = req1f;
4121 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4122 	if (cmd->cmd_rp) {
4123 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4124 	} else {
4125 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4126 	}
4127 
4128 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4129 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4130 	qlt_submit_req_entries(qlt, 1);
4131 	mutex_exit(&qlt->req_lock);
4132 
4133 	return (FCT_SUCCESS);
4134 }
4135 
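/*
 * Abort an unsolicited SCSI command by sending a CTIO (entry type 0x12)
 * with the terminate flag (BIT_14) set, referencing the firmware
 * exchange address and OX_ID of the original ATIO.
 */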
4136 fct_status_t
4137 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4138 {
4139 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4140 	uint8_t *req;
4141 	uint16_t flags;
4142 
4143 	flags = BIT_14 | (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
4144 	QLT_EXT_LOG(qlt->qlt_port_alias, "qlt_abort_unsol_scsi_cmd: fctcmd-%p, "
4145 	    "cmd_handle-%x", cmd, cmd->cmd_handle);
4146 
4147 	mutex_enter(&qlt->req_lock);
4148 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4149 	if (req == NULL) {
4150 		mutex_exit(&qlt->req_lock);
4151 
4152 		return (FCT_BUSY);
4153 	}
4154 
4155 	qcmd->flags |= QLT_CMD_ABORTING;
4156 	bzero(req, IOCB_SIZE);
4157 	req[0] = 0x12; req[1] = 0x1;
4158 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4159 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4160 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4161 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4162 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4163 	QMEM_WR16(qlt, req+0x1A, flags);
4164 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4165 	qlt_submit_req_entries(qlt, 1);
4166 	mutex_exit(&qlt->req_lock);
4167 
4168 	return (FCT_SUCCESS);
4169 }
4170 
4171 fct_status_t
4172 qlt_send_cmd(fct_cmd_t *cmd)
4173 {
4174 	qlt_state_t *qlt;
4175 
4176 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4177 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4178 		return (qlt_send_els(qlt, cmd));
4179 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4180 		return (qlt_send_ct(qlt, cmd));
4181 	}
4182 
4183 	ASSERT(0);
4184 	return (FCT_FAILURE);
4185 }
4186 
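/*
 * Send a solicited ELS using an ELS pass-through IOCB (0x53). The
 * request and response share one DMA buffer: the request starts at
 * offset 0 and the response area at the next 8-byte boundary
 * (resp_offset). One command DSD and one response DSD point into that
 * buffer.
 */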
4187 fct_status_t
4188 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4189 {
4190 	uint8_t *req;
4191 	fct_els_t *els;
4192 	qlt_cmd_t *qcmd;
4193 	stmf_data_buf_t *buf;
4194 	qlt_dmem_bctl_t *bctl;
4195 	uint32_t sz, minsz;
4196 
4197 	els = (fct_els_t *)cmd->cmd_specific;
4198 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4199 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4200 	qcmd->param.resp_offset = (els->els_req_size + 7) & ~7;
4201 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4202 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4203 	if (buf == NULL) {
4204 		return (FCT_BUSY);
4205 	}
4206 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4207 
4208 	qcmd->dbuf = buf;
4209 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4210 						els->els_req_size);
4211 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4212 
4213 	mutex_enter(&qlt->req_lock);
4214 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4215 	if (req == NULL) {
4216 		qlt_dmem_free(NULL, buf);
4217 		mutex_exit(&qlt->req_lock);
4218 		return (FCT_BUSY);
4219 	}
4220 	bzero(req, IOCB_SIZE);
4221 	req[0] = 0x53; req[1] = 1;
4222 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4223 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4224 	QMEM_WR16(qlt, (&req[0xC]), 1);
4225 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4226 	QMEM_WR16(qlt, (&req[0x14]), 1);
4227 	req[0x16] = els->els_req_payload[0];
4228 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4229 		req[0x1b] = (cmd->cmd_lportid >> 16) & 0xff;
4230 		req[0x1c] = cmd->cmd_lportid & 0xff;
4231 		req[0x1d] = (cmd->cmd_lportid >> 8) & 0xff;
4232 	}
4233 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4234 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4235 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4236 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4237 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4238 	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
4239 					qcmd->param.resp_offset);
4240 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4241 	qlt_submit_req_entries(qlt, 1);
4242 	mutex_exit(&qlt->req_lock);
4243 
4244 	return (FCT_SUCCESS);
4245 }
4246 
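/*
 * Send a solicited CT command (e.g. a name server query) using a CT
 * pass-through IOCB (0x29). The DMA buffer layout is the same as for a
 * solicited ELS: request at offset 0, response at resp_offset.
 */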
4247 fct_status_t
4248 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4249 {
4250 	uint8_t *req;
4251 	fct_sol_ct_t *ct;
4252 	qlt_cmd_t *qcmd;
4253 	stmf_data_buf_t *buf;
4254 	qlt_dmem_bctl_t *bctl;
4255 	uint32_t sz, minsz;
4256 
4257 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4258 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4259 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4260 	qcmd->param.resp_offset = (ct->ct_req_size + 7) & ~7;
4261 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4262 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4263 	if (buf == NULL) {
4264 		return (FCT_BUSY);
4265 	}
4266 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4267 
4268 	qcmd->dbuf = buf;
4269 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4270 						ct->ct_req_size);
4271 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4272 
4273 	mutex_enter(&qlt->req_lock);
4274 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4275 	if (req == NULL) {
4276 		qlt_dmem_free(NULL, buf);
4277 		mutex_exit(&qlt->req_lock);
4278 		return (FCT_BUSY);
4279 	}
4280 	bzero(req, IOCB_SIZE);
4281 	req[0] = 0x29; req[1] = 1;
4282 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4283 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4284 	QMEM_WR16(qlt, (&req[0xC]), 1);
4285 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4286 	QMEM_WR16(qlt, (&req[0x14]), 1);
4287 
4288 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4289 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4290 
4291 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4292 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4293 	QMEM_WR64(qlt, (&req[0x34]), bctl->bctl_dev_addr +
4294 	    qcmd->param.resp_offset);		/* RESPONSE DSD */
4295 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4296 
4297 	qlt_submit_req_entries(qlt, 1);
4298 	mutex_exit(&qlt->req_lock);
4299 
4300 	return (FCT_SUCCESS);
4301 }
4302 
4303 
4304 /*
4305  * All QLT_FIRMWARE_* conditions are mainly handled in this function.
4306  * It cannot be called in interrupt context.
4307  *
4308  * The firmware dump exists to serve the fetch ioctl, so qlt_ioctl_flags
4309  * and qlt_ioctl_lock are used to coordinate with it.
4310  */
4311 static fct_status_t
4312 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4313 {
4314 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4315 	int		i;
4316 	int		retries;
4317 	int		n, size_left;
4318 	char		c = ' ';
4319 	uint32_t	addr, endaddr, words_to_read;
4320 	caddr_t		buf;
4321 
4322 	mutex_enter(&qlt->qlt_ioctl_lock);
4323 	/*
4324 	 * Make sure there is no dump already in progress.
4325 	 */
4326 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4327 		mutex_exit(&qlt->qlt_ioctl_lock);
4328 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: outstanding");
4329 		return (FCT_FAILURE);
4330 	}
4331 
4332 	/*
4333 	 * Make sure not to overwrite an existing dump.
4334 	 */
4335 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4336 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4337 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4338 		/*
4339 		 * If we already have a dump that wasn't triggered by the user
4340 		 * and the user hasn't fetched it yet, we shouldn't dump again.
4341 		 */
4342 		mutex_exit(&qlt->qlt_ioctl_lock);
4343 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: There's one "
4344 		    "dump, please fetch it");
4345 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
4346 		    "is one already outstanding.", qlt->instance);
4347 		return (FCT_FAILURE);
4348 	}
4349 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
4350 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
4351 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
4352 	} else {
4353 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
4354 	}
4355 	mutex_exit(&qlt->qlt_ioctl_lock);
4356 
4357 	size_left = QLT_FWDUMP_BUFSIZE;
4358 	if (!qlt->qlt_fwdump_buf) {
4359 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
4360 		/*
4361 		 * This is the only place where the dump buffer is allocated.
4362 		 * Once allocated, it is reused until the port is detached.
4363 		 */
4364 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
4365 	}
4366 
4367 	/*
4368 	 * Start to dump firmware
4369 	 */
4370 	buf = (caddr_t)qlt->qlt_fwdump_buf;
4371 
4372 	/*
4373 	 * Print the ISP firmware revision number and attributes information
4374 	 * Read the RISC to Host Status register
4375 	 */
4376 	n = snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
4377 	    "Attributes %04x\n\nR2H Status Register\n%08x",
4378 	    qlt->fw_major, qlt->fw_minor,
4379 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, 0x44));
4380 	buf += n; size_left -= n;
4381 
4382 	/*
4383 	 * Before pausing the RISC, make sure no mailbox can execute
4384 	 */
4385 	mutex_enter(&qlt->mbox_lock);
4386 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
4387 		/*
4388 		 * Wait to grab the mailboxes
4389 		 */
4390 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
4391 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
4392 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
4393 			    ddi_get_lbolt() + drv_usectohz(1000000));
4394 			if (retries > 5) {
4395 				mutex_exit(&qlt->mbox_lock);
4396 				QLT_LOG(qlt->qlt_port_alias,
4397 				    "qlt_firmware_dump: "
4398 				    "can't drain out mailbox commands");
4399 				goto dump_fail;
4400 			}
4401 		}
4402 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
4403 		cv_broadcast(&qlt->mbox_cv);
4404 	}
4405 	mutex_exit(&qlt->mbox_lock);
4406 
4407 	/*
4408 	 * Pause the RISC processor
4409 	 */
4410 	REG_WR32(qlt, REG_HCCR, 0x30000000);
4411 
4412 	/*
4413 	 * Wait for the RISC processor to pause
4414 	 */
4415 	for (i = 0; i < 200; i++) {
4416 		if (REG_RD32(qlt, 0x44) & 0x100) {
4417 			break;
4418 		}
4419 		drv_usecwait(1000);
4420 	}
4421 	if (i == 200) {
4422 		QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: can't pause");
4423 		goto dump_fail;	/* clear QLT_FWDUMP_INPROGRESS on the way out */
4424 	}
4425 
4426 	if (!qlt->qlt_25xx_chip) {
4427 		goto over_25xx_specific_dump;
4428 	}
4429 	n = snprintf(buf, size_left, "\n\nHostRisc registers\n");
4430 	buf += n; size_left -= n;
4431 	REG_WR32(qlt, 0x54, 0x7000);
4432 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4433 	buf += n; size_left -= n;
4434 	REG_WR32(qlt, 0x54, 0x7010);
4435 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4436 	buf += n; size_left -= n;
4437 	REG_WR32(qlt, 0x54, 0x7C00);
4438 
4439 	n = snprintf(buf, size_left, "\nPCIe registers\n");
4440 	buf += n; size_left -= n;
4441 	REG_WR32(qlt, 0xC0, 0x1);
4442 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
4443 	buf += n; size_left -= n;
4444 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
4445 	buf += n; size_left -= n;
4446 	REG_WR32(qlt, 0xC0, 0x0);
4447 
4448 over_25xx_specific_dump:;
4449 	n = snprintf(buf, size_left, "\n\nHost Interface Registers\n");
4450 	buf += n; size_left -= n;
4451 	/*
4452 	 * Capture data from 32 registers
4453 	 */
4454 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
4455 	buf += n; size_left -= n;
4456 
4457 	/*
4458 	 * Disable interrupts
4459 	 */
4460 	REG_WR32(qlt, 0xc, 0);
4461 
4462 	/*
4463 	 * Shadow registers
4464 	 */
4465 	n = snprintf(buf, size_left, "\nShadow Registers\n");
4466 	buf += n; size_left -= n;
4467 
4468 	REG_WR32(qlt, 0x54, 0xF70);
4469 	addr = 0xb0000000;
4470 	for (i = 0; i < 0xb; i++) {
4471 		if ((!qlt->qlt_25xx_chip) && (i >= 7)) {
4472 			break;
4473 		}
4474 		if (i && ((i & 7) == 0)) {
4475 			n = snprintf(buf, size_left, "\n");
4476 			buf += n; size_left -= n;
4477 		}
4478 		REG_WR32(qlt, 0xF0, addr);
4479 		n = snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
4480 		buf += n; size_left -= n;
4481 		addr += 0x100000;
4482 	}
4483 
4484 	if (qlt->qlt_25xx_chip) {
4485 		REG_WR32(qlt, 0x54, 0x10);
4486 		n = snprintf(buf, size_left, "\n\nRISC IO Register\n%08x",
4487 		    REG_RD32(qlt, 0xC0));
4488 		buf += n; size_left -= n;
4489 	}
4490 
4491 	/*
4492 	 * Mailbox registers
4493 	 */
4494 	n = snprintf(buf, size_left, "\n\nMailbox Registers\n");
4495 	buf += n; size_left -= n;
4496 	for (i = 0; i < 32; i += 2) {
4497 		if ((i + 2) & 15) {
4498 			c = ' ';
4499 		} else {
4500 			c = '\n';
4501 		}
4502 		n = snprintf(buf, size_left, "%04x %04x%c",
4503 		    REG_RD16(qlt, 0x80 + (i << 1)),
4504 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
4505 		buf += n; size_left -= n;
4506 	}
4507 
4508 	/*
4509 	 * Transfer sequence registers
4510 	 */
4511 	n = snprintf(buf, size_left, "\nXSEQ GP Registers\n");
4512 	buf += n; size_left -= n;
4513 
4514 	REG_WR32(qlt, 0x54, 0xBF00);
4515 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4516 	buf += n; size_left -= n;
4517 	REG_WR32(qlt, 0x54, 0xBF10);
4518 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4519 	buf += n; size_left -= n;
4520 	REG_WR32(qlt, 0x54, 0xBF20);
4521 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4522 	buf += n; size_left -= n;
4523 	REG_WR32(qlt, 0x54, 0xBF30);
4524 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4525 	buf += n; size_left -= n;
4526 	REG_WR32(qlt, 0x54, 0xBF40);
4527 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4528 	buf += n; size_left -= n;
4529 	REG_WR32(qlt, 0x54, 0xBF50);
4530 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4531 	buf += n; size_left -= n;
4532 	REG_WR32(qlt, 0x54, 0xBF60);
4533 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4534 	buf += n; size_left -= n;
4535 	REG_WR32(qlt, 0x54, 0xBF70);
4536 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4537 	buf += n; size_left -= n;
4538 	n = snprintf(buf, size_left, "\nXSEQ-0 registers\n");
4539 	buf += n; size_left -= n;
4540 	REG_WR32(qlt, 0x54, 0xBFE0);
4541 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4542 	buf += n; size_left -= n;
4543 	n = snprintf(buf, size_left, "\nXSEQ-1 registers\n");
4544 	buf += n; size_left -= n;
4545 	REG_WR32(qlt, 0x54, 0xBFF0);
4546 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4547 	buf += n; size_left -= n;
4548 
4549 	/*
4550 	 * Receive sequence registers
4551 	 */
4552 	n = snprintf(buf, size_left, "\nRSEQ GP Registers\n");
4553 	buf += n; size_left -= n;
4554 	REG_WR32(qlt, 0x54, 0xFF00);
4555 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4556 	buf += n; size_left -= n;
4557 	REG_WR32(qlt, 0x54, 0xFF10);
4558 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4559 	buf += n; size_left -= n;
4560 	REG_WR32(qlt, 0x54, 0xFF20);
4561 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4562 	buf += n; size_left -= n;
4563 	REG_WR32(qlt, 0x54, 0xFF30);
4564 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4565 	buf += n; size_left -= n;
4566 	REG_WR32(qlt, 0x54, 0xFF40);
4567 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4568 	buf += n; size_left -= n;
4569 	REG_WR32(qlt, 0x54, 0xFF50);
4570 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4571 	buf += n; size_left -= n;
4572 	REG_WR32(qlt, 0x54, 0xFF60);
4573 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4574 	buf += n; size_left -= n;
4575 	REG_WR32(qlt, 0x54, 0xFF70);
4576 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4577 	buf += n; size_left -= n;
4578 	n = snprintf(buf, size_left, "\nRSEQ-0 registers\n");
4579 	buf += n; size_left -= n;
4580 	REG_WR32(qlt, 0x54, 0xFFD0);
4581 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4582 	buf += n; size_left -= n;
4583 	n = snprintf(buf, size_left, "\nRSEQ-1 registers\n");
4584 	buf += n; size_left -= n;
4585 	REG_WR32(qlt, 0x54, 0xFFE0);
4586 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4587 	buf += n; size_left -= n;
4588 	n = snprintf(buf, size_left, "\nRSEQ-2 registers\n");
4589 	buf += n; size_left -= n;
4590 	REG_WR32(qlt, 0x54, 0xFFF0);
4591 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4592 	buf += n; size_left -= n;
4593 
4594 	if (!qlt->qlt_25xx_chip)
4595 		goto over_aseq_regs;
4596 
4597 	/*
4598 	 * Auxiliary sequencer registers
4599 	 */
4600 	n = snprintf(buf, size_left, "\nASEQ GP Registers\n");
4601 	buf += n; size_left -= n;
4602 	REG_WR32(qlt, 0x54, 0xB000);
4603 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4604 	buf += n; size_left -= n;
4605 	REG_WR32(qlt, 0x54, 0xB010);
4606 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4607 	buf += n; size_left -= n;
4608 	REG_WR32(qlt, 0x54, 0xB020);
4609 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4610 	buf += n; size_left -= n;
4611 	REG_WR32(qlt, 0x54, 0xB030);
4612 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4613 	buf += n; size_left -= n;
4614 	REG_WR32(qlt, 0x54, 0xB040);
4615 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4616 	buf += n; size_left -= n;
4617 	REG_WR32(qlt, 0x54, 0xB050);
4618 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4619 	buf += n; size_left -= n;
4620 	REG_WR32(qlt, 0x54, 0xB060);
4621 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4622 	buf += n; size_left -= n;
4623 	REG_WR32(qlt, 0x54, 0xB070);
4624 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4625 	buf += n; size_left -= n;
4626 	n = snprintf(buf, size_left, "\nASEQ-0 registers\n");
4627 	buf += n; size_left -= n;
4628 	REG_WR32(qlt, 0x54, 0xB0C0);
4629 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4630 	buf += n; size_left -= n;
4631 	REG_WR32(qlt, 0x54, 0xB0D0);
4632 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4633 	buf += n; size_left -= n;
4634 	n = snprintf(buf, size_left, "\nASEQ-1 registers\n");
4635 	buf += n; size_left -= n;
4636 	REG_WR32(qlt, 0x54, 0xB0E0);
4637 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4638 	buf += n; size_left -= n;
4639 	n = snprintf(buf, size_left, "\nASEQ-2 registers\n");
4640 	buf += n; size_left -= n;
4641 	REG_WR32(qlt, 0x54, 0xB0F0);
4642 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4643 	buf += n; size_left -= n;
4644 
4645 over_aseq_regs:;
4646 
4647 	/*
4648 	 * Command DMA registers
4649 	 */
4650 	n = snprintf(buf, size_left, "\nCommand DMA registers\n");
4651 	buf += n; size_left -= n;
4652 	REG_WR32(qlt, 0x54, 0x7100);
4653 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4654 	buf += n; size_left -= n;
4655 
4656 	/*
4657 	 * Queues
4658 	 */
4659 	n = snprintf(buf, size_left,
4660 			"\nRequest0 Queue DMA Channel registers\n");
4661 	buf += n; size_left -= n;
4662 	REG_WR32(qlt, 0x54, 0x7200);
4663 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4664 	buf += n; size_left -= n;
4665 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4666 	buf += n; size_left -= n;
4667 
4668 	n = snprintf(buf, size_left,
4669 			"\n\nResponse0 Queue DMA Channel registers\n");
4670 	buf += n; size_left -= n;
4671 	REG_WR32(qlt, 0x54, 0x7300);
4672 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4673 	buf += n; size_left -= n;
4674 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4675 	buf += n; size_left -= n;
4676 
4677 	n = snprintf(buf, size_left,
4678 			"\n\nRequest1 Queue DMA Channel registers\n");
4679 	buf += n; size_left -= n;
4680 	REG_WR32(qlt, 0x54, 0x7400);
4681 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
4682 	buf += n; size_left -= n;
4683 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
4684 	buf += n; size_left -= n;
4685 
4686 	/*
4687 	 * Transmit DMA registers
4688 	 */
4689 	n = snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
4690 	buf += n; size_left -= n;
4691 	REG_WR32(qlt, 0x54, 0x7600);
4692 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4693 	buf += n; size_left -= n;
4694 	REG_WR32(qlt, 0x54, 0x7610);
4695 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4696 	buf += n; size_left -= n;
4697 	n = snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
4698 	buf += n; size_left -= n;
4699 	REG_WR32(qlt, 0x54, 0x7620);
4700 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4701 	buf += n; size_left -= n;
4702 	REG_WR32(qlt, 0x54, 0x7630);
4703 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4704 	buf += n; size_left -= n;
4705 	n = snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
4706 	buf += n; size_left -= n;
4707 	REG_WR32(qlt, 0x54, 0x7640);
4708 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4709 	buf += n; size_left -= n;
4710 	REG_WR32(qlt, 0x54, 0x7650);
4711 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4712 	buf += n; size_left -= n;
4713 	n = snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
4714 	buf += n; size_left -= n;
4715 	REG_WR32(qlt, 0x54, 0x7660);
4716 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4717 	buf += n; size_left -= n;
4718 	REG_WR32(qlt, 0x54, 0x7670);
4719 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4720 	buf += n; size_left -= n;
4721 	n = snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
4722 	buf += n; size_left -= n;
4723 	REG_WR32(qlt, 0x54, 0x7680);
4724 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4725 	buf += n; size_left -= n;
4726 	REG_WR32(qlt, 0x54, 0x7690);
4727 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4728 	buf += n; size_left -= n;
4729 	n = snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
4730 	buf += n; size_left -= n;
4731 	REG_WR32(qlt, 0x54, 0x76A0);
4732 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4733 	buf += n; size_left -= n;
4734 
4735 	/*
4736 	 * Receive DMA registers
4737 	 */
4738 	n = snprintf(buf, size_left, "\nRCV Thread 0 Data DMA registers\n");
4739 	buf += n; size_left -= n;
4740 	REG_WR32(qlt, 0x54, 0x7700);
4741 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4742 	buf += n; size_left -= n;
4743 	REG_WR32(qlt, 0x54, 0x7710);
4744 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4745 	buf += n; size_left -= n;
4746 	n = snprintf(buf, size_left, "\nRCV Thread 1 Data DMA registers\n");
4747 	buf += n; size_left -= n;
4748 	REG_WR32(qlt, 0x54, 0x7720);
4749 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4750 	buf += n; size_left -= n;
4751 	REG_WR32(qlt, 0x54, 0x7730);
4752 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4753 	buf += n; size_left -= n;
4754 
4755 	/*
4756 	 * RISC registers
4757 	 */
4758 	n = snprintf(buf, size_left, "\nRISC GP registers\n");
4759 	buf += n; size_left -= n;
4760 	REG_WR32(qlt, 0x54, 0x0F00);
4761 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4762 	buf += n; size_left -= n;
4763 	REG_WR32(qlt, 0x54, 0x0F10);
4764 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4765 	buf += n; size_left -= n;
4766 	REG_WR32(qlt, 0x54, 0x0F20);
4767 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4768 	buf += n; size_left -= n;
4769 	REG_WR32(qlt, 0x54, 0x0F30);
4770 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4771 	buf += n; size_left -= n;
4772 	REG_WR32(qlt, 0x54, 0x0F40);
4773 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4774 	buf += n; size_left -= n;
4775 	REG_WR32(qlt, 0x54, 0x0F50);
4776 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4777 	buf += n; size_left -= n;
4778 	REG_WR32(qlt, 0x54, 0x0F60);
4779 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4780 	buf += n; size_left -= n;
4781 	REG_WR32(qlt, 0x54, 0x0F70);
4782 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4783 	buf += n; size_left -= n;
4784 
4785 	/*
4786 	 * Local memory controller registers
4787 	 */
4788 	n = snprintf(buf, size_left, "\nLMC registers\n");
4789 	buf += n; size_left -= n;
4790 	REG_WR32(qlt, 0x54, 0x3000);
4791 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4792 	buf += n; size_left -= n;
4793 	REG_WR32(qlt, 0x54, 0x3010);
4794 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4795 	buf += n; size_left -= n;
4796 	REG_WR32(qlt, 0x54, 0x3020);
4797 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4798 	buf += n; size_left -= n;
4799 	REG_WR32(qlt, 0x54, 0x3030);
4800 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4801 	buf += n; size_left -= n;
4802 	REG_WR32(qlt, 0x54, 0x3040);
4803 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4804 	buf += n; size_left -= n;
4805 	REG_WR32(qlt, 0x54, 0x3050);
4806 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4807 	buf += n; size_left -= n;
4808 	REG_WR32(qlt, 0x54, 0x3060);
4809 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4810 	buf += n; size_left -= n;
4811 
4812 	if (qlt->qlt_25xx_chip) {
4813 		REG_WR32(qlt, 0x54, 0x3070);
4814 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4815 		buf += n; size_left -= n;
4816 	}
4817 
4818 	/*
4819 	 * Fibre protocol module registers
4820 	 */
4821 	n = snprintf(buf, size_left, "\nFPM hardware registers\n");
4822 	buf += n; size_left -= n;
4823 	REG_WR32(qlt, 0x54, 0x4000);
4824 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4825 	buf += n; size_left -= n;
4826 	REG_WR32(qlt, 0x54, 0x4010);
4827 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4828 	buf += n; size_left -= n;
4829 	REG_WR32(qlt, 0x54, 0x4020);
4830 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4831 	buf += n; size_left -= n;
4832 	REG_WR32(qlt, 0x54, 0x4030);
4833 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4834 	buf += n; size_left -= n;
4835 	REG_WR32(qlt, 0x54, 0x4040);
4836 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4837 	buf += n; size_left -= n;
4838 	REG_WR32(qlt, 0x54, 0x4050);
4839 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4840 	buf += n; size_left -= n;
4841 	REG_WR32(qlt, 0x54, 0x4060);
4842 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4843 	buf += n; size_left -= n;
4844 	REG_WR32(qlt, 0x54, 0x4070);
4845 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4846 	buf += n; size_left -= n;
4847 	REG_WR32(qlt, 0x54, 0x4080);
4848 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4849 	buf += n; size_left -= n;
4850 	REG_WR32(qlt, 0x54, 0x4090);
4851 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4852 	buf += n; size_left -= n;
4853 	REG_WR32(qlt, 0x54, 0x40A0);
4854 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4855 	buf += n; size_left -= n;
4856 	REG_WR32(qlt, 0x54, 0x40B0);
4857 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4858 	buf += n; size_left -= n;
4859 
4860 	/*
4861 	 * Fibre buffer registers
4862 	 */
4863 	n = snprintf(buf, size_left, "\nFB hardware registers\n");
4864 	buf += n; size_left -= n;
4865 	REG_WR32(qlt, 0x54, 0x6000);
4866 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4867 	buf += n; size_left -= n;
4868 	REG_WR32(qlt, 0x54, 0x6010);
4869 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4870 	buf += n; size_left -= n;
4871 	REG_WR32(qlt, 0x54, 0x6020);
4872 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4873 	buf += n; size_left -= n;
4874 	REG_WR32(qlt, 0x54, 0x6030);
4875 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4876 	buf += n; size_left -= n;
4877 	REG_WR32(qlt, 0x54, 0x6040);
4878 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4879 	buf += n; size_left -= n;
4880 	REG_WR32(qlt, 0x54, 0x6100);
4881 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4882 	buf += n; size_left -= n;
4883 	REG_WR32(qlt, 0x54, 0x6130);
4884 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4885 	buf += n; size_left -= n;
4886 	REG_WR32(qlt, 0x54, 0x6150);
4887 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4888 	buf += n; size_left -= n;
4889 	REG_WR32(qlt, 0x54, 0x6170);
4890 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4891 	buf += n; size_left -= n;
4892 	REG_WR32(qlt, 0x54, 0x6190);
4893 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4894 	buf += n; size_left -= n;
4895 	REG_WR32(qlt, 0x54, 0x61B0);
4896 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4897 	buf += n; size_left -= n;
4898 
4899 	if (qlt->qlt_25xx_chip) {
4900 		REG_WR32(qlt, 0x54, 0x6F00);
4901 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4902 		buf += n; size_left -= n;
4903 	}
4904 
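	/*
	 * Reset the chip (reset_only mode) with interrupts disabled so the
	 * RISC RAM can then be read out through raw mailbox commands.
	 * intr_sneak_counter presumably lets the ISR tolerate the few
	 * stray interrupts this window can generate.
	 */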
4905 	qlt->intr_sneak_counter = 10;
4906 	qlt_disable_intr(qlt);
4907 	mutex_enter(&qlt->intr_lock);
4908 	qlt->qlt_intr_enabled = 0;
4909 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
4910 	drv_usecwait(20);
4911 	qlt->intr_sneak_counter = 0;
4912 	mutex_exit(&qlt->intr_lock);
4913 
4914 	/*
4915 	 * Memory
4916 	 */
4917 	n = snprintf(buf, size_left, "\nCode RAM\n");
4918 	buf += n; size_left -= n;
4919 
4920 	addr = 0x20000;
4921 	endaddr = 0x22000;
4922 	words_to_read = 0;
4923 	while (addr < endaddr) {
4924 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
4925 		if ((words_to_read + addr) > endaddr) {
4926 			words_to_read = endaddr - addr;
4927 		}
4928 		if (qlt_read_risc_ram(qlt, addr, words_to_read) !=
4929 		    QLT_SUCCESS) {
4930 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error "
4931 			    "reading risc ram - CODE RAM");
4932 			goto dump_fail;
4933 		}
4934 
4935 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
4936 		buf += n; size_left -= n;
4937 
4938 		if (size_left < 100000) {
4939 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run "
4940 			    "out of space - CODE RAM");
4941 			goto dump_ok;
4942 		}
4943 		addr += words_to_read;
4944 	}
4945 
4946 	n = snprintf(buf, size_left, "\nExternal Memory\n");
4947 	buf += n; size_left -= n;
4948 
4949 	addr = 0x100000;
4950 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
4951 	endaddr++;
4952 	if (endaddr & 7) {
4953 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
4954 	}
4955 
4956 	words_to_read = 0;
4957 	while (addr < endaddr) {
4958 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
4959 		if ((words_to_read + addr) > endaddr) {
4960 			words_to_read = endaddr - addr;
4961 		}
4962 		if (qlt_read_risc_ram(qlt, addr, words_to_read) !=
4963 		    QLT_SUCCESS) {
4964 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: Error "
4965 			    "reading risc ram - EXT RAM");
4966 			goto dump_fail;
4967 		}
4968 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
4969 		buf += n; size_left -= n;
4970 		if (size_left < 100000) {
4971 			QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: run "
4972 			    "out of space - EXT RAM");
4973 			goto dump_ok;
4974 		}
4975 		addr += words_to_read;
4976 	}
4977 
4978 	/*
4979 	 * Label the end tag
4980 	 */
4981 	n = snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
4982 	buf += n; size_left -= n;
4983 
4984 	/*
4985 	 * Queue dumping
4986 	 */
4987 	n = snprintf(buf, size_left, "\nRequest Queue\n");
4988 	buf += n; size_left -= n;
4989 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
4990 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
4991 	buf += n; size_left -= n;
4992 
4993 	n = snprintf(buf, size_left, "\nPriority Queue\n");
4994 	buf += n; size_left -= n;
4995 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
4996 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
4997 	buf += n; size_left -= n;
4998 
4999 	n = snprintf(buf, size_left, "\nResponse Queue\n");
5000 	buf += n; size_left -= n;
5001 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5002 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5003 	buf += n; size_left -= n;
5004 
5005 	n = snprintf(buf, size_left, "\nATIO queue\n");
5006 	buf += n; size_left -= n;
5007 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5008 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5009 	buf += n; size_left -= n;
5010 
5011 	/*
5012 	 * Label the dump reason
5013 	 */
5014 	n = snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5015 	    qlt->qlt_port_alias, ssci->st_additional_info);
5016 	buf += n; size_left -= n;
5017 
5018 dump_ok:
5019 	QLT_LOG(qlt->qlt_port_alias, "qlt_firmware_dump: left-%d", size_left);
5020 
5021 	mutex_enter(&qlt->qlt_ioctl_lock);
5022 	qlt->qlt_ioctl_flags &=
5023 		~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5024 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5025 	mutex_exit(&qlt->qlt_ioctl_lock);
5026 	return (FCT_SUCCESS);
5027 
5028 dump_fail:
5029 	mutex_enter(&qlt->qlt_ioctl_lock);
5030 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5031 	mutex_exit(&qlt->qlt_ioctl_lock);
5032 	return (FCT_FAILURE);
5033 }
5034 
5035 static int
5036 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5037     int size_left)
5038 {
5039 	int		i;
5040 	int		n;
5041 	char		c = ' ';
5042 
5043 	for (i = 0, n = 0; i < count; i++) {
5044 		if ((i + 1) & 7) {
5045 			c = ' ';
5046 		} else {
5047 			c = '\n';
5048 		}
5049 		n += snprintf(&buf[n], (size_left - n), "%08x%c",
5050 		    REG_RD32(qlt, startaddr + (i << 2)), c);
5051 	}
5052 	return (n);
5053 }
5054 
5055 static int
5056 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5057     caddr_t buf, int size_left)
5058 {
5059 	int		i;
5060 	int		n;
5061 	char		c = ' ';
5062 	uint32_t	*ptr;
5063 
5064 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5065 	for (i = 0, n = 0; i < words; i++) {
5066 		if ((i & 7) == 0) {
5067 			n += snprintf(&buf[n], (size_left - n), "%08x: ",
5068 				addr + i);
5069 		}
5070 		if ((i + 1) & 7) {
5071 			c = ' ';
5072 		} else {
5073 			c = '\n';
5074 		}
5075 		n += snprintf(&buf[n], (size_left - n), "%08x%c", ptr[i], c);
5076 	}
5077 	return (n);
5078 }
5079 
5080 static int
5081 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5082     int size_left)
5083 {
5084 	int		i;
5085 	int		n;
5086 	char		c = ' ';
5087 	int		words;
5088 	uint16_t	*ptr;
5089 	uint16_t	w;
5090 
5091 	words = entries * 32;
5092 	ptr = (uint16_t *)qadr;
5093 	for (i = 0, n = 0; i < words; i++) {
5094 		if ((i & 7) == 0) {
5095 			n += snprintf(&buf[n], (size_left - n), "%05x: ", i);
5096 		}
5097 		if ((i + 1) & 7) {
5098 			c = ' ';
5099 		} else {
5100 			c = '\n';
5101 		}
5102 		w = QMEM_RD16(qlt, &ptr[i]);
5103 		n += snprintf(&buf[n], (size_left - n), "%04x%c", w, c);
5104 	}
5105 	return (n);
5106 }
5107 
5108 /*
5109  * Only called by the debug dump code. Interrupts are disabled and the
5110  * mailboxes, along with the mailbox DMA memory, are available.
5111  * Copies data from RISC RAM to system memory.
5112  */
5113 static fct_status_t
5114 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5115 {
5116 	uint64_t	da;
5117 	fct_status_t	ret;
5118 
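	/*
	 * Mailbox 0 carries the command code; 0xc appears to be the
	 * "dump RISC RAM extended" command on this chip family.
	 */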
5119 	REG_WR16(qlt, REG_MBOX(0), 0xc);
5120 	da = qlt->queue_mem_cookie.dmac_laddress;
5121 	da += MBOX_DMA_MEM_OFFSET;
5122 
5123 	/*
5124 	 * System destination address
5125 	 */
5126 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
5127 	da >>= 16;
5128 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
5129 	da >>= 16;
5130 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
5131 	da >>= 16;
5132 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
5133 
5134 	/*
5135 	 * Length
5136 	 */
5137 	REG_WR16(qlt, REG_MBOX(5), words & 0xffff);
5138 	REG_WR16(qlt, REG_MBOX(4), ((words >> 16) & 0xffff));
5139 
5140 	/*
5141 	 * RISC source address
5142 	 */
5143 	REG_WR16(qlt, REG_MBOX(1), addr & 0xffff);
5144 	REG_WR16(qlt, REG_MBOX(8), ((addr >> 16) & 0xffff));
5145 
5146 	ret = qlt_raw_mailbox_command(qlt);
5147 	REG_WR32(qlt, REG_HCCR, 0xA0000000);
5148 	if (ret == QLT_SUCCESS) {
5149 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5150 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5151 	} else {
5152 		QLT_LOG(qlt->qlt_port_alias, "qlt_read_risc_ram: qlt raw_mbox "
5153 		    "failed 0x%llX", ret);
5154 	}
5155 	return (ret);
5156 }
5157