1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
29  * Use is subject to license terms.
30  */
31 
32 #include <sys/conf.h>
33 #include <sys/ddi.h>
34 #include <sys/stat.h>
35 #include <sys/pci.h>
36 #include <sys/sunddi.h>
37 #include <sys/modctl.h>
38 #include <sys/file.h>
39 #include <sys/cred.h>
40 #include <sys/byteorder.h>
41 #include <sys/atomic.h>
42 #include <sys/scsi/scsi.h>
43 
44 #include <stmf_defines.h>
45 #include <fct_defines.h>
46 #include <stmf.h>
47 #include <portif.h>
48 #include <fct.h>
49 #include <qlt.h>
50 #include <qlt_dma.h>
51 #include <qlt_ioctl.h>
52 #include <qlt_open.h>
53 #include <stmf_ioctl.h>
54 
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
58     int reset_only);
59 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
60     uint32_t word_count, uint32_t risc_addr);
61 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
62 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
63 					uint32_t dma_size);
64 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
65 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
66 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
67 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
68     stmf_state_change_info_t *ssci);
69 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
70 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
71 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
72 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
73 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
74 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
77     uint8_t *rsp);
78 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
79 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
80 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
81 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
82     int reset_only);
83 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
84     uint32_t word_count, uint32_t risc_addr);
85 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
86 static void qlt_verify_fw(qlt_state_t *qlt);
87 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
88 fct_status_t qlt_port_start(caddr_t arg);
89 fct_status_t qlt_port_stop(caddr_t arg);
90 fct_status_t qlt_port_online(qlt_state_t *qlt);
91 fct_status_t qlt_port_offline(qlt_state_t *qlt);
92 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
93     fct_link_info_t *li);
94 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
95 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
96 						fct_flogi_xchg_t *fx);
97 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
98 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
99 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
100     fct_remote_port_t *rp, fct_cmd_t *login);
101 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
102     fct_remote_port_t *rp);
103 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
104 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
105 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
106     fct_cmd_t *cmd, int terminate);
107 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
108 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
109 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
110     fct_cmd_t *cmd, uint32_t flags);
111 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
113 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
114 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
115 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
116 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
117 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
118     stmf_data_buf_t *dbuf, uint32_t ioflags);
119 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
120 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
121 static void qlt_release_intr(qlt_state_t *qlt);
122 static int qlt_setup_interrupts(qlt_state_t *qlt);
123 static void qlt_destroy_mutex(qlt_state_t *qlt);
124 
125 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
126     uint32_t words);
127 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
128     caddr_t buf, uint_t size_left);
129 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
130     caddr_t buf, uint_t size_left);
131 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
132     int count, uint_t size_left);
133 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
134     cred_t *credp, int *rval);
135 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
136 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
137 
138 #if defined(__sparc)
139 static int qlt_setup_msi(qlt_state_t *qlt);
140 static int qlt_setup_msix(qlt_state_t *qlt);
141 #endif
142 
143 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
144 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
145 static int qlt_validate_trace_desc(qlt_state_t *qlt);
146 static char *qlt_find_trace_start(qlt_state_t *qlt);
147 
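/*
 * SETELSBIT: set the bit for ELS opcode "els" in the 32-byte ELS bitmap
 * that is handed to the firmware (see the "Pass ELS bitmap to fw" mailbox
 * sequence in qlt_port_online()). The byte index is ((els >> 3) & 0x1F)
 * and the bit within that byte is (els & 7).
 */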
148 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
149 	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
150 
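/*
 * Tunable: when nonzero, try MSI-X before MSI where MSI-X is supported
 * (sparc only in this driver; see qlt_setup_interrupts()).
 */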
151 int qlt_enable_msix = 0;
152 
153 /* Array to quickly calculate next free buf index to use */
154 #if 0
155 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
156 #endif
157 
158 static struct cb_ops qlt_cb_ops = {
159 	qlt_open,
160 	qlt_close,
161 	nodev,
162 	nodev,
163 	nodev,
164 	nodev,
165 	nodev,
166 	qlt_ioctl,
167 	nodev,
168 	nodev,
169 	nodev,
170 	nochpoll,
171 	ddi_prop_op,
172 	0,
173 	D_MP | D_NEW
174 };
175 
176 static struct dev_ops qlt_ops = {
177 	DEVO_REV,
178 	0,
179 	nodev,
180 	nulldev,
181 	nulldev,
182 	qlt_attach,
183 	qlt_detach,
184 	nodev,
185 	&qlt_cb_ops,
186 	NULL,
187 	ddi_power
188 };
189 
190 #ifndef	PORT_SPEED_10G
191 #define	PORT_SPEED_10G		16
192 #endif
193 
194 static struct modldrv modldrv = {
195 	&mod_driverops,
196 	QLT_NAME" "QLT_VERSION,
197 	&qlt_ops,
198 };
199 
200 static struct modlinkage modlinkage = {
201 	MODREV_1, &modldrv, NULL
202 };
203 
204 void *qlt_state = NULL;
205 kmutex_t qlt_global_lock;
206 static uint32_t qlt_loaded_counter = 0;
207 
208 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
209 			"-X Mode 1 133", "--Invalid--",
210 			"-X Mode 2 66", "-X Mode 2 100",
211 			"-X Mode 2 133", " 66" };
212 
213 /* Always use 64 bit DMA. */
214 static ddi_dma_attr_t qlt_queue_dma_attr = {
215 	DMA_ATTR_V0,		/* dma_attr_version */
216 	0,			/* low DMA address range */
217 	0xffffffffffffffff,	/* high DMA address range */
218 	0xffffffff,		/* DMA counter register */
219 	64,			/* DMA address alignment */
220 	0xff,			/* DMA burstsizes */
221 	1,			/* min effective DMA size */
222 	0xffffffff,		/* max DMA xfer size */
223 	0xffffffff,		/* segment boundary */
224 	1,			/* s/g list length */
225 	1,			/* granularity of device */
226 	0			/* DMA transfer flags */
227 };
228 
229 /* qlogic logging */
230 int enable_extended_logging = 0;
231 
232 static char qlt_provider_name[] = "qlt";
233 static struct stmf_port_provider *qlt_pp;
234 
235 int
236 _init(void)
237 {
238 	int ret;
239 
240 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
241 	if (ret == 0) {
242 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
		if (qlt_pp == NULL) {
			mutex_destroy(&qlt_global_lock);
			ddi_soft_state_fini(&qlt_state);
			return (ENOMEM);
		}
		qlt_pp->pp_portif_rev = PORTIF_REV_1;
		qlt_pp->pp_name = qlt_provider_name;
		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
248 			stmf_free(qlt_pp);
249 			mutex_destroy(&qlt_global_lock);
250 			ddi_soft_state_fini(&qlt_state);
251 			return (EIO);
252 		}
253 		ret = mod_install(&modlinkage);
254 		if (ret != 0) {
255 			(void) stmf_deregister_port_provider(qlt_pp);
256 			stmf_free(qlt_pp);
257 			mutex_destroy(&qlt_global_lock);
258 			ddi_soft_state_fini(&qlt_state);
259 		}
260 	}
261 	return (ret);
262 }
263 
264 int
265 _fini(void)
266 {
267 	int ret;
268 
269 	if (qlt_loaded_counter)
270 		return (EBUSY);
271 	ret = mod_remove(&modlinkage);
272 	if (ret == 0) {
273 		(void) stmf_deregister_port_provider(qlt_pp);
274 		stmf_free(qlt_pp);
275 		mutex_destroy(&qlt_global_lock);
276 		ddi_soft_state_fini(&qlt_state);
277 	}
278 	return (ret);
279 }
280 
281 int
282 _info(struct modinfo *modinfop)
283 {
284 	return (mod_info(&modlinkage, modinfop));
285 }
286 
287 int
288 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
289 {
290 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
291 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
292 }
293 
294 static int
295 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
296 {
297 	int		instance;
298 	qlt_state_t	*qlt;
299 	ddi_device_acc_attr_t	dev_acc_attr;
300 	uint16_t	did;
301 	uint16_t	val;
302 	uint16_t	mr;
303 	size_t		discard;
304 	uint_t		ncookies;
305 	int		max_read_size;
306 	int		max_payload_size;
307 	fct_status_t	ret;
308 
309 	/* No support for suspend resume yet */
310 	if (cmd != DDI_ATTACH)
311 		return (DDI_FAILURE);
312 	instance = ddi_get_instance(dip);
313 
314 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
315 		return (DDI_FAILURE);
316 	}
317 
318 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
319 	    NULL) {
320 		goto attach_fail_1;
321 	}
322 	qlt->instance = instance;
323 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
324 	qlt->dip = dip;
325 
326 	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
327 		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
328 		goto attach_fail_1;
329 	}
330 
331 	EL(qlt, "instance=%d\n", instance);
332 
333 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
334 		goto attach_fail_2;
335 	}
336 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
337 	if ((did != 0x2422) && (did != 0x2432) &&
338 	    (did != 0x8432) && (did != 0x2532) &&
339 	    (did != 0x8001)) {
340 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
341 		    instance, did);
342 		goto attach_fail_4;
343 	}
344 
345 	if ((did & 0xFF00) == 0x8000)
346 		qlt->qlt_81xx_chip = 1;
347 	else if ((did & 0xFF00) == 0x2500)
348 		qlt->qlt_25xx_chip = 1;
349 
350 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
351 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
352 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
353 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
354 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
355 		goto attach_fail_4;
356 	}
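	/*
	 * The 2422 is a PCI-X part; decode the bus mode/speed from
	 * CTRL_STATUS bits 8-11 and report it. Values 3 and 7 are the
	 * PCI-X 133MHz modes (see the pci_speeds table above).
	 */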
357 	if (did == 0x2422) {
358 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
359 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
360 		pci_bits >>= 8;
361 		pci_bits &= 0xf;
362 		if ((pci_bits == 3) || (pci_bits == 7)) {
363 			cmn_err(CE_NOTE,
364 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
365 			    instance, pci_speeds[pci_bits], pci_bits);
366 		} else {
367 			cmn_err(CE_WARN,
368 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
369 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
370 			    "(Invalid)", ((pci_bits == 0) ||
371 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
372 			    "32 bit slot ") : "", pci_bits);
373 		}
374 	}
375 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
376 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
377 		    (unsigned long long)ret);
378 		goto attach_fail_5;
379 	}
380 
381 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
382 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
383 		goto attach_fail_5;
384 	}
385 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
386 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
387 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
388 	    DDI_SUCCESS) {
389 		goto attach_fail_6;
390 	}
391 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
392 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
393 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
394 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
395 		goto attach_fail_7;
396 	}
397 	if (ncookies != 1)
398 		goto attach_fail_8;
399 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
400 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
401 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
402 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
403 
	/* mutexes are initialized inside qlt_setup_interrupts() */
405 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
406 		goto attach_fail_8;
407 
408 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
409 	    "qlt%d", instance);
410 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
411 	    "%s,0", qlt->qlt_minor_name);
412 
413 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
414 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
415 		goto attach_fail_9;
416 	}
417 
418 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
419 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
420 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
421 
422 	/* Setup PCI cfg space registers */
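	/*
	 * The default of 11 is an invalid size and acts as a sentinel: it
	 * means the property was not set in qlt.conf, so the hardware
	 * setting is left untouched. The same convention is used for
	 * pcie-max-payload-size below.
	 */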
423 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
424 	if (max_read_size == 11)
425 		goto over_max_read_xfer_setting;
426 	if (did == 0x2422) {
427 		if (max_read_size == 512)
428 			val = 0;
429 		else if (max_read_size == 1024)
430 			val = 1;
431 		else if (max_read_size == 2048)
432 			val = 2;
433 		else if (max_read_size == 4096)
434 			val = 3;
435 		else {
436 			cmn_err(CE_WARN, "qlt(%d) malformed "
437 			    "pci-max-read-request in qlt.conf. Valid values "
438 			    "for this HBA are 512/1024/2048/4096", instance);
439 			goto over_max_read_xfer_setting;
440 		}
441 		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
442 		mr = (uint16_t)(mr & 0xfff3);
443 		mr = (uint16_t)(mr | (val << 2));
444 		PCICFG_WR16(qlt, 0x4E, mr);
445 	} else if ((did == 0x2432) || (did == 0x8432) ||
446 	    (did == 0x2532) || (did == 0x8001)) {
447 		if (max_read_size == 128)
448 			val = 0;
449 		else if (max_read_size == 256)
450 			val = 1;
451 		else if (max_read_size == 512)
452 			val = 2;
453 		else if (max_read_size == 1024)
454 			val = 3;
455 		else if (max_read_size == 2048)
456 			val = 4;
457 		else if (max_read_size == 4096)
458 			val = 5;
459 		else {
460 			cmn_err(CE_WARN, "qlt(%d) malformed "
461 			    "pci-max-read-request in qlt.conf. Valid values "
462 			    "for this HBA are 128/256/512/1024/2048/4096",
463 			    instance);
464 			goto over_max_read_xfer_setting;
465 		}
466 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
467 		mr = (uint16_t)(mr & 0x8fff);
468 		mr = (uint16_t)(mr | (val << 12));
469 		PCICFG_WR16(qlt, 0x54, mr);
470 	} else {
		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
472 		    "pci-max-read-request for this device (%x)",
473 		    instance, did);
474 	}
475 over_max_read_xfer_setting:;
476 
477 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
478 	if (max_payload_size == 11)
479 		goto over_max_payload_setting;
480 	if ((did == 0x2432) || (did == 0x8432) ||
481 	    (did == 0x2532) || (did == 0x8001)) {
482 		if (max_payload_size == 128)
483 			val = 0;
484 		else if (max_payload_size == 256)
485 			val = 1;
486 		else if (max_payload_size == 512)
487 			val = 2;
488 		else if (max_payload_size == 1024)
489 			val = 3;
490 		else {
491 			cmn_err(CE_WARN, "qlt(%d) malformed "
492 			    "pcie-max-payload-size in qlt.conf. Valid values "
493 			    "for this HBA are 128/256/512/1024",
494 			    instance);
495 			goto over_max_payload_setting;
496 		}
497 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
498 		mr = (uint16_t)(mr & 0xff1f);
499 		mr = (uint16_t)(mr | (val << 5));
500 		PCICFG_WR16(qlt, 0x54, mr);
501 	} else {
		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
503 		    "pcie-max-payload-size for this device (%x)",
504 		    instance, did);
505 	}
506 
507 over_max_payload_setting:;
508 
509 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
510 		goto attach_fail_10;
511 
512 	ddi_report_dev(dip);
513 	return (DDI_SUCCESS);
514 
515 attach_fail_10:;
516 	mutex_destroy(&qlt->qlt_ioctl_lock);
517 	cv_destroy(&qlt->mbox_cv);
518 	cv_destroy(&qlt->rp_dereg_cv);
519 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
520 attach_fail_9:;
521 	qlt_destroy_mutex(qlt);
522 	qlt_release_intr(qlt);
523 attach_fail_8:;
524 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
525 attach_fail_7:;
526 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
527 attach_fail_6:;
528 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
529 attach_fail_5:;
530 	ddi_regs_map_free(&qlt->regs_acc_handle);
531 attach_fail_4:;
532 	pci_config_teardown(&qlt->pcicfg_acc_handle);
533 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
534 	(void) qlt_el_trace_desc_dtor(qlt);
535 attach_fail_2:;
536 attach_fail_1:;
537 	ddi_soft_state_free(qlt_state, instance);
538 	return (DDI_FAILURE);
539 }
540 
541 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
542 
543 /* ARGSUSED */
544 static int
545 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
546 {
547 	qlt_state_t *qlt;
548 
549 	int instance;
550 
551 	instance = ddi_get_instance(dip);
552 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
553 	    NULL) {
554 		return (DDI_FAILURE);
555 	}
556 
557 	if (qlt->fw_code01) {
558 		return (DDI_FAILURE);
559 	}
560 
561 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
562 	    qlt->qlt_state_not_acked) {
563 		return (DDI_FAILURE);
564 	}
565 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS)
566 		return (DDI_FAILURE);
567 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
568 	qlt_destroy_mutex(qlt);
569 	qlt_release_intr(qlt);
570 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
571 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
572 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
573 	ddi_regs_map_free(&qlt->regs_acc_handle);
574 	pci_config_teardown(&qlt->pcicfg_acc_handle);
575 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
576 	cv_destroy(&qlt->mbox_cv);
577 	cv_destroy(&qlt->rp_dereg_cv);
578 	(void) qlt_el_trace_desc_dtor(qlt);
579 	ddi_soft_state_free(qlt_state, instance);
580 
581 	return (DDI_SUCCESS);
582 }
583 
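/*
 * Enable or disable every allocated interrupt vector, using the block
 * enable/disable interface when the DDI_INTR_FLAG_BLOCK capability is
 * present.
 */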
584 static void
585 qlt_enable_intr(qlt_state_t *qlt)
586 {
587 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
588 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
589 	} else {
590 		int i;
591 		for (i = 0; i < qlt->intr_cnt; i++)
592 			(void) ddi_intr_enable(qlt->htable[i]);
593 	}
594 }
595 
596 static void
597 qlt_disable_intr(qlt_state_t *qlt)
598 {
599 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
600 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
601 	} else {
602 		int i;
603 		for (i = 0; i < qlt->intr_cnt; i++)
604 			(void) ddi_intr_disable(qlt->htable[i]);
605 	}
606 }
607 
608 static void
609 qlt_release_intr(qlt_state_t *qlt)
610 {
611 	if (qlt->htable) {
612 		int i;
613 		for (i = 0; i < qlt->intr_cnt; i++) {
614 			(void) ddi_intr_remove_handler(qlt->htable[i]);
615 			(void) ddi_intr_free(qlt->htable[i]);
616 		}
617 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
618 	}
619 	qlt->htable = NULL;
620 	qlt->intr_pri = 0;
621 	qlt->intr_cnt = 0;
622 	qlt->intr_size = 0;
623 	qlt->intr_cap = 0;
624 }
625 
626 
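/*
 * The driver mutexes are initialized at the interrupt priority returned by
 * ddi_intr_get_pri() so that they can safely be acquired from qlt_isr().
 */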
627 static void
628 qlt_init_mutex(qlt_state_t *qlt)
629 {
630 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
631 	    INT2PTR(qlt->intr_pri, void *));
632 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
633 	    INT2PTR(qlt->intr_pri, void *));
634 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
635 	    INT2PTR(qlt->intr_pri, void *));
636 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
637 	    INT2PTR(qlt->intr_pri, void *));
638 }
639 
640 static void
641 qlt_destroy_mutex(qlt_state_t *qlt)
642 {
643 	mutex_destroy(&qlt->req_lock);
644 	mutex_destroy(&qlt->preq_lock);
645 	mutex_destroy(&qlt->mbox_lock);
646 	mutex_destroy(&qlt->intr_lock);
647 }
648 
649 
650 #if defined(__sparc)
651 static int
652 qlt_setup_msix(qlt_state_t *qlt)
653 {
654 	int count, avail, actual;
655 	int ret;
656 	int itype = DDI_INTR_TYPE_MSIX;
657 	int i;
658 
659 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
660 	if (ret != DDI_SUCCESS || count == 0) {
661 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
662 		    count);
663 		return (DDI_FAILURE);
664 	}
665 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
666 	if (ret != DDI_SUCCESS || avail == 0) {
667 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
668 		    avail);
669 		return (DDI_FAILURE);
670 	}
671 	if (avail < count) {
672 		stmf_trace(qlt->qlt_port_alias,
673 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
674 	}
675 
676 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
677 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
680 	/* we need at least 2 interrupt vectors */
681 	if (ret != DDI_SUCCESS || actual < 2) {
682 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
683 		    actual);
684 		ret = DDI_FAILURE;
685 		goto release_intr;
686 	}
687 	if (actual < count) {
688 		EL(qlt, "requested: %d, received: %d\n", count, actual);
689 	}
690 
691 	qlt->intr_cnt = actual;
692 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
693 	if (ret != DDI_SUCCESS) {
694 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
695 		ret = DDI_FAILURE;
696 		goto release_intr;
697 	}
698 	qlt_init_mutex(qlt);
699 	for (i = 0; i < actual; i++) {
700 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
701 		    qlt, INT2PTR((uint_t)i, void *));
702 		if (ret != DDI_SUCCESS) {
703 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
704 			goto release_mutex;
705 		}
706 	}
707 
708 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
709 	qlt->intr_flags |= QLT_INTR_MSIX;
710 	return (DDI_SUCCESS);
711 
712 release_mutex:
713 	qlt_destroy_mutex(qlt);
714 release_intr:
715 	for (i = 0; i < actual; i++)
716 		(void) ddi_intr_free(qlt->htable[i]);
717 #if 0
718 free_mem:
719 #endif
720 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
721 	qlt->htable = NULL;
722 	qlt_release_intr(qlt);
723 	return (ret);
724 }
725 
726 
727 static int
728 qlt_setup_msi(qlt_state_t *qlt)
729 {
730 	int count, avail, actual;
731 	int itype = DDI_INTR_TYPE_MSI;
732 	int ret;
733 	int i;
734 
735 	/* get the # of interrupts */
736 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
737 	if (ret != DDI_SUCCESS || count == 0) {
738 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
739 		    count);
740 		return (DDI_FAILURE);
741 	}
742 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
743 	if (ret != DDI_SUCCESS || avail == 0) {
744 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
745 		    avail);
746 		return (DDI_FAILURE);
747 	}
748 	if (avail < count) {
749 		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
750 	}
751 	/* MSI requires only 1 interrupt. */
752 	count = 1;
753 
754 	/* allocate interrupt */
755 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
756 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
757 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
758 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
759 	if (ret != DDI_SUCCESS || actual == 0) {
760 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
761 		    actual);
762 		ret = DDI_FAILURE;
763 		goto free_mem;
764 	}
765 	if (actual < count) {
766 		EL(qlt, "requested: %d, received: %d\n", count, actual);
767 	}
768 	qlt->intr_cnt = actual;
769 
770 	/*
771 	 * Get priority for first msi, assume remaining are all the same.
772 	 */
773 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
774 	if (ret != DDI_SUCCESS) {
775 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
776 		ret = DDI_FAILURE;
777 		goto release_intr;
778 	}
779 	qlt_init_mutex(qlt);
780 
781 	/* add handler */
782 	for (i = 0; i < actual; i++) {
783 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
784 		    qlt, INT2PTR((uint_t)i, void *));
785 		if (ret != DDI_SUCCESS) {
786 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
787 			goto release_mutex;
788 		}
789 	}
790 
791 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
792 	qlt->intr_flags |= QLT_INTR_MSI;
793 	return (DDI_SUCCESS);
794 
795 release_mutex:
796 	qlt_destroy_mutex(qlt);
797 release_intr:
798 	for (i = 0; i < actual; i++)
799 		(void) ddi_intr_free(qlt->htable[i]);
800 free_mem:
801 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
802 	qlt->htable = NULL;
803 	qlt_release_intr(qlt);
804 	return (ret);
805 }
806 #endif
807 
808 static int
809 qlt_setup_fixed(qlt_state_t *qlt)
810 {
811 	int count;
812 	int actual;
813 	int ret;
814 	int itype = DDI_INTR_TYPE_FIXED;
815 
816 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	/* The fixed interrupt type provides exactly one interrupt. */
818 	if (ret != DDI_SUCCESS || count != 1) {
819 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
820 		    count);
821 		return (DDI_FAILURE);
822 	}
823 
824 	qlt->intr_size = sizeof (ddi_intr_handle_t);
825 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
828 	if (ret != DDI_SUCCESS || actual != 1) {
829 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
830 		    actual);
831 		ret = DDI_FAILURE;
832 		goto free_mem;
833 	}
834 
835 	qlt->intr_cnt = actual;
836 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
837 	if (ret != DDI_SUCCESS) {
838 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
839 		ret = DDI_FAILURE;
840 		goto release_intr;
841 	}
842 	qlt_init_mutex(qlt);
843 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
844 	if (ret != DDI_SUCCESS) {
845 		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
846 		goto release_mutex;
847 	}
848 
849 	qlt->intr_flags |= QLT_INTR_FIXED;
850 	return (DDI_SUCCESS);
851 
852 release_mutex:
853 	qlt_destroy_mutex(qlt);
854 release_intr:
855 	(void) ddi_intr_free(qlt->htable[0]);
856 free_mem:
857 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
858 	qlt->htable = NULL;
859 	qlt_release_intr(qlt);
860 	return (ret);
861 }
862 
863 
864 static int
865 qlt_setup_interrupts(qlt_state_t *qlt)
866 {
867 #if defined(__sparc)
868 	int itypes = 0;
869 #endif
870 
871 /*
 * x86 has a bug in the ddi_intr_block_enable/disable area (CR 6562198), so
 * use MSI on sparc only for now.
874  */
875 #if defined(__sparc)
876 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
877 		itypes = DDI_INTR_TYPE_FIXED;
878 	}
879 
880 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
881 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
882 			return (DDI_SUCCESS);
883 	}
884 	if (itypes & DDI_INTR_TYPE_MSI) {
885 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
886 			return (DDI_SUCCESS);
887 	}
888 #endif
889 	return (qlt_setup_fixed(qlt));
890 }
891 
892 /*
 * Fill in the HBA attributes.
894  */
895 void
896 qlt_populate_hba_fru_details(struct fct_local_port *port,
897     struct fct_port_attrs *port_attrs)
898 {
899 	caddr_t	bufp;
900 	int len;
901 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
902 
903 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
904 	    "QLogic Corp.");
905 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
906 	    "%s", QLT_NAME);
907 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
908 	    "%s", QLT_VERSION);
909 	port_attrs->serial_number[0] = '\0';
910 	port_attrs->hardware_version[0] = '\0';
911 
912 	(void) snprintf(port_attrs->firmware_version,
913 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
914 	    qlt->fw_minor, qlt->fw_subminor);
915 
916 	/* Get FCode version */
917 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
918 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
919 	    (int *)&len) == DDI_PROP_SUCCESS) {
920 		(void) snprintf(port_attrs->option_rom_version,
921 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
922 		kmem_free(bufp, (uint_t)len);
923 		bufp = NULL;
924 	} else {
925 #ifdef __sparc
926 		(void) snprintf(port_attrs->option_rom_version,
927 		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
928 #else
929 		(void) snprintf(port_attrs->option_rom_version,
930 		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
931 #endif
932 	}
933 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
934 	    qlt->nvram->subsystem_vendor_id[1] << 8;
935 
936 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
937 	    qlt->nvram->max_frame_length[0];
938 
939 	port_attrs->supported_cos = 0x10000000;
940 	port_attrs->supported_speed = PORT_SPEED_1G |
941 	    PORT_SPEED_2G | PORT_SPEED_4G;
942 	if (qlt->qlt_25xx_chip)
943 		port_attrs->supported_speed |= PORT_SPEED_8G;
944 	if (qlt->qlt_81xx_chip)
945 		port_attrs->supported_speed = PORT_SPEED_10G;
946 
947 	/* limit string length to nvr model_name length */
948 	len = (qlt->qlt_81xx_chip) ? 16 : 8;
949 	(void) snprintf(port_attrs->model,
950 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
951 	    "%s", qlt->nvram->model_name);
952 
953 	(void) snprintf(port_attrs->model_description,
954 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
955 	    FCHBA_MODEL_DESCRIPTION_LEN),
956 	    "%s", qlt->nvram->model_name);
957 }
958 
959 /* ARGSUSED */
960 fct_status_t
961 qlt_info(uint32_t cmd, fct_local_port_t *port,
962     void *arg, uint8_t *buf, uint32_t *bufsizep)
963 {
964 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
965 	mbox_cmd_t	*mcp;
966 	fct_status_t	ret = FCT_SUCCESS;
967 	uint8_t		*p;
968 	fct_port_link_status_t	*link_status;
969 
970 	switch (cmd) {
971 	case FC_TGT_PORT_RLS:
972 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
973 			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
974 			    "fct_port_link_status_t=%xh\n", *bufsizep,
975 			    sizeof (fct_port_link_status_t));
976 			ret = FCT_FAILURE;
977 			break;
978 		}
979 		/* send mailbox command to get link status */
980 		mcp = qlt_alloc_mailbox_command(qlt, 156);
981 		if (mcp == NULL) {
982 			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
983 			ret = FCT_ALLOC_FAILURE;
984 			break;
985 		}
986 
		/* 0x6d: GET LINK STATUS; mbox 8 = buffer size in words */
		mcp->to_fw[0] = 0x6d;
		mcp->to_fw[8] = 156/4;
990 		mcp->to_fw_mask |= BIT_1 | BIT_8;
991 		mcp->from_fw_mask |= BIT_1 | BIT_2;
992 
993 		ret = qlt_mailbox_command(qlt, mcp);
994 		if (ret != QLT_SUCCESS) {
995 			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
996 			qlt_free_mailbox_command(qlt, mcp);
997 			break;
998 		}
999 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1000 
1001 		p = mcp->dbuf->db_sglist[0].seg_addr;
1002 		link_status = (fct_port_link_status_t *)buf;
1003 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1004 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1005 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1006 		link_status->PrimitiveSeqProtocolErrorCount =
1007 		    LE_32(*((uint32_t *)(p + 12)));
1008 		link_status->InvalidTransmissionWordCount =
1009 		    LE_32(*((uint32_t *)(p + 16)));
1010 		link_status->InvalidCRCCount =
1011 		    LE_32(*((uint32_t *)(p + 20)));
1012 
1013 		qlt_free_mailbox_command(qlt, mcp);
1014 		break;
1015 	default:
1016 		EL(qlt, "Unknown cmd=%xh\n", cmd);
1017 		ret = FCT_FAILURE;
1018 		break;
1019 	}
1020 	return (ret);
1021 }
1022 
1023 fct_status_t
1024 qlt_port_start(caddr_t arg)
1025 {
1026 	qlt_state_t *qlt = (qlt_state_t *)arg;
1027 	fct_local_port_t *port;
1028 	fct_dbuf_store_t *fds;
1029 	fct_status_t ret;
1030 
1031 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1032 		return (FCT_FAILURE);
1033 	}
1034 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1035 	if (port == NULL) {
1036 		goto qlt_pstart_fail_1;
1037 	}
1038 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1039 	if (fds == NULL) {
1040 		goto qlt_pstart_fail_2;
1041 	}
1042 	qlt->qlt_port = port;
1043 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
1044 	fds->fds_free_data_buf = qlt_dmem_free;
1045 	fds->fds_fca_private = (void *)qlt;
1046 	/*
	 * Since we keep everything in the state struct and don't allocate any
1048 	 * port private area, just use that pointer to point to the
1049 	 * state struct.
1050 	 */
1051 	port->port_fca_private = qlt;
1052 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
1053 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1054 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1055 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1056 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1057 	port->port_default_alias = qlt->qlt_port_alias;
1058 	port->port_pp = qlt_pp;
1059 	port->port_fds = fds;
1060 	port->port_max_logins = QLT_MAX_LOGINS;
1061 	port->port_max_xchges = QLT_MAX_XCHGES;
1062 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1063 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1064 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1065 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1066 	port->port_get_link_info = qlt_get_link_info;
1067 	port->port_register_remote_port = qlt_register_remote_port;
1068 	port->port_deregister_remote_port = qlt_deregister_remote_port;
1069 	port->port_send_cmd = qlt_send_cmd;
1070 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1071 	port->port_send_cmd_response = qlt_send_cmd_response;
1072 	port->port_abort_cmd = qlt_abort_cmd;
1073 	port->port_ctl = qlt_ctl;
1074 	port->port_flogi_xchg = qlt_do_flogi;
1075 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1076 	port->port_info = qlt_info;
1077 
1078 	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1079 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1080 		goto qlt_pstart_fail_2_5;
1081 	}
1082 
1083 	return (QLT_SUCCESS);
1084 #if 0
1085 qlt_pstart_fail_3:
1086 	(void) fct_deregister_local_port(port);
1087 #endif
1088 qlt_pstart_fail_2_5:
1089 	fct_free(fds);
1090 qlt_pstart_fail_2:
1091 	fct_free(port);
1092 	qlt->qlt_port = NULL;
1093 qlt_pstart_fail_1:
1094 	qlt_dmem_fini(qlt);
1095 	return (QLT_FAILURE);
1096 }
1097 
1098 fct_status_t
1099 qlt_port_stop(caddr_t arg)
1100 {
1101 	qlt_state_t *qlt = (qlt_state_t *)arg;
1102 	fct_status_t ret;
1103 
1104 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
		EL(qlt, "fct_deregister_local_port status=%llxh\n", ret);
1106 		return (QLT_FAILURE);
1107 	}
1108 	fct_free(qlt->qlt_port->port_fds);
1109 	fct_free(qlt->qlt_port);
1110 	qlt->qlt_port = NULL;
1111 	qlt_dmem_fini(qlt);
1112 	return (QLT_SUCCESS);
1113 }
1114 
1115 /*
 * Called by the framework to initialize the HBA.
 * Can be called in the middle of I/O (why?).
 * Should ensure a sane state both before and after the initialization.
1119  */
1120 fct_status_t
1121 qlt_port_online(qlt_state_t *qlt)
1122 {
1123 	uint64_t	da;
1124 	int		instance;
1125 	fct_status_t	ret;
1126 	uint16_t	rcount;
1127 	caddr_t		icb;
1128 	mbox_cmd_t	*mcp;
1129 	uint8_t		*elsbmp;
1130 
1131 	instance = ddi_get_instance(qlt->dip);
1132 
1133 	/* XXX Make sure a sane state */
1134 
1135 	if ((ret = qlt_reset_chip_and_download_fw(qlt, 0)) != QLT_SUCCESS) {
1136 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1137 		return (ret);
1138 	}
1139 
1140 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1141 
1142 	/* Get resource count */
1143 	REG_WR16(qlt, REG_MBOX(0), 0x42);
1144 	ret = qlt_raw_mailbox_command(qlt);
1145 	rcount = REG_RD16(qlt, REG_MBOX(3));
1146 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1147 	if (ret != QLT_SUCCESS) {
1148 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1149 		return (ret);
1150 	}
1151 
1152 	/* Enable PUREX */
1153 	REG_WR16(qlt, REG_MBOX(0), 0x38);
1154 	REG_WR16(qlt, REG_MBOX(1), 0x0400);
1155 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1156 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1157 	ret = qlt_raw_mailbox_command(qlt);
1158 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1159 	if (ret != QLT_SUCCESS) {
1160 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1161 		cmn_err(CE_NOTE, "Enable PUREX failed");
1162 		return (ret);
1163 	}
1164 
1165 	/* Pass ELS bitmap to fw */
1166 	REG_WR16(qlt, REG_MBOX(0), 0x59);
1167 	REG_WR16(qlt, REG_MBOX(1), 0x0500);
1168 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1169 	bzero(elsbmp, 32);
1170 	da = qlt->queue_mem_cookie.dmac_laddress;
1171 	da += MBOX_DMA_MEM_OFFSET;
1172 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1173 	da >>= 16;
1174 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1175 	da >>= 16;
1176 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1177 	da >>= 16;
1178 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1179 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1180 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1181 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1182 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1183 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1184 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1185 	SETELSBIT(elsbmp, ELS_OP_SCN);
1186 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1187 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1188 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1189 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1190 	SETELSBIT(elsbmp, ELS_OP_RNID);
1191 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1192 	    DDI_DMA_SYNC_FORDEV);
1193 	ret = qlt_raw_mailbox_command(qlt);
1194 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1195 	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=59h status=%llxh\n", ret);
		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
		    "elsbmp0=%x elsbmp1=%x", (long long)ret, elsbmp[0],
1199 		    elsbmp[1]);
1200 		return (ret);
1201 	}
1202 
1203 	/* Init queue pointers */
1204 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1205 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1206 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1207 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1208 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1209 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1210 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1211 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1212 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1213 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1214 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1215 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1216 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1217 
1218 	/*
1219 	 * XXX support for tunables. Also should we cache icb ?
1220 	 */
1221 	if (qlt->qlt_81xx_chip) {
		/* allocate extra 64 bytes for Extended init control block */
1223 		mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1224 	} else {
1225 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1226 	}
1227 	if (mcp == NULL) {
1228 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1229 		return (STMF_ALLOC_FAILURE);
1230 	}
1231 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1232 	if (qlt->qlt_81xx_chip) {
1233 		bzero(icb, 0xC0);
1234 	} else {
1235 		bzero(icb, 0x80);
1236 	}
1237 	da = qlt->queue_mem_cookie.dmac_laddress;
1238 	DMEM_WR16(qlt, icb, 1);		/* Version */
1239 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1240 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1241 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1242 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1243 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1244 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1245 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1246 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1247 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1248 	if (!qlt->qlt_81xx_chip) {
1249 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1250 	}
1251 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1252 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1253 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1254 	DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1255 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1256 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1257 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1258 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1259 	if (qlt->qlt_81xx_chip) {
1260 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1261 
1262 		DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1263 		DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1264 		DMEM_WR32(qlt, icb+0x70,
1265 		    qlt81nvr->enode_mac[0] |
1266 		    (qlt81nvr->enode_mac[1] << 8) |
1267 		    (qlt81nvr->enode_mac[2] << 16) |
1268 		    (qlt81nvr->enode_mac[3] << 24));
1269 		DMEM_WR16(qlt, icb+0x74,
1270 		    qlt81nvr->enode_mac[4] |
1271 		    (qlt81nvr->enode_mac[5] << 8));
1272 	} else {
1273 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1274 		    BIT_2 | BIT_1 | BIT_0);
1275 		DMEM_WR32(qlt, icb+0x60, BIT_5);
1276 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1277 		    BIT_4);
1278 	}
1279 
1280 	if (qlt->qlt_81xx_chip) {
1281 		qlt_dmem_bctl_t		*bctl;
1282 		uint32_t		index;
1283 		caddr_t			src;
1284 		caddr_t			dst;
1285 		qlt_nvram_81xx_t	*qlt81nvr;
1286 
1287 		dst = icb+0x80;
1288 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1289 		src = (caddr_t)&qlt81nvr->ext_blk;
1290 		index = sizeof (qlt_ext_icb_81xx_t);
1291 
1292 		/* Use defaults for cases where we find nothing in NVR */
1293 		if (*src == 0) {
1294 			EL(qlt, "nvram eicb=null\n");
1295 			cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1296 			    instance);
1297 			qlt81nvr->ext_blk.version[0] = 1;
1298 /*
1299  * not yet, for !FIP firmware at least
1300  *
1301  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1302  */
1303 #ifdef _LITTLE_ENDIAN
1304 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1305 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1306 #else
1307 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1308 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1309 #endif
1310 		}
1311 
1312 		while (index--) {
1313 			*dst++ = *src++;
1314 		}
1315 
1316 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1317 		da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1318 
1319 		mcp->to_fw[11] = (uint16_t)(da & 0xffff);
1320 		da >>= 16;
1321 		mcp->to_fw[10] = (uint16_t)(da & 0xffff);
1322 		da >>= 16;
1323 		mcp->to_fw[13] = (uint16_t)(da & 0xffff);
1324 		da >>= 16;
1325 		mcp->to_fw[12] = (uint16_t)(da & 0xffff);
1326 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1327 		    0xffff);
1328 
1329 		/* eicb enable */
1330 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1331 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1332 		    BIT_1;
1333 	}
1334 
1335 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
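	/* 0x60: INITIALIZE FIRMWARE -- execute the ICB built above */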
1336 	mcp->to_fw[0] = 0x60;
1337 
1338 	/*
1339 	 * This is the 1st command after adapter initialize which will
1340 	 * use interrupts and regular mailbox interface.
1341 	 */
1342 	qlt->mbox_io_state = MBOX_STATE_READY;
1343 	qlt_enable_intr(qlt);
1344 	qlt->qlt_intr_enabled = 1;
1345 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1346 	/* Issue mailbox to firmware */
1347 	ret = qlt_mailbox_command(qlt, mcp);
1348 	if (ret != QLT_SUCCESS) {
1349 		EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1350 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1351 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1352 	}
1353 
1354 	mcp->to_fw_mask = BIT_0;
1355 	mcp->from_fw_mask = BIT_0 | BIT_1;
1356 	mcp->to_fw[0] = 0x28;
1357 	ret = qlt_mailbox_command(qlt, mcp);
1358 	if (ret != QLT_SUCCESS) {
1359 		EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1360 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1361 		    (long long)ret);
1362 	}
1363 
1364 	/*
1365 	 * Report FW versions for 81xx - MPI rev is useful
1366 	 */
1367 	if (qlt->qlt_81xx_chip) {
1368 		mcp->to_fw_mask = BIT_0;
1369 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1370 		    BIT_0;
1371 		mcp->to_fw[0] = 0x8;
1372 		ret = qlt_mailbox_command(qlt, mcp);
1373 		if (ret != QLT_SUCCESS) {
1374 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
1375 		} else {
1376 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1377 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1378 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1379 			    mcp->from_fw[11] & 0xff);
1380 		}
1381 	}
1382 
1383 	qlt_free_mailbox_command(qlt, mcp);
1384 	if (ret != QLT_SUCCESS)
1385 		return (ret);
1386 	return (FCT_SUCCESS);
1387 }
1388 
1389 fct_status_t
1390 qlt_port_offline(qlt_state_t *qlt)
1391 {
1392 	int		retries;
1393 
1394 	mutex_enter(&qlt->mbox_lock);
1395 
1396 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1397 		mutex_exit(&qlt->mbox_lock);
1398 		goto poff_mbox_done;
1399 	}
1400 
1401 	/* Wait to grab the mailboxes */
1402 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1403 	    retries++) {
1404 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1405 		if ((retries > 5) ||
1406 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1407 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1408 			mutex_exit(&qlt->mbox_lock);
1409 			goto poff_mbox_done;
1410 		}
1411 	}
1412 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1413 	mutex_exit(&qlt->mbox_lock);
1414 poff_mbox_done:;
1415 	qlt->intr_sneak_counter = 10;
1416 	qlt_disable_intr(qlt);
1417 	mutex_enter(&qlt->intr_lock);
1418 	qlt->qlt_intr_enabled = 0;
1419 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
1420 	drv_usecwait(20);
1421 	qlt->intr_sneak_counter = 0;
1422 	mutex_exit(&qlt->intr_lock);
1423 
1424 	return (FCT_SUCCESS);
1425 }
1426 
1427 static fct_status_t
1428 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1429 {
1430 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1431 	mbox_cmd_t *mcp;
1432 	fct_status_t fc_ret;
1433 	fct_status_t ret;
1434 	clock_t et;
1435 
1436 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1437 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1438 link_info_retry:
1439 	mcp->to_fw[0] = 0x20;
1440 	mcp->to_fw[9] = 0;
1441 	mcp->to_fw_mask |= BIT_0 | BIT_9;
1442 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1443 	/* Issue mailbox to firmware */
1444 	ret = qlt_mailbox_command(qlt, mcp);
1445 	if (ret != QLT_SUCCESS) {
1446 		EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1447 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1448 			/* Firmware is not ready */
1449 			if (ddi_get_lbolt() < et) {
1450 				delay(drv_usectohz(50000));
1451 				goto link_info_retry;
1452 			}
1453 		}
1454 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1455 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1456 		fc_ret = FCT_FAILURE;
1457 	} else {
1458 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1459 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1460 
1461 		li->port_speed = qlt->link_speed;
1462 		switch (mcp->from_fw[6]) {
1463 		case 1:
1464 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1465 			li->port_fca_flogi_done = 1;
1466 			break;
1467 		case 0:
1468 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1469 			li->port_no_fct_flogi = 1;
1470 			break;
1471 		case 3:
1472 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1473 			li->port_fca_flogi_done = 1;
1474 			break;
1475 		case 2: /*FALLTHROUGH*/
1476 		case 4:
1477 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1478 			li->port_fca_flogi_done = 1;
1479 			break;
1480 		default:
1481 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1482 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1483 		}
1484 		qlt->cur_topology = li->port_topology;
1485 		fc_ret = FCT_SUCCESS;
1486 	}
1487 	qlt_free_mailbox_command(qlt, mcp);
1488 
1489 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
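		/*
		 * FLOGI was handled by the firmware; fetch the port database
		 * (mailbox 0x64) for the fabric F_port, addressed below by
		 * handle 0x7FE, to obtain the remote port and node WWNs.
		 */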
1490 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1491 		mcp->to_fw[0] = 0x64;
1492 		mcp->to_fw[1] = 0x7FE;
1493 		mcp->to_fw[9] = 0;
1494 		mcp->to_fw[10] = 0;
1495 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1496 		fc_ret = qlt_mailbox_command(qlt, mcp);
1497 		if (fc_ret != QLT_SUCCESS) {
1498 			EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1499 			    fc_ret);
1500 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1501 			    "database for F_port failed, ret = %llx", fc_ret);
1502 		} else {
1503 			uint8_t *p;
1504 
1505 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1506 			p = mcp->dbuf->db_sglist[0].seg_addr;
1507 			bcopy(p + 0x18, li->port_rpwwn, 8);
1508 			bcopy(p + 0x20, li->port_rnwwn, 8);
1509 		}
1510 		qlt_free_mailbox_command(qlt, mcp);
1511 	}
1512 	return (fc_ret);
1513 }
1514 
1515 static int
1516 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1517 {
1518 	int		instance;
1519 	qlt_state_t	*qlt;
1520 
1521 	if (otype != OTYP_CHR) {
1522 		return (EINVAL);
1523 	}
1524 
1525 	/*
	 * Since this is for debugging only, only allow root to issue
	 * ioctls for now.
1527 	 */
1528 	if (drv_priv(credp)) {
1529 		return (EPERM);
1530 	}
1531 
1532 	instance = (int)getminor(*devp);
1533 	qlt = ddi_get_soft_state(qlt_state, instance);
1534 	if (qlt == NULL) {
1535 		return (ENXIO);
1536 	}
1537 
1538 	mutex_enter(&qlt->qlt_ioctl_lock);
1539 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1540 		/*
1541 		 * It is already open for exclusive access.
1542 		 * So shut the door on this caller.
1543 		 */
1544 		mutex_exit(&qlt->qlt_ioctl_lock);
1545 		return (EBUSY);
1546 	}
1547 
1548 	if (flag & FEXCL) {
1549 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1550 			/*
1551 			 * Exclusive operation not possible
1552 			 * as it is already opened
1553 			 */
1554 			mutex_exit(&qlt->qlt_ioctl_lock);
1555 			return (EBUSY);
1556 		}
1557 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1558 	}
1559 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1560 	mutex_exit(&qlt->qlt_ioctl_lock);
1561 
1562 	return (0);
1563 }
1564 
1565 /* ARGSUSED */
1566 static int
1567 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1568 {
1569 	int		instance;
1570 	qlt_state_t	*qlt;
1571 
1572 	if (otype != OTYP_CHR) {
1573 		return (EINVAL);
1574 	}
1575 
1576 	instance = (int)getminor(dev);
1577 	qlt = ddi_get_soft_state(qlt_state, instance);
1578 	if (qlt == NULL) {
1579 		return (ENXIO);
1580 	}
1581 
1582 	mutex_enter(&qlt->qlt_ioctl_lock);
1583 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1584 		mutex_exit(&qlt->qlt_ioctl_lock);
1585 		return (ENODEV);
1586 	}
1587 
1588 	/*
	 * There is a hole here: there could be several concurrent shared
	 * open sessions, but we never check for this case.
	 * It will not hurt too much, so disregard it for now.
1592 	 */
1593 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1594 	mutex_exit(&qlt->qlt_ioctl_lock);
1595 
1596 	return (0);
1597 }
1598 
1599 /*
1600  * All of these ioctls are unstable interfaces which are meant to be used
1601  * in a controlled lab env. No formal testing will be (or needs to be) done
 * for these ioctls. In particular, note that running with an additional
1603  * uploaded firmware is not supported and is provided here for test
1604  * purposes only.
1605  */
1606 /* ARGSUSED */
1607 static int
1608 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1609     cred_t *credp, int *rval)
1610 {
1611 	qlt_state_t	*qlt;
1612 	int		ret = 0;
1613 #ifdef _LITTLE_ENDIAN
1614 	int		i;
1615 #endif
1616 	stmf_iocdata_t	*iocd;
1617 	void		*ibuf = NULL;
1618 	void		*obuf = NULL;
1619 	uint32_t	*intp;
1620 	qlt_fw_info_t	*fwi;
1621 	mbox_cmd_t	*mcp;
1622 	fct_status_t	st;
1623 	char		info[80];
1624 	fct_status_t	ret2;
1625 
1626 	if (drv_priv(credp) != 0)
1627 		return (EPERM);
1628 
1629 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1630 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1631 	if (ret)
1632 		return (ret);
1633 	iocd->stmf_error = 0;
1634 
1635 	switch (cmd) {
1636 	case QLT_IOCTL_FETCH_FWDUMP:
1637 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1638 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1639 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1640 			ret = EINVAL;
1641 			break;
1642 		}
1643 		mutex_enter(&qlt->qlt_ioctl_lock);
1644 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1645 			mutex_exit(&qlt->qlt_ioctl_lock);
1646 			ret = ENODATA;
1647 			EL(qlt, "no fwdump\n");
1648 			iocd->stmf_error = QLTIO_NO_DUMP;
1649 			break;
1650 		}
1651 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1652 			mutex_exit(&qlt->qlt_ioctl_lock);
1653 			ret = EBUSY;
1654 			EL(qlt, "fwdump inprogress\n");
1655 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1656 			break;
1657 		}
1658 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1659 			mutex_exit(&qlt->qlt_ioctl_lock);
1660 			ret = EEXIST;
1661 			EL(qlt, "fwdump already fetched\n");
1662 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1663 			break;
1664 		}
1665 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1666 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1667 		mutex_exit(&qlt->qlt_ioctl_lock);
1668 
1669 		break;
1670 
1671 	case QLT_IOCTL_TRIGGER_FWDUMP:
1672 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1673 			ret = EACCES;
1674 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1675 			break;
1676 		}
1677 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1678 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1679 		info[79] = 0;
1680 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1681 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1682 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1683 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1684 			    "%llxh\n", ret2);
1685 			ret = EIO;
1686 		}
1687 		break;
1688 	case QLT_IOCTL_UPLOAD_FW:
1689 		if ((iocd->stmf_ibuf_size < 1024) ||
1690 		    (iocd->stmf_ibuf_size & 3)) {
1691 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1692 			    iocd->stmf_ibuf_size);
1693 			ret = EINVAL;
1694 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1695 			break;
1696 		}
1697 		intp = (uint32_t *)ibuf;
1698 #ifdef _LITTLE_ENDIAN
1699 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1700 			intp[i] = BSWAP_32(intp[i]);
1701 		}
1702 #endif
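		/*
		 * Image layout as consumed below: word 2 of a segment is its
		 * load address and word 3 its length in words; a second
		 * segment immediately follows the first. Reject images whose
		 * segment lengths do not add up to the buffer size.
		 */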
1703 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1704 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1705 		    iocd->stmf_ibuf_size)) {
1706 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1707 			    iocd->stmf_ibuf_size);
1708 			ret = EINVAL;
1709 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1710 			break;
1711 		}
1712 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1713 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1714 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1715 		    ((intp[8] & 3) == 0))) {
1716 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1717 			ret = EACCES;
1718 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1719 			break;
1720 		}
1721 
		/* Everything looks OK, let's copy this firmware */
1723 		if (qlt->fw_code01) {
1724 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1725 			    qlt->fw_length02) << 2);
1726 			qlt->fw_code01 = NULL;
1727 		} else {
1728 			atomic_add_32(&qlt_loaded_counter, 1);
1729 		}
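		/*
		 * For each segment, word 2 of its header is the RISC load
		 * address and word 3 is its length in words; segment 2
		 * immediately follows segment 1 in the uploaded image.
		 */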
1730 		qlt->fw_length01 = intp[3];
1731 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1732 		    KM_SLEEP);
1733 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1734 		qlt->fw_addr01 = intp[2];
1735 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1736 		qlt->fw_addr02 = qlt->fw_code02[2];
1737 		qlt->fw_length02 = qlt->fw_code02[3];
1738 		break;
1739 
1740 	case QLT_IOCTL_CLEAR_FW:
1741 		if (qlt->fw_code01) {
1742 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1743 			    qlt->fw_length02) << 2);
1744 			qlt->fw_code01 = NULL;
1745 			atomic_add_32(&qlt_loaded_counter, -1);
1746 		}
1747 		break;
1748 
1749 	case QLT_IOCTL_GET_FW_INFO:
1750 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1751 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1752 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1753 			ret = EINVAL;
1754 			break;
1755 		}
1756 		fwi = (qlt_fw_info_t *)obuf;
1757 		if (qlt->qlt_stay_offline) {
1758 			fwi->fwi_stay_offline = 1;
1759 		}
1760 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1761 			fwi->fwi_port_active = 1;
1762 		}
1763 		fwi->fwi_active_major = qlt->fw_major;
1764 		fwi->fwi_active_minor = qlt->fw_minor;
1765 		fwi->fwi_active_subminor = qlt->fw_subminor;
1766 		fwi->fwi_active_attr = qlt->fw_attr;
1767 		if (qlt->fw_code01) {
1768 			fwi->fwi_fw_uploaded = 1;
1769 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1770 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1771 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1772 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1773 		}
1774 		if (qlt->qlt_81xx_chip) {
1775 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1776 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1777 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1778 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1779 		} else if (qlt->qlt_25xx_chip) {
1780 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1781 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1782 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1783 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1784 		} else {
1785 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1786 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1787 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1788 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1789 		}
1790 		break;
1791 
1792 	case QLT_IOCTL_STAY_OFFLINE:
1793 		if (!iocd->stmf_ibuf_size) {
1794 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1795 			    iocd->stmf_ibuf_size);
1796 			ret = EINVAL;
1797 			break;
1798 		}
1799 		if (*((char *)ibuf)) {
1800 			qlt->qlt_stay_offline = 1;
1801 		} else {
1802 			qlt->qlt_stay_offline = 0;
1803 		}
1804 		break;
1805 
1806 	case QLT_IOCTL_MBOX:
1807 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1808 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1809 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1810 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1811 			ret = EINVAL;
1812 			break;
1813 		}
1814 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1815 		if (mcp == NULL) {
1816 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1817 			ret = ENOMEM;
1818 			break;
1819 		}
1820 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1821 		st = qlt_mailbox_command(qlt, mcp);
1822 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1823 		qlt_free_mailbox_command(qlt, mcp);
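		/*
		 * A QLT_MBOX_FAILED status carries the firmware's mailbox 0
		 * completion code in its low 16 bits. The raw mailbox
		 * registers have already been copied back to the caller, so
		 * such failures are not reported as an ioctl error here.
		 */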
1824 		if (st != QLT_SUCCESS) {
1825 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1826 				st = QLT_SUCCESS;
1827 		}
1828 		if (st != QLT_SUCCESS) {
1829 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1830 			ret = EIO;
1831 			switch (st) {
1832 			case QLT_MBOX_NOT_INITIALIZED:
1833 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1834 				break;
1835 			case QLT_MBOX_BUSY:
1836 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1837 				break;
1838 			case QLT_MBOX_TIMEOUT:
1839 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1840 				break;
1841 			case QLT_MBOX_ABORTED:
1842 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1843 				break;
1844 			}
1845 		}
1846 		break;
1847 
1848 	case QLT_IOCTL_ELOG:
1849 		qlt_dump_el_trace_buffer(qlt);
1850 		break;
1851 
1852 	default:
1853 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
1854 		ret = ENOTTY;
1855 	}
1856 
1857 	if (ret == 0) {
1858 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1859 	} else if (iocd->stmf_error) {
1860 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1861 	}
1862 	if (obuf) {
1863 		kmem_free(obuf, iocd->stmf_obuf_size);
1864 		obuf = NULL;
1865 	}
1866 	if (ibuf) {
1867 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1868 		ibuf = NULL;
1869 	}
1870 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1871 	return (ret);
1872 }
1873 
1874 static void
1875 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1876 {
1877 	stmf_change_status_t		st;
1878 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1879 	qlt_state_t			*qlt;
1880 	fct_status_t			ret;
1881 
1882 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1883 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1884 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1885 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1886 
1887 	qlt = (qlt_state_t *)port->port_fca_private;
1888 	st.st_completion_status = FCT_SUCCESS;
1889 	st.st_additional_info = NULL;
1890 
1891 	switch (cmd) {
1892 	case FCT_CMD_PORT_ONLINE:
1893 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1894 			st.st_completion_status = STMF_ALREADY;
1895 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1896 			st.st_completion_status = FCT_FAILURE;
1897 		if (st.st_completion_status == FCT_SUCCESS) {
1898 			qlt->qlt_state = FCT_STATE_ONLINING;
1899 			qlt->qlt_state_not_acked = 1;
1900 			st.st_completion_status = qlt_port_online(qlt);
1901 			if (st.st_completion_status != STMF_SUCCESS) {
1902 				EL(qlt, "PORT_ONLINE status=%xh\n",
1903 				    st.st_completion_status);
1904 				qlt->qlt_state = FCT_STATE_OFFLINE;
1905 				qlt->qlt_state_not_acked = 0;
1906 			} else {
1907 				qlt->qlt_state = FCT_STATE_ONLINE;
1908 			}
1909 		}
1910 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
1911 		qlt->qlt_change_state_flags = 0;
1912 		break;
1913 
1914 	case FCT_CMD_PORT_OFFLINE:
1915 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
1916 			st.st_completion_status = STMF_ALREADY;
1917 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
1918 			st.st_completion_status = FCT_FAILURE;
1919 		}
1920 		if (st.st_completion_status == FCT_SUCCESS) {
1921 			qlt->qlt_state = FCT_STATE_OFFLINING;
1922 			qlt->qlt_state_not_acked = 1;
1923 
1924 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
1925 				(void) qlt_firmware_dump(port, ssci);
1926 			}
1927 			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
1928 			st.st_completion_status = qlt_port_offline(qlt);
1929 			if (st.st_completion_status != STMF_SUCCESS) {
1930 				EL(qlt, "PORT_OFFLINE status=%xh\n",
1931 				    st.st_completion_status);
1932 				qlt->qlt_state = FCT_STATE_ONLINE;
1933 				qlt->qlt_state_not_acked = 0;
1934 			} else {
1935 				qlt->qlt_state = FCT_STATE_OFFLINE;
1936 			}
1937 		}
1938 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
1939 		break;
1940 
1941 	case FCT_ACK_PORT_ONLINE_COMPLETE:
1942 		qlt->qlt_state_not_acked = 0;
1943 		break;
1944 
1945 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
1946 		qlt->qlt_state_not_acked = 0;
1947 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
1948 		    (qlt->qlt_stay_offline == 0)) {
1949 			if ((ret = fct_port_initialize(port,
1950 			    qlt->qlt_change_state_flags,
1951 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
			    "with RFLAG_RESET")) != FCT_SUCCESS) {
1953 				EL(qlt, "fct_port_initialize status=%llxh\n",
1954 				    ret);
1955 				cmn_err(CE_WARN, "qlt_ctl: "
1956 				    "fct_port_initialize failed, please use "
				    "stmfstate to start the port-%s manually",
1958 				    qlt->qlt_port_alias);
1959 			}
1960 		}
1961 		break;
1962 	}
1963 }
1964 
1965 /* ARGSUSED */
1966 static fct_status_t
1967 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
1968 {
1969 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
1970 
	EL(qlt, "FLOGI requested, not supported\n");
1972 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
1973 	return (FCT_FAILURE);
1974 }
1975 
1976 /*
 * Return a pointer to n entries in the request queue. Assumes that the
 * request queue lock is held. Does a very short busy wait if fewer
 * entries than requested (or none) are available. Returns NULL if it
 * still cannot fulfill the request.
1981  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
1982  */
1983 caddr_t
1984 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
1985 {
1986 	int try = 0;
1987 
1988 	while (qlt->req_available < n) {
1989 		uint32_t val1, val2, val3;
1990 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1991 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1992 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
1993 		if ((val1 != val2) || (val2 != val3))
1994 			continue;
1995 
1996 		qlt->req_ndx_from_fw = val1;
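		/*
		 * Free space is the ring size minus one (so a full ring can
		 * be told apart from an empty one) minus the entries still
		 * outstanding between our in-pointer and the firmware's
		 * out-pointer.
		 */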
1997 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
1998 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
1999 		    (REQUEST_QUEUE_ENTRIES - 1));
2000 		if (qlt->req_available < n) {
2001 			if (try < 2) {
2002 				drv_usecwait(100);
2003 				try++;
2004 				continue;
2005 			} else {
2006 				stmf_trace(qlt->qlt_port_alias,
2007 				    "Req Q is full");
2008 				return (NULL);
2009 			}
2010 		}
2011 		break;
2012 	}
	/* We don't change anything until the entries are submitted */
2014 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2015 }
2016 
2017 /*
 * Updates the request queue in-pointer to the firmware. Assumes that the
 * request queue lock is held.
2019  */
2020 void
2021 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2022 {
2023 	ASSERT(n >= 1);
2024 	qlt->req_ndx_to_fw += n;
2025 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2026 	qlt->req_available -= n;
2027 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2028 }
2029 
2030 
2031 /*
 * Return a pointer to n entries in the priority request queue. Assumes
 * that the priority request queue lock is held. Does a very short busy
 * wait if fewer entries than requested (or none) are available. Returns
 * NULL if it still cannot fulfill the request.
2036  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2037  */
2038 caddr_t
2039 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2040 {
2041 	int try = 0;
2042 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2043 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2044 	    (PRIORITY_QUEUE_ENTRIES - 1));
2045 
2046 	while (req_available < n) {
2047 		uint32_t val1, val2, val3;
2048 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2049 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2050 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2051 		if ((val1 != val2) || (val2 != val3))
2052 			continue;
2053 
2054 		qlt->preq_ndx_from_fw = val1;
2055 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2056 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2057 		    (PRIORITY_QUEUE_ENTRIES - 1));
2058 		if (req_available < n) {
2059 			if (try < 2) {
2060 				drv_usecwait(100);
2061 				try++;
2062 				continue;
2063 			} else {
2064 				return (NULL);
2065 			}
2066 		}
2067 		break;
2068 	}
	/* We don't change anything until the entries are submitted */
2070 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2071 }
2072 
2073 /*
 * Updates the priority request queue in-pointer to the firmware. Assumes
 * that the priority request queue lock is held.
2075  */
2076 void
2077 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2078 {
2079 	ASSERT(n >= 1);
2080 	qlt->preq_ndx_to_fw += n;
2081 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2082 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2083 }
2084 
2085 /*
 * - Should not be called from interrupt context.
 * - A very hardware-specific function. Does not touch driver state.
 * - Assumes that interrupts are disabled or not present.
 * - Expects that the caller makes sure that all activity has stopped
 *   and that it is OK to go ahead and reset the chip. The caller also
 *   takes care of post-reset damage control.
 * - Called by initialize_adapter() and dump_fw() (for reset only).
 * - During attach() nothing much is happening, and during
 *   initialize_adapter() the caller does all the housekeeping so that
 *   this function can execute in peace.
 * - Returns QLT_SUCCESS on success.
2097  */
2098 static fct_status_t
2099 qlt_reset_chip_and_download_fw(qlt_state_t *qlt, int reset_only)
2100 {
2101 	int cntr;
2102 	uint32_t start_addr;
2103 	fct_status_t ret;
2104 
2105 	EL(qlt, "initiated, flags=%xh\n", reset_only);
2106 
2107 	/* XXX: Switch off LEDs */
2108 
2109 	/* Disable Interrupts */
2110 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2111 	(void) REG_RD32(qlt, REG_INTR_CTRL);
2112 	/* Stop DMA */
2113 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2114 
2115 	/* Wait for DMA to be stopped */
2116 	cntr = 0;
2117 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
		delay(drv_usectohz(10000)); /* 10ms; typically one clock tick */
2119 		cntr++;
2120 		/* 3 sec should be more than enough */
2121 		if (cntr == 300)
2122 			return (QLT_DMA_STUCK);
2123 	}
2124 
2125 	/* Reset the Chip */
2126 	REG_WR32(qlt, REG_CTRL_STATUS,
2127 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2128 
2129 	qlt->qlt_link_up = 0;
2130 
2131 	drv_usecwait(100);
2132 
2133 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2134 	cntr = 0;
2135 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2136 		delay(drv_usectohz(10000));
2137 		cntr++;
2138 		/* 3 sec should be more than enough */
2139 		if (cntr == 300)
2140 			return (QLT_ROM_STUCK);
2141 	}
2142 	/* Disable Interrupts (Probably not needed) */
2143 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2144 	if (reset_only)
2145 		return (QLT_SUCCESS);
2146 
2147 	/* Load the two segments */
2148 	if (qlt->fw_code01 != NULL) {
2149 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2150 		    qlt->fw_addr01);
2151 		if (ret == QLT_SUCCESS) {
2152 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2153 			    qlt->fw_length02, qlt->fw_addr02);
2154 		}
2155 		start_addr = qlt->fw_addr01;
2156 	} else if (qlt->qlt_81xx_chip) {
2157 		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2158 		    fw8100_addr01);
2159 		if (ret == QLT_SUCCESS) {
2160 			ret = qlt_load_risc_ram(qlt, fw8100_code02,
2161 			    fw8100_length02, fw8100_addr02);
2162 		}
2163 		start_addr = fw8100_addr01;
2164 	} else if (qlt->qlt_25xx_chip) {
2165 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2166 		    fw2500_addr01);
2167 		if (ret == QLT_SUCCESS) {
2168 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
2169 			    fw2500_length02, fw2500_addr02);
2170 		}
2171 		start_addr = fw2500_addr01;
2172 	} else {
2173 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2174 		    fw2400_addr01);
2175 		if (ret == QLT_SUCCESS) {
2176 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
2177 			    fw2400_length02, fw2400_addr02);
2178 		}
2179 		start_addr = fw2400_addr01;
2180 	}
2181 	if (ret != QLT_SUCCESS) {
2182 		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2183 		return (ret);
2184 	}
2185 
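	/*
	 * The firmware entry address is handed to the RISC split across
	 * two mailboxes: the upper 16 bits in mailbox 1 and the lower
	 * 16 bits in mailbox 2.
	 */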
2186 	/* Verify Checksum */
2187 	REG_WR16(qlt, REG_MBOX(0), 7);
2188 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
2189 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
2190 	ret = qlt_raw_mailbox_command(qlt);
2191 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2192 	if (ret != QLT_SUCCESS) {
2193 		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2194 		return (ret);
2195 	}
2196 
2197 	/* Execute firmware */
2198 	REG_WR16(qlt, REG_MBOX(0), 2);
2199 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
2200 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
2201 	REG_WR16(qlt, REG_MBOX(3), 0);
2202 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
2203 	ret = qlt_raw_mailbox_command(qlt);
2204 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2205 	if (ret != QLT_SUCCESS) {
2206 		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2207 		return (ret);
2208 	}
2209 
2210 	/* Get revisions (About Firmware) */
2211 	REG_WR16(qlt, REG_MBOX(0), 8);
2212 	ret = qlt_raw_mailbox_command(qlt);
2213 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2214 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2215 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2216 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2217 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2218 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2219 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2220 	if (ret != QLT_SUCCESS) {
2221 		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2222 		return (ret);
2223 	}
2224 
2225 	return (QLT_SUCCESS);
2226 }
2227 
2228 /*
2229  * Used only from qlt_reset_chip_and_download_fw().
2230  */
2231 static fct_status_t
2232 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2233 				uint32_t word_count, uint32_t risc_addr)
2234 {
2235 	uint32_t words_sent = 0;
2236 	uint32_t words_being_sent;
2237 	uint32_t *cur_host_addr;
2238 	uint32_t cur_risc_addr;
2239 	uint64_t da;
2240 	fct_status_t ret;
2241 
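	/*
	 * Download the image in chunks that fit in the pre-allocated queue
	 * DMA buffer: copy a chunk into that buffer, sync it for the
	 * device, then issue the Load RAM Extended (0x0B) mailbox command,
	 * passing the 64-bit DMA address spread across mailboxes 2, 3, 6
	 * and 7 and the word count in mailboxes 4 and 5.
	 */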
2242 	while (words_sent < word_count) {
2243 		cur_host_addr = &(host_addr[words_sent]);
2244 		cur_risc_addr = risc_addr + (words_sent << 2);
2245 		words_being_sent = min(word_count - words_sent,
2246 		    TOTAL_DMA_MEM_SIZE >> 2);
2247 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2248 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2249 		    DDI_DEV_AUTOINCR);
2250 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2251 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2252 		da = qlt->queue_mem_cookie.dmac_laddress;
2253 		REG_WR16(qlt, REG_MBOX(0), 0x0B);
2254 		REG_WR16(qlt, REG_MBOX(1), risc_addr & 0xffff);
2255 		REG_WR16(qlt, REG_MBOX(8), ((cur_risc_addr >> 16) & 0xffff));
2256 		REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
2257 		da >>= 16;
2258 		REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
2259 		da >>= 16;
2260 		REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
2261 		da >>= 16;
2262 		REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
2263 		REG_WR16(qlt, REG_MBOX(5), words_being_sent & 0xffff);
2264 		REG_WR16(qlt, REG_MBOX(4), (words_being_sent >> 16) & 0xffff);
2265 		ret = qlt_raw_mailbox_command(qlt);
2266 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2267 		if (ret != QLT_SUCCESS) {
2268 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2269 			    ret);
2270 			return (ret);
2271 		}
2272 		words_sent += words_being_sent;
2273 	}
2274 	return (QLT_SUCCESS);
2275 }
2276 
2277 /*
2278  * Not used during normal operation. Only during driver init.
2279  * Assumes that interrupts are disabled and mailboxes are loaded.
 * Just triggers the mailbox command and waits for the completion.
 * Also expects that there is nothing else going on and that we will
 * only get back a mailbox completion from the firmware.
2283  * ---DOES NOT CLEAR INTERRUPT---
2284  * Used only from the code path originating from
2285  * qlt_reset_chip_and_download_fw()
2286  */
2287 static fct_status_t
2288 qlt_raw_mailbox_command(qlt_state_t *qlt)
2289 {
2290 	int cntr = 0;
2291 	uint32_t status;
2292 
2293 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2294 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_INTR_REQUEST) == 0) {
2295 		cntr++;
2296 		if (cntr == 100)
2297 			return (QLT_MAILBOX_STUCK);
2298 		delay(drv_usectohz(10000));
2299 	}
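	/*
	 * A RISC status of 0x1, 0x2, 0x10 or 0x11 indicates a mailbox
	 * completion; the firmware reports success by placing 0x4000 in
	 * mailbox 0.
	 */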
2300 	status = (REG_RD32(qlt, REG_RISC_STATUS) & 0xff);
2301 	if ((status == 1) || (status == 2) ||
2302 	    (status == 0x10) || (status == 0x11)) {
2303 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2304 		if (mbox0 == 0x4000)
2305 			return (QLT_SUCCESS);
2306 		else
2307 			return (QLT_MBOX_FAILED | mbox0);
2308 	}
2309 	/* This is unexpected, dump a message */
	cmn_err(CE_WARN, "qlt(%d): Unexpected intr status %llx",
2311 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2312 	return (QLT_UNEXPECTED_RESPONSE);
2313 }
2314 
2315 static mbox_cmd_t *
2316 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2317 {
2318 	mbox_cmd_t *mcp;
2319 
2320 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2321 	if (dma_size) {
2322 		qlt_dmem_bctl_t *bctl;
2323 		uint64_t da;
2324 
2325 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2326 		if (mcp->dbuf == NULL) {
2327 			kmem_free(mcp, sizeof (*mcp));
2328 			return (NULL);
2329 		}
2330 		mcp->dbuf->db_data_size = dma_size;
2331 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2332 
2333 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2334 		da = bctl->bctl_dev_addr;
2335 		/* This is the most common initialization of dma ptrs */
2336 		mcp->to_fw[3] = (uint16_t)(da & 0xffff);
2337 		da >>= 16;
2338 		mcp->to_fw[2] = (uint16_t)(da & 0xffff);
2339 		da >>= 16;
2340 		mcp->to_fw[7] = (uint16_t)(da & 0xffff);
2341 		da >>= 16;
2342 		mcp->to_fw[6] = (uint16_t)(da & 0xffff);
2343 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2344 	}
2345 	mcp->to_fw_mask |= BIT_0;
2346 	mcp->from_fw_mask |= BIT_0;
2347 	return (mcp);
2348 }
2349 
2350 void
2351 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2352 {
2353 	if (mcp->dbuf)
2354 		qlt_i_dmem_free(qlt, mcp->dbuf);
2355 	kmem_free(mcp, sizeof (*mcp));
2356 }
2357 
2358 /*
2359  * This can sleep. Should never be called from interrupt context.
2360  */
2361 static fct_status_t
2362 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2363 {
2364 	int	retries;
2365 	int	i;
2366 	char	info[80];
2367 
2368 	if (curthread->t_flag & T_INTR_THREAD) {
2369 		ASSERT(0);
2370 		return (QLT_MBOX_FAILED);
2371 	}
2372 
2373 	mutex_enter(&qlt->mbox_lock);
2374 	/* See if mailboxes are still uninitialized */
2375 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2376 		mutex_exit(&qlt->mbox_lock);
2377 		return (QLT_MBOX_NOT_INITIALIZED);
2378 	}
2379 
2380 	/* Wait to grab the mailboxes */
2381 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2382 	    retries++) {
2383 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2384 		if ((retries > 5) ||
2385 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2386 			mutex_exit(&qlt->mbox_lock);
2387 			return (QLT_MBOX_BUSY);
2388 		}
2389 	}
2390 	/* Make sure we always ask for mailbox 0 */
2391 	mcp->from_fw_mask |= BIT_0;
2392 
2393 	/* Load mailboxes, set state and generate RISC interrupt */
2394 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2395 	qlt->mcp = mcp;
2396 	for (i = 0; i < MAX_MBOXES; i++) {
2397 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2398 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2399 	}
2400 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2401 
2402 qlt_mbox_wait_loop:;
2403 	/* Wait for mailbox command completion */
2404 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2405 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2406 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2407 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2408 		info[79] = 0;
2409 		qlt->mcp = NULL;
2410 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2411 		mutex_exit(&qlt->mbox_lock);
2412 
2413 		/*
2414 		 * XXX Throw HBA fatal error event
2415 		 */
2416 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2417 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2418 		return (QLT_MBOX_TIMEOUT);
2419 	}
2420 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2421 		goto qlt_mbox_wait_loop;
2422 
2423 	qlt->mcp = NULL;
2424 
	/* Make sure it's a completion */
2426 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2427 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2428 		mutex_exit(&qlt->mbox_lock);
2429 		return (QLT_MBOX_ABORTED);
2430 	}
2431 
	/* Mailbox command completed. Clear state, return based on mbox 0 */
	/* Mailboxes are already loaded by the interrupt routine */
2434 	qlt->mbox_io_state = MBOX_STATE_READY;
2435 	mutex_exit(&qlt->mbox_lock);
2436 	if (mcp->from_fw[0] != 0x4000)
2437 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2438 
2439 	return (QLT_SUCCESS);
2440 }
2441 
2442 /*
2443  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2444  */
2445 /* ARGSUSED */
2446 static uint_t
2447 qlt_isr(caddr_t arg, caddr_t arg2)
2448 {
2449 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2450 	int		instance;
2451 	uint32_t	risc_status, intr_type;
2452 	int		i;
2453 	int		intr_loop_count;
2454 	char		info[80];
2455 
2456 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2457 	if (!mutex_tryenter(&qlt->intr_lock)) {
2458 		/*
		 * Normally we will always get this lock. If tryenter
		 * fails, it means that the driver is trying to do some
		 * cleanup and is masking the interrupt, but an interrupt
		 * has sneaked in between. See if our device has generated
		 * this interrupt. If so, wait a bit and return claimed.
		 * If not, return claimed if this is the first instance of
		 * an interrupt after the driver has grabbed the lock.
2466 		 */
2467 		if (risc_status & BIT_15) {
2468 			drv_usecwait(10);
2469 			return (DDI_INTR_CLAIMED);
2470 		} else if (qlt->intr_sneak_counter) {
2471 			qlt->intr_sneak_counter--;
2472 			return (DDI_INTR_CLAIMED);
2473 		} else {
2474 			return (DDI_INTR_UNCLAIMED);
2475 		}
2476 	}
2477 	if (((risc_status & BIT_15) == 0) ||
2478 	    (qlt->qlt_intr_enabled == 0)) {
2479 		/*
		 * It might be pure coincidence that we are operating in
		 * interrupt-disabled mode and another device sharing the
		 * interrupt line has generated an interrupt while an
		 * interrupt from our device might be pending. Just ignore
		 * it and let the code handling the interrupt-disabled mode
		 * deal with it.
2486 		 */
2487 		mutex_exit(&qlt->intr_lock);
2488 		return (DDI_INTR_UNCLAIMED);
2489 	}
2490 
2491 	/*
	 * XXX take care of the MSI case: disable interrupts.
	 * It is going to be complicated because of the iteration limit;
	 * the HBA will have posted an interrupt which did not go out on
	 * PCI, but which we did not service either because we hit the
	 * maximum number of iterations. Maybe offload the interrupt
	 * handling to a different thread.
2497 	 */
2498 	instance = ddi_get_instance(qlt->dip);
2499 	intr_loop_count = 0;
2500 
2501 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2502 
2503 intr_again:;
2504 
2505 	/* check for risc pause */
2506 	if (risc_status & BIT_8) {
2507 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
2508 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2509 		    instance, risc_status);
2510 		(void) snprintf(info, 80, "Risc Pause %08x", risc_status);
2511 		info[79] = 0;
2512 		(void) fct_port_shutdown(qlt->qlt_port,
2513 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2514 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2515 	}
2516 
2517 	/* First check for high performance path */
2518 	intr_type = risc_status & 0xff;
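	/*
	 * Interrupt types handled below: 0x1C (ATIO queue update), 0x13
	 * (response queue update), 0x1D (both queues), 0x12 (asynchronous
	 * event) and 0x10/0x11 (mailbox completion); anything else is
	 * logged as unknown.
	 */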
2519 	if (intr_type == 0x1C) {
2520 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2521 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2522 		qlt_handle_atio_queue_update(qlt);
2523 	} else if (intr_type == 0x13) {
2524 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2525 		qlt->resp_ndx_from_fw = risc_status >> 16;
2526 		qlt_handle_resp_queue_update(qlt);
2527 		/* XXX what about priority queue */
2528 	} else if (intr_type == 0x1D) {
2529 		qlt->atio_ndx_from_fw = (uint16_t)
2530 		    REG_RD32(qlt, REG_ATIO_IN_PTR);
2531 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2532 		qlt->resp_ndx_from_fw = risc_status >> 16;
2533 		qlt_handle_atio_queue_update(qlt);
2534 		qlt_handle_resp_queue_update(qlt);
2535 	} else if (intr_type == 0x12) {
2536 		uint16_t code = (uint16_t)(risc_status >> 16);
2537 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2538 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2539 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2540 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2541 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2542 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2543 
2544 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2545 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2546 		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2547 		    mbox5, mbox6);
2548 		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
2549 		    " mb3=%x, mb5=%x, mb6=%x", instance, code, mbox1, mbox2,
2550 		    mbox3, mbox5, mbox6);
2551 
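		/*
		 * Async event codes handled here: 0x8010, 0x8013 and 0x8030
		 * are treated as a link reset, 0x8012 as link down, 0x8011
		 * as link up (mailbox 1 carries the speed), 0x8002-0x8005
		 * as fatal firmware errors that force a port shutdown,
		 * 0x800F triggers a firmware verify, and 0x8101 is an IDC
		 * request that may need to be acknowledged.
		 */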
2552 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2553 			if (qlt->qlt_link_up) {
2554 				fct_handle_event(qlt->qlt_port,
2555 				    FCT_EVENT_LINK_RESET, 0, 0);
2556 			}
2557 		} else if (code == 0x8012) {
2558 			qlt->qlt_link_up = 0;
2559 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2560 			    0, 0);
2561 		} else if (code == 0x8011) {
2562 			switch (mbox1) {
2563 			case 0: qlt->link_speed = PORT_SPEED_1G;
2564 				break;
2565 			case 1: qlt->link_speed = PORT_SPEED_2G;
2566 				break;
2567 			case 3: qlt->link_speed = PORT_SPEED_4G;
2568 				break;
2569 			case 4: qlt->link_speed = PORT_SPEED_8G;
2570 				break;
2571 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
2572 				break;
2573 			default:
2574 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2575 			}
2576 			qlt->qlt_link_up = 1;
2577 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2578 			    0, 0);
2579 		} else if ((code == 0x8002) || (code == 0x8003) ||
2580 		    (code == 0x8004) || (code == 0x8005)) {
2581 			(void) snprintf(info, 80,
2582 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2583 			    code, mbox1, mbox2, mbox5, mbox6);
2584 			info[79] = 0;
2585 			(void) fct_port_shutdown(qlt->qlt_port,
2586 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2587 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2588 		} else if (code == 0x800F) {
2589 			(void) snprintf(info, 80,
2590 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
2591 			    mbox1, mbox2, mbox3);
2592 
2593 			if (mbox1 != 1) {
2594 				/* issue "verify fw" */
2595 				qlt_verify_fw(qlt);
2596 			}
2597 		} else if (code == 0x8101) {
2598 			(void) snprintf(info, 80,
2599 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2600 			    code, mbox1, mbox2, mbox3);
2601 			info[79] = 0;
2602 
2603 			/* check if "ACK" is required (timeout != 0) */
2604 			if (mbox1 & 0x0f00) {
2605 				caddr_t	req;
2606 
2607 				/*
2608 				 * Ack the request (queue work to do it?)
2609 				 * using a mailbox iocb
2610 				 */
2611 				mutex_enter(&qlt->req_lock);
2612 				req = qlt_get_req_entries(qlt, 1);
2613 				if (req) {
2614 					bzero(req, IOCB_SIZE);
2615 					req[0] = 0x39; req[1] = 1;
2616 					QMEM_WR16(qlt, req+8, 0x101);
2617 					QMEM_WR16(qlt, req+10, mbox1);
2618 					QMEM_WR16(qlt, req+12, mbox2);
2619 					QMEM_WR16(qlt, req+14, mbox3);
2620 					QMEM_WR16(qlt, req+16, mbox4);
2621 					QMEM_WR16(qlt, req+18, mbox5);
2622 					QMEM_WR16(qlt, req+20, mbox6);
2623 					qlt_submit_req_entries(qlt, 1);
2624 				} else {
2625 					(void) snprintf(info, 80,
2626 					    "IDC ACK failed");
2627 					info[79] = 0;
2628 				}
2629 				mutex_exit(&qlt->req_lock);
2630 			}
2631 		}
2632 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2633 		/* Handle mailbox completion */
2634 		mutex_enter(&qlt->mbox_lock);
2635 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2636 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2637 			    " when driver wasn't waiting for it %d",
2638 			    instance, qlt->mbox_io_state);
2639 		} else {
2640 			for (i = 0; i < MAX_MBOXES; i++) {
2641 				if (qlt->mcp->from_fw_mask &
2642 				    (((uint32_t)1) << i)) {
2643 					qlt->mcp->from_fw[i] =
2644 					    REG_RD16(qlt, REG_MBOX(i));
2645 				}
2646 			}
2647 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2648 		}
2649 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2650 		cv_broadcast(&qlt->mbox_cv);
2651 		mutex_exit(&qlt->mbox_lock);
2652 	} else {
2653 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2654 		    instance, intr_type);
2655 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2656 	}
2657 
2658 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2659 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2660 	if ((risc_status & BIT_15) &&
2661 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2662 		goto intr_again;
2663 	}
2664 
2665 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2666 
2667 	mutex_exit(&qlt->intr_lock);
2668 	return (DDI_INTR_CLAIMED);
2669 }
2670 
2671 /* **************** NVRAM Functions ********************** */
2672 
2673 fct_status_t
2674 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2675 {
2676 	uint32_t	timer;
2677 
2678 	/* Clear access error flag */
2679 	REG_WR32(qlt, REG_CTRL_STATUS,
2680 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2681 
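	/* Start the read by writing the address with BIT_31 clear. */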
2682 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2683 
2684 	/* Wait for READ cycle to complete. */
2685 	for (timer = 3000; timer; timer--) {
2686 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2687 			break;
2688 		}
2689 		drv_usecwait(10);
2690 	}
2691 	if (timer == 0) {
2692 		EL(qlt, "flash timeout\n");
2693 		return (QLT_FLASH_TIMEOUT);
2694 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2695 		EL(qlt, "flash access error\n");
2696 		return (QLT_FLASH_ACCESS_ERROR);
2697 	}
2698 
2699 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2700 
2701 	return (QLT_SUCCESS);
2702 }
2703 
2704 fct_status_t
2705 qlt_read_nvram(qlt_state_t *qlt)
2706 {
2707 	uint32_t		index, addr, chksum;
2708 	uint32_t		val, *ptr;
2709 	fct_status_t		ret;
2710 	qlt_nvram_t		*nv;
2711 	uint64_t		empty_node_name = 0;
2712 
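	/*
	 * NVRAM is read through the flash interface at a per-function
	 * offset; the function-number bit in the control/status register
	 * selects which copy belongs to this port.
	 */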
2713 	if (qlt->qlt_81xx_chip) {
2714 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2715 		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2716 	} else if (qlt->qlt_25xx_chip) {
2717 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2718 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2719 	} else {
2720 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2721 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2722 	}
2723 	mutex_enter(&qlt_global_lock);
2724 
2725 	/* Pause RISC. */
2726 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
2727 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2728 
2729 	/* Get NVRAM data and calculate checksum. */
2730 	ptr = (uint32_t *)qlt->nvram;
2731 	chksum = 0;
2732 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2733 		ret = qlt_read_flash_word(qlt, addr++, &val);
2734 		if (ret != QLT_SUCCESS) {
2735 			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
2736 			mutex_exit(&qlt_global_lock);
2737 			return (ret);
2738 		}
2739 		chksum += val;
2740 		*ptr = LE_32(val);
2741 		ptr++;
2742 	}
2743 
2744 	/* Release RISC Pause */
2745 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_PAUSE);
2746 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2747 
2748 	mutex_exit(&qlt_global_lock);
2749 
2750 	/* Sanity check NVRAM Data */
2751 	nv = qlt->nvram;
2752 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2753 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2754 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2755 		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
2756 		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
2757 		    nv->nvram_version[1], nv->nvram_version[0]);
2758 		return (QLT_BAD_NVRAM_DATA);
2759 	}
2760 
2761 	/* If node name is zero, hand craft it from port name */
2762 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2763 		bcopy(nv->port_name, nv->node_name, 8);
2764 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
2765 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
2766 	}
2767 
2768 	return (QLT_SUCCESS);
2769 }
2770 
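/*
 * Sync the portion of the ATIO queue that the firmware has produced since
 * the driver last consumed it, handling wrap-around at the end of the
 * ring, and return the number of new entries.
 */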
2771 uint32_t
2772 qlt_sync_atio_queue(qlt_state_t *qlt)
2773 {
2774 	uint32_t total_ent;
2775 
2776 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2777 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2778 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2779 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2780 		    DDI_DMA_SYNC_FORCPU);
2781 	} else {
2782 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2783 		    qlt->atio_ndx_from_fw;
2784 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2785 		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
2786 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2787 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2788 		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
2789 		    DDI_DMA_SYNC_FORCPU);
2790 	}
2791 	return (total_ent);
2792 }
2793 
2794 void
2795 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2796 {
2797 	uint32_t total_ent;
2798 
2799 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2800 		return;
2801 
2802 	total_ent = qlt_sync_atio_queue(qlt);
2803 
2804 	do {
2805 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2806 		    qlt->atio_ndx_to_fw << 6];
2807 		uint32_t ent_cnt;
2808 
2809 		ent_cnt = (uint32_t)(atio[1]);
2810 		if (ent_cnt > total_ent) {
2811 			break;
2812 		}
2813 		switch ((uint8_t)(atio[0])) {
2814 		case 0x0d:	/* INOT */
2815 			qlt_handle_inot(qlt, atio);
2816 			break;
2817 		case 0x06:	/* ATIO */
2818 			qlt_handle_atio(qlt, atio);
2819 			break;
2820 		default:
2821 			EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2822 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2823 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2824 			break;
2825 		}
2826 		qlt->atio_ndx_to_fw = (uint16_t)(
2827 		    (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2828 		total_ent -= ent_cnt;
2829 	} while (total_ent > 0);
2830 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2831 }
2832 
2833 uint32_t
2834 qlt_sync_resp_queue(qlt_state_t *qlt)
2835 {
2836 	uint32_t total_ent;
2837 
2838 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2839 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2840 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2841 		    RESPONSE_QUEUE_OFFSET
2842 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2843 		    DDI_DMA_SYNC_FORCPU);
2844 	} else {
2845 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2846 		    qlt->resp_ndx_from_fw;
2847 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2848 		    RESPONSE_QUEUE_OFFSET
2849 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2850 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2851 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2852 		    RESPONSE_QUEUE_OFFSET,
2853 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2854 	}
2855 	return (total_ent);
2856 }
2857 
2858 void
2859 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2860 {
2861 	uint32_t total_ent;
2862 	uint8_t c;
2863 
2864 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2865 		return;
2866 
2867 	total_ent = qlt_sync_resp_queue(qlt);
2868 
2869 	do {
2870 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
2871 		uint32_t ent_cnt;
2872 
2873 		ent_cnt = (uint32_t)(resp[1]);
2874 		if (ent_cnt > total_ent) {
2875 			break;
2876 		}
2877 		switch ((uint8_t)(resp[0])) {
2878 		case 0x12:	/* CTIO completion */
2879 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
2880 			break;
2881 		case 0x0e:	/* NACK */
2882 			/* Do Nothing */
2883 			break;
2884 		case 0x1b:	/* Verify FW */
2885 			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
2886 			break;
2887 		case 0x29:	/* CT PassThrough */
2888 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
2889 			break;
2890 		case 0x33:	/* Abort IO IOCB completion */
2891 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
2892 			break;
2893 		case 0x51:	/* PUREX */
2894 			qlt_handle_purex(qlt, (uint8_t *)resp);
2895 			break;
2896 		case 0x52:
2897 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
2898 			break;
2899 		case 0x53:	/* ELS passthrough */
2900 			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
2901 			if (c == 0) {
2902 				qlt_handle_sol_els_completion(qlt,
2903 				    (uint8_t *)resp);
2904 			} else if (c == 3) {
2905 				qlt_handle_unsol_els_abort_completion(qlt,
2906 				    (uint8_t *)resp);
2907 			} else {
2908 				qlt_handle_unsol_els_completion(qlt,
2909 				    (uint8_t *)resp);
2910 			}
2911 			break;
2912 		case 0x54:	/* ABTS received */
2913 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
2914 			break;
2915 		case 0x55:	/* ABTS completion */
2916 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
2917 			break;
2918 		default:
2919 			EL(qlt, "response entry=%xh\n", resp[0]);
2920 			break;
2921 		}
2922 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
2923 		    (RESPONSE_QUEUE_ENTRIES - 1);
2924 		total_ent -= ent_cnt;
2925 	} while (total_ent > 0);
2926 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
2927 }
2928 
2929 fct_status_t
2930 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
2931 				uint16_t *ret_handle)
2932 {
2933 	fct_status_t ret;
2934 	mbox_cmd_t *mcp;
2935 	uint16_t n;
2936 	uint16_t h;
2937 	uint32_t ent_id;
2938 	uint8_t *p;
2939 	int found = 0;
2940 
2941 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
2942 	if (mcp == NULL) {
2943 		return (STMF_ALLOC_FAILURE);
2944 	}
2945 	mcp->to_fw[0] = 0x7C;	/* GET ID LIST */
2946 	mcp->to_fw[8] = 2048 * 8;
2947 	mcp->to_fw[9] = 0;
2948 	mcp->to_fw_mask |= BIT_9 | BIT_8;
2949 	mcp->from_fw_mask |= BIT_1 | BIT_2;
2950 
2951 	ret = qlt_mailbox_command(qlt, mcp);
2952 	if (ret != QLT_SUCCESS) {
2953 		EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
2954 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
2955 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
2956 		    mcp->from_fw[1], mcp->from_fw[2]);
2957 		qlt_free_mailbox_command(qlt, mcp);
2958 		return (ret);
2959 	}
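	/*
	 * The returned list is an array of 8-byte entries: a 24-bit port id
	 * in the first word and the firmware login handle in bytes 4-5.
	 * Scan it both to find the handle for this port id and to make sure
	 * the proposed handle is not already in use for another port id.
	 */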
2960 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
2961 	p = mcp->dbuf->db_sglist[0].seg_addr;
2962 	for (n = 0; n < mcp->from_fw[1]; n++) {
2963 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
2964 		h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
2965 		if (ent_id == id) {
2966 			found = 1;
2967 			*ret_handle = h;
2968 			if ((cmd_handle != FCT_HANDLE_NONE) &&
2969 			    (cmd_handle != h)) {
2970 				cmn_err(CE_WARN, "login for portid %x came in "
2971 				    "with handle %x, while the portid was "
2972 				    "already using a different handle %x",
2973 				    id, cmd_handle, h);
2974 				qlt_free_mailbox_command(qlt, mcp);
2975 				return (QLT_FAILURE);
2976 			}
2977 			break;
2978 		}
2979 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
2980 			cmn_err(CE_WARN, "login for portid %x came in with "
2981 			    "handle %x, while the handle was already in use "
2982 			    "for portid %x", id, cmd_handle, ent_id);
2983 			qlt_free_mailbox_command(qlt, mcp);
2984 			return (QLT_FAILURE);
2985 		}
2986 		p += 8;
2987 	}
2988 	if (!found) {
2989 		*ret_handle = cmd_handle;
2990 	}
2991 	qlt_free_mailbox_command(qlt, mcp);
2992 	return (FCT_SUCCESS);
2993 }
2994 
2995 /* ARGSUSED */
2996 fct_status_t
2997 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
2998 				fct_cmd_t *login)
2999 {
3000 	uint8_t *p;
3001 
3002 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3003 	p[0] = ELS_OP_PLOGI;
3004 	*((uint16_t *)(&p[4])) = 0x2020;
3005 	p[7] = 3;
3006 	p[8] = 0x88;
3007 	p[10] = 8;
3008 	p[13] = 0xff; p[15] = 0x1f;
3009 	p[18] = 7; p[19] = 0xd0;
3010 
3011 	bcopy(port->port_pwwn, p + 20, 8);
3012 	bcopy(port->port_nwwn, p + 28, 8);
3013 
3014 	p[68] = 0x80;
3015 	p[74] = 8;
3016 	p[77] = 0xff;
3017 	p[81] = 1;
3018 
3019 	return (FCT_SUCCESS);
3020 }
3021 
3022 /* ARGSUSED */
3023 fct_status_t
3024 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3025 				fct_cmd_t *login)
3026 {
3027 	return (FCT_SUCCESS);
3028 }
3029 
3030 fct_status_t
3031 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3032     fct_cmd_t *login)
3033 {
3034 	uint16_t h;
3035 	fct_status_t ret;
3036 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3037 
3038 	switch (rp->rp_id) {
3039 	case 0xFFFFFC:	h = 0x7FC; break;
3040 	case 0xFFFFFD:	h = 0x7FD; break;
3041 	case 0xFFFFFE:	h = 0x7FE; break;
3042 	case 0xFFFFFF:	h = 0x7FF; break;
3043 	default:
3044 		ret = qlt_portid_to_handle(qlt, rp->rp_id,
3045 		    login->cmd_rp_handle, &h);
3046 		if (ret != FCT_SUCCESS) {
3047 			EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3048 			return (ret);
3049 		}
3050 	}
3051 
3052 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
3053 		ret = qlt_fill_plogi_req(port, rp, login);
3054 	} else {
3055 		ret = qlt_fill_plogi_resp(port, rp, login);
3056 	}
3057 
3058 	if (ret != FCT_SUCCESS) {
3059 		EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3060 		return (ret);
3061 	}
3062 
3063 	if (h == FCT_HANDLE_NONE)
3064 		return (FCT_SUCCESS);
3065 
3066 	if (rp->rp_handle == FCT_HANDLE_NONE) {
3067 		rp->rp_handle = h;
3068 		return (FCT_SUCCESS);
3069 	}
3070 
3071 	if (rp->rp_handle == h)
3072 		return (FCT_SUCCESS);
3073 
3074 	EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3075 	return (FCT_FAILURE);
3076 }
3077 /* invoked in single thread */
3078 fct_status_t
3079 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3080 {
3081 	uint8_t *req;
3082 	qlt_state_t *qlt;
3083 	clock_t	dereg_req_timer;
3084 	fct_status_t ret;
3085 
3086 	qlt = (qlt_state_t *)port->port_fca_private;
3087 
3088 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3089 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
3090 		return (FCT_SUCCESS);
3091 	ASSERT(qlt->rp_id_in_dereg == 0);
3092 
3093 	mutex_enter(&qlt->preq_lock);
3094 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3095 	if (req == NULL) {
3096 		mutex_exit(&qlt->preq_lock);
3097 		return (FCT_BUSY);
3098 	}
3099 	bzero(req, IOCB_SIZE);
3100 	req[0] = 0x52; req[1] = 1;
3101 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
3102 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3103 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3104 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3105 	qlt->rp_id_in_dereg = rp->rp_id;
3106 	qlt_submit_preq_entries(qlt, 1);
3107 
3108 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3109 	if (cv_timedwait(&qlt->rp_dereg_cv,
3110 	    &qlt->preq_lock, dereg_req_timer) > 0) {
3111 		ret = qlt->rp_dereg_status;
3112 	} else {
3113 		ret = FCT_BUSY;
3114 	}
3115 	qlt->rp_dereg_status = 0;
3116 	qlt->rp_id_in_dereg = 0;
3117 	mutex_exit(&qlt->preq_lock);
3118 	return (ret);
3119 }
3120 
3121 /*
3122  * Pass received ELS up to framework.
3123  */
3124 static void
3125 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3126 {
3127 	fct_cmd_t		*cmd;
3128 	fct_els_t		*els;
3129 	qlt_cmd_t		*qcmd;
3130 	uint32_t		payload_size;
3131 	uint32_t		remote_portid;
3132 	uint8_t			*pldptr, *bndrptr;
3133 	int			i, off;
3134 	uint16_t		iocb_flags;
3135 	char			info[160];
3136 
3137 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3138 	    ((uint32_t)(resp[0x1A])) << 16;
3139 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3140 	if (iocb_flags & BIT_15) {
3141 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3142 	} else {
3143 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3144 	}
3145 
3146 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
3147 		EL(qlt, "payload is too large = %xh\n", payload_size);
3148 		cmn_err(CE_WARN, "handle_purex: payload is too large");
3149 		goto cmd_null;
3150 	}
3151 
3152 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3153 	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3154 	if (cmd == NULL) {
3155 		EL(qlt, "fct_alloc cmd==NULL\n");
3156 cmd_null:;
3157 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
3158 		    "allocate space for fct_cmd", (void *)qlt);
3159 		info[159] = 0;
3160 		(void) fct_port_shutdown(qlt->qlt_port,
3161 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3162 		return;
3163 	}
3164 
3165 	cmd->cmd_port = qlt->qlt_port;
3166 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3167 	if (cmd->cmd_rp_handle == 0xFFFF) {
3168 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3169 	}
3170 
3171 	els = (fct_els_t *)cmd->cmd_specific;
3172 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3173 	els->els_req_size = (uint16_t)payload_size;
3174 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3175 	    GET_STRUCT_SIZE(qlt_cmd_t));
3176 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3177 	cmd->cmd_rportid = remote_portid;
3178 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3179 	    ((uint32_t)(resp[0x16])) << 16;
3180 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3181 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3182 	pldptr = &resp[0x2C];
3183 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
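	/*
	 * The payload may span continuation IOCBs; the copy loop skips the
	 * first four bytes of each subsequent entry and wraps back to the
	 * start of the response ring when it reaches the queue boundary.
	 */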
3184 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3185 		/* Take care of fw's swapping of payload */
3186 		els->els_req_payload[i] = pldptr[3];
3187 		els->els_req_payload[i+1] = pldptr[2];
3188 		els->els_req_payload[i+2] = pldptr[1];
3189 		els->els_req_payload[i+3] = pldptr[0];
3190 		pldptr += 4;
3191 		if (pldptr == bndrptr)
3192 			pldptr = (uint8_t *)qlt->resp_ptr;
3193 		off += 4;
3194 		if (off >= IOCB_SIZE) {
3195 			off = 4;
3196 			pldptr += 4;
3197 		}
3198 	}
3199 	fct_post_rcvd_cmd(cmd, 0);
3200 }
3201 
3202 fct_status_t
3203 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3204 {
3205 	qlt_state_t	*qlt;
3206 	char		info[160];
3207 
3208 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3209 
3210 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3211 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3212 			EL(qlt, "ioflags = %xh\n", ioflags);
3213 			goto fatal_panic;
3214 		} else {
3215 			return (qlt_send_status(qlt, cmd));
3216 		}
3217 	}
3218 
3219 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3220 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3221 			goto fatal_panic;
3222 		} else {
3223 			return (qlt_send_els_response(qlt, cmd));
3224 		}
3225 	}
3226 
3227 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3228 		cmd->cmd_handle = 0;
3229 	}
3230 
3231 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3232 		return (qlt_send_abts_response(qlt, cmd, 0));
3233 	} else {
3234 		EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3235 		ASSERT(0);
3236 		return (FCT_FAILURE);
3237 	}
3238 
3239 fatal_panic:;
3240 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
3241 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3242 	    ioflags);
3243 	info[159] = 0;
3244 	(void) fct_port_shutdown(qlt->qlt_port,
3245 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3246 	return (FCT_FAILURE);
3247 }
3248 
3249 /* ARGSUSED */
3250 fct_status_t
3251 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
3252 {
3253 	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
3254 	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3255 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3256 	uint8_t *req;
3257 	uint16_t flags;
3258 
3259 	if (dbuf->db_handle == 0)
3260 		qcmd->dbuf = dbuf;
3261 	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3262 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
3263 		flags = (uint16_t)(flags | 2);
3264 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
3265 	} else {
3266 		flags = (uint16_t)(flags | 1);
3267 	}
3268 
3269 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
3270 		flags = (uint16_t)(flags | BIT_15);
3271 
3272 	mutex_enter(&qlt->req_lock);
3273 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3274 	if (req == NULL) {
3275 		mutex_exit(&qlt->req_lock);
3276 		return (FCT_BUSY);
3277 	}
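	/*
	 * Build a CTIO type 7 (0x12) IOCB for a data transfer: one data
	 * segment described by the buffer's DMA address and size, plus the
	 * relative offset and the direction/status flags computed above.
	 */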
3278 	bzero(req, IOCB_SIZE);
3279 	req[0] = 0x12; req[1] = 0x1;
3280 	req[2] = dbuf->db_handle;
3281 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
3282 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
3283 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
3284 	req[12] = 1;
3285 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
3286 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
3287 	QMEM_WR16(qlt, req+0x1A, flags);
3288 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
3289 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
3290 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
3291 	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
3292 	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
3293 	qlt_submit_req_entries(qlt, 1);
3294 	mutex_exit(&qlt->req_lock);
3295 
3296 	return (STMF_SUCCESS);
3297 }
3298 
3299 /*
 * We must construct a proper FCP_RSP_IU now. Here we only focus on
 * the handling of FCP_SNS_INFO. Any protocol failures (FCP_RSP_INFO)
 * would have been caught before we got here.
3303  */
3304 fct_status_t
3305 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3306 {
3307 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
3308 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
3309 	qlt_dmem_bctl_t *bctl;
3310 	uint32_t size;
3311 	uint8_t *req, *fcp_rsp_iu;
3312 	uint8_t *psd, sensbuf[24];		/* sense data */
3313 	uint16_t flags;
3314 	uint16_t scsi_status;
3315 	int use_mode2;
3316 	int ndx;
3317 
3318 	/*
3319 	 * Enter fast channel for non check condition
3320 	 */
3321 	if (task->task_scsi_status != STATUS_CHECK) {
3322 		/*
3323 		 * We will use mode1
3324 		 */
3325 		flags = (uint16_t)(BIT_6 | BIT_15 |
3326 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3327 		scsi_status = (uint16_t)task->task_scsi_status;
3328 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3329 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3330 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3331 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3332 		}
3333 		qcmd->dbuf_rsp_iu = NULL;
3334 
3335 		/*
		 * Fill out the CTIO type 7 IOCB
3337 		 */
3338 		mutex_enter(&qlt->req_lock);
3339 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3340 		if (req == NULL) {
3341 			mutex_exit(&qlt->req_lock);
3342 			return (FCT_BUSY);
3343 		}
3344 
3345 		/*
3346 		 * Common fields
3347 		 */
3348 		bzero(req, IOCB_SIZE);
3349 		req[0x00] = 0x12;
3350 		req[0x01] = 0x1;
3351 		req[0x02] = BIT_7;	/* indicate if it's a pure status req */
3352 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3353 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3354 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3355 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3356 
3357 		/*
3358 		 * Mode-specific fields
3359 		 */
3360 		QMEM_WR16(qlt, req + 0x1A, flags);
3361 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3362 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3363 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3364 
3365 		/*
3366 		 * Trigger FW to send SCSI status out
3367 		 */
3368 		qlt_submit_req_entries(qlt, 1);
3369 		mutex_exit(&qlt->req_lock);
3370 		return (STMF_SUCCESS);
3371 	}
3372 
3373 	ASSERT(task->task_scsi_status == STATUS_CHECK);
3374 	/*
3375 	 * Decide the SCSI status mode, that should be used
	 * Decide which SCSI status mode should be used
3377 	use_mode2 = (task->task_sense_length > 24);
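	/*
	 * Mode 1 carries the SCSI status and up to 24 bytes of sense data
	 * inline in the CTIO; mode 2 instead points the firmware at a
	 * separately DMA-mapped FCP_RSP IU, which is needed when the sense
	 * data is longer than 24 bytes.
	 */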
3378 
3379 	/*
3380 	 * Prepare required information per the SCSI status mode
3381 	 */
3382 	flags = (uint16_t)(BIT_15 |
3383 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3384 	if (use_mode2) {
3385 		flags = (uint16_t)(flags | BIT_7);
3386 
3387 		size = task->task_sense_length;
3388 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3389 		    task->task_sense_length, &size, 0);
3390 		if (!qcmd->dbuf_rsp_iu) {
3391 			return (FCT_ALLOC_FAILURE);
3392 		}
3393 
3394 		/*
3395 		 * Start to construct FCP_RSP IU
3396 		 */
3397 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3398 		bzero(fcp_rsp_iu, 24);
3399 
3400 		/*
3401 		 * FCP_RSP IU flags, byte10
3402 		 */
3403 		fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3404 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3405 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3406 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3407 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3408 		}
3409 
3410 		/*
3411 		 * SCSI status code, byte11
3412 		 */
3413 		fcp_rsp_iu[11] = task->task_scsi_status;
3414 
3415 		/*
3416 		 * FCP_RESID (Overrun or underrun)
3417 		 */
3418 		fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3419 		fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3420 		fcp_rsp_iu[14] = (uint8_t)((task->task_resid >>  8) & 0xFF);
3421 		fcp_rsp_iu[15] = (uint8_t)((task->task_resid >>  0) & 0xFF);
3422 
3423 		/*
3424 		 * FCP_SNS_LEN
3425 		 */
3426 		fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3427 		    0xFF);
3428 		fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3429 		    0xFF);
3430 
3431 		/*
3432 		 * FCP_RSP_LEN
3433 		 */
3434 		/*
3435 		 * No FCP_RSP_INFO is included
3436 		 */
3437 		/*
3438 		 * FCP_SNS_INFO
3439 		 */
3440 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3441 		    task->task_sense_length);
3442 
3443 		/*
3444 		 * Ensure DMA data consistency
3445 		 */
3446 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3447 	} else {
3448 		flags = (uint16_t)(flags | BIT_6);
3449 
3450 		scsi_status = (uint16_t)task->task_scsi_status;
3451 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3452 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3453 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3454 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3455 		}
3456 		if (task->task_sense_length) {
3457 			scsi_status = (uint16_t)(scsi_status | BIT_9);
3458 		}
3459 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3460 		qcmd->dbuf_rsp_iu = NULL;
3461 	}
3462 
3463 	/*
3464 	 * Fill out the CTIO type 7 IOCB
3465 	 */
3466 	mutex_enter(&qlt->req_lock);
3467 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3468 	if (req == NULL) {
3469 		mutex_exit(&qlt->req_lock);
3470 		if (use_mode2) {
3471 			qlt_dmem_free(cmd->cmd_port->port_fds,
3472 			    qcmd->dbuf_rsp_iu);
3473 			qcmd->dbuf_rsp_iu = NULL;
3474 		}
3475 		return (FCT_BUSY);
3476 	}
3477 
3478 	/*
3479 	 * Common fields
3480 	 */
3481 	bzero(req, IOCB_SIZE);
3482 	req[0x00] = 0x12;
3483 	req[0x01] = 0x1;
3484 	req[0x02] = BIT_7;	/* indicates a pure status req */
3485 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3486 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3487 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3488 	if (use_mode2) {
3489 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3490 	}
3491 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3492 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3493 
3494 	/*
3495 	 * Mode-specific fields
3496 	 */
3497 	if (!use_mode2) {
3498 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3499 	}
3500 	QMEM_WR16(qlt, req + 0x1A, flags);
3501 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3502 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3503 	if (use_mode2) {
3504 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3505 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3506 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3507 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3508 	} else {
3509 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3510 		psd = req+0x28;
3511 
3512 		/*
3513 		 * Data in sense buf is always big-endian, data in IOCB
3514 		 * should always be little-endian, so we must do swapping.
3515 		 */
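		/*
		 * Round the sense length up to a multiple of 4 so the swap
		 * loop below always copies whole 4-byte words.
		 */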
3516 		size = ((task->task_sense_length + 3) & (~3));
3517 		for (ndx = 0; ndx < size; ndx += 4) {
3518 			psd[ndx + 0] = sensbuf[ndx + 3];
3519 			psd[ndx + 1] = sensbuf[ndx + 2];
3520 			psd[ndx + 2] = sensbuf[ndx + 1];
3521 			psd[ndx + 3] = sensbuf[ndx + 0];
3522 		}
3523 	}
3524 
3525 	/*
3526 	 * Trigger FW to send SCSI status out
3527 	 */
3528 	qlt_submit_req_entries(qlt, 1);
3529 	mutex_exit(&qlt->req_lock);
3530 
3531 	return (STMF_SUCCESS);
3532 }
3533 
3534 fct_status_t
3535 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3536 {
3537 	qlt_cmd_t	*qcmd;
3538 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3539 	uint8_t *req, *addr;
3540 	qlt_dmem_bctl_t *bctl;
3541 	uint32_t minsize;
3542 	uint8_t elsop, req1f;
3543 
3544 	addr = els->els_resp_payload;
3545 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3546 
3547 	minsize = els->els_resp_size;
3548 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3549 	if (qcmd->dbuf == NULL)
3550 		return (FCT_BUSY);
3551 
3552 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3553 
3554 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3555 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3556 
3557 	if (addr[0] == 0x02) {	/* ACC */
3558 		req1f = BIT_5;
3559 	} else {
3560 		req1f = BIT_6;
3561 	}
3562 	elsop = els->els_req_payload[0];
3563 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3564 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3565 		req1f = (uint8_t)(req1f | BIT_4);
3566 	}
3567 
3568 	mutex_enter(&qlt->req_lock);
3569 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3570 	if (req == NULL) {
3571 		mutex_exit(&qlt->req_lock);
3572 		qlt_dmem_free(NULL, qcmd->dbuf);
3573 		qcmd->dbuf = NULL;
3574 		return (FCT_BUSY);
3575 	}
3576 	bzero(req, IOCB_SIZE);
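	/*
	 * Build the ELS response IOCB (entry type 0x53, presumably the ELS
	 * pass-through entry) around the DMA buffer prepared above.
	 */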
3577 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3578 	req[0x16] = elsop; req[0x1f] = req1f;
3579 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3580 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3581 	QMEM_WR16(qlt, (&req[0xC]), 1);
3582 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3583 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3584 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3585 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
3586 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
3587 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
3588 	}
3589 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3590 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3591 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3592 	qlt_submit_req_entries(qlt, 1);
3593 	mutex_exit(&qlt->req_lock);
3594 
3595 	return (FCT_SUCCESS);
3596 }
3597 
3598 fct_status_t
3599 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3600 {
3601 	qlt_abts_cmd_t *qcmd;
3602 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3603 	uint8_t *req;
3604 	uint32_t lportid;
3605 	uint32_t fctl;
3606 	int i;
3607 
3608 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3609 
3610 	mutex_enter(&qlt->req_lock);
3611 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3612 	if (req == NULL) {
3613 		mutex_exit(&qlt->req_lock);
3614 		return (FCT_BUSY);
3615 	}
3616 	bcopy(qcmd->buf, req, IOCB_SIZE);
3617 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3618 	fctl = QMEM_RD32(qlt, req+0x1C);
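	/*
	 * Derive the response F_CTL from the received ABTS: flip the
	 * Exchange Context bit, clear Sequence Context, and set what are
	 * assumed to be the End_Sequence and Sequence Initiative bits.
	 */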
3619 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3620 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3621 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3622 	if (cmd->cmd_rp)
3623 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3624 	else
3625 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3626 	if (terminate) {
3627 		QMEM_WR16(qlt, (&req[0xC]), 1);
3628 	}
3629 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3630 	req[0x17] = abts->abts_resp_rctl;
3631 	QMEM_WR32(qlt, req+0x18, lportid);
3632 	QMEM_WR32(qlt, req+0x1C, fctl);
3633 	req[0x23]++;
3634 	for (i = 0; i < 12; i += 4) {
3635 		/* Take care of firmware's LE requirement */
3636 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3637 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3638 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3639 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3640 	}
3641 	qlt_submit_req_entries(qlt, 1);
3642 	mutex_exit(&qlt->req_lock);
3643 
3644 	return (FCT_SUCCESS);
3645 }
3646 
3647 static void
3648 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3649 {
3650 	int i;
3651 	uint32_t d;
3652 	caddr_t req;
3653 	/* Just put it on the request queue */
3654 	mutex_enter(&qlt->req_lock);
3655 	req = qlt_get_req_entries(qlt, 1);
3656 	if (req == NULL) {
3657 		mutex_exit(&qlt->req_lock);
3658 		/* XXX handle this */
3659 		return;
3660 	}
3661 	for (i = 0; i < 16; i++) {
3662 		d = QMEM_RD32(qlt, inot);
3663 		inot += 4;
3664 		QMEM_WR32(qlt, req, d);
3665 		req += 4;
3666 	}
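	/*
	 * Rewind to the start of the 64-byte entry just copied and change
	 * its entry type to 0x0e (assumed to be Notify Acknowledge) before
	 * handing it back to the firmware.
	 */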
3667 	req -= 64;
3668 	req[0] = 0x0e;
3669 	qlt_submit_req_entries(qlt, 1);
3670 	mutex_exit(&qlt->req_lock);
3671 }
3672 
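/*
 * Map the 3-bit FCP_CNTL task attribute code to the corresponding STMF
 * task_flags attribute value (simple, head-of-queue, ordered, ACA,
 * untagged); the exact mapping is assumed from the FCP and STMF encodings.
 */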
3673 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3674 static void
3675 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3676 {
3677 	fct_cmd_t	*cmd;
3678 	scsi_task_t	*task;
3679 	qlt_cmd_t	*qcmd;
3680 	uint32_t	rportid, fw_xchg_addr;
3681 	uint8_t		*p, *q, *req, tm;
3682 	uint16_t	cdb_size, flags, oxid;
3683 	char		info[160];
3684 
3685 	/*
3686 	 * If either a bidirectional xfer is requested or there is an
3687 	 * extended CDB, atio[0x20 + 11] will be greater than or equal to 3.
3688 	 */
3689 	cdb_size = 16;
3690 	if (atio[0x20 + 11] >= 3) {
3691 		uint8_t b = atio[0x20 + 11];
3692 		uint16_t b1;
3693 		if ((b & 3) == 3) {
3694 			EL(qlt, "bidirectional I/O not supported\n");
3695 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3696 			    "received, dropping the cmd as bidirectional "
3697 			    " transfers are not yet supported", qlt->instance);
3698 			/* XXX abort the I/O */
3699 			return;
3700 		}
3701 		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3702 		/*
3703 		 * Verify that we have enough entries. Without an additional
3704 		 * CDB, everything fits nicely within the same 64 bytes, so
3705 		 * the additional CDB size is essentially the number of
3706 		 * additional bytes we need.
3707 		 */
3708 		b1 = (uint16_t)b;
3709 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3710 			EL(qlt, "extended cdb received\n");
3711 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3712 			    "cdb (cdb size = %d bytes), however the firmware "
3713 			    "did not DMA the entire FCP_CMD IU, entry count "
3714 			    "is %d while it should be %d", qlt->instance,
3715 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3716 			/* XXX abort the I/O */
3717 			return;
3718 		}
3719 	}
3720 
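	/*
	 * Offset 8 of the ATIO holds the received FC frame header: bytes
	 * 5-7 carry the S_ID (remote port), bytes 1-3 the D_ID (local
	 * port) and bytes 16-19 the OX_ID/RX_ID of the exchange.
	 */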
3721 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3722 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3723 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3724 	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3725 
3726 	if (fw_xchg_addr == 0xFFFFFFFF) {
3727 		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3728 		cmd = NULL;
3729 	} else {
3730 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3731 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3732 		if (cmd == NULL) {
3733 			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3734 		}
3735 	}
3736 	if (cmd == NULL) {
3737 		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3738 		/* Abort this IO */
3739 		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3740 
3741 		mutex_enter(&qlt->req_lock);
3742 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3743 		if (req == NULL) {
3744 			mutex_exit(&qlt->req_lock);
3745 
3746 			(void) snprintf(info, 160,
3747 			    "qlt_handle_atio: qlt-%p, can't "
3748 			    "allocate space for scsi_task", (void *)qlt);
3749 			info[159] = 0;
3750 			(void) fct_port_shutdown(qlt->qlt_port,
3751 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3752 			return;
3753 		}
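		/*
		 * Build a CTIO type 7 (0x12) with BIT_14 set in the flags
		 * (assumed to request exchange termination) so the firmware
		 * releases the exchange we could not allocate a task for.
		 */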
3754 		bzero(req, IOCB_SIZE);
3755 		req[0] = 0x12; req[1] = 0x1;
3756 		QMEM_WR32(qlt, req+4, 0);
3757 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3758 		    rportid));
3759 		QMEM_WR16(qlt, req+10, 60);
3760 		QMEM_WR32(qlt, req+0x10, rportid);
3761 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3762 		QMEM_WR16(qlt, req+0x1A, flags);
3763 		QMEM_WR16(qlt, req+0x20, oxid);
3764 		qlt_submit_req_entries(qlt, 1);
3765 		mutex_exit(&qlt->req_lock);
3766 
3767 		return;
3768 	}
3769 
3770 	task = (scsi_task_t *)cmd->cmd_specific;
3771 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3772 	qcmd->fw_xchg_addr = fw_xchg_addr;
3773 	qcmd->param.atio_byte3 = atio[3];
3774 	cmd->cmd_oxid = oxid;
3775 	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3776 	    atio[8+19]);
3777 	cmd->cmd_rportid = rportid;
3778 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3779 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3780 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3781 	/* Don't do a 64-byte read as this memory is behind the IOMMU */
3782 	q = atio+0x28;
3783 	/* XXX Handle fcp_cntl */
3784 	task->task_cmd_seq_no = (uint32_t)(*q++);
3785 	task->task_csn_size = 8;
3786 	task->task_flags = qlt_task_flags[(*q++) & 7];
3787 	tm = *q++;
3788 	if (tm) {
3789 		if (tm & BIT_1)
3790 			task->task_mgmt_function = TM_ABORT_TASK_SET;
3791 		else if (tm & BIT_2)
3792 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
3793 		else if (tm & BIT_4)
3794 			task->task_mgmt_function = TM_LUN_RESET;
3795 		else if (tm & BIT_5)
3796 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
3797 		else if (tm & BIT_6)
3798 			task->task_mgmt_function = TM_CLEAR_ACA;
3799 		else
3800 			task->task_mgmt_function = TM_ABORT_TASK;
3801 	}
3802 	task->task_max_nbufs = STMF_BUFS_MAX;
3803 	task->task_csn_size = 8;
3804 	task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
3805 	p = task->task_cdb;
3806 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3807 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3808 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3809 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3810 	if (cdb_size > 16) {
3811 		uint16_t xtra = (uint16_t)(cdb_size - 16);
3812 		uint16_t i;
3813 		uint8_t cb[4];
3814 
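		/*
		 * The extra CDB bytes (and the FCP_DL field that follows
		 * the CDB) spill into the next ATIO queue entry and may
		 * wrap past the end of the queue, so wrap the read pointer
		 * back to the start of the ATIO queue when it runs off the
		 * end.
		 */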
3815 		while (xtra) {
3816 			*p++ = *q++;
3817 			xtra--;
3818 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3819 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3820 				q = (uint8_t *)qlt->queue_mem_ptr +
3821 				    ATIO_QUEUE_OFFSET;
3822 			}
3823 		}
3824 		for (i = 0; i < 4; i++) {
3825 			cb[i] = *q++;
3826 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3827 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3828 				q = (uint8_t *)qlt->queue_mem_ptr +
3829 				    ATIO_QUEUE_OFFSET;
3830 			}
3831 		}
3832 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
3833 		    (((uint32_t)cb[1]) << 16) |
3834 		    (((uint32_t)cb[2]) << 8) | cb[3];
3835 	} else {
3836 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
3837 		    (((uint32_t)q[1]) << 16) |
3838 		    (((uint32_t)q[2]) << 8) | q[3];
3839 	}
3840 	fct_post_rcvd_cmd(cmd, 0);
3841 }
3842 
3843 static void
3844 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
3845 {
3846 	uint16_t status;
3847 	uint32_t portid;
3848 	uint32_t subcode1, subcode2;
3849 
3850 	status = QMEM_RD16(qlt, rsp+8);
3851 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
3852 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
3853 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
3854 
3855 	mutex_enter(&qlt->preq_lock);
3856 	if (portid != qlt->rp_id_in_dereg) {
3857 		int instance = ddi_get_instance(qlt->dip);
3858 
3859 		EL(qlt, "implicit logout reveived portid = %xh\n", portid);
3860 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
3861 		    " received when driver wasn't waiting for it",
3862 		    instance, portid);
3863 		mutex_exit(&qlt->preq_lock);
3864 		return;
3865 	}
3866 
3867 	if (status != 0) {
3868 		EL(qlt, "implicit logout completed for %xh with status %xh, "
3869 		    "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
3870 		    subcode2);
3871 		if (status == 0x31 && subcode1 == 0x0a) {
3872 			qlt->rp_dereg_status = FCT_SUCCESS;
3873 		} else {
3874 			EL(qlt, "implicit logout portid=%xh, status=%xh, "
3875 			    "subcode1=%xh, subcode2=%xh\n", portid, status,
3876 			    subcode1, subcode2);
3877 			qlt->rp_dereg_status =
3878 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
3879 		}
3880 	} else {
3881 		qlt->rp_dereg_status = FCT_SUCCESS;
3882 	}
3883 	cv_signal(&qlt->rp_dereg_cv);
3884 	mutex_exit(&qlt->preq_lock);
3885 }
3886 
3887 /*
3888  * Note that when an ELS is aborted, the regular or aborted completion
3889  * (if any) gets posted before the abort IOCB comes back on the response queue.
3890  */
3891 static void
3892 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3893 {
3894 	char		info[160];
3895 	fct_cmd_t	*cmd;
3896 	qlt_cmd_t	*qcmd;
3897 	uint32_t	hndl;
3898 	uint32_t	subcode1, subcode2;
3899 	uint16_t	status;
3900 
3901 	hndl = QMEM_RD32(qlt, rsp+4);
3902 	status = QMEM_RD16(qlt, rsp+8);
3903 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3904 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3905 
3906 	if (!CMD_HANDLE_VALID(hndl)) {
3907 		EL(qlt, "handle = %xh\n", hndl);
3908 		/*
3909 		 * This cannot happen for an unsol ELS completion. This can
3910 		 * only happen when an abort for an unsol ELS completes.
3911 		 * This condition indicates a firmware bug.
3912 		 */
3913 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
3914 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3915 		    hndl, status, subcode1, subcode2, (void *)rsp);
3916 		info[159] = 0;
3917 		(void) fct_port_shutdown(qlt->qlt_port,
3918 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3919 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3920 		return;
3921 	}
3922 
3923 	if (status == 5) {
3924 		/*
3925 		 * When an unsolicited ELS is aborted, the abort is done
3926 		 * by an ELSPT IOCB with abort control. This is the aborted
3927 		 * IOCB and not the abortee. We will do the cleanup when the
3928 		 * IOCB which caused the abort returns.
3929 		 */
3930 		EL(qlt, "status = %xh\n", status);
3931 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
3932 		return;
3933 	}
3934 
3935 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3936 	if (cmd == NULL) {
3937 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
3938 		/*
3939 		 * Now why would this happen ???
3940 		 */
3941 		(void) snprintf(info, 160,
3942 		    "qlt_handle_unsol_els_completion: can not "
3943 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3944 		    (void *)rsp);
3945 		info[159] = 0;
3946 		(void) fct_port_shutdown(qlt->qlt_port,
3947 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3948 
3949 		return;
3950 	}
3951 
3952 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3953 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3954 	if (qcmd->flags & QLT_CMD_ABORTING) {
3955 		/*
3956 		 * This is the same case as "if (status == 5)" above. The
3957 		 * only difference is that in this case the firmware actually
3958 		 * finished sending the response. So the abort attempt will
3959 		 * come back with status ?. We will handle it there.
3960 		 */
3961 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
3962 		    "abort it");
3963 		return;
3964 	}
3965 
3966 	if (qcmd->dbuf != NULL) {
3967 		qlt_dmem_free(NULL, qcmd->dbuf);
3968 		qcmd->dbuf = NULL;
3969 	}
3970 
3971 	if (status == 0) {
3972 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
3973 	} else {
3974 		fct_send_response_done(cmd,
3975 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
3976 	}
3977 }
3978 
3979 static void
3980 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
3981 {
3982 	char		info[160];
3983 	fct_cmd_t	*cmd;
3984 	qlt_cmd_t	*qcmd;
3985 	uint32_t	hndl;
3986 	uint32_t	subcode1, subcode2;
3987 	uint16_t	status;
3988 
3989 	hndl = QMEM_RD32(qlt, rsp+4);
3990 	status = QMEM_RD16(qlt, rsp+8);
3991 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3992 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3993 
3994 	if (!CMD_HANDLE_VALID(hndl)) {
3995 		EL(qlt, "handle = %xh\n", hndl);
3996 		ASSERT(hndl == 0);
3997 		/*
3998 		 * Someone has requested to abort it, but no one is waiting for
3999 		 * this completion.
4000 		 */
4001 		if ((status != 0) && (status != 8)) {
4002 			EL(qlt, "status = %xh\n", status);
4003 			/*
4004 			 * There could be exchange resource leakage, so
4005 			 * throw HBA fatal error event now
4006 			 */
4007 			(void) snprintf(info, 160,
4008 			    "qlt_handle_unsol_els_abort_completion: "
4009 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4010 			    hndl, status, subcode1, subcode2, (void *)rsp);
4011 			info[159] = 0;
4012 			(void) fct_port_shutdown(qlt->qlt_port,
4013 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4014 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4015 			return;
4016 		}
4017 
4018 		return;
4019 	}
4020 
4021 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4022 	if (cmd == NULL) {
4023 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4024 		/*
4025 		 * Why would this happen ??
4026 		 */
4027 		(void) snprintf(info, 160,
4028 		    "qlt_handle_unsol_els_abort_completion: can not get "
4029 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4030 		    (void *)rsp);
4031 		info[159] = 0;
4032 		(void) fct_port_shutdown(qlt->qlt_port,
4033 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4034 
4035 		return;
4036 	}
4037 
4038 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4039 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4040 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4041 
4042 	if (qcmd->dbuf != NULL) {
4043 		qlt_dmem_free(NULL, qcmd->dbuf);
4044 		qcmd->dbuf = NULL;
4045 	}
4046 
4047 	if (status == 0) {
4048 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4049 	} else if (status == 8) {
4050 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4051 	} else {
4052 		fct_cmd_fca_aborted(cmd,
4053 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4054 	}
4055 }
4056 
4057 static void
4058 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4059 {
4060 	char		info[160];
4061 	fct_cmd_t	*cmd;
4062 	fct_els_t	*els;
4063 	qlt_cmd_t	*qcmd;
4064 	uint32_t	hndl;
4065 	uint32_t	subcode1, subcode2;
4066 	uint16_t	status;
4067 
4068 	hndl = QMEM_RD32(qlt, rsp+4);
4069 	status = QMEM_RD16(qlt, rsp+8);
4070 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4071 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4072 
4073 	if (!CMD_HANDLE_VALID(hndl)) {
4074 		EL(qlt, "handle = %xh\n", hndl);
4075 		/*
4076 		 * This cannot happen for sol els completion.
4077 		 */
4078 		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
4079 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4080 		    hndl, status, subcode1, subcode2, (void *)rsp);
4081 		info[159] = 0;
4082 		(void) fct_port_shutdown(qlt->qlt_port,
4083 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4084 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4085 		return;
4086 	}
4087 
4088 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4089 	if (cmd == NULL) {
4090 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4091 		(void) snprintf(info, 160,
4092 		    "qlt_handle_sol_els_completion: can not "
4093 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4094 		    (void *)rsp);
4095 		info[159] = 0;
4096 		(void) fct_port_shutdown(qlt->qlt_port,
4097 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4098 
4099 		return;
4100 	}
4101 
4102 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
4103 	els = (fct_els_t *)cmd->cmd_specific;
4104 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4105 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
4106 
4107 	if (qcmd->flags & QLT_CMD_ABORTING) {
4108 		/*
4109 		 * We will handle it when the ABORT IO IOCB returns.
4110 		 */
4111 		return;
4112 	}
4113 
4114 	if (qcmd->dbuf != NULL) {
4115 		if (status == 0) {
4116 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4117 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4118 			    qcmd->param.resp_offset,
4119 			    els->els_resp_payload, els->els_resp_size);
4120 		}
4121 		qlt_dmem_free(NULL, qcmd->dbuf);
4122 		qcmd->dbuf = NULL;
4123 	}
4124 
4125 	if (status == 0) {
4126 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4127 	} else {
4128 		fct_send_cmd_done(cmd,
4129 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4130 	}
4131 }
4132 
4133 static void
4134 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
4135 {
4136 	fct_cmd_t	*cmd;
4137 	fct_sol_ct_t	*ct;
4138 	qlt_cmd_t	*qcmd;
4139 	uint32_t	 hndl;
4140 	uint16_t	 status;
4141 	char		 info[160];
4142 
4143 	hndl = QMEM_RD32(qlt, rsp+4);
4144 	status = QMEM_RD16(qlt, rsp+8);
4145 
4146 	if (!CMD_HANDLE_VALID(hndl)) {
4147 		EL(qlt, "handle = %xh\n", hndl);
4148 		/*
4149 		 * Solicited commands will always have a valid handle.
4150 		 */
4151 		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
4152 		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4153 		info[159] = 0;
4154 		(void) fct_port_shutdown(qlt->qlt_port,
4155 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4156 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4157 		return;
4158 	}
4159 
4160 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4161 	if (cmd == NULL) {
4162 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4163 		(void) snprintf(info, 160,
4164 		    "qlt_handle_ct_completion: cannot find "
4165 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4166 		    (void *)rsp);
4167 		info[159] = 0;
4168 		(void) fct_port_shutdown(qlt->qlt_port,
4169 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4170 
4171 		return;
4172 	}
4173 
4174 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4175 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4176 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
4177 
4178 	if (qcmd->flags & QLT_CMD_ABORTING) {
4179 		/*
4180 		 * We will handle it when the ABORT IO IOCB returns.
4181 		 */
4182 		return;
4183 	}
4184 
4185 	ASSERT(qcmd->dbuf);
4186 	if (status == 0) {
4187 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4188 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4189 		    qcmd->param.resp_offset,
4190 		    ct->ct_resp_payload, ct->ct_resp_size);
4191 	}
4192 	qlt_dmem_free(NULL, qcmd->dbuf);
4193 	qcmd->dbuf = NULL;
4194 
4195 	if (status == 0) {
4196 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4197 	} else {
4198 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4199 	}
4200 }
4201 
4202 static void
4203 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
4204 {
4205 	fct_cmd_t	*cmd;
4206 	scsi_task_t	*task;
4207 	qlt_cmd_t	*qcmd;
4208 	stmf_data_buf_t	*dbuf;
4209 	fct_status_t	fc_st;
4210 	uint32_t	iof = 0;
4211 	uint32_t	hndl;
4212 	uint16_t	status;
4213 	uint16_t	flags;
4214 	uint8_t		abort_req;
4215 	uint8_t		n;
4216 	char		info[160];
4217 
4218 	/* XXX: Check validity of the IOCB by checking 4th byte. */
4219 	hndl = QMEM_RD32(qlt, rsp+4);
4220 	status = QMEM_RD16(qlt, rsp+8);
4221 	flags = QMEM_RD16(qlt, rsp+0x1a);
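	/*
	 * rsp[2] is assumed to echo the buffer handle field of the CTIO
	 * we issued; BIT_7 there marks a pure status (no data) CTIO.
	 */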
4222 	n = rsp[2];
4223 
4224 	if (!CMD_HANDLE_VALID(hndl)) {
4225 		EL(qlt, "handle = %xh\n", hndl);
4226 		ASSERT(hndl == 0);
4227 		/*
4228 		 * Someone has requested to abort it, but no one is waiting for
4229 		 * this completion.
4230 		 */
4231 		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
4232 		    (void *)rsp);
4233 		if ((status != 1) && (status != 2)) {
4234 			EL(qlt, "status = %xh\n", status);
4235 			/*
4236 			 * There could be exchange resource leakage, so
4237 			 * throw HBA fatal error event now
4238 			 */
4239 			(void) snprintf(info, 160,
4240 			    "qlt_handle_ctio_completion: hndl-"
4241 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4242 			info[159] = 0;
4243 			(void) fct_port_shutdown(qlt->qlt_port,
4244 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4245 
4246 		}
4247 
4248 		return;
4249 	}
4250 
4251 	if (flags & BIT_14) {
4252 		abort_req = 1;
4253 		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
4254 		    (void *)rsp);
4255 	} else {
4256 		abort_req = 0;
4257 	}
4258 
4259 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4260 	if (cmd == NULL) {
4261 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4262 		(void) snprintf(info, 160,
4263 		    "qlt_handle_ctio_completion: cannot find "
4264 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4265 		    (void *)rsp);
4266 		info[159] = 0;
4267 		(void) fct_port_shutdown(qlt->qlt_port,
4268 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4269 
4270 		return;
4271 	}
4272 
4273 	task = (scsi_task_t *)cmd->cmd_specific;
4274 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4275 	if (qcmd->dbuf_rsp_iu) {
4276 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
4277 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
4278 		qcmd->dbuf_rsp_iu = NULL;
4279 	}
4280 
4281 	if ((status == 1) || (status == 2)) {
4282 		if (abort_req) {
4283 			fc_st = FCT_ABORT_SUCCESS;
4284 			iof = FCT_IOF_FCA_DONE;
4285 		} else {
4286 			fc_st = FCT_SUCCESS;
4287 			if (flags & BIT_15) {
4288 				iof = FCT_IOF_FCA_DONE;
4289 			}
4290 		}
4291 	} else {
4292 		EL(qlt, "status = %xh\n", status);
4293 		if ((status == 8) && abort_req) {
4294 			fc_st = FCT_NOT_FOUND;
4295 			iof = FCT_IOF_FCA_DONE;
4296 		} else {
4297 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
4298 		}
4299 	}
4300 	dbuf = NULL;
4301 	if (((n & BIT_7) == 0) && (!abort_req)) {
4302 		/* A completion of data xfer */
4303 		if (n == 0) {
4304 			dbuf = qcmd->dbuf;
4305 		} else {
4306 			dbuf = stmf_handle_to_buf(task, n);
4307 		}
4308 
4309 		ASSERT(dbuf != NULL);
4310 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
4311 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
4312 		if (flags & BIT_15) {
4313 			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
4314 			    DB_STATUS_GOOD_SENT);
4315 		}
4316 
4317 		dbuf->db_xfer_status = fc_st;
4318 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
4319 		return;
4320 	}
4321 	if (!abort_req) {
4322 		/*
4323 		 * This was just a pure status xfer.
4324 		 */
4325 		fct_send_response_done(cmd, fc_st, iof);
4326 		return;
4327 	}
4328 
4329 	fct_cmd_fca_aborted(cmd, fc_st, iof);
4330 }
4331 
4332 static void
4333 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4334 {
4335 	char		info[80];
4336 	fct_cmd_t	*cmd;
4337 	qlt_cmd_t	*qcmd;
4338 	uint32_t	h;
4339 	uint16_t	status;
4340 
4341 	h = QMEM_RD32(qlt, rsp+4);
4342 	status = QMEM_RD16(qlt, rsp+8);
4343 
4344 	if (!CMD_HANDLE_VALID(h)) {
4345 		EL(qlt, "handle = %xh\n", h);
4346 		/*
4347 		 * Solicited commands always have a valid handle.
4348 		 */
4349 		(void) snprintf(info, 80,
4350 		    "qlt_handle_sol_abort_completion: hndl-"
4351 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
4352 		info[79] = 0;
4353 		(void) fct_port_shutdown(qlt->qlt_port,
4354 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4355 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4356 		return;
4357 	}
4358 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4359 	if (cmd == NULL) {
4360 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
4361 		/*
4362 		 * What happened to the cmd ??
4363 		 */
4364 		(void) snprintf(info, 80,
4365 		    "qlt_handle_sol_abort_completion: cannot "
4366 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4367 		    (void *)rsp);
4368 		info[79] = 0;
4369 		(void) fct_port_shutdown(qlt->qlt_port,
4370 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4371 
4372 		return;
4373 	}
4374 
4375 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4376 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
4377 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4378 	if (qcmd->dbuf != NULL) {
4379 		qlt_dmem_free(NULL, qcmd->dbuf);
4380 		qcmd->dbuf = NULL;
4381 	}
4382 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4383 	if (status == 0) {
4384 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4385 	} else if (status == 0x31) {
4386 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4387 	} else {
4388 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4389 	}
4390 }
4391 
4392 static void
4393 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
4394 {
4395 	qlt_abts_cmd_t	*qcmd;
4396 	fct_cmd_t	*cmd;
4397 	uint32_t	remote_portid;
4398 	char		info[160];
4399 
4400 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4401 	    ((uint32_t)(resp[0x1A])) << 16;
4402 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4403 	    sizeof (qlt_abts_cmd_t), 0);
4404 	if (cmd == NULL) {
4405 		EL(qlt, "fct_alloc cmd==NULL\n");
4406 		(void) snprintf(info, 160,
4407 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
4408 		    "allocate space for fct_cmd", (void *)qlt);
4409 		info[159] = 0;
4410 		(void) fct_port_shutdown(qlt->qlt_port,
4411 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4412 		return;
4413 	}
4414 
4415 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
4416 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
4417 	bcopy(resp, qcmd->buf, IOCB_SIZE);
4418 	cmd->cmd_port = qlt->qlt_port;
4419 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4420 	if (cmd->cmd_rp_handle == 0xFFFF)
4421 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4422 
4423 	cmd->cmd_rportid = remote_portid;
4424 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4425 	    ((uint32_t)(resp[0x16])) << 16;
4426 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4427 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
4428 	fct_post_rcvd_cmd(cmd, 0);
4429 }
4430 
4431 static void
4432 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4433 {
4434 	uint16_t status;
4435 	char	info[80];
4436 
4437 	status = QMEM_RD16(qlt, resp+8);
4438 
4439 	if ((status == 0) || (status == 5)) {
4440 		return;
4441 	}
4442 	EL(qlt, "status = %xh\n", status);
4443 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4444 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4445 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4446 	info[79] = 0;
4447 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4448 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4449 }
4450 
4451 #ifdef	DEBUG
4452 uint32_t qlt_drop_abort_counter = 0;
4453 #endif
4454 
4455 fct_status_t
4456 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4457 {
4458 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4459 
4460 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4461 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4462 		return (FCT_NOT_FOUND);
4463 	}
4464 
4465 #ifdef DEBUG
4466 	if (qlt_drop_abort_counter > 0) {
4467 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4468 			return (FCT_SUCCESS);
4469 	}
4470 #endif
4471 
4472 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4473 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4474 	}
4475 
4476 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4477 		cmd->cmd_handle = 0;
4478 	}
4479 
4480 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4481 		return (qlt_send_abts_response(qlt, cmd, 1));
4482 	}
4483 
4484 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4485 		return (qlt_abort_purex(qlt, cmd));
4486 	}
4487 
4488 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4489 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4490 		return (qlt_abort_sol_cmd(qlt, cmd));
4491 	}
4492 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4493 
4494 	ASSERT(0);
4495 	return (FCT_FAILURE);
4496 }
4497 
4498 fct_status_t
4499 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4500 {
4501 	uint8_t *req;
4502 	qlt_cmd_t *qcmd;
4503 
4504 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4505 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4506 	EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
4507 
4508 	mutex_enter(&qlt->req_lock);
4509 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4510 	if (req == NULL) {
4511 		mutex_exit(&qlt->req_lock);
4512 
4513 		return (FCT_BUSY);
4514 	}
4515 	bzero(req, IOCB_SIZE);
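	/* Abort I/O IOCB; 0x33 is assumed to be the abort entry type */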
4516 	req[0] = 0x33; req[1] = 1;
4517 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4518 	if (cmd->cmd_rp) {
4519 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4520 	} else {
4521 		QMEM_WR16(qlt, req+8, 0xFFFF);
4522 	}
4523 
4524 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4525 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4526 	qlt_submit_req_entries(qlt, 1);
4527 	mutex_exit(&qlt->req_lock);
4528 
4529 	return (FCT_SUCCESS);
4530 }
4531 
4532 fct_status_t
4533 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4534 {
4535 	uint8_t *req;
4536 	qlt_cmd_t *qcmd;
4537 	fct_els_t *els;
4538 	uint8_t elsop, req1f;
4539 
4540 	els = (fct_els_t *)cmd->cmd_specific;
4541 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4542 	elsop = els->els_req_payload[0];
4543 	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
4544 	    elsop);
4545 	req1f = 0x60;	/* Terminate xchg */
4546 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4547 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4548 		req1f = (uint8_t)(req1f | BIT_4);
4549 	}
4550 
4551 	mutex_enter(&qlt->req_lock);
4552 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4553 	if (req == NULL) {
4554 		mutex_exit(&qlt->req_lock);
4555 
4556 		return (FCT_BUSY);
4557 	}
4558 
4559 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4560 	bzero(req, IOCB_SIZE);
4561 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4562 	req[0x16] = elsop; req[0x1f] = req1f;
4563 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4564 	if (cmd->cmd_rp) {
4565 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4566 		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
4567 	} else {
4568 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4569 		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
4570 	}
4571 
4572 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4573 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4574 	qlt_submit_req_entries(qlt, 1);
4575 	mutex_exit(&qlt->req_lock);
4576 
4577 	return (FCT_SUCCESS);
4578 }
4579 
4580 fct_status_t
4581 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4582 {
4583 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4584 	uint8_t *req;
4585 	uint16_t flags;
4586 
4587 	flags = (uint16_t)(BIT_14 |
4588 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
4589 	EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4590 
4591 	mutex_enter(&qlt->req_lock);
4592 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4593 	if (req == NULL) {
4594 		mutex_exit(&qlt->req_lock);
4595 
4596 		return (FCT_BUSY);
4597 	}
4598 
4599 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4600 	bzero(req, IOCB_SIZE);
4601 	req[0] = 0x12; req[1] = 0x1;
4602 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4603 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4604 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4605 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4606 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4607 	QMEM_WR16(qlt, req+0x1A, flags);
4608 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4609 	qlt_submit_req_entries(qlt, 1);
4610 	mutex_exit(&qlt->req_lock);
4611 
4612 	return (FCT_SUCCESS);
4613 }
4614 
4615 fct_status_t
4616 qlt_send_cmd(fct_cmd_t *cmd)
4617 {
4618 	qlt_state_t *qlt;
4619 
4620 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4621 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4622 		return (qlt_send_els(qlt, cmd));
4623 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4624 		return (qlt_send_ct(qlt, cmd));
4625 	}
4626 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4627 
4628 	ASSERT(0);
4629 	return (FCT_FAILURE);
4630 }
4631 
4632 fct_status_t
4633 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4634 {
4635 	uint8_t *req;
4636 	fct_els_t *els;
4637 	qlt_cmd_t *qcmd;
4638 	stmf_data_buf_t *buf;
4639 	qlt_dmem_bctl_t *bctl;
4640 	uint32_t sz, minsz;
4641 
4642 	els = (fct_els_t *)cmd->cmd_specific;
4643 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4644 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4645 	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
4646 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4647 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4648 	if (buf == NULL) {
4649 		return (FCT_BUSY);
4650 	}
4651 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4652 
4653 	qcmd->dbuf = buf;
4654 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4655 	    els->els_req_size);
4656 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4657 
4658 	mutex_enter(&qlt->req_lock);
4659 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4660 	if (req == NULL) {
4661 		qlt_dmem_free(NULL, buf);
4662 		mutex_exit(&qlt->req_lock);
4663 		return (FCT_BUSY);
4664 	}
4665 	bzero(req, IOCB_SIZE);
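	/*
	 * Solicited ELS request sent through the ELS pass-through IOCB
	 * (entry type 0x53): request payload in the first data segment,
	 * response payload expected in the second.
	 */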
4666 	req[0] = 0x53; req[1] = 1;
4667 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4668 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4669 	QMEM_WR16(qlt, (&req[0xC]), 1);
4670 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4671 	QMEM_WR16(qlt, (&req[0x14]), 1);
4672 	req[0x16] = els->els_req_payload[0];
4673 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4674 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
4675 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
4676 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
4677 	}
4678 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4679 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4680 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4681 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4682 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4683 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4684 	    qcmd->param.resp_offset));
4685 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4686 	qlt_submit_req_entries(qlt, 1);
4687 	mutex_exit(&qlt->req_lock);
4688 
4689 	return (FCT_SUCCESS);
4690 }
4691 
4692 fct_status_t
4693 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4694 {
4695 	uint8_t *req;
4696 	fct_sol_ct_t *ct;
4697 	qlt_cmd_t *qcmd;
4698 	stmf_data_buf_t *buf;
4699 	qlt_dmem_bctl_t *bctl;
4700 	uint32_t sz, minsz;
4701 
4702 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4703 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4704 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4705 	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
4706 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4707 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4708 	if (buf == NULL) {
4709 		return (FCT_BUSY);
4710 	}
4711 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4712 
4713 	qcmd->dbuf = buf;
4714 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4715 	    ct->ct_req_size);
4716 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4717 
4718 	mutex_enter(&qlt->req_lock);
4719 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4720 	if (req == NULL) {
4721 		qlt_dmem_free(NULL, buf);
4722 		mutex_exit(&qlt->req_lock);
4723 		return (FCT_BUSY);
4724 	}
4725 	bzero(req, IOCB_SIZE);
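	/*
	 * CT pass-through IOCB (entry type 0x29, assumed): request payload
	 * in the first data segment, response in the second.
	 */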
4726 	req[0] = 0x29; req[1] = 1;
4727 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4728 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4729 	QMEM_WR16(qlt, (&req[0xC]), 1);
4730 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4731 	QMEM_WR16(qlt, (&req[0x14]), 1);
4732 
4733 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4734 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4735 
4736 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4737 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4738 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4739 	    qcmd->param.resp_offset));		/* RESPONSE DSD */
4740 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4741 
4742 	qlt_submit_req_entries(qlt, 1);
4743 	mutex_exit(&qlt->req_lock);
4744 
4745 	return (FCT_SUCCESS);
4746 }
4747 
4748 
4749 /*
4750  * All QLT_FIRMWARE_* events are mainly handled in this function.
4751  * It cannot be called in interrupt context.
4752  *
4753  * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4754  * and qlt_ioctl_lock.
4755  */
4756 static fct_status_t
4757 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4758 {
4759 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4760 	int		i;
4761 	int		retries, n;
4762 	uint_t		size_left;
4763 	char		c = ' ';
4764 	uint32_t	addr, endaddr, words_to_read;
4765 	caddr_t		buf;
4766 	fct_status_t	ret;
4767 
4768 	mutex_enter(&qlt->qlt_ioctl_lock);
4769 	/*
4770 	 * Make sure that there is no outstanding dump task
4771 	 */
4772 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4773 		mutex_exit(&qlt->qlt_ioctl_lock);
4774 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4775 		    qlt->qlt_ioctl_flags);
4776 		EL(qlt, "outstanding\n");
4777 		return (FCT_FAILURE);
4778 	}
4779 
4780 	/*
4781 	 * Make sure not to overwrite an existing dump
4782 	 */
4783 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4784 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4785 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4786 		/*
4787 		 * If we already have one dump that wasn't triggered by the user
4788 		 * and the user hasn't fetched it, we shouldn't dump again.
4789 		 */
4790 		mutex_exit(&qlt->qlt_ioctl_lock);
4791 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4792 		    qlt->qlt_ioctl_flags);
4793 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
4794 		    "is one already outstanding.", qlt->instance);
4795 		return (FCT_FAILURE);
4796 	}
4797 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
4798 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
4799 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
4800 	} else {
4801 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
4802 	}
4803 	mutex_exit(&qlt->qlt_ioctl_lock);
4804 
4805 	size_left = QLT_FWDUMP_BUFSIZE;
4806 	if (!qlt->qlt_fwdump_buf) {
4807 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
4808 		/*
4809 		 * This is the only place where we allocate the dump buffer.
4810 		 * Once allocated, it is used until the port is detached.
4811 		 */
4812 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
4813 	}
4814 
4815 	/*
4816 	 * Start to dump firmware
4817 	 */
4818 	buf = (caddr_t)qlt->qlt_fwdump_buf;
4819 
4820 	/*
4821 	 * Print the ISP firmware revision number and attributes information,
4822 	 * then read the RISC to Host Status register
4823 	 */
4824 	n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
4825 	    "Attributes %04x\n\nR2H Status Register\n%08x",
4826 	    qlt->fw_major, qlt->fw_minor,
4827 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
4828 	buf += n; size_left -= n;
4829 
4830 	/*
4831 	 * Before pausing the RISC, make sure no mailbox can execute
4832 	 */
4833 	mutex_enter(&qlt->mbox_lock);
4834 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
4835 		/*
4836 		 * Wait to grab the mailboxes
4837 		 */
4838 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
4839 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
4840 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
4841 			    ddi_get_lbolt() + drv_usectohz(1000000));
4842 			if (retries > 5) {
4843 				mutex_exit(&qlt->mbox_lock);
4844 				EL(qlt, "can't drain out mailbox commands\n");
4845 				goto dump_fail;
4846 			}
4847 		}
4848 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
4849 		cv_broadcast(&qlt->mbox_cv);
4850 	}
4851 	mutex_exit(&qlt->mbox_lock);
4852 
4853 	/*
4854 	 * Pause the RISC processor
4855 	 */
4856 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
4857 
4858 	/*
4859 	 * Wait for the RISC processor to pause
4860 	 */
4861 	for (i = 0; i < 200; i++) {
4862 		if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
4863 			break;
4864 		}
4865 		drv_usecwait(1000);
4866 	}
4867 	if (i == 200) {
4868 		EL(qlt, "can't pause\n");
4869 		return (FCT_FAILURE);
4870 	}
4871 
4872 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
4873 		goto over_25xx_specific_dump;
4874 	}
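	/*
	 * Most register blocks below are captured by writing a bank
	 * selector to offset 0x54 and then reading the bank contents
	 * starting at offset 0xc0 via qlt_fwdump_dump_regs().
	 */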
4875 	n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
4876 	buf += n; size_left -= n;
4877 	REG_WR32(qlt, 0x54, 0x7000);
4878 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4879 	buf += n; size_left -= n;
4880 	REG_WR32(qlt, 0x54, 0x7010);
4881 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4882 	buf += n; size_left -= n;
4883 	REG_WR32(qlt, 0x54, 0x7C00);
4884 
4885 	n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
4886 	buf += n; size_left -= n;
4887 	REG_WR32(qlt, 0xC0, 0x1);
4888 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
4889 	buf += n; size_left -= n;
4890 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
4891 	buf += n; size_left -= n;
4892 	REG_WR32(qlt, 0xC0, 0x0);
4893 
4894 over_25xx_specific_dump:;
4895 	n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
4896 	buf += n; size_left -= n;
4897 	/*
4898 	 * Capture data from 32 registers
4899 	 */
4900 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
4901 	buf += n; size_left -= n;
4902 
4903 	/*
4904 	 * Disable interrupts
4905 	 */
4906 	REG_WR32(qlt, 0xc, 0);
4907 
4908 	/*
4909 	 * Shadow registers
4910 	 */
4911 	n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
4912 	buf += n; size_left -= n;
4913 
4914 	REG_WR32(qlt, 0x54, 0xF70);
4915 	addr = 0xb0000000;
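	/*
	 * Each shadow register is selected by writing its address to
	 * offset 0xF0 and read back from offset 0xFC; 24xx parts expose
	 * 7 shadow registers, 25xx/81xx parts expose 11.
	 */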
4916 	for (i = 0; i < 0xb; i++) {
4917 		if ((!qlt->qlt_25xx_chip) &&
4918 		    (!qlt->qlt_81xx_chip) &&
4919 		    (i >= 7)) {
4920 			break;
4921 		}
4922 		if (i && ((i & 7) == 0)) {
4923 			n = (int)snprintf(buf, size_left, "\n");
4924 			buf += n; size_left -= n;
4925 		}
4926 		REG_WR32(qlt, 0xF0, addr);
4927 		n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
4928 		buf += n; size_left -= n;
4929 		addr += 0x100000;
4930 	}
4931 
4932 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
4933 		REG_WR32(qlt, 0x54, 0x10);
4934 		n = (int)snprintf(buf, size_left,
4935 		    "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
4936 		buf += n; size_left -= n;
4937 	}
4938 
4939 	/*
4940 	 * Mailbox registers
4941 	 */
4942 	n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
4943 	buf += n; size_left -= n;
4944 	for (i = 0; i < 32; i += 2) {
4945 		if ((i + 2) & 15) {
4946 			c = ' ';
4947 		} else {
4948 			c = '\n';
4949 		}
4950 		n = (int)snprintf(buf, size_left, "%04x %04x%c",
4951 		    REG_RD16(qlt, 0x80 + (i << 1)),
4952 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
4953 		buf += n; size_left -= n;
4954 	}
4955 
4956 	/*
4957 	 * Transfer sequence registers
4958 	 */
4959 	n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
4960 	buf += n; size_left -= n;
4961 
4962 	REG_WR32(qlt, 0x54, 0xBF00);
4963 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4964 	buf += n; size_left -= n;
4965 	REG_WR32(qlt, 0x54, 0xBF10);
4966 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4967 	buf += n; size_left -= n;
4968 	REG_WR32(qlt, 0x54, 0xBF20);
4969 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4970 	buf += n; size_left -= n;
4971 	REG_WR32(qlt, 0x54, 0xBF30);
4972 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4973 	buf += n; size_left -= n;
4974 	REG_WR32(qlt, 0x54, 0xBF40);
4975 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4976 	buf += n; size_left -= n;
4977 	REG_WR32(qlt, 0x54, 0xBF50);
4978 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4979 	buf += n; size_left -= n;
4980 	REG_WR32(qlt, 0x54, 0xBF60);
4981 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4982 	buf += n; size_left -= n;
4983 	REG_WR32(qlt, 0x54, 0xBF70);
4984 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4985 	buf += n; size_left -= n;
4986 	n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
4987 	buf += n; size_left -= n;
4988 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
4989 		REG_WR32(qlt, 0x54, 0xBFC0);
4990 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4991 		buf += n; size_left -= n;
4992 		REG_WR32(qlt, 0x54, 0xBFD0);
4993 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4994 		buf += n; size_left -= n;
4995 	}
4996 	REG_WR32(qlt, 0x54, 0xBFE0);
4997 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4998 	buf += n; size_left -= n;
4999 	n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5000 	buf += n; size_left -= n;
5001 	REG_WR32(qlt, 0x54, 0xBFF0);
5002 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5003 	buf += n; size_left -= n;
5004 
5005 	/*
5006 	 * Receive sequence registers
5007 	 */
5008 	n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5009 	buf += n; size_left -= n;
5010 	REG_WR32(qlt, 0x54, 0xFF00);
5011 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5012 	buf += n; size_left -= n;
5013 	REG_WR32(qlt, 0x54, 0xFF10);
5014 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5015 	buf += n; size_left -= n;
5016 	REG_WR32(qlt, 0x54, 0xFF20);
5017 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5018 	buf += n; size_left -= n;
5019 	REG_WR32(qlt, 0x54, 0xFF30);
5020 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5021 	buf += n; size_left -= n;
5022 	REG_WR32(qlt, 0x54, 0xFF40);
5023 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5024 	buf += n; size_left -= n;
5025 	REG_WR32(qlt, 0x54, 0xFF50);
5026 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5027 	buf += n; size_left -= n;
5028 	REG_WR32(qlt, 0x54, 0xFF60);
5029 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5030 	buf += n; size_left -= n;
5031 	REG_WR32(qlt, 0x54, 0xFF70);
5032 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5033 	buf += n; size_left -= n;
5034 	n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5035 	buf += n; size_left -= n;
5036 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5037 		REG_WR32(qlt, 0x54, 0xFFC0);
5038 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5039 		buf += n; size_left -= n;
5040 	}
5041 	REG_WR32(qlt, 0x54, 0xFFD0);
5042 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5043 	buf += n; size_left -= n;
5044 	n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5045 	buf += n; size_left -= n;
5046 	REG_WR32(qlt, 0x54, 0xFFE0);
5047 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5048 	buf += n; size_left -= n;
5049 	n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5050 	buf += n; size_left -= n;
5051 	REG_WR32(qlt, 0x54, 0xFFF0);
5052 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5053 	buf += n; size_left -= n;
5054 
5055 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5056 		goto over_aseq_regs;
5057 
5058 	/*
5059 	 * Auxiliary sequencer registers
5060 	 */
5061 	n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5062 	buf += n; size_left -= n;
5063 	REG_WR32(qlt, 0x54, 0xB000);
5064 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5065 	buf += n; size_left -= n;
5066 	REG_WR32(qlt, 0x54, 0xB010);
5067 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5068 	buf += n; size_left -= n;
5069 	REG_WR32(qlt, 0x54, 0xB020);
5070 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5071 	buf += n; size_left -= n;
5072 	REG_WR32(qlt, 0x54, 0xB030);
5073 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5074 	buf += n; size_left -= n;
5075 	REG_WR32(qlt, 0x54, 0xB040);
5076 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5077 	buf += n; size_left -= n;
5078 	REG_WR32(qlt, 0x54, 0xB050);
5079 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5080 	buf += n; size_left -= n;
5081 	REG_WR32(qlt, 0x54, 0xB060);
5082 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5083 	buf += n; size_left -= n;
5084 	REG_WR32(qlt, 0x54, 0xB070);
5085 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5086 	buf += n; size_left -= n;
5087 	n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5088 	buf += n; size_left -= n;
5089 	REG_WR32(qlt, 0x54, 0xB0C0);
5090 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5091 	buf += n; size_left -= n;
5092 	REG_WR32(qlt, 0x54, 0xB0D0);
5093 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5094 	buf += n; size_left -= n;
5095 	n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5096 	buf += n; size_left -= n;
5097 	REG_WR32(qlt, 0x54, 0xB0E0);
5098 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5099 	buf += n; size_left -= n;
5100 	n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5101 	buf += n; size_left -= n;
5102 	REG_WR32(qlt, 0x54, 0xB0F0);
5103 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5104 	buf += n; size_left -= n;
5105 
5106 over_aseq_regs:;
5107 
5108 	/*
5109 	 * Command DMA registers
5110 	 */
5111 	n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5112 	buf += n; size_left -= n;
5113 	REG_WR32(qlt, 0x54, 0x7100);
5114 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5115 	buf += n; size_left -= n;
5116 
5117 	/*
5118 	 * Queues
5119 	 */
5120 	n = (int)snprintf(buf, size_left,
5121 	    "\nRequest0 Queue DMA Channel registers\n");
5122 	buf += n; size_left -= n;
5123 	REG_WR32(qlt, 0x54, 0x7200);
5124 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5125 	buf += n; size_left -= n;
5126 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5127 	buf += n; size_left -= n;
5128 
5129 	n = (int)snprintf(buf, size_left,
5130 	    "\n\nResponse0 Queue DMA Channel registers\n");
5131 	buf += n; size_left -= n;
5132 	REG_WR32(qlt, 0x54, 0x7300);
5133 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5134 	buf += n; size_left -= n;
5135 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5136 	buf += n; size_left -= n;
5137 
5138 	n = (int)snprintf(buf, size_left,
5139 	    "\n\nRequest1 Queue DMA Channel registers\n");
5140 	buf += n; size_left -= n;
5141 	REG_WR32(qlt, 0x54, 0x7400);
5142 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5143 	buf += n; size_left -= n;
5144 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5145 	buf += n; size_left -= n;
5146 
5147 	/*
5148 	 * Transmit DMA registers
5149 	 */
5150 	n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5151 	buf += n; size_left -= n;
5152 	REG_WR32(qlt, 0x54, 0x7600);
5153 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5154 	buf += n; size_left -= n;
5155 	REG_WR32(qlt, 0x54, 0x7610);
5156 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5157 	buf += n; size_left -= n;
5158 	n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5159 	buf += n; size_left -= n;
5160 	REG_WR32(qlt, 0x54, 0x7620);
5161 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5162 	buf += n; size_left -= n;
5163 	REG_WR32(qlt, 0x54, 0x7630);
5164 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5165 	buf += n; size_left -= n;
5166 	n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5167 	buf += n; size_left -= n;
5168 	REG_WR32(qlt, 0x54, 0x7640);
5169 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5170 	buf += n; size_left -= n;
5171 	REG_WR32(qlt, 0x54, 0x7650);
5172 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5173 	buf += n; size_left -= n;
5174 	n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5175 	buf += n; size_left -= n;
5176 	REG_WR32(qlt, 0x54, 0x7660);
5177 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5178 	buf += n; size_left -= n;
5179 	REG_WR32(qlt, 0x54, 0x7670);
5180 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5181 	buf += n; size_left -= n;
5182 	n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5183 	buf += n; size_left -= n;
5184 	REG_WR32(qlt, 0x54, 0x7680);
5185 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5186 	buf += n; size_left -= n;
5187 	REG_WR32(qlt, 0x54, 0x7690);
5188 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5189 	buf += n; size_left -= n;
5190 	n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5191 	buf += n; size_left -= n;
5192 	REG_WR32(qlt, 0x54, 0x76A0);
5193 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5194 	buf += n; size_left -= n;
5195 
5196 	/*
5197 	 * Receive DMA registers
5198 	 */
5199 	n = (int)snprintf(buf, size_left,
5200 	    "\nRCV Thread 0 Data DMA registers\n");
5201 	buf += n; size_left -= n;
5202 	REG_WR32(qlt, 0x54, 0x7700);
5203 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5204 	buf += n; size_left -= n;
5205 	REG_WR32(qlt, 0x54, 0x7710);
5206 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5207 	buf += n; size_left -= n;
5208 	n = (int)snprintf(buf, size_left,
5209 	    "\nRCV Thread 1 Data DMA registers\n");
5210 	buf += n; size_left -= n;
5211 	REG_WR32(qlt, 0x54, 0x7720);
5212 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5213 	buf += n; size_left -= n;
5214 	REG_WR32(qlt, 0x54, 0x7730);
5215 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5216 	buf += n; size_left -= n;
5217 
5218 	/*
5219 	 * RISC registers
5220 	 */
5221 	n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5222 	buf += n; size_left -= n;
5223 	REG_WR32(qlt, 0x54, 0x0F00);
5224 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5225 	buf += n; size_left -= n;
5226 	REG_WR32(qlt, 0x54, 0x0F10);
5227 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5228 	buf += n; size_left -= n;
5229 	REG_WR32(qlt, 0x54, 0x0F20);
5230 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5231 	buf += n; size_left -= n;
5232 	REG_WR32(qlt, 0x54, 0x0F30);
5233 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5234 	buf += n; size_left -= n;
5235 	REG_WR32(qlt, 0x54, 0x0F40);
5236 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5237 	buf += n; size_left -= n;
5238 	REG_WR32(qlt, 0x54, 0x0F50);
5239 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5240 	buf += n; size_left -= n;
5241 	REG_WR32(qlt, 0x54, 0x0F60);
5242 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5243 	buf += n; size_left -= n;
5244 	REG_WR32(qlt, 0x54, 0x0F70);
5245 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5246 	buf += n; size_left -= n;
5247 
5248 	/*
5249 	 * Local memory controller registers
5250 	 */
5251 	n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5252 	buf += n; size_left -= n;
5253 	REG_WR32(qlt, 0x54, 0x3000);
5254 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5255 	buf += n; size_left -= n;
5256 	REG_WR32(qlt, 0x54, 0x3010);
5257 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5258 	buf += n; size_left -= n;
5259 	REG_WR32(qlt, 0x54, 0x3020);
5260 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5261 	buf += n; size_left -= n;
5262 	REG_WR32(qlt, 0x54, 0x3030);
5263 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5264 	buf += n; size_left -= n;
5265 	REG_WR32(qlt, 0x54, 0x3040);
5266 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5267 	buf += n; size_left -= n;
5268 	REG_WR32(qlt, 0x54, 0x3050);
5269 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5270 	buf += n; size_left -= n;
5271 	REG_WR32(qlt, 0x54, 0x3060);
5272 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5273 	buf += n; size_left -= n;
5274 
5275 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5276 		REG_WR32(qlt, 0x54, 0x3070);
5277 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5278 		buf += n; size_left -= n;
5279 	}
5280 
5281 	/*
5282 	 * Fibre protocol module registers
5283 	 */
5284 	n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5285 	buf += n; size_left -= n;
5286 	REG_WR32(qlt, 0x54, 0x4000);
5287 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5288 	buf += n; size_left -= n;
5289 	REG_WR32(qlt, 0x54, 0x4010);
5290 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5291 	buf += n; size_left -= n;
5292 	REG_WR32(qlt, 0x54, 0x4020);
5293 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5294 	buf += n; size_left -= n;
5295 	REG_WR32(qlt, 0x54, 0x4030);
5296 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5297 	buf += n; size_left -= n;
5298 	REG_WR32(qlt, 0x54, 0x4040);
5299 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5300 	buf += n; size_left -= n;
5301 	REG_WR32(qlt, 0x54, 0x4050);
5302 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5303 	buf += n; size_left -= n;
5304 	REG_WR32(qlt, 0x54, 0x4060);
5305 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5306 	buf += n; size_left -= n;
5307 	REG_WR32(qlt, 0x54, 0x4070);
5308 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5309 	buf += n; size_left -= n;
5310 	REG_WR32(qlt, 0x54, 0x4080);
5311 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5312 	buf += n; size_left -= n;
5313 	REG_WR32(qlt, 0x54, 0x4090);
5314 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5315 	buf += n; size_left -= n;
5316 	REG_WR32(qlt, 0x54, 0x40A0);
5317 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5318 	buf += n; size_left -= n;
5319 	REG_WR32(qlt, 0x54, 0x40B0);
5320 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5321 	buf += n; size_left -= n;
5322 	if (qlt->qlt_81xx_chip) {
5323 		REG_WR32(qlt, 0x54, 0x40C0);
5324 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5325 		buf += n; size_left -= n;
5326 		REG_WR32(qlt, 0x54, 0x40D0);
5327 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5328 		buf += n; size_left -= n;
5329 	}
5330 
5331 	/*
5332 	 * Fibre buffer registers
5333 	 */
5334 	n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5335 	buf += n; size_left -= n;
5336 	REG_WR32(qlt, 0x54, 0x6000);
5337 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5338 	buf += n; size_left -= n;
5339 	REG_WR32(qlt, 0x54, 0x6010);
5340 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5341 	buf += n; size_left -= n;
5342 	REG_WR32(qlt, 0x54, 0x6020);
5343 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5344 	buf += n; size_left -= n;
5345 	REG_WR32(qlt, 0x54, 0x6030);
5346 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5347 	buf += n; size_left -= n;
5348 	REG_WR32(qlt, 0x54, 0x6040);
5349 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5350 	buf += n; size_left -= n;
5351 	REG_WR32(qlt, 0x54, 0x6100);
5352 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5353 	buf += n; size_left -= n;
5354 	REG_WR32(qlt, 0x54, 0x6130);
5355 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5356 	buf += n; size_left -= n;
5357 	REG_WR32(qlt, 0x54, 0x6150);
5358 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5359 	buf += n; size_left -= n;
5360 	REG_WR32(qlt, 0x54, 0x6170);
5361 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5362 	buf += n; size_left -= n;
5363 	REG_WR32(qlt, 0x54, 0x6190);
5364 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5365 	buf += n; size_left -= n;
5366 	REG_WR32(qlt, 0x54, 0x61B0);
5367 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5368 	buf += n; size_left -= n;
5369 	if (qlt->qlt_81xx_chip) {
5370 		REG_WR32(qlt, 0x54, 0x61C0);
5371 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5372 		buf += n; size_left -= n;
5373 	}
5374 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5375 		REG_WR32(qlt, 0x54, 0x6F00);
5376 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5377 		buf += n; size_left -= n;
5378 	}
5379 
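	/*
	 * Quiesce the chip before dumping memory: interrupts are disabled
	 * and the RISC is reset (reset_only) so that the raw mailbox
	 * commands below run with the firmware halted.  intr_sneak_counter
	 * appears intended to let the ISR absorb any stray interrupt that
	 * slips through around the reset.
	 */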
5380 	qlt->intr_sneak_counter = 10;
5381 	qlt_disable_intr(qlt);
5382 	mutex_enter(&qlt->intr_lock);
5383 	qlt->qlt_intr_enabled = 0;
5384 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
5385 	drv_usecwait(20);
5386 	qlt->intr_sneak_counter = 0;
5387 	mutex_exit(&qlt->intr_lock);
5388 
5389 	/*
5390 	 * Memory
5391 	 */
5392 	n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5393 	buf += n; size_left -= n;
5394 
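	/*
	 * Dump code RAM (0x20000-0x21fff) in chunks no larger than the
	 * mailbox DMA buffer: qlt_read_risc_ram() DMAs each chunk into
	 * host memory and qlt_dump_risc_ram() formats it into the dump
	 * buffer.
	 */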
5395 	addr = 0x20000;
5396 	endaddr = 0x22000;
5397 	words_to_read = 0;
5398 	while (addr < endaddr) {
5399 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5400 		if ((words_to_read + addr) > endaddr) {
5401 			words_to_read = endaddr - addr;
5402 		}
5403 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5404 		    QLT_SUCCESS) {
5405 			EL(qlt, "Error reading risc ram - CODE RAM status="
5406 			    "%llxh\n", ret);
5407 			goto dump_fail;
5408 		}
5409 
5410 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5411 		buf += n; size_left -= n;
5412 
5413 		if (size_left < 100000) {
5414 			EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5415 			    size_left);
5416 			goto dump_ok;
5417 		}
5418 		addr += words_to_read;
5419 	}
5420 
5421 	n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5422 	buf += n; size_left -= n;
5423 
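	/*
	 * External memory starts at 0x100000 and extends to one past the
	 * last address reported by the loaded firmware
	 * (fw_endaddrhi:fw_endaddrlo), rounded up to an 8-word boundary.
	 * It is read out in mailbox-buffer-sized chunks, as above.
	 */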
5424 	addr = 0x100000;
5425 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5426 	endaddr++;
5427 	if (endaddr & 7) {
5428 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
5429 	}
5430 
5431 	words_to_read = 0;
5432 	while (addr < endaddr) {
5433 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5434 		if ((words_to_read + addr) > endaddr) {
5435 			words_to_read = endaddr - addr;
5436 		}
5437 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5438 		    QLT_SUCCESS) {
5439 			EL(qlt, "Error reading risc ram - EXT RAM status="
5440 			    "%llxh\n", ret);
5441 			goto dump_fail;
5442 		}
5443 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5444 		buf += n; size_left -= n;
5445 		if (size_left < 100000) {
5446 			EL(qlt, "run out of space - EXT RAM\n");
5447 			goto dump_ok;
5448 		}
5449 		addr += words_to_read;
5450 	}
5451 
5452 	/*
5453 	 * Label the end tag
5454 	 */
5455 	n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5456 	buf += n; size_left -= n;
5457 
5458 	/*
5459 	 * Queue dumping
5460 	 */
5461 	n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5462 	buf += n; size_left -= n;
5463 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5464 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
5465 	buf += n; size_left -= n;
5466 
5467 	n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5468 	buf += n; size_left -= n;
5469 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5470 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5471 	buf += n; size_left -= n;
5472 
5473 	n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5474 	buf += n; size_left -= n;
5475 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5476 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5477 	buf += n; size_left -= n;
5478 
5479 	n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5480 	buf += n; size_left -= n;
5481 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5482 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5483 	buf += n; size_left -= n;
5484 
5485 	/*
5486 	 * Label dump reason
5487 	 */
5488 	n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5489 	    qlt->qlt_port_alias, ssci->st_additional_info);
5490 	buf += n; size_left -= n;
5491 
5492 dump_ok:
5493 	EL(qlt, "left-%d\n", size_left);
5494 
5495 	mutex_enter(&qlt->qlt_ioctl_lock);
5496 	qlt->qlt_ioctl_flags &=
5497 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5498 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5499 	mutex_exit(&qlt->qlt_ioctl_lock);
5500 	return (FCT_SUCCESS);
5501 
5502 dump_fail:
5503 	EL(qlt, "dump not done\n");
5504 	mutex_enter(&qlt->qlt_ioctl_lock);
5505 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5506 	mutex_exit(&qlt->qlt_ioctl_lock);
5507 	return (FCT_FAILURE);
5508 }
5509 
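/*
 * Read "count" 32-bit registers of the currently selected register bank,
 * starting at window offset "startaddr", and format them into "buf",
 * eight values per line.  Returns the number of characters written.
 */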
5510 static int
5511 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5512     uint_t size_left)
5513 {
5514 	int		i;
5515 	int		n;
5516 	char		c = ' ';
5517 
5518 	for (i = 0, n = 0; i < count; i++) {
5519 		if ((i + 1) & 7) {
5520 			c = ' ';
5521 		} else {
5522 			c = '\n';
5523 		}
5524 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5525 		    "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5526 	}
5527 	return (n);
5528 }
5529 
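/*
 * Format "words" 32-bit words that qlt_read_risc_ram() DMAed into the
 * mailbox DMA area, eight words per line, each line prefixed with its
 * RISC address.  Returns the number of characters written.
 */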
5530 static int
5531 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5532     caddr_t buf, uint_t size_left)
5533 {
5534 	int		i;
5535 	int		n;
5536 	char		c = ' ';
5537 	uint32_t	*ptr;
5538 
5539 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5540 	for (i = 0, n = 0; i < words; i++) {
5541 		if ((i & 7) == 0) {
5542 			n = (int)(n + (int)snprintf(&buf[n],
5543 			    (uint_t)(size_left - n), "%08x: ", addr + i));
5544 		}
5545 		if ((i + 1) & 7) {
5546 			c = ' ';
5547 		} else {
5548 			c = '\n';
5549 		}
5550 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5551 		    "%08x%c", ptr[i], c));
5552 	}
5553 	return (n);
5554 }
5555 
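/*
 * Format a queue of IOCB entries (32 16-bit words per entry) into "buf",
 * eight words per line, each line prefixed with the word offset.
 * Returns the number of characters written.
 */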
5556 static int
5557 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5558     uint_t size_left)
5559 {
5560 	int		i;
5561 	int		n;
5562 	char		c = ' ';
5563 	int		words;
5564 	uint16_t	*ptr;
5565 	uint16_t	w;
5566 
5567 	words = entries * 32;
5568 	ptr = (uint16_t *)qadr;
5569 	for (i = 0, n = 0; i < words; i++) {
5570 		if ((i & 7) == 0) {
5571 			n = (int)(n + (int)snprintf(&buf[n],
5572 			    (uint_t)(size_left - n), "%05x: ", i));
5573 		}
5574 		if ((i + 1) & 7) {
5575 			c = ' ';
5576 		} else {
5577 			c = '\n';
5578 		}
5579 		w = QMEM_RD16(qlt, &ptr[i]);
5580 		n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5581 		    w, c));
5582 	}
5583 	return (n);
5584 }
5585 
5586 /*
5587  * Only called by the debug dump path.  Interrupts are disabled and the
5588  * mailboxes, along with the mailbox DMA memory, are available.
5589  * Copies data from RISC RAM into system memory.
5590  */
5591 static fct_status_t
5592 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5593 {
5594 	uint64_t	da;
5595 	fct_status_t	ret;
5596 
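	/* Mailbox command 0xc: dump a region of RISC RAM to host memory. */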
5597 	REG_WR16(qlt, REG_MBOX(0), 0xc);
5598 	da = qlt->queue_mem_cookie.dmac_laddress;
5599 	da += MBOX_DMA_MEM_OFFSET;
5600 
5601 	/*
5602 	 * System destination address
5603 	 */
5604 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
5605 	da >>= 16;
5606 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
5607 	da >>= 16;
5608 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
5609 	da >>= 16;
5610 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
5611 
5612 	/*
5613 	 * Length
5614 	 */
5615 	REG_WR16(qlt, REG_MBOX(5), words & 0xffff);
5616 	REG_WR16(qlt, REG_MBOX(4), ((words >> 16) & 0xffff));
5617 
5618 	/*
5619 	 * RISC source address
5620 	 */
5621 	REG_WR16(qlt, REG_MBOX(1), addr & 0xffff);
5622 	REG_WR16(qlt, REG_MBOX(8), ((addr >> 16) & 0xffff));
5623 
5624 	ret = qlt_raw_mailbox_command(qlt);
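	/*
	 * HCCR write 0xA0000000: clear the RISC-to-host interrupt left
	 * behind by the polled mailbox command.
	 */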
5625 	REG_WR32(qlt, REG_HCCR, 0xA0000000);
5626 	if (ret == QLT_SUCCESS) {
5627 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5628 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5629 	} else {
5630 		EL(qlt, "qlt_raw_mailbox_command=ch status=%llxh\n", ret);
5631 	}
5632 	return (ret);
5633 }
5634 
5635 static void
5636 qlt_verify_fw(qlt_state_t *qlt)
5637 {
5638 	caddr_t req;
5639 	/* Just put it on the request queue */
5640 	mutex_enter(&qlt->req_lock);
5641 	req = qlt_get_req_entries(qlt, 1);
5642 	if (req == NULL) {
5643 		mutex_exit(&qlt->req_lock);
5644 		/* XXX handle this */
5645 		return;
5646 	}
5647 
5648 	bzero(req, IOCB_SIZE);
5649 
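	/* Entry type 0x1b: verify firmware IOCB, one entry. */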
5650 	req[0] = 0x1b;
5651 	req[1] = 1;
5652 
5653 	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
5654 	QMEM_WR16(qlt, (&req[0x8]), 1);    /*  options - don't update */
5655 	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
5656 
5657 	qlt_submit_req_entries(qlt, 1);
5658 	mutex_exit(&qlt->req_lock);
5659 }
5660 
5661 static void
5662 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5663 {
5664 	uint16_t	status;
5665 	char		info[80];
5666 
5667 	status = QMEM_RD16(qlt, rsp+8);
5668 	if (status != 0) {
5669 		(void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
5670 		    "status:%x, rsp:%p", status, (void *)rsp);
5671 		if (status == 3) {
5672 			uint16_t error_code;
5673 
5674 			error_code = QMEM_RD16(qlt, rsp+0xA);
5675 			(void) snprintf(info, 80, "qlt_handle_verify_fw_"
5676 			    "completion: error code:%x", error_code);
5677 		}
5678 	}
5679 }
5680 
5681 /*
5682  * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5683  *
5684  * Input:	Pointer to the adapter state structure.
5685  * Returns:	DDI_SUCCESS or DDI_FAILURE.
5686  * Context:	Kernel context.
5687  */
5688 static int
5689 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5690 {
5691 	int	rval = DDI_SUCCESS;
5692 
5693 	qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5694 	    kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5695 
5696 	if (qlt->el_trace_desc == NULL) {
5697 		cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5698 		    qlt->instance);
5699 		rval = DDI_FAILURE;
5700 	} else {
5701 		qlt->el_trace_desc->next = 0;
5702 		qlt->el_trace_desc->trace_buffer =
5703 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5704 
5705 		if (qlt->el_trace_desc->trace_buffer == NULL) {
5706 			cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5707 			    qlt->instance);
5708 			kmem_free(qlt->el_trace_desc,
5709 			    sizeof (qlt_el_trace_desc_t));
5710 			qlt->el_trace_desc = NULL;
5711 			rval = DDI_FAILURE;
5712 		} else {
5713 			qlt->el_trace_desc->trace_buffer_size =
5714 			    EL_TRACE_BUF_SIZE;
5715 			mutex_init(&qlt->el_trace_desc->mutex, NULL,
5716 			    MUTEX_DRIVER, NULL);
5717 		}
5718 	}
5719 
5720 	return (rval);
5721 }
5722 
5723 /*
5724  * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5725  *
5726  * Input:	Pointer to the adapter state structure.
5727  * Returns:	DDI_SUCCESS or DDI_FAILURE.
5728  * Context:	Kernel context.
5729  */
5730 static int
5731 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5732 {
5733 	int	rval = DDI_SUCCESS;
5734 
5735 	if (qlt->el_trace_desc == NULL) {
5736 		cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5737 		    qlt->instance);
5738 		rval = DDI_FAILURE;
5739 	} else {
5740 		if (qlt->el_trace_desc->trace_buffer != NULL) {
5741 			kmem_free(qlt->el_trace_desc->trace_buffer,
5742 			    qlt->el_trace_desc->trace_buffer_size);
5743 		}
5744 		mutex_destroy(&qlt->el_trace_desc->mutex);
5745 		kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5746 		qlt->el_trace_desc = NULL;
5747 	}
5748 
5749 	return (rval);
5750 }
5751 
5752 /*
5753  * qlt_el_msg
5754  *	Extended logging message
5755  *
5756  * Input:
5757  *	qlt:	adapter state pointer.
5758  *	fn:	function name.
5759  *	ce:	level
5760  *	...:	Variable argument list.
5761  *
5762  * Context:
5763  *	Kernel/Interrupt context.
5764  */
5765 void
5766 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5767 {
5768 	char		*s, *fmt = 0, *fmt1 = 0;
5769 	char		fmt2[EL_BUFFER_RESERVE];
5770 	int		rval, tmp;
5771 	int		tracing = 0;
5772 	va_list		vl;
5773 
5774 	/* Tracing is the default but it can be disabled. */
5775 	if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5776 		tracing = 1;
5777 
5778 		mutex_enter(&qlt->el_trace_desc->mutex);
5779 
5780 		/*
5781 		 * Ensure enough space for the string. Wrap to
5782 		 * start when default message allocation size
5783 		 * would overrun the end.
5784 		 */
5785 		if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5786 		    qlt->el_trace_desc->trace_buffer_size) {
5787 			fmt = qlt->el_trace_desc->trace_buffer;
5788 			qlt->el_trace_desc->next = 0;
5789 		} else {
5790 			fmt = qlt->el_trace_desc->trace_buffer +
5791 			    qlt->el_trace_desc->next;
5792 		}
5793 	}
5794 
5795 	/* if no buffer use the stack */
5796 	if (fmt == NULL) {
5797 		fmt = fmt2;
5798 	}
5799 
5800 	va_start(vl, ce);
5801 
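	/* The first variable argument is the printf-style format string. */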
5802 	s = va_arg(vl, char *);
5803 
5804 	rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
5805 	    "QEL qlt(%d): %s, ", qlt->instance, fn);
5806 	fmt1 = fmt + rval;
5807 	tmp = (int)vsnprintf(fmt1,
5808 	    (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
5809 	rval += tmp;
5810 
5811 	/*
5812 	 * Calculate the offset where the next message will go,
5813 	 * skipping the NULL.
5814 	 */
5815 	if (tracing) {
5816 		uint16_t next = (uint16_t)(rval += 1);
5817 		qlt->el_trace_desc->next += next;
5818 		mutex_exit(&qlt->el_trace_desc->mutex);
5819 	}
5820 
5821 	va_end(vl);
5822 }
5823 
5824 /*
5825  * qlt_dump_el_trace_buffer
5826  *	 Outputs extended logging trace buffer.
5827  *
5828  * Input:
5829  *	qlt:	adapter state pointer.
5830  */
5831 void
5832 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
5833 {
5834 	char		*dump_start = NULL;
5835 	char		*dump_current = NULL;
5836 	char		*trace_start;
5837 	char		*trace_end;
5838 	int		wrapped = 0;
5839 	int		rval;
5840 
5841 	mutex_enter(&qlt->el_trace_desc->mutex);
5842 
5843 	rval = qlt_validate_trace_desc(qlt);
5844 	if (rval != DDI_SUCCESS) {
5845 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
5846 		    qlt->instance);
5847 	} else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
5848 		dump_current = dump_start;
5849 		trace_start = qlt->el_trace_desc->trace_buffer;
5850 		trace_end = trace_start +
5851 		    qlt->el_trace_desc->trace_buffer_size;
5852 
5853 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
5854 		    qlt->instance,
5855 		    (void *)dump_start, (void *)trace_start);
5856 
5857 		while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
5858 		    (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
5859 			/* Show it... */
5860 			cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
5861 			    dump_current);
5862 			/* Make the next the current */
5863 			dump_current += (strlen(dump_current) + 1);
5864 			/* check for wrap */
5865 			if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
5866 				dump_current = trace_start;
5867 				wrapped = 1;
5868 			} else if (wrapped) {
5869 				/* Don't go past next. */
5870 				if ((trace_start + qlt->el_trace_desc->next) <=
5871 				    dump_current) {
5872 					break;
5873 				}
5874 			} else if (*dump_current == '\0') {
5875 				break;
5876 			}
5877 		}
5878 	}
5879 	mutex_exit(&qlt->el_trace_desc->mutex);
5880 }
5881 
5882 /*
5883  * qlt_validate_trace_desc
5884  *	 Ensures the extended logging trace descriptor is good
5885  *
5886  * Input:
5887  *	qlt:	adapter state pointer.
5888  *
5889  * Returns:
5890  *	DDI_SUCCESS if the trace descriptor is usable, else DDI_FAILURE.
5891  */
5892 static int
5893 qlt_validate_trace_desc(qlt_state_t *qlt)
5894 {
5895 	int	rval = DDI_SUCCESS;
5896 
5897 	if (qlt->el_trace_desc == NULL) {
5898 		rval = DDI_FAILURE;
5899 	} else if (qlt->el_trace_desc->trace_buffer == NULL) {
5900 		rval = DDI_FAILURE;
5901 	}
5902 	return (rval);
5903 }
5904 
5905 /*
5906  * qlt_find_trace_start
5907  *	 Locate the oldest extended logging trace entry.
5908  *
5909  * Input:
5910  *	qlt:	adapter state pointer.
5911  *
5912  * Returns:
5913  *	Pointer to a string.
5914  *
5915  * Context:
5916  *	Kernel/Interrupt context.
5917  */
5918 static char *
5919 qlt_find_trace_start(qlt_state_t *qlt)
5920 {
5921 	char	*trace_start = 0;
5922 	char	*trace_next  = 0;
5923 
5924 	trace_next = qlt->el_trace_desc->trace_buffer +
5925 	    qlt->el_trace_desc->next;
5926 
5927 	/*
5928 	 * If the buffer has not wrapped, "next" points at a NUL, so the
5929 	 * start is the beginning of the buffer.  If "next" points at a
5930 	 * character, the buffer has wrapped; the entry at "next" has been
5931 	 * partially overwritten, so the entry that follows it is the oldest
5932 	 * whole entry in the buffer and therefore the start.
5933 	 */
5934 
5935 	if ((trace_next + EL_BUFFER_RESERVE) >=
5936 	    (qlt->el_trace_desc->trace_buffer +
5937 	    qlt->el_trace_desc->trace_buffer_size)) {
5938 		trace_start = qlt->el_trace_desc->trace_buffer;
5939 	} else if (*trace_next != '\0') {
5940 		trace_start = trace_next + (strlen(trace_next) + 1);
5941 	} else {
5942 		trace_start = qlt->el_trace_desc->trace_buffer;
5943 	}
5944 	return (trace_start);
5945 }
5946