xref: /illumos-gate/usr/src/uts/common/io/comstar/port/qlt/qlt.c (revision 3fb517f786391b507780c78aabb8d98bfea9efe9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
29  */
30 
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/stat.h>
34 #include <sys/pci.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/file.h>
38 #include <sys/cred.h>
39 #include <sys/byteorder.h>
40 #include <sys/atomic.h>
41 #include <sys/scsi/scsi.h>
42 
43 #include <stmf_defines.h>
44 #include <fct_defines.h>
45 #include <stmf.h>
46 #include <portif.h>
47 #include <fct.h>
48 #include <qlt.h>
49 #include <qlt_dma.h>
50 #include <qlt_ioctl.h>
51 #include <qlt_open.h>
52 #include <stmf_ioctl.h>
53 
54 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
55 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
56 static void qlt_enable_intr(qlt_state_t *);
57 static void qlt_disable_intr(qlt_state_t *);
58 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
59 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
60 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
61     uint32_t word_count, uint32_t risc_addr);
62 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
63 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
64 					uint32_t dma_size);
65 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
66 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
67 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
68 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
69     stmf_state_change_info_t *ssci);
70 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
71 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
72 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
73 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
74 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
77 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
78     uint8_t *rsp);
79 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
80 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
81 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
82 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
83 static void qlt_verify_fw(qlt_state_t *qlt);
84 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
85 fct_status_t qlt_port_start(caddr_t arg);
86 fct_status_t qlt_port_stop(caddr_t arg);
87 fct_status_t qlt_port_online(qlt_state_t *qlt);
88 fct_status_t qlt_port_offline(qlt_state_t *qlt);
89 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
90     fct_link_info_t *li);
91 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
92 static fct_status_t qlt_force_lip(qlt_state_t *);
93 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
94 						fct_flogi_xchg_t *fx);
95 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
96 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
97 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
98     fct_remote_port_t *rp, fct_cmd_t *login);
99 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
100     fct_remote_port_t *rp);
101 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
102 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
103 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
104     fct_cmd_t *cmd, int terminate);
105 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
106 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
107 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
108     fct_cmd_t *cmd, uint32_t flags);
109 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
110 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
111 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
113 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
114 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
115 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
116     stmf_data_buf_t *dbuf, uint32_t ioflags);
117 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
118 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
119 static void qlt_release_intr(qlt_state_t *qlt);
120 static int qlt_setup_interrupts(qlt_state_t *qlt);
121 static void qlt_destroy_mutex(qlt_state_t *qlt);
122 
123 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
124     uint32_t words);
125 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
126     caddr_t buf, uint_t size_left);
127 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
128     caddr_t buf, uint_t size_left);
129 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
130     int count, uint_t size_left);
131 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
132     cred_t *credp, int *rval);
133 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
134 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
135 
136 static int qlt_setup_msi(qlt_state_t *qlt);
137 static int qlt_setup_msix(qlt_state_t *qlt);
138 
139 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
140 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
141 static int qlt_validate_trace_desc(qlt_state_t *qlt);
142 static char *qlt_find_trace_start(qlt_state_t *qlt);
143 
144 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
145 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
146 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
147     char **prop_val);
148 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
149 static int qlt_convert_string_to_ull(char *prop, int radix,
150     u_longlong_t *result);
151 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
152 static int qlt_quiesce(dev_info_t *dip);
153 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
154     uint32_t);
155 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
156     uint32_t *);
157 static void qlt_mps_reset(qlt_state_t *qlt);
158 static void qlt_properties(qlt_state_t *qlt);
159 
160 
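/*
 * SETELSBIT marks one ELS opcode in the 32-byte bitmap handed to the
 * firmware in qlt_port_online(): ((els) >> 3) & 0x1F selects the byte and
 * (els) & 7 selects the bit within it.  For example, an ELS opcode of 0x20
 * (PRLI) lands in byte 4, bit 0.  (Worked example added for clarity; it is
 * not part of the original source.)
 */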
161 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
162 	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
163 
164 int qlt_enable_msix = 0;
165 int qlt_enable_msi = 1;
166 
167 
168 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
169 
170 /* Array to quickly calculate next free buf index to use */
171 #if 0
172 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
173 #endif
174 
175 static struct cb_ops qlt_cb_ops = {
176 	qlt_open,
177 	qlt_close,
178 	nodev,
179 	nodev,
180 	nodev,
181 	nodev,
182 	nodev,
183 	qlt_ioctl,
184 	nodev,
185 	nodev,
186 	nodev,
187 	nochpoll,
188 	ddi_prop_op,
189 	0,
190 	D_MP | D_NEW
191 };
192 
193 static struct dev_ops qlt_ops = {
194 	DEVO_REV,
195 	0,
196 	nodev,
197 	nulldev,
198 	nulldev,
199 	qlt_attach,
200 	qlt_detach,
201 	nodev,
202 	&qlt_cb_ops,
203 	NULL,
204 	ddi_power,
205 	qlt_quiesce
206 };
207 
208 #ifndef	PORT_SPEED_10G
209 #define	PORT_SPEED_10G		16
210 #endif
211 
212 static struct modldrv modldrv = {
213 	&mod_driverops,
214 	QLT_NAME" "QLT_VERSION,
215 	&qlt_ops,
216 };
217 
218 static struct modlinkage modlinkage = {
219 	MODREV_1, &modldrv, NULL
220 };
221 
222 void *qlt_state = NULL;
223 kmutex_t qlt_global_lock;
224 static uint32_t qlt_loaded_counter = 0;
225 
226 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
227 			"-X Mode 1 133", "--Invalid--",
228 			"-X Mode 2 66", "-X Mode 2 100",
229 			"-X Mode 2 133", " 66" };
230 
231 /* Always use 64 bit DMA. */
232 static ddi_dma_attr_t qlt_queue_dma_attr = {
233 	DMA_ATTR_V0,		/* dma_attr_version */
234 	0,			/* low DMA address range */
235 	0xffffffffffffffff,	/* high DMA address range */
236 	0xffffffff,		/* DMA counter register */
237 	64,			/* DMA address alignment */
238 	0xff,			/* DMA burstsizes */
239 	1,			/* min effective DMA size */
240 	0xffffffff,		/* max DMA xfer size */
241 	0xffffffff,		/* segment boundary */
242 	1,			/* s/g list length */
243 	1,			/* granularity of device */
244 	0			/* DMA transfer flags */
245 };
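
/*
 * Note: the single-entry s/g list above keeps the queue DMA memory in one
 * physically contiguous chunk; qlt_attach() depends on that and fails the
 * bind if more than one cookie is returned.
 */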
246 
247 /* qlogic logging */
248 int enable_extended_logging = 0;
249 
250 static char qlt_provider_name[] = "qlt";
251 static struct stmf_port_provider *qlt_pp;
252 
253 int
254 _init(void)
255 {
256 	int ret;
257 
258 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
259 	if (ret == 0) {
260 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
261 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
262 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
263 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
264 		qlt_pp->pp_name = qlt_provider_name;
265 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
266 			stmf_free(qlt_pp);
267 			mutex_destroy(&qlt_global_lock);
268 			ddi_soft_state_fini(&qlt_state);
269 			return (EIO);
270 		}
271 		ret = mod_install(&modlinkage);
272 		if (ret != 0) {
273 			(void) stmf_deregister_port_provider(qlt_pp);
274 			stmf_free(qlt_pp);
275 			mutex_destroy(&qlt_global_lock);
276 			ddi_soft_state_fini(&qlt_state);
277 		}
278 	}
279 	return (ret);
280 }
281 
282 int
283 _fini(void)
284 {
285 	int ret;
286 
287 	if (qlt_loaded_counter)
288 		return (EBUSY);
289 	ret = mod_remove(&modlinkage);
290 	if (ret == 0) {
291 		(void) stmf_deregister_port_provider(qlt_pp);
292 		stmf_free(qlt_pp);
293 		mutex_destroy(&qlt_global_lock);
294 		ddi_soft_state_fini(&qlt_state);
295 	}
296 	return (ret);
297 }
298 
299 int
300 _info(struct modinfo *modinfop)
301 {
302 	return (mod_info(&modlinkage, modinfop));
303 }
304 
305 
306 static int
307 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
308 {
309 	int		instance;
310 	qlt_state_t	*qlt;
311 	ddi_device_acc_attr_t	dev_acc_attr;
312 	uint16_t	did;
313 	uint16_t	val;
314 	uint16_t	mr;
315 	size_t		discard;
316 	uint_t		ncookies;
317 	int		max_read_size;
318 	int		max_payload_size;
319 	fct_status_t	ret;
320 
321 	/* No support for suspend/resume yet */
322 	if (cmd != DDI_ATTACH)
323 		return (DDI_FAILURE);
324 	instance = ddi_get_instance(dip);
325 
326 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
327 		return (DDI_FAILURE);
328 	}
329 
330 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
331 	    NULL) {
332 		goto attach_fail_1;
333 	}
334 
335 	qlt->instance = instance;
336 
337 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
338 	qlt->dip = dip;
339 
340 	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
341 		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
342 		goto attach_fail_1;
343 	}
344 
345 	EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);
346 
347 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
348 		goto attach_fail_2;
349 	}
350 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
351 	if ((did != 0x2422) && (did != 0x2432) &&
352 	    (did != 0x8432) && (did != 0x2532) &&
353 	    (did != 0x8001)) {
354 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
355 		    instance, did);
356 		goto attach_fail_4;
357 	}
358 
359 	if ((did & 0xFF00) == 0x8000)
360 		qlt->qlt_81xx_chip = 1;
361 	else if ((did & 0xFF00) == 0x2500)
362 		qlt->qlt_25xx_chip = 1;
363 
364 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
365 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
366 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
367 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
368 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
369 		goto attach_fail_4;
370 	}
371 	if (did == 0x2422) {
372 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
373 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
374 		pci_bits >>= 8;
375 		pci_bits &= 0xf;
376 		if ((pci_bits == 3) || (pci_bits == 7)) {
377 			cmn_err(CE_NOTE,
378 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
379 			    instance, pci_speeds[pci_bits], pci_bits);
380 		} else {
381 			cmn_err(CE_WARN,
382 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
383 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
384 			    "(Invalid)", ((pci_bits == 0) ||
385 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
386 			    "32 bit slot ") : "", pci_bits);
387 		}
388 	}
389 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
390 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
391 		    (unsigned long long)ret);
392 		goto attach_fail_5;
393 	}
394 
395 	qlt_properties(qlt);
396 
397 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
398 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
399 		goto attach_fail_5;
400 	}
401 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
402 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
403 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
404 	    DDI_SUCCESS) {
405 		goto attach_fail_6;
406 	}
407 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
408 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
409 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
410 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
411 		goto attach_fail_7;
412 	}
413 	if (ncookies != 1)
414 		goto attach_fail_8;
415 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
416 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
417 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
418 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
419 
420 	/* mutexes are initialized by qlt_setup_interrupts() */
421 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
422 		goto attach_fail_8;
423 
424 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
425 	    "qlt%d", instance);
426 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
427 	    "%s,0", qlt->qlt_minor_name);
428 
429 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
430 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
431 		goto attach_fail_9;
432 	}
433 
434 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
435 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
436 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
437 
438 	/* Setup PCI cfg space registers */
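	/*
	 * Both knobs below come from qlt.conf; hypothetical example entries
	 * (values in bytes) would look like:
	 *	pci-max-read-request=2048;
	 *	pcie-max-payload-size=512;
	 * The default of 11 from qlt_read_int_prop() simply means the
	 * property is absent and the hardware setting is left alone.
	 */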
439 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
440 	if (max_read_size == 11)
441 		goto over_max_read_xfer_setting;
442 	if (did == 0x2422) {
443 		if (max_read_size == 512)
444 			val = 0;
445 		else if (max_read_size == 1024)
446 			val = 1;
447 		else if (max_read_size == 2048)
448 			val = 2;
449 		else if (max_read_size == 4096)
450 			val = 3;
451 		else {
452 			cmn_err(CE_WARN, "qlt(%d) malformed "
453 			    "pci-max-read-request in qlt.conf. Valid values "
454 			    "for this HBA are 512/1024/2048/4096", instance);
455 			goto over_max_read_xfer_setting;
456 		}
457 		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
458 		mr = (uint16_t)(mr & 0xfff3);
459 		mr = (uint16_t)(mr | (val << 2));
460 		PCICFG_WR16(qlt, 0x4E, mr);
461 	} else if ((did == 0x2432) || (did == 0x8432) ||
462 	    (did == 0x2532) || (did == 0x8001)) {
463 		if (max_read_size == 128)
464 			val = 0;
465 		else if (max_read_size == 256)
466 			val = 1;
467 		else if (max_read_size == 512)
468 			val = 2;
469 		else if (max_read_size == 1024)
470 			val = 3;
471 		else if (max_read_size == 2048)
472 			val = 4;
473 		else if (max_read_size == 4096)
474 			val = 5;
475 		else {
476 			cmn_err(CE_WARN, "qlt(%d) malformed "
477 			    "pci-max-read-request in qlt.conf. Valid values "
478 			    "for this HBA are 128/256/512/1024/2048/4096",
479 			    instance);
480 			goto over_max_read_xfer_setting;
481 		}
482 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
483 		mr = (uint16_t)(mr & 0x8fff);
484 		mr = (uint16_t)(mr | (val << 12));
485 		PCICFG_WR16(qlt, 0x54, mr);
486 	} else {
487 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
488 		    "pci-max-read-request for this device (%x)",
489 		    instance, did);
490 	}
491 over_max_read_xfer_setting:;
492 
493 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
494 	if (max_payload_size == 11)
495 		goto over_max_payload_setting;
496 	if ((did == 0x2432) || (did == 0x8432) ||
497 	    (did == 0x2532) || (did == 0x8001)) {
498 		if (max_payload_size == 128)
499 			val = 0;
500 		else if (max_payload_size == 256)
501 			val = 1;
502 		else if (max_payload_size == 512)
503 			val = 2;
504 		else if (max_payload_size == 1024)
505 			val = 3;
506 		else {
507 			cmn_err(CE_WARN, "qlt(%d) malformed "
508 			    "pcie-max-payload-size in qlt.conf. Valid values "
509 			    "for this HBA are 128/256/512/1024",
510 			    instance);
511 			goto over_max_payload_setting;
512 		}
513 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
514 		mr = (uint16_t)(mr & 0xff1f);
515 		mr = (uint16_t)(mr | (val << 5));
516 		PCICFG_WR16(qlt, 0x54, mr);
517 	} else {
518 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
519 		    "pcie-max-payload-size for this device (%x)",
520 		    instance, did);
521 	}
522 
523 over_max_payload_setting:;
524 
525 	qlt_enable_intr(qlt);
526 
527 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
528 		goto attach_fail_10;
529 
530 	ddi_report_dev(dip);
531 	return (DDI_SUCCESS);
532 
533 attach_fail_10:;
534 	mutex_destroy(&qlt->qlt_ioctl_lock);
535 	cv_destroy(&qlt->mbox_cv);
536 	cv_destroy(&qlt->rp_dereg_cv);
537 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
538 attach_fail_9:;
539 	qlt_destroy_mutex(qlt);
540 	qlt_release_intr(qlt);
541 attach_fail_8:;
542 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
543 attach_fail_7:;
544 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
545 attach_fail_6:;
546 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
547 attach_fail_5:;
548 	ddi_regs_map_free(&qlt->regs_acc_handle);
549 attach_fail_4:;
550 	pci_config_teardown(&qlt->pcicfg_acc_handle);
551 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
552 	(void) qlt_el_trace_desc_dtor(qlt);
553 attach_fail_2:;
554 attach_fail_1:;
555 	ddi_soft_state_free(qlt_state, instance);
556 	return (DDI_FAILURE);
557 }
558 
559 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
560 
561 /* ARGSUSED */
562 static int
563 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
564 {
565 	qlt_state_t *qlt;
566 
567 	int instance;
568 
569 	instance = ddi_get_instance(dip);
570 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
571 	    NULL) {
572 		return (DDI_FAILURE);
573 	}
574 
575 	if (qlt->fw_code01) {
576 		return (DDI_FAILURE);
577 	}
578 
579 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
580 	    qlt->qlt_state_not_acked) {
581 		return (DDI_FAILURE);
582 	}
583 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
584 		return (DDI_FAILURE);
585 	}
586 
587 	qlt_disable_intr(qlt);
588 
589 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
590 	qlt_destroy_mutex(qlt);
591 	qlt_release_intr(qlt);
592 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
593 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
594 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
595 	ddi_regs_map_free(&qlt->regs_acc_handle);
596 	pci_config_teardown(&qlt->pcicfg_acc_handle);
597 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
598 	cv_destroy(&qlt->mbox_cv);
599 	cv_destroy(&qlt->rp_dereg_cv);
600 	(void) qlt_el_trace_desc_dtor(qlt);
601 	ddi_soft_state_free(qlt_state, instance);
602 
603 	return (DDI_SUCCESS);
604 }
605 
606 /*
607  * qlt_quiesce	quiesce a device attached to the system.
608  */
609 static int
610 qlt_quiesce(dev_info_t *dip)
611 {
612 	qlt_state_t	*qlt;
613 	uint32_t	timer;
614 	uint32_t	stat;
615 
616 	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
617 	if (qlt == NULL) {
618 		/* Oh well.... */
619 		return (DDI_SUCCESS);
620 	}
621 
622 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
623 	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
624 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
625 	for (timer = 0; timer < 30000; timer++) {
626 		stat = REG_RD32(qlt, REG_RISC_STATUS);
627 		if (stat & RISC_HOST_INTR_REQUEST) {
628 			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
629 				REG_WR32(qlt, REG_HCCR,
630 				    HCCR_CMD(CLEAR_RISC_PAUSE));
631 				break;
632 			}
633 			REG_WR32(qlt, REG_HCCR,
634 			    HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
635 		}
636 		drv_usecwait(100);
637 	}
638 	/* Reset the chip. */
639 	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
640 	    PCI_X_XFER_CTRL);
641 	drv_usecwait(100);
642 
643 	qlt_disable_intr(qlt);
644 
645 	return (DDI_SUCCESS);
646 }
647 
648 static void
649 qlt_enable_intr(qlt_state_t *qlt)
650 {
651 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
652 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
653 	} else {
654 		int i;
655 		for (i = 0; i < qlt->intr_cnt; i++)
656 			(void) ddi_intr_enable(qlt->htable[i]);
657 	}
658 	qlt->qlt_intr_enabled = 1;
659 }
660 
661 static void
662 qlt_disable_intr(qlt_state_t *qlt)
663 {
664 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
665 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
666 	} else {
667 		int i;
668 		for (i = 0; i < qlt->intr_cnt; i++)
669 			(void) ddi_intr_disable(qlt->htable[i]);
670 	}
671 	qlt->qlt_intr_enabled = 0;
672 }
673 
674 static void
675 qlt_release_intr(qlt_state_t *qlt)
676 {
677 	if (qlt->htable) {
678 		int i;
679 		for (i = 0; i < qlt->intr_cnt; i++) {
680 			(void) ddi_intr_remove_handler(qlt->htable[i]);
681 			(void) ddi_intr_free(qlt->htable[i]);
682 		}
683 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
684 	}
685 	qlt->htable = NULL;
686 	qlt->intr_pri = 0;
687 	qlt->intr_cnt = 0;
688 	qlt->intr_size = 0;
689 	qlt->intr_cap = 0;
690 }
691 
692 
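/*
 * These mutexes are created at the interrupt priority recorded in
 * qlt->intr_pri (filled in by the interrupt setup routines below) so that
 * they can safely be acquired from qlt_isr().
 */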
693 static void
694 qlt_init_mutex(qlt_state_t *qlt)
695 {
696 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
697 	    INT2PTR(qlt->intr_pri, void *));
698 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
699 	    INT2PTR(qlt->intr_pri, void *));
700 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
701 	    INT2PTR(qlt->intr_pri, void *));
702 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
703 	    INT2PTR(qlt->intr_pri, void *));
704 }
705 
706 static void
707 qlt_destroy_mutex(qlt_state_t *qlt)
708 {
709 	mutex_destroy(&qlt->req_lock);
710 	mutex_destroy(&qlt->preq_lock);
711 	mutex_destroy(&qlt->mbox_lock);
712 	mutex_destroy(&qlt->intr_lock);
713 }
714 
715 
716 static int
717 qlt_setup_msix(qlt_state_t *qlt)
718 {
719 	int count, avail, actual;
720 	int ret;
721 	int itype = DDI_INTR_TYPE_MSIX;
722 	int i;
723 
724 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
725 	if (ret != DDI_SUCCESS || count == 0) {
726 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
727 		    count);
728 		return (DDI_FAILURE);
729 	}
730 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
731 	if (ret != DDI_SUCCESS || avail == 0) {
732 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
733 		    avail);
734 		return (DDI_FAILURE);
735 	}
736 	if (avail < count) {
737 		stmf_trace(qlt->qlt_port_alias,
738 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
739 	}
740 
741 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
742 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
743 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
744 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
745 	/* we need at least 2 interrupt vectors */
746 	if (ret != DDI_SUCCESS || actual < 2) {
747 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
748 		    actual);
749 		ret = DDI_FAILURE;
750 		goto release_intr;
751 	}
752 	if (actual < count) {
753 		EL(qlt, "requested: %d, received: %d\n", count, actual);
754 	}
755 
756 	qlt->intr_cnt = actual;
757 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
758 	if (ret != DDI_SUCCESS) {
759 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
760 		ret = DDI_FAILURE;
761 		goto release_intr;
762 	}
763 	qlt_init_mutex(qlt);
764 	for (i = 0; i < actual; i++) {
765 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
766 		    qlt, INT2PTR((uint_t)i, void *));
767 		if (ret != DDI_SUCCESS) {
768 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
769 			goto release_mutex;
770 		}
771 	}
772 
773 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
774 	qlt->intr_flags |= QLT_INTR_MSIX;
775 	return (DDI_SUCCESS);
776 
777 release_mutex:
778 	qlt_destroy_mutex(qlt);
779 release_intr:
780 	for (i = 0; i < actual; i++)
781 		(void) ddi_intr_free(qlt->htable[i]);
782 #if 0
783 free_mem:
784 #endif
785 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
786 	qlt->htable = NULL;
787 	qlt_release_intr(qlt);
788 	return (ret);
789 }
790 
791 
792 static int
793 qlt_setup_msi(qlt_state_t *qlt)
794 {
795 	int count, avail, actual;
796 	int itype = DDI_INTR_TYPE_MSI;
797 	int ret;
798 	int i;
799 
800 	/* get the # of interrupts */
801 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
802 	if (ret != DDI_SUCCESS || count == 0) {
803 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
804 		    count);
805 		return (DDI_FAILURE);
806 	}
807 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
808 	if (ret != DDI_SUCCESS || avail == 0) {
809 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
810 		    avail);
811 		return (DDI_FAILURE);
812 	}
813 	if (avail < count) {
814 		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
815 	}
816 	/* MSI requires only 1 interrupt. */
817 	count = 1;
818 
819 	/* allocate interrupt */
820 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
821 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
822 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
823 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
824 	if (ret != DDI_SUCCESS || actual == 0) {
825 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
826 		    actual);
827 		ret = DDI_FAILURE;
828 		goto free_mem;
829 	}
830 	if (actual < count) {
831 		EL(qlt, "requested: %d, received: %d\n", count, actual);
832 	}
833 	qlt->intr_cnt = actual;
834 
835 	/*
836 	 * Get priority for first msi, assume remaining are all the same.
837 	 */
838 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
839 	if (ret != DDI_SUCCESS) {
840 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
841 		ret = DDI_FAILURE;
842 		goto release_intr;
843 	}
844 	qlt_init_mutex(qlt);
845 
846 	/* add handler */
847 	for (i = 0; i < actual; i++) {
848 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
849 		    qlt, INT2PTR((uint_t)i, void *));
850 		if (ret != DDI_SUCCESS) {
851 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
852 			goto release_mutex;
853 		}
854 	}
855 
856 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
857 	qlt->intr_flags |= QLT_INTR_MSI;
858 	return (DDI_SUCCESS);
859 
860 release_mutex:
861 	qlt_destroy_mutex(qlt);
862 release_intr:
863 	for (i = 0; i < actual; i++)
864 		(void) ddi_intr_free(qlt->htable[i]);
865 free_mem:
866 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
867 	qlt->htable = NULL;
868 	qlt_release_intr(qlt);
869 	return (ret);
870 }
871 
872 static int
873 qlt_setup_fixed(qlt_state_t *qlt)
874 {
875 	int count;
876 	int actual;
877 	int ret;
878 	int itype = DDI_INTR_TYPE_FIXED;
879 
880 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
881 	/* Fixed interrupt mode provides only one interrupt. */
882 	if (ret != DDI_SUCCESS || count != 1) {
883 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
884 		    count);
885 		return (DDI_FAILURE);
886 	}
887 
888 	qlt->intr_size = sizeof (ddi_intr_handle_t);
889 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
890 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
891 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
892 	if (ret != DDI_SUCCESS || actual != 1) {
893 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
894 		    actual);
895 		ret = DDI_FAILURE;
896 		goto free_mem;
897 	}
898 
899 	qlt->intr_cnt = actual;
900 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
901 	if (ret != DDI_SUCCESS) {
902 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
903 		ret = DDI_FAILURE;
904 		goto release_intr;
905 	}
906 	qlt_init_mutex(qlt);
907 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
908 	if (ret != DDI_SUCCESS) {
909 		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
910 		goto release_mutex;
911 	}
912 
913 	qlt->intr_flags |= QLT_INTR_FIXED;
914 	return (DDI_SUCCESS);
915 
916 release_mutex:
917 	qlt_destroy_mutex(qlt);
918 release_intr:
919 	(void) ddi_intr_free(qlt->htable[0]);
920 free_mem:
921 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
922 	qlt->htable = NULL;
923 	qlt_release_intr(qlt);
924 	return (ret);
925 }
926 
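/*
 * Pick an interrupt type: try MSI-X first (when qlt_enable_msix is set),
 * then MSI, and fall back to a fixed interrupt if neither can be set up.
 */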
927 static int
928 qlt_setup_interrupts(qlt_state_t *qlt)
929 {
930 	int itypes = 0;
931 
932 /*
933  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
934  */
935 #ifndef __sparc
936 	if (qlt_enable_msi != 0) {
937 #endif
938 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
939 		itypes = DDI_INTR_TYPE_FIXED;
940 	}
941 
942 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
943 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
944 			return (DDI_SUCCESS);
945 	}
946 
947 	if (itypes & DDI_INTR_TYPE_MSI) {
948 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
949 			return (DDI_SUCCESS);
950 	}
951 #ifndef __sparc
952 	}
953 #endif
954 	return (qlt_setup_fixed(qlt));
955 }
956 
957 /*
958  * Fill in the HBA attributes
959  */
960 void
961 qlt_populate_hba_fru_details(struct fct_local_port *port,
962     struct fct_port_attrs *port_attrs)
963 {
964 	caddr_t	bufp;
965 	int len;
966 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
967 
968 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
969 	    "QLogic Corp.");
970 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
971 	    "%s", QLT_NAME);
972 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
973 	    "%s", QLT_VERSION);
974 	port_attrs->serial_number[0] = '\0';
975 	port_attrs->hardware_version[0] = '\0';
976 
977 	(void) snprintf(port_attrs->firmware_version,
978 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
979 	    qlt->fw_minor, qlt->fw_subminor);
980 
981 	/* Get FCode version */
982 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
983 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
984 	    (int *)&len) == DDI_PROP_SUCCESS) {
985 		(void) snprintf(port_attrs->option_rom_version,
986 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
987 		kmem_free(bufp, (uint_t)len);
988 		bufp = NULL;
989 	} else {
990 #ifdef __sparc
991 		(void) snprintf(port_attrs->option_rom_version,
992 		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
993 #else
994 		(void) snprintf(port_attrs->option_rom_version,
995 		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
996 #endif
997 	}
998 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
999 	    qlt->nvram->subsystem_vendor_id[1] << 8;
1000 
1001 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1002 	    qlt->nvram->max_frame_length[0];
1003 
1004 	port_attrs->supported_cos = 0x10000000;
1005 	port_attrs->supported_speed = PORT_SPEED_1G |
1006 	    PORT_SPEED_2G | PORT_SPEED_4G;
1007 	if (qlt->qlt_25xx_chip)
1008 		port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
1009 		    PORT_SPEED_8G;
1010 	if (qlt->qlt_81xx_chip)
1011 		port_attrs->supported_speed = PORT_SPEED_10G;
1012 
1013 	/* limit string length to nvr model_name length */
1014 	len = (qlt->qlt_81xx_chip) ? 16 : 8;
1015 	(void) snprintf(port_attrs->model,
1016 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1017 	    "%s", qlt->nvram->model_name);
1018 
1019 	(void) snprintf(port_attrs->model_description,
1020 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1021 	    FCHBA_MODEL_DESCRIPTION_LEN),
1022 	    "%s", qlt->nvram->model_name);
1023 }
1024 
1025 /* ARGSUSED */
1026 fct_status_t
1027 qlt_info(uint32_t cmd, fct_local_port_t *port,
1028     void *arg, uint8_t *buf, uint32_t *bufsizep)
1029 {
1030 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
1031 	mbox_cmd_t	*mcp;
1032 	fct_status_t	ret = FCT_SUCCESS;
1033 	uint8_t		*p;
1034 	fct_port_link_status_t	*link_status;
1035 
1036 	switch (cmd) {
1037 	case FC_TGT_PORT_RLS:
1038 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1039 			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1040 			    "fct_port_link_status_t=%xh\n", *bufsizep,
1041 			    sizeof (fct_port_link_status_t));
1042 			ret = FCT_FAILURE;
1043 			break;
1044 		}
1045 		/* send mailbox command to get link status */
1046 		mcp = qlt_alloc_mailbox_command(qlt, 156);
1047 		if (mcp == NULL) {
1048 			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1049 			ret = FCT_ALLOC_FAILURE;
1050 			break;
1051 		}
1052 
1053 		/* GET LINK STATUS count */
1054 		mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
1055 		mcp->to_fw[8] = 156/4;
1056 		mcp->to_fw_mask |= BIT_1 | BIT_8;
1057 		mcp->from_fw_mask |= BIT_1 | BIT_2;
1058 
1059 		ret = qlt_mailbox_command(qlt, mcp);
1060 		if (ret != QLT_SUCCESS) {
1061 			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1062 			qlt_free_mailbox_command(qlt, mcp);
1063 			break;
1064 		}
1065 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1066 
1067 		p = mcp->dbuf->db_sglist[0].seg_addr;
1068 		link_status = (fct_port_link_status_t *)buf;
1069 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1070 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1071 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1072 		link_status->PrimitiveSeqProtocolErrorCount =
1073 		    LE_32(*((uint32_t *)(p + 12)));
1074 		link_status->InvalidTransmissionWordCount =
1075 		    LE_32(*((uint32_t *)(p + 16)));
1076 		link_status->InvalidCRCCount =
1077 		    LE_32(*((uint32_t *)(p + 20)));
1078 
1079 		qlt_free_mailbox_command(qlt, mcp);
1080 		break;
1081 	default:
1082 		EL(qlt, "Unknown cmd=%xh\n", cmd);
1083 		ret = FCT_FAILURE;
1084 		break;
1085 	}
1086 	return (ret);
1087 }
1088 
1089 fct_status_t
1090 qlt_port_start(caddr_t arg)
1091 {
1092 	qlt_state_t *qlt = (qlt_state_t *)arg;
1093 	fct_local_port_t *port;
1094 	fct_dbuf_store_t *fds;
1095 	fct_status_t ret;
1096 
1097 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1098 		return (FCT_FAILURE);
1099 	}
1100 	/* Initialize the ddi_dma_handle free pool */
1101 	qlt_dma_handle_pool_init(qlt);
1102 
1103 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1104 	if (port == NULL) {
1105 		goto qlt_pstart_fail_1;
1106 	}
1107 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1108 	if (fds == NULL) {
1109 		goto qlt_pstart_fail_2;
1110 	}
1111 	qlt->qlt_port = port;
1112 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
1113 	fds->fds_free_data_buf = qlt_dmem_free;
1114 	fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
1115 	fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
1116 	fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
1117 	fds->fds_copy_threshold = MMU_PAGESIZE;
1118 	fds->fds_fca_private = (void *)qlt;
1119 	/*
1120 	 * Since we keep everything in the state struct and don't allocate any
1121 	 * port private area, just use that pointer to point to the
1122 	 * state struct.
1123 	 */
1124 	port->port_fca_private = qlt;
1125 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
1126 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1127 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1128 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1129 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1130 	port->port_default_alias = qlt->qlt_port_alias;
1131 	port->port_pp = qlt_pp;
1132 	port->port_fds = fds;
1133 	port->port_max_logins = QLT_MAX_LOGINS;
1134 	port->port_max_xchges = QLT_MAX_XCHGES;
1135 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1136 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1137 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1138 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1139 	port->port_get_link_info = qlt_get_link_info;
1140 	port->port_register_remote_port = qlt_register_remote_port;
1141 	port->port_deregister_remote_port = qlt_deregister_remote_port;
1142 	port->port_send_cmd = qlt_send_cmd;
1143 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1144 	port->port_send_cmd_response = qlt_send_cmd_response;
1145 	port->port_abort_cmd = qlt_abort_cmd;
1146 	port->port_ctl = qlt_ctl;
1147 	port->port_flogi_xchg = qlt_do_flogi;
1148 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1149 	port->port_info = qlt_info;
1150 	port->port_fca_version = FCT_FCA_MODREV_1;
1151 
1152 	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1153 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1154 		goto qlt_pstart_fail_2_5;
1155 	}
1156 
1157 	return (QLT_SUCCESS);
1158 #if 0
1159 qlt_pstart_fail_3:
1160 	(void) fct_deregister_local_port(port);
1161 #endif
1162 qlt_pstart_fail_2_5:
1163 	fct_free(fds);
1164 qlt_pstart_fail_2:
1165 	fct_free(port);
1166 	qlt->qlt_port = NULL;
1167 qlt_pstart_fail_1:
1168 	qlt_dma_handle_pool_fini(qlt);
1169 	qlt_dmem_fini(qlt);
1170 	return (QLT_FAILURE);
1171 }
1172 
1173 fct_status_t
1174 qlt_port_stop(caddr_t arg)
1175 {
1176 	qlt_state_t *qlt = (qlt_state_t *)arg;
1177 	fct_status_t ret;
1178 
1179 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1180 		EL(qlt, "fct_deregister_local_port status=%llxh\n", ret);
1181 		return (QLT_FAILURE);
1182 	}
1183 	fct_free(qlt->qlt_port->port_fds);
1184 	fct_free(qlt->qlt_port);
1185 	qlt->qlt_port = NULL;
1186 	qlt_dma_handle_pool_fini(qlt);
1187 	qlt_dmem_fini(qlt);
1188 	return (QLT_SUCCESS);
1189 }
1190 
1191 /*
1192  * Called by the framework to init the HBA.
1193  * Can be called in the middle of I/O. (Why??)
1194  * Should make sure the state is sane both before and after initialization.
1195  */
1196 fct_status_t
1197 qlt_port_online(qlt_state_t *qlt)
1198 {
1199 	uint64_t	da;
1200 	int		instance, i;
1201 	fct_status_t	ret;
1202 	uint16_t	rcount;
1203 	caddr_t		icb;
1204 	mbox_cmd_t	*mcp;
1205 	uint8_t		*elsbmp;
1206 
1207 	instance = ddi_get_instance(qlt->dip);
1208 
1209 	/* XXX Make sure a sane state */
1210 
1211 	if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
1212 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1213 		return (ret);
1214 	}
1215 
1216 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1217 
1218 	/* Get resource count */
1219 	REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
1220 	ret = qlt_raw_mailbox_command(qlt);
1221 	rcount = REG_RD16(qlt, REG_MBOX(3));
1222 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1223 	if (ret != QLT_SUCCESS) {
1224 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1225 		return (ret);
1226 	}
1227 
1228 	/* Enable PUREX */
1229 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
1230 	REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
1231 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1232 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1233 	ret = qlt_raw_mailbox_command(qlt);
1234 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1235 	if (ret != QLT_SUCCESS) {
1236 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1237 		cmn_err(CE_NOTE, "Enable PUREX failed");
1238 		return (ret);
1239 	}
1240 
1241 	/* Pass ELS bitmap to fw */
1242 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
1243 	REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
1244 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1245 	bzero(elsbmp, 32);
1246 	da = qlt->queue_mem_cookie.dmac_laddress;
1247 	da += MBOX_DMA_MEM_OFFSET;
1248 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
1249 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
1250 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
1251 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
1252 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1253 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1254 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1255 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1256 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1257 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1258 	SETELSBIT(elsbmp, ELS_OP_SCN);
1259 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1260 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1261 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1262 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1263 	SETELSBIT(elsbmp, ELS_OP_RNID);
1264 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1265 	    DDI_DMA_SYNC_FORDEV);
1266 	ret = qlt_raw_mailbox_command(qlt);
1267 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1268 	if (ret != QLT_SUCCESS) {
1269 		EL(qlt, "qlt_raw_mailbox_command=59h status=%llxh\n", ret);
1270 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1271 		    "elsbmp0=%x elsbmp1=%x", (long long)ret, elsbmp[0],
1272 		    elsbmp[1]);
1273 		return (ret);
1274 	}
1275 
1276 	/* Init queue pointers */
1277 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1278 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1279 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1280 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1281 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1282 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1283 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1284 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1285 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1286 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1287 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1288 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1289 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1290 
1291 	/*
1292 	 * XXX support for tunables. Also should we cache icb ?
1293 	 */
1294 	if (qlt->qlt_81xx_chip) {
1295 		/* allocate extra 64 bytes for Extended init control block */
1296 		mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1297 	} else {
1298 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1299 	}
1300 	if (mcp == NULL) {
1301 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1302 		return (STMF_ALLOC_FAILURE);
1303 	}
1304 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1305 	if (qlt->qlt_81xx_chip) {
1306 		bzero(icb, 0xC0);
1307 	} else {
1308 		bzero(icb, 0x80);
1309 	}
1310 	da = qlt->queue_mem_cookie.dmac_laddress;
1311 	DMEM_WR16(qlt, icb, 1);		/* Version */
1312 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1313 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1314 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1315 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1316 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1317 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1318 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1319 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1320 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1321 	if (!qlt->qlt_81xx_chip) {
1322 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1323 	}
1324 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1325 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1326 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1327 	DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1328 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1329 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1330 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1331 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1332 	if (qlt->qlt_81xx_chip) {
1333 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1334 
1335 		DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1336 		DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1337 		DMEM_WR32(qlt, icb+0x70,
1338 		    qlt81nvr->enode_mac[0] |
1339 		    (qlt81nvr->enode_mac[1] << 8) |
1340 		    (qlt81nvr->enode_mac[2] << 16) |
1341 		    (qlt81nvr->enode_mac[3] << 24));
1342 		DMEM_WR16(qlt, icb+0x74,
1343 		    qlt81nvr->enode_mac[4] |
1344 		    (qlt81nvr->enode_mac[5] << 8));
1345 	} else {
1346 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1347 		    BIT_2 | BIT_1 | BIT_0);
1348 		DMEM_WR32(qlt, icb+0x60, BIT_5);
1349 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1350 		    BIT_4);
1351 	}
1352 
1353 	if (qlt->qlt_81xx_chip) {
1354 		qlt_dmem_bctl_t		*bctl;
1355 		uint32_t		index;
1356 		caddr_t			src;
1357 		caddr_t			dst;
1358 		qlt_nvram_81xx_t	*qlt81nvr;
1359 
1360 		dst = icb+0x80;
1361 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1362 		src = (caddr_t)&qlt81nvr->ext_blk;
1363 		index = sizeof (qlt_ext_icb_81xx_t);
1364 
1365 		/* Use defaults for cases where we find nothing in NVR */
1366 		if (*src == 0) {
1367 			EL(qlt, "nvram eicb=null\n");
1368 			cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1369 			    instance);
1370 			qlt81nvr->ext_blk.version[0] = 1;
1371 /*
1372  * not yet, for !FIP firmware at least
1373  *
1374  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1375  */
1376 #ifdef _LITTLE_ENDIAN
1377 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1378 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1379 #else
1380 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1381 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1382 #endif
1383 		}
1384 
1385 		while (index--) {
1386 			*dst++ = *src++;
1387 		}
1388 
1389 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1390 		da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1391 
1392 		mcp->to_fw[11] = LSW(LSD(da));
1393 		mcp->to_fw[10] = MSW(LSD(da));
1394 		mcp->to_fw[13] = LSW(MSD(da));
1395 		mcp->to_fw[12] = MSW(MSD(da));
1396 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1397 		    0xffff);
1398 
1399 		/* eicb enable */
1400 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1401 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1402 		    BIT_1;
1403 	}
1404 
1405 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1406 	mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
1407 
1408 	/*
1409 	 * This is the 1st command after adapter initialize which will
1410 	 * use interrupts and regular mailbox interface.
1411 	 */
1412 	qlt->mbox_io_state = MBOX_STATE_READY;
1413 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1414 	/* Issue mailbox to firmware */
1415 	ret = qlt_mailbox_command(qlt, mcp);
1416 	if (ret != QLT_SUCCESS) {
1417 		EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1418 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1419 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1420 	}
1421 
1422 	mcp->to_fw_mask = BIT_0;
1423 	mcp->from_fw_mask = BIT_0 | BIT_1;
1424 	mcp->to_fw[0] = 0x28;
1425 	ret = qlt_mailbox_command(qlt, mcp);
1426 	if (ret != QLT_SUCCESS) {
1427 		EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1428 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1429 		    (long long)ret);
1430 	}
1431 
1432 	/*
1433 	 * Report FW versions for 81xx - MPI rev is useful
1434 	 */
1435 	if (qlt->qlt_81xx_chip) {
1436 		mcp->to_fw_mask = BIT_0;
1437 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1438 		    BIT_0;
1439 		mcp->to_fw[0] = 0x8;
1440 		ret = qlt_mailbox_command(qlt, mcp);
1441 		if (ret != QLT_SUCCESS) {
1442 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
1443 		} else {
1444 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1445 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1446 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1447 			    mcp->from_fw[11] & 0xff);
1448 		}
1449 	}
1450 
1451 	qlt_free_mailbox_command(qlt, mcp);
1452 
1453 	for (i = 0; i < 5; i++) {
1454 		qlt->qlt_bufref[i] = 0;
1455 	}
1456 	qlt->qlt_bumpbucket = 0;
1457 	qlt->qlt_pmintry = 0;
1458 	qlt->qlt_pmin_ok = 0;
1459 
1460 	if (ret != QLT_SUCCESS)
1461 		return (ret);
1462 	return (FCT_SUCCESS);
1463 }
1464 
1465 fct_status_t
1466 qlt_port_offline(qlt_state_t *qlt)
1467 {
1468 	int		retries;
1469 
1470 	mutex_enter(&qlt->mbox_lock);
1471 
1472 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1473 		mutex_exit(&qlt->mbox_lock);
1474 		goto poff_mbox_done;
1475 	}
1476 
1477 	/* Wait to grab the mailboxes */
1478 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1479 	    retries++) {
1480 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1481 		if ((retries > 5) ||
1482 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1483 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1484 			mutex_exit(&qlt->mbox_lock);
1485 			goto poff_mbox_done;
1486 		}
1487 	}
1488 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1489 	mutex_exit(&qlt->mbox_lock);
1490 poff_mbox_done:;
1491 	qlt->intr_sneak_counter = 10;
1492 	mutex_enter(&qlt->intr_lock);
1493 	(void) qlt_reset_chip(qlt);
1494 	drv_usecwait(20);
1495 	qlt->intr_sneak_counter = 0;
1496 	mutex_exit(&qlt->intr_lock);
1497 
1498 	return (FCT_SUCCESS);
1499 }
1500 
1501 static fct_status_t
1502 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1503 {
1504 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1505 	mbox_cmd_t *mcp;
1506 	fct_status_t fc_ret;
1507 	fct_status_t ret;
1508 	clock_t et;
1509 
1510 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1511 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1512 link_info_retry:
1513 	mcp->to_fw[0] = MBC_GET_ID;
1514 	mcp->to_fw[9] = 0;
1515 	mcp->to_fw_mask |= BIT_0 | BIT_9;
1516 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1517 	/* Issue mailbox to firmware */
1518 	ret = qlt_mailbox_command(qlt, mcp);
1519 	if (ret != QLT_SUCCESS) {
1520 		EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1521 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1522 			/* Firmware is not ready */
1523 			if (ddi_get_lbolt() < et) {
1524 				delay(drv_usectohz(50000));
1525 				goto link_info_retry;
1526 			}
1527 		}
1528 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1529 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1530 		fc_ret = FCT_FAILURE;
1531 	} else {
1532 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1533 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1534 
1535 		li->port_speed = qlt->link_speed;
1536 		switch (mcp->from_fw[6]) {
1537 		case 1:
1538 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1539 			li->port_fca_flogi_done = 1;
1540 			break;
1541 		case 0:
1542 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1543 			li->port_no_fct_flogi = 1;
1544 			break;
1545 		case 3:
1546 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1547 			li->port_fca_flogi_done = 1;
1548 			break;
1549 		case 2: /*FALLTHROUGH*/
1550 		case 4:
1551 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1552 			li->port_fca_flogi_done = 1;
1553 			break;
1554 		default:
1555 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1556 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1557 		}
1558 		qlt->cur_topology = li->port_topology;
1559 		fc_ret = FCT_SUCCESS;
1560 	}
1561 	qlt_free_mailbox_command(qlt, mcp);
1562 
1563 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1564 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1565 		mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
1566 		mcp->to_fw[1] = 0x7FE;
1567 		mcp->to_fw[9] = 0;
1568 		mcp->to_fw[10] = 0;
1569 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1570 		fc_ret = qlt_mailbox_command(qlt, mcp);
1571 		if (fc_ret != QLT_SUCCESS) {
1572 			EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1573 			    fc_ret);
1574 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1575 			    "database for F_port failed, ret = %llx", fc_ret);
1576 		} else {
1577 			uint8_t *p;
1578 
1579 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1580 			p = mcp->dbuf->db_sglist[0].seg_addr;
1581 			bcopy(p + 0x18, li->port_rpwwn, 8);
1582 			bcopy(p + 0x20, li->port_rnwwn, 8);
1583 		}
1584 		qlt_free_mailbox_command(qlt, mcp);
1585 	}
1586 	return (fc_ret);
1587 }
1588 
1589 static int
1590 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1591 {
1592 	int		instance;
1593 	qlt_state_t	*qlt;
1594 
1595 	if (otype != OTYP_CHR) {
1596 		return (EINVAL);
1597 	}
1598 
1599 	/*
1600 	 * Since this is for debugging only, only allow root to issue ioctls.
1601 	 */
1602 	if (drv_priv(credp)) {
1603 		return (EPERM);
1604 	}
1605 
1606 	instance = (int)getminor(*devp);
1607 	qlt = ddi_get_soft_state(qlt_state, instance);
1608 	if (qlt == NULL) {
1609 		return (ENXIO);
1610 	}
1611 
1612 	mutex_enter(&qlt->qlt_ioctl_lock);
1613 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1614 		/*
1615 		 * It is already open for exclusive access.
1616 		 * So shut the door on this caller.
1617 		 */
1618 		mutex_exit(&qlt->qlt_ioctl_lock);
1619 		return (EBUSY);
1620 	}
1621 
1622 	if (flag & FEXCL) {
1623 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1624 			/*
1625 			 * Exclusive operation not possible
1626 			 * as it is already opened
1627 			 */
1628 			mutex_exit(&qlt->qlt_ioctl_lock);
1629 			return (EBUSY);
1630 		}
1631 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1632 	}
1633 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1634 	mutex_exit(&qlt->qlt_ioctl_lock);
1635 
1636 	return (0);
1637 }
1638 
1639 /* ARGSUSED */
1640 static int
1641 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1642 {
1643 	int		instance;
1644 	qlt_state_t	*qlt;
1645 
1646 	if (otype != OTYP_CHR) {
1647 		return (EINVAL);
1648 	}
1649 
1650 	instance = (int)getminor(dev);
1651 	qlt = ddi_get_soft_state(qlt_state, instance);
1652 	if (qlt == NULL) {
1653 		return (ENXIO);
1654 	}
1655 
1656 	mutex_enter(&qlt->qlt_ioctl_lock);
1657 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1658 		mutex_exit(&qlt->qlt_ioctl_lock);
1659 		return (ENODEV);
1660 	}
1661 
1662 	/*
1663 	 * It looks like there's one hole here: there could be several
1664 	 * concurrent shared open sessions, but we never check for that case.
1665 	 * It will not hurt too much, so disregard it for now.
1666 	 */
1667 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1668 	mutex_exit(&qlt->qlt_ioctl_lock);
1669 
1670 	return (0);
1671 }
1672 
1673 /*
1674  * All of these ioctls are unstable interfaces which are meant to be used
1675  * in a controlled lab env. No formal testing will be (or needs to be) done
1676  * for these ioctls. In particular, note that running with additionally
1677  * uploaded firmware is not supported; it is provided here for test
1678  * purposes only.
1679  */
1680 /* ARGSUSED */
1681 static int
1682 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1683     cred_t *credp, int *rval)
1684 {
1685 	qlt_state_t	*qlt;
1686 	int		ret = 0;
1687 #ifdef _LITTLE_ENDIAN
1688 	int		i;
1689 #endif
1690 	stmf_iocdata_t	*iocd;
1691 	void		*ibuf = NULL;
1692 	void		*obuf = NULL;
1693 	uint32_t	*intp;
1694 	qlt_fw_info_t	*fwi;
1695 	mbox_cmd_t	*mcp;
1696 	fct_status_t	st;
1697 	char		info[80];
1698 	fct_status_t	ret2;
1699 
1700 	if (drv_priv(credp) != 0)
1701 		return (EPERM);
1702 
1703 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1704 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1705 	if (ret)
1706 		return (ret);
1707 	iocd->stmf_error = 0;
1708 
1709 	switch (cmd) {
1710 	case QLT_IOCTL_FETCH_FWDUMP:
1711 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1712 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1713 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1714 			ret = EINVAL;
1715 			break;
1716 		}
1717 		mutex_enter(&qlt->qlt_ioctl_lock);
1718 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1719 			mutex_exit(&qlt->qlt_ioctl_lock);
1720 			ret = ENODATA;
1721 			EL(qlt, "no fwdump\n");
1722 			iocd->stmf_error = QLTIO_NO_DUMP;
1723 			break;
1724 		}
1725 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1726 			mutex_exit(&qlt->qlt_ioctl_lock);
1727 			ret = EBUSY;
1728 			EL(qlt, "fwdump inprogress\n");
1729 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1730 			break;
1731 		}
1732 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1733 			mutex_exit(&qlt->qlt_ioctl_lock);
1734 			ret = EEXIST;
1735 			EL(qlt, "fwdump already fetched\n");
1736 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1737 			break;
1738 		}
1739 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1740 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1741 		mutex_exit(&qlt->qlt_ioctl_lock);
1742 
1743 		break;
1744 
1745 	case QLT_IOCTL_TRIGGER_FWDUMP:
1746 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1747 			ret = EACCES;
1748 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1749 			break;
1750 		}
1751 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1752 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1753 		info[79] = 0;
1754 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1755 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1756 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1757 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1758 			    "%llxh\n", ret2);
1759 			ret = EIO;
1760 		}
1761 		break;
1762 	case QLT_IOCTL_UPLOAD_FW:
1763 		if ((iocd->stmf_ibuf_size < 1024) ||
1764 		    (iocd->stmf_ibuf_size & 3)) {
1765 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1766 			    iocd->stmf_ibuf_size);
1767 			ret = EINVAL;
1768 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1769 			break;
1770 		}
1771 		intp = (uint32_t *)ibuf;
1772 #ifdef _LITTLE_ENDIAN
1773 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1774 			intp[i] = BSWAP_32(intp[i]);
1775 		}
1776 #endif
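		/*
		 * Sanity-check the image layout: in each segment, word 2 is
		 * the RISC load address and word 3 the length in words; the
		 * second segment immediately follows the first and the two
		 * lengths must add up to the size of the supplied buffer.
		 * Word 8 carries attribute bits tying the image to a chip
		 * family (bit 3: 81xx, bit 2: 25xx, bits 0-1: 24xx).
		 */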
1777 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1778 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1779 		    iocd->stmf_ibuf_size)) {
1780 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1781 			    iocd->stmf_ibuf_size);
1782 			ret = EINVAL;
1783 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1784 			break;
1785 		}
1786 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1787 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1788 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1789 		    ((intp[8] & 3) == 0))) {
1790 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1791 			ret = EACCES;
1792 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1793 			break;
1794 		}
1795 
1796 		/* Everything looks OK, let's copy this firmware */
1797 		if (qlt->fw_code01) {
1798 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1799 			    qlt->fw_length02) << 2);
1800 			qlt->fw_code01 = NULL;
1801 		} else {
1802 			atomic_add_32(&qlt_loaded_counter, 1);
1803 		}
1804 		qlt->fw_length01 = intp[3];
1805 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1806 		    KM_SLEEP);
1807 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1808 		qlt->fw_addr01 = intp[2];
1809 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1810 		qlt->fw_addr02 = qlt->fw_code02[2];
1811 		qlt->fw_length02 = qlt->fw_code02[3];
1812 		break;
1813 
1814 	case QLT_IOCTL_CLEAR_FW:
1815 		if (qlt->fw_code01) {
1816 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1817 			    qlt->fw_length02) << 2);
1818 			qlt->fw_code01 = NULL;
1819 			atomic_add_32(&qlt_loaded_counter, -1);
1820 		}
1821 		break;
1822 
1823 	case QLT_IOCTL_GET_FW_INFO:
1824 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1825 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1826 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1827 			ret = EINVAL;
1828 			break;
1829 		}
1830 		fwi = (qlt_fw_info_t *)obuf;
1831 		if (qlt->qlt_stay_offline) {
1832 			fwi->fwi_stay_offline = 1;
1833 		}
1834 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1835 			fwi->fwi_port_active = 1;
1836 		}
1837 		fwi->fwi_active_major = qlt->fw_major;
1838 		fwi->fwi_active_minor = qlt->fw_minor;
1839 		fwi->fwi_active_subminor = qlt->fw_subminor;
1840 		fwi->fwi_active_attr = qlt->fw_attr;
1841 		if (qlt->fw_code01) {
1842 			fwi->fwi_fw_uploaded = 1;
1843 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1844 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1845 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1846 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1847 		}
1848 		if (qlt->qlt_81xx_chip) {
1849 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1850 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1851 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1852 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1853 		} else if (qlt->qlt_25xx_chip) {
1854 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1855 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1856 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1857 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1858 		} else {
1859 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1860 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1861 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1862 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1863 		}
1864 		break;
1865 
1866 	case QLT_IOCTL_STAY_OFFLINE:
1867 		if (!iocd->stmf_ibuf_size) {
1868 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1869 			    iocd->stmf_ibuf_size);
1870 			ret = EINVAL;
1871 			break;
1872 		}
1873 		if (*((char *)ibuf)) {
1874 			qlt->qlt_stay_offline = 1;
1875 		} else {
1876 			qlt->qlt_stay_offline = 0;
1877 		}
1878 		break;
1879 
1880 	case QLT_IOCTL_MBOX:
1881 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1882 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1883 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1884 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1885 			ret = EINVAL;
1886 			break;
1887 		}
1888 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1889 		if (mcp == NULL) {
1890 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1891 			ret = ENOMEM;
1892 			break;
1893 		}
1894 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1895 		st = qlt_mailbox_command(qlt, mcp);
1896 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1897 		qlt_free_mailbox_command(qlt, mcp);
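		/*
		 * A mailbox-level failure is not treated as an ioctl error;
		 * the mailbox registers copied into obuf carry the status
		 * back to the caller.
		 */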
1898 		if (st != QLT_SUCCESS) {
1899 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1900 				st = QLT_SUCCESS;
1901 		}
1902 		if (st != QLT_SUCCESS) {
1903 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1904 			ret = EIO;
1905 			switch (st) {
1906 			case QLT_MBOX_NOT_INITIALIZED:
1907 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1908 				break;
1909 			case QLT_MBOX_BUSY:
1910 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1911 				break;
1912 			case QLT_MBOX_TIMEOUT:
1913 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1914 				break;
1915 			case QLT_MBOX_ABORTED:
1916 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1917 				break;
1918 			}
1919 		}
1920 		break;
1921 
1922 	case QLT_IOCTL_ELOG:
1923 		qlt_dump_el_trace_buffer(qlt);
1924 		break;
1925 
1926 	default:
1927 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
1928 		ret = ENOTTY;
1929 	}
1930 
1931 	if (ret == 0) {
1932 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1933 	} else if (iocd->stmf_error) {
1934 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1935 	}
1936 	if (obuf) {
1937 		kmem_free(obuf, iocd->stmf_obuf_size);
1938 		obuf = NULL;
1939 	}
1940 	if (ibuf) {
1941 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1942 		ibuf = NULL;
1943 	}
1944 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1945 	return (ret);
1946 }
1947 
1948 static fct_status_t
1949 qlt_force_lip(qlt_state_t *qlt)
1950 {
1951 	mbox_cmd_t	*mcp;
1952 	fct_status_t	 rval;
1953 
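	/*
	 * Issue the force-LIP mailbox command (opcode 0x0072); anything
	 * other than 0x4000 ("command complete") in mailbox 0 is treated
	 * as a failure.
	 */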
1954 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1955 	mcp->to_fw[0] = 0x0072;
1956 	mcp->to_fw[1] = BIT_4;
1957 	mcp->to_fw[3] = 1;
1958 	mcp->to_fw_mask |= BIT_1 | BIT_3;
1959 	rval = qlt_mailbox_command(qlt, mcp);
1960 	if (rval != FCT_SUCCESS) {
1961 		EL(qlt, "qlt force lip MB failed: rval=%x", rval);
1962 	} else {
1963 		if (mcp->from_fw[0] != 0x4000) {
1964 			QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1965 			    mcp->from_fw[0]);
1966 			rval = FCT_FAILURE;
1967 		}
1968 	}
1969 	qlt_free_mailbox_command(qlt, mcp);
1970 	return (rval);
1971 }
1972 
1973 static void
1974 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1975 {
1976 	stmf_change_status_t		st;
1977 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1978 	qlt_state_t			*qlt;
1979 	fct_status_t			ret;
1980 
1981 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1982 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1983 	    (cmd == FCT_CMD_FORCE_LIP) ||
1984 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1985 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1986 
1987 	qlt = (qlt_state_t *)port->port_fca_private;
1988 	st.st_completion_status = FCT_SUCCESS;
1989 	st.st_additional_info = NULL;
1990 
1991 	switch (cmd) {
1992 	case FCT_CMD_PORT_ONLINE:
1993 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1994 			st.st_completion_status = STMF_ALREADY;
1995 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1996 			st.st_completion_status = FCT_FAILURE;
1997 		if (st.st_completion_status == FCT_SUCCESS) {
1998 			qlt->qlt_state = FCT_STATE_ONLINING;
1999 			qlt->qlt_state_not_acked = 1;
2000 			st.st_completion_status = qlt_port_online(qlt);
2001 			if (st.st_completion_status != STMF_SUCCESS) {
2002 				EL(qlt, "PORT_ONLINE status=%xh\n",
2003 				    st.st_completion_status);
2004 				qlt->qlt_state = FCT_STATE_OFFLINE;
2005 				qlt->qlt_state_not_acked = 0;
2006 			} else {
2007 				qlt->qlt_state = FCT_STATE_ONLINE;
2008 			}
2009 		}
2010 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
2011 		qlt->qlt_change_state_flags = 0;
2012 		break;
2013 
2014 	case FCT_CMD_PORT_OFFLINE:
2015 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
2016 			st.st_completion_status = STMF_ALREADY;
2017 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
2018 			st.st_completion_status = FCT_FAILURE;
2019 		}
2020 		if (st.st_completion_status == FCT_SUCCESS) {
2021 			qlt->qlt_state = FCT_STATE_OFFLINING;
2022 			qlt->qlt_state_not_acked = 1;
2023 
2024 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
2025 				(void) qlt_firmware_dump(port, ssci);
2026 			}
2027 			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
2028 			st.st_completion_status = qlt_port_offline(qlt);
2029 			if (st.st_completion_status != STMF_SUCCESS) {
2030 				EL(qlt, "PORT_OFFLINE status=%xh\n",
2031 				    st.st_completion_status);
2032 				qlt->qlt_state = FCT_STATE_ONLINE;
2033 				qlt->qlt_state_not_acked = 0;
2034 			} else {
2035 				qlt->qlt_state = FCT_STATE_OFFLINE;
2036 			}
2037 		}
2038 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
2039 		break;
2040 
2041 	case FCT_ACK_PORT_ONLINE_COMPLETE:
2042 		qlt->qlt_state_not_acked = 0;
2043 		break;
2044 
2045 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
2046 		qlt->qlt_state_not_acked = 0;
2047 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
2048 		    (qlt->qlt_stay_offline == 0)) {
2049 			if ((ret = fct_port_initialize(port,
2050 			    qlt->qlt_change_state_flags,
2051 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
2052 			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
2053 				EL(qlt, "fct_port_initialize status=%llxh\n",
2054 				    ret);
2055 				cmn_err(CE_WARN, "qlt_ctl: "
2056 				    "fct_port_initialize failed, please use "
2057 				    "stmfstate to start the port-%s manually",
2058 				    qlt->qlt_port_alias);
2059 			}
2060 		}
2061 		break;
2062 
2063 	case FCT_CMD_FORCE_LIP:
2064 		if (qlt->qlt_81xx_chip) {
2065 			EL(qlt, "force lip is an unsupported command "
2066 			    "for this adapter type\n");
2067 		} else {
2068 			*((fct_status_t *)arg) = qlt_force_lip(qlt);
2069 			EL(qlt, "forcelip done\n");
2070 		}
2071 		break;
2072 
2073 	default:
2074 		EL(qlt, "unsupported cmd - 0x%02X", cmd);
2075 		break;
2076 	}
2077 }
2078 
2079 /* ARGSUSED */
2080 static fct_status_t
2081 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2082 {
2083 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
2084 
2085 	EL(qlt, "FLOGI requested not supported\n");
2086 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2087 	return (FCT_FAILURE);
2088 }
2089 
2090 /*
2091  * Return a pointer to n entries in the request queue. Assumes that
2092  * the request queue lock is held. Does a very short busy wait if fewer
2093  * entries than requested (or none) are available. Returns NULL if it
2094  * still cannot fulfill the request.
2095  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2096  */
2097 caddr_t
2098 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
2099 {
2100 	int try = 0;
2101 
2102 	while (qlt->req_available < n) {
2103 		uint32_t val1, val2, val3;
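		/*
		 * The firmware updates the OUT pointer asynchronously, so
		 * re-read it until three consecutive reads agree.
		 */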
2104 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2105 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2106 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2107 		if ((val1 != val2) || (val2 != val3))
2108 			continue;
2109 
2110 		qlt->req_ndx_from_fw = val1;
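		/*
		 * Free entries on the power-of-two ring; one slot is kept
		 * unused so a full ring is not mistaken for an empty one.
		 */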
2111 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2112 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2113 		    (REQUEST_QUEUE_ENTRIES - 1));
2114 		if (qlt->req_available < n) {
2115 			if (try < 2) {
2116 				drv_usecwait(100);
2117 				try++;
2118 				continue;
2119 			} else {
2120 				stmf_trace(qlt->qlt_port_alias,
2121 				    "Req Q is full");
2122 				return (NULL);
2123 			}
2124 		}
2125 		break;
2126 	}
2127 	/* We don't change anything until the entries are submitted */
2128 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2129 }
2130 
2131 /*
2132  * Updates the request-queue in pointer to the fw; assumes req lock is held.
2133  */
2134 void
2135 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2136 {
2137 	ASSERT(n >= 1);
2138 	qlt->req_ndx_to_fw += n;
2139 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2140 	qlt->req_available -= n;
2141 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2142 }
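
/*
 * Typical usage of the two routines above (a sketch modeled on the
 * callers in this file, e.g. the IDC ACK path in qlt_isr()):
 *
 *	mutex_enter(&qlt->req_lock);
 *	req = qlt_get_req_entries(qlt, 1);
 *	if (req != NULL) {
 *		bzero(req, IOCB_SIZE);
 *		... fill in the 64-byte IOCB ...
 *		qlt_submit_req_entries(qlt, 1);
 *	}
 *	mutex_exit(&qlt->req_lock);
 */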
2143 
2144 
2145 /*
2146  * Return a pointer to n entries in the priority request queue. Assumes that
2147  * the priority request queue lock is held. Does a very short busy wait
2148  * if fewer entries than requested (or none) are available. Returns NULL
2149  * if it still cannot fulfill the request.
2150  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2151  */
2152 caddr_t
2153 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2154 {
2155 	int try = 0;
2156 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2157 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2158 	    (PRIORITY_QUEUE_ENTRIES - 1));
2159 
2160 	while (req_available < n) {
2161 		uint32_t val1, val2, val3;
2162 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2163 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2164 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2165 		if ((val1 != val2) || (val2 != val3))
2166 			continue;
2167 
2168 		qlt->preq_ndx_from_fw = val1;
2169 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2170 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2171 		    (PRIORITY_QUEUE_ENTRIES - 1));
2172 		if (req_available < n) {
2173 			if (try < 2) {
2174 				drv_usecwait(100);
2175 				try++;
2176 				continue;
2177 			} else {
2178 				return (NULL);
2179 			}
2180 		}
2181 		break;
2182 	}
2183 	/* We don't change anything until the entries are submitted */
2184 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2185 }
2186 
2187 /*
2188  * Updates the priority queue in pointer to the fw; assumes preq lock is held.
2189  */
2190 void
2191 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2192 {
2193 	ASSERT(n >= 1);
2194 	qlt->preq_ndx_to_fw += n;
2195 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2196 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2197 }
2198 
2199 /*
2200  * - Should not be called from interrupt context.
2201  * - A very hardware-specific function. Does not touch driver state.
2202  * - Assumes that interrupts are disabled or not present.
2203  * - Expects that the caller makes sure that all activity has stopped
2204  *   and it is OK now to go ahead and reset the chip. Also the caller
2205  *   takes care of post-reset damage control.
2206  * - Called by initialize_adapter() and dump_fw() (for reset only).
2207  * - During attach() nothing much is happening and during initialize_adapter()
2208  *   the function (caller) does all the housekeeping so that this function
2209  *   can execute in peace.
2210  * - Returns 0 on success.
2211  */
2212 static fct_status_t
2213 qlt_reset_chip(qlt_state_t *qlt)
2214 {
2215 	int cntr;
2216 
2217 	EL(qlt, "initiated\n");
2218 
2219 	/* XXX: Switch off LEDs */
2220 
2221 	/* Disable Interrupts */
2222 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2223 	(void) REG_RD32(qlt, REG_INTR_CTRL);
2224 	/* Stop DMA */
2225 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2226 
2227 	/* Wait for DMA to be stopped */
2228 	cntr = 0;
2229 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2230 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2231 		cntr++;
2232 		/* 3 sec should be more than enough */
2233 		if (cntr == 300)
2234 			return (QLT_DMA_STUCK);
2235 	}
2236 
2237 	/* Reset the Chip */
2238 	REG_WR32(qlt, REG_CTRL_STATUS,
2239 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2240 
2241 	qlt->qlt_link_up = 0;
2242 
2243 	drv_usecwait(100);
2244 
2245 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2246 	cntr = 0;
2247 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2248 		delay(drv_usectohz(10000));
2249 		cntr++;
2250 		/* 3 sec should be more than enough */
2251 		if (cntr == 300)
2252 			return (QLT_ROM_STUCK);
2253 	}
2254 	/* Disable Interrupts (Probably not needed) */
2255 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2256 
2257 	return (QLT_SUCCESS);
2258 }
2259 /*
2260  * qlt_download_fw
2261  * - Should not be called from interrupt context.
2262  * - A very hardware-specific function. Does not touch driver state.
2263  * - Assumes that interrupts are disabled or not present.
2264  * - Expects that the caller makes sure that all activity has stopped
2265  *   and it is OK now to go ahead; the caller takes care of post-reset
2266  *   damage control.
2267  * - Called from the qlt_reset_chip_and_download_fw() path. Resets the
2268  *   chip, loads the firmware (uploaded or built-in) into RISC RAM,
2269  *   verifies its checksum, starts it and records the firmware revision.
2270  * - Returns 0 (QLT_SUCCESS) on success.
2271  */
2272 static fct_status_t
2273 qlt_download_fw(qlt_state_t *qlt)
2274 {
2275 	uint32_t start_addr;
2276 	fct_status_t ret;
2277 
2278 	EL(qlt, "initiated\n");
2279 
2280 	(void) qlt_reset_chip(qlt);
2281 
2282 	if (qlt->qlt_81xx_chip) {
2283 		qlt_mps_reset(qlt);
2284 	}
2285 
2286 	/* Load the two segments */
2287 	if (qlt->fw_code01 != NULL) {
2288 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2289 		    qlt->fw_addr01);
2290 		if (ret == QLT_SUCCESS) {
2291 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2292 			    qlt->fw_length02, qlt->fw_addr02);
2293 		}
2294 		start_addr = qlt->fw_addr01;
2295 	} else if (qlt->qlt_81xx_chip) {
2296 		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2297 		    fw8100_addr01);
2298 		if (ret == QLT_SUCCESS) {
2299 			ret = qlt_load_risc_ram(qlt, fw8100_code02,
2300 			    fw8100_length02, fw8100_addr02);
2301 		}
2302 		start_addr = fw8100_addr01;
2303 	} else if (qlt->qlt_25xx_chip) {
2304 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2305 		    fw2500_addr01);
2306 		if (ret == QLT_SUCCESS) {
2307 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
2308 			    fw2500_length02, fw2500_addr02);
2309 		}
2310 		start_addr = fw2500_addr01;
2311 	} else {
2312 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2313 		    fw2400_addr01);
2314 		if (ret == QLT_SUCCESS) {
2315 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
2316 			    fw2400_length02, fw2400_addr02);
2317 		}
2318 		start_addr = fw2400_addr01;
2319 	}
2320 	if (ret != QLT_SUCCESS) {
2321 		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2322 		return (ret);
2323 	}
2324 
2325 	/* Verify Checksum */
2326 	REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
2327 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2328 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2329 	ret = qlt_raw_mailbox_command(qlt);
2330 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2331 	if (ret != QLT_SUCCESS) {
2332 		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2333 		return (ret);
2334 	}
2335 
2336 	/* Execute firmware */
2337 	REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
2338 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2339 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2340 	REG_WR16(qlt, REG_MBOX(3), 0);
2341 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
2342 	ret = qlt_raw_mailbox_command(qlt);
2343 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2344 	if (ret != QLT_SUCCESS) {
2345 		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2346 		return (ret);
2347 	}
2348 
2349 	/* Get revisions (About Firmware) */
2350 	REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
2351 	ret = qlt_raw_mailbox_command(qlt);
2352 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2353 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2354 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2355 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2356 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2357 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2358 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2359 	if (ret != QLT_SUCCESS) {
2360 		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2361 		return (ret);
2362 	}
2363 
2364 	return (QLT_SUCCESS);
2365 }
2366 
2367 /*
2368  * Used only from qlt_download_fw().
2369  */
2370 static fct_status_t
2371 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2372 				uint32_t word_count, uint32_t risc_addr)
2373 {
2374 	uint32_t words_sent = 0;
2375 	uint32_t words_being_sent;
2376 	uint32_t *cur_host_addr;
2377 	uint32_t cur_risc_addr;
2378 	uint64_t da;
2379 	fct_status_t ret;
2380 
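	/*
	 * Push the image through the pre-allocated queue DMA buffer in
	 * chunks of at most TOTAL_DMA_MEM_SIZE/4 words, issuing a LOAD
	 * RAM EXTENDED mailbox command for each chunk.
	 */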
2381 	while (words_sent < word_count) {
2382 		cur_host_addr = &(host_addr[words_sent]);
2383 		cur_risc_addr = risc_addr + (words_sent << 2);
2384 		words_being_sent = min(word_count - words_sent,
2385 		    TOTAL_DMA_MEM_SIZE >> 2);
2386 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2387 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2388 		    DDI_DEV_AUTOINCR);
2389 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2390 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2391 		da = qlt->queue_mem_cookie.dmac_laddress;
2392 		REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
2393 		REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
2394 		REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
2395 		REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2396 		REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2397 		REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2398 		REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2399 		REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
2400 		REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
2401 		ret = qlt_raw_mailbox_command(qlt);
2402 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2403 		if (ret != QLT_SUCCESS) {
2404 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2405 			    ret);
2406 			return (ret);
2407 		}
2408 		words_sent += words_being_sent;
2409 	}
2410 	return (QLT_SUCCESS);
2411 }
2412 
2413 /*
2414  * Not used during normal operation. Only during driver init.
2415  * Assumes that interrupts are disabled and mailboxes are loaded.
2416  * Just triggers the mailbox command and waits for the completion.
2417  * Also expects that there is nothing else going on and we will only
2418  * get back a mailbox completion from firmware.
2419  * ---DOES NOT CLEAR INTERRUPT---
2420  * Used only from the code path originating from
2421  * qlt_reset_chip_and_download_fw()
2422  */
2423 static fct_status_t
2424 qlt_raw_mailbox_command(qlt_state_t *qlt)
2425 {
2426 	int cntr = 0;
2427 	uint32_t status;
2428 
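	/* Kick the RISC, then poll up to ~1s (100 x 10ms) for its answer. */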
2429 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2430 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
2431 		cntr++;
2432 		if (cntr == 100) {
2433 			return (QLT_MAILBOX_STUCK);
2434 		}
2435 		delay(drv_usectohz(10000));
2436 	}
2437 	status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);
2438 
2439 	if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
2440 	    (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
2441 	    (status == MBX_CMD_SUCCESSFUL) ||
2442 	    (status == MBX_CMD_NOT_SUCCESSFUL)) {
2443 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2444 		if (mbox0 == QLT_MBX_CMD_SUCCESS) {
2445 			return (QLT_SUCCESS);
2446 		} else {
2447 			return (QLT_MBOX_FAILED | mbox0);
2448 		}
2449 	}
2450 	/* This is unexpected, dump a message */
2451 	cmn_err(CE_WARN, "qlt(%d): Unexpected intr status %llx",
2452 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2453 	return (QLT_UNEXPECTED_RESPONSE);
2454 }
2455 
2456 static mbox_cmd_t *
2457 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2458 {
2459 	mbox_cmd_t *mcp;
2460 
2461 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2462 	if (dma_size) {
2463 		qlt_dmem_bctl_t *bctl;
2464 		uint64_t da;
2465 
2466 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2467 		if (mcp->dbuf == NULL) {
2468 			kmem_free(mcp, sizeof (*mcp));
2469 			return (NULL);
2470 		}
2471 		mcp->dbuf->db_data_size = dma_size;
2472 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2473 
2474 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2475 		da = bctl->bctl_dev_addr;
2476 		/* This is the most common initialization of dma ptrs */
2477 		mcp->to_fw[3] = LSW(LSD(da));
2478 		mcp->to_fw[2] = MSW(LSD(da));
2479 		mcp->to_fw[7] = LSW(MSD(da));
2480 		mcp->to_fw[6] = MSW(MSD(da));
2481 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2482 	}
2483 	mcp->to_fw_mask |= BIT_0;
2484 	mcp->from_fw_mask |= BIT_0;
2485 	return (mcp);
2486 }
2487 
2488 void
2489 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2490 {
2491 	if (mcp->dbuf)
2492 		qlt_i_dmem_free(qlt, mcp->dbuf);
2493 	kmem_free(mcp, sizeof (*mcp));
2494 }
2495 
2496 /*
2497  * This can sleep. Should never be called from interrupt context.
2498  */
2499 static fct_status_t
2500 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2501 {
2502 	int	retries;
2503 	int	i;
2504 	char	info[80];
2505 
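	/*
	 * Protocol: grab the mailboxes under mbox_lock, load the outgoing
	 * registers selected by to_fw_mask, raise the host-to-RISC doorbell
	 * and wait on mbox_cv for qlt_isr() to copy back the registers
	 * selected by from_fw_mask.
	 */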
2506 	if (curthread->t_flag & T_INTR_THREAD) {
2507 		ASSERT(0);
2508 		return (QLT_MBOX_FAILED);
2509 	}
2510 
2511 	mutex_enter(&qlt->mbox_lock);
2512 	/* See if mailboxes are still uninitialized */
2513 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2514 		mutex_exit(&qlt->mbox_lock);
2515 		return (QLT_MBOX_NOT_INITIALIZED);
2516 	}
2517 
2518 	/* Wait to grab the mailboxes */
2519 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2520 	    retries++) {
2521 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2522 		if ((retries > 5) ||
2523 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2524 			mutex_exit(&qlt->mbox_lock);
2525 			return (QLT_MBOX_BUSY);
2526 		}
2527 	}
2528 	/* Make sure we always ask for mailbox 0 */
2529 	mcp->from_fw_mask |= BIT_0;
2530 
2531 	/* Load mailboxes, set state and generate RISC interrupt */
2532 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2533 	qlt->mcp = mcp;
2534 	for (i = 0; i < MAX_MBOXES; i++) {
2535 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2536 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2537 	}
2538 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2539 
2540 qlt_mbox_wait_loop:;
2541 	/* Wait for mailbox command completion */
2542 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2543 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2544 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2545 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2546 		info[79] = 0;
2547 		qlt->mcp = NULL;
2548 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2549 		mutex_exit(&qlt->mbox_lock);
2550 
2551 		/*
2552 		 * XXX Throw HBA fatal error event
2553 		 */
2554 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2555 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2556 		return (QLT_MBOX_TIMEOUT);
2557 	}
2558 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2559 		goto qlt_mbox_wait_loop;
2560 
2561 	qlt->mcp = NULL;
2562 
2563 	/* Make sure it's a completion */
2564 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2565 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2566 		mutex_exit(&qlt->mbox_lock);
2567 		return (QLT_MBOX_ABORTED);
2568 	}
2569 
2570 	/* MBox command completed. Clear state, return based on mbox 0 */
2571 	/* Mailboxes are already loaded by interrupt routine */
2572 	qlt->mbox_io_state = MBOX_STATE_READY;
2573 	mutex_exit(&qlt->mbox_lock);
2574 	if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS)
2575 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2576 
2577 	return (QLT_SUCCESS);
2578 }
2579 
2580 /*
2581  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2582  */
2583 /* ARGSUSED */
2584 static uint_t
2585 qlt_isr(caddr_t arg, caddr_t arg2)
2586 {
2587 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2588 	uint32_t	risc_status, intr_type;
2589 	int		i;
2590 	int		intr_loop_count;
2591 	char		info[80];
2592 
2593 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2594 	if (!mutex_tryenter(&qlt->intr_lock)) {
2595 		/*
2596 		 * Normally we will always get this lock. If tryenter
2597 		 * fails, it means the driver is doing some cleanup and is
2598 		 * masking the intr, but some intr has sneaked in between.
2599 		 * See if our device has generated this intr. If so, wait a
2600 		 * bit and return claimed. If not, return claimed if this is
2601 		 * the first instance of an interrupt after the driver has
2602 		 * grabbed the lock.
2603 		 */
2604 		if (risc_status & BIT_15) {
2605 			drv_usecwait(10);
2606 			return (DDI_INTR_CLAIMED);
2607 		} else if (qlt->intr_sneak_counter) {
2608 			qlt->intr_sneak_counter--;
2609 			return (DDI_INTR_CLAIMED);
2610 		} else {
2611 			return (DDI_INTR_UNCLAIMED);
2612 		}
2613 	}
2614 	if (((risc_status & BIT_15) == 0) ||
2615 	    (qlt->qlt_intr_enabled == 0)) {
2616 		/*
2617 		 * This might be a pure coincidence: we are operating
2618 		 * in interrupt-disabled mode and another device
2619 		 * sharing the interrupt line has generated an interrupt
2620 		 * while an interrupt from our device might be pending. Just
2621 		 * ignore it and let the code handling the interrupt
2622 		 * disabled mode handle it.
2623 		 */
2624 		mutex_exit(&qlt->intr_lock);
2625 		return (DDI_INTR_UNCLAIMED);
2626 	}
2627 
2628 	/*
2629 	 * XXX Take care of the MSI case (disable intrs). It is going to be
2630 	 * complicated because of the max-iterations limit: the HBA may have
2631 	 * posted an intr which did not go out on PCI, but which we also did
2632 	 * not service because of that limit. Maybe offload the intr handling
2633 	 * to a different thread.
2634 	 */
2635 	intr_loop_count = 0;
2636 
2637 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2638 
2639 intr_again:;
2640 
2641 	/* check for risc pause */
2642 	if (risc_status & BIT_8) {
2643 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
2644 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2645 		    qlt->instance, risc_status);
2646 		(void) snprintf(info, 80, "Risc Pause %08x", risc_status);
2647 		info[79] = 0;
2648 		(void) fct_port_shutdown(qlt->qlt_port,
2649 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2650 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2651 	}
2652 
2653 	/* First check for high performance path */
2654 	intr_type = risc_status & 0xff;
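	/*
	 * 0x1D: ATIO and response queue update, 0x1C: ATIO queue update,
	 * 0x13: response queue update, 0x12: asynchronous event,
	 * 0x10/0x11: mailbox command completion.
	 */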
2655 	if (intr_type == 0x1D) {
2656 		qlt->atio_ndx_from_fw = (uint16_t)
2657 		    REG_RD32(qlt, REG_ATIO_IN_PTR);
2658 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2659 		qlt->resp_ndx_from_fw = risc_status >> 16;
2660 		qlt_handle_atio_queue_update(qlt);
2661 		qlt_handle_resp_queue_update(qlt);
2662 	} else if (intr_type == 0x1C) {
2663 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2664 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2665 		qlt_handle_atio_queue_update(qlt);
2666 	} else if (intr_type == 0x13) {
2667 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2668 		qlt->resp_ndx_from_fw = risc_status >> 16;
2669 		qlt_handle_resp_queue_update(qlt);
2670 	} else if (intr_type == 0x12) {
2671 		uint16_t code = (uint16_t)(risc_status >> 16);
2672 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2673 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2674 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2675 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2676 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2677 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2678 
2679 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2680 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2681 		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2682 		    mbox5, mbox6);
2683 		EL(qlt, "Async event %x mb1=%x mb2=%x, mb3=%x, mb5=%x, mb6=%x",
2684 		    code, mbox1, mbox2, mbox3, mbox5, mbox6);
2685 
2686 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2687 			if (qlt->qlt_link_up) {
2688 				fct_handle_event(qlt->qlt_port,
2689 				    FCT_EVENT_LINK_RESET, 0, 0);
2690 			}
2691 		} else if (code == 0x8012) {
2692 			qlt->qlt_link_up = 0;
2693 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2694 			    0, 0);
2695 		} else if (code == 0x8011) {
2696 			switch (mbox1) {
2697 			case 0: qlt->link_speed = PORT_SPEED_1G;
2698 				break;
2699 			case 1: qlt->link_speed = PORT_SPEED_2G;
2700 				break;
2701 			case 3: qlt->link_speed = PORT_SPEED_4G;
2702 				break;
2703 			case 4: qlt->link_speed = PORT_SPEED_8G;
2704 				break;
2705 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
2706 				break;
2707 			default:
2708 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2709 			}
2710 			qlt->qlt_link_up = 1;
2711 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2712 			    0, 0);
2713 		} else if ((code == 0x8002) || (code == 0x8003) ||
2714 		    (code == 0x8004) || (code == 0x8005)) {
2715 			(void) snprintf(info, 80,
2716 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2717 			    code, mbox1, mbox2, mbox5, mbox6);
2718 			info[79] = 0;
2719 			(void) fct_port_shutdown(qlt->qlt_port,
2720 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2721 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2722 		} else if (code == 0x800F) {
2723 			(void) snprintf(info, 80,
2724 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
2725 			    mbox1, mbox2, mbox3);
2726 
2727 			if (mbox1 != 1) {
2728 				/* issue "verify fw" */
2729 				qlt_verify_fw(qlt);
2730 			}
2731 		} else if (code == 0x8101) {
2732 			(void) snprintf(info, 80,
2733 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2734 			    code, mbox1, mbox2, mbox3);
2735 			info[79] = 0;
2736 
2737 			/* check if "ACK" is required (timeout != 0) */
2738 			if (mbox1 & 0x0f00) {
2739 				caddr_t	req;
2740 
2741 				/*
2742 				 * Ack the request (queue work to do it?)
2743 				 * using a mailbox iocb
2744 				 */
2745 				mutex_enter(&qlt->req_lock);
2746 				req = qlt_get_req_entries(qlt, 1);
2747 				if (req) {
2748 					bzero(req, IOCB_SIZE);
2749 					req[0] = 0x39; req[1] = 1;
2750 					QMEM_WR16(qlt, req+8, 0x101);
2751 					QMEM_WR16(qlt, req+10, mbox1);
2752 					QMEM_WR16(qlt, req+12, mbox2);
2753 					QMEM_WR16(qlt, req+14, mbox3);
2754 					QMEM_WR16(qlt, req+16, mbox4);
2755 					QMEM_WR16(qlt, req+18, mbox5);
2756 					QMEM_WR16(qlt, req+20, mbox6);
2757 					qlt_submit_req_entries(qlt, 1);
2758 				} else {
2759 					(void) snprintf(info, 80,
2760 					    "IDC ACK failed");
2761 					info[79] = 0;
2762 				}
2763 				mutex_exit(&qlt->req_lock);
2764 			}
2765 		}
2766 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2767 		/* Handle mailbox completion */
2768 		mutex_enter(&qlt->mbox_lock);
2769 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2770 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2771 			    " when driver wasn't waiting for it %d",
2772 			    qlt->instance, qlt->mbox_io_state);
2773 		} else {
2774 			for (i = 0; i < MAX_MBOXES; i++) {
2775 				if (qlt->mcp->from_fw_mask &
2776 				    (((uint32_t)1) << i)) {
2777 					qlt->mcp->from_fw[i] =
2778 					    REG_RD16(qlt, REG_MBOX(i));
2779 				}
2780 			}
2781 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2782 		}
2783 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2784 		cv_broadcast(&qlt->mbox_cv);
2785 		mutex_exit(&qlt->mbox_lock);
2786 	} else {
2787 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2788 		    qlt->instance, intr_type);
2789 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2790 	}
2791 
2792 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2793 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2794 	if ((risc_status & BIT_15) &&
2795 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2796 		goto intr_again;
2797 	}
2798 
2799 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2800 
2801 	mutex_exit(&qlt->intr_lock);
2802 	return (DDI_INTR_CLAIMED);
2803 }
2804 
2805 /* **************** NVRAM Functions ********************** */
2806 
2807 fct_status_t
2808 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2809 {
2810 	uint32_t	timer;
2811 
2812 	/* Clear access error flag */
2813 	REG_WR32(qlt, REG_CTRL_STATUS,
2814 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2815 
2816 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2817 
2818 	/* Wait for READ cycle to complete. */
2819 	for (timer = 3000; timer; timer--) {
2820 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2821 			break;
2822 		}
2823 		drv_usecwait(10);
2824 	}
2825 	if (timer == 0) {
2826 		EL(qlt, "flash timeout\n");
2827 		return (QLT_FLASH_TIMEOUT);
2828 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2829 		EL(qlt, "flash access error\n");
2830 		return (QLT_FLASH_ACCESS_ERROR);
2831 	}
2832 
2833 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2834 
2835 	return (QLT_SUCCESS);
2836 }
2837 
2838 fct_status_t
2839 qlt_read_nvram(qlt_state_t *qlt)
2840 {
2841 	uint32_t		index, addr, chksum;
2842 	uint32_t		val, *ptr;
2843 	fct_status_t		ret;
2844 	qlt_nvram_t		*nv;
2845 	uint64_t		empty_node_name = 0;
2846 
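	/*
	 * Pick the per-function NVRAM address for this chip family, then
	 * read the structure one 32-bit word at a time through the flash
	 * interface while the RISC is paused. The additive checksum over
	 * the whole structure must come out to zero.
	 */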
2847 	if (qlt->qlt_81xx_chip) {
2848 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2849 		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2850 	} else if (qlt->qlt_25xx_chip) {
2851 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2852 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2853 	} else {
2854 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2855 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2856 	}
2857 	mutex_enter(&qlt_global_lock);
2858 
2859 	/* Pause RISC. */
2860 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
2861 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2862 
2863 	/* Get NVRAM data and calculate checksum. */
2864 	ptr = (uint32_t *)qlt->nvram;
2865 	chksum = 0;
2866 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2867 		ret = qlt_read_flash_word(qlt, addr++, &val);
2868 		if (ret != QLT_SUCCESS) {
2869 			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
2870 			mutex_exit(&qlt_global_lock);
2871 			return (ret);
2872 		}
2873 		chksum += val;
2874 		*ptr = LE_32(val);
2875 		ptr++;
2876 	}
2877 
2878 	/* Release RISC Pause */
2879 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
2880 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2881 
2882 	mutex_exit(&qlt_global_lock);
2883 
2884 	/* Sanity check NVRAM Data */
2885 	nv = qlt->nvram;
2886 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2887 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2888 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2889 		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
2890 		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
2891 		    nv->nvram_version[1], nv->nvram_version[0]);
2892 		return (QLT_BAD_NVRAM_DATA);
2893 	}
2894 
2895 	/* If node name is zero, hand craft it from port name */
2896 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2897 		bcopy(nv->port_name, nv->node_name, 8);
2898 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
2899 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
2900 	}
2901 
2902 	return (QLT_SUCCESS);
2903 }
2904 
2905 uint32_t
2906 qlt_sync_atio_queue(qlt_state_t *qlt)
2907 {
2908 	uint32_t total_ent;
2909 
2910 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2911 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2912 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2913 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2914 		    DDI_DMA_SYNC_FORCPU);
2915 	} else {
2916 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2917 		    qlt->atio_ndx_from_fw;
2918 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2919 		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
2920 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2921 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2922 		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
2923 		    DDI_DMA_SYNC_FORCPU);
2924 	}
2925 	return (total_ent);
2926 }
2927 
2928 void
2929 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2930 {
2931 	uint32_t total_ent;
2932 
2933 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2934 		return;
2935 
2936 	total_ent = qlt_sync_atio_queue(qlt);
2937 
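	/*
	 * Walk the new 64-byte entries; byte 0 is the entry type and
	 * byte 1 the number of queue entries the IOCB occupies.
	 */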
2938 	do {
2939 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2940 		    qlt->atio_ndx_to_fw << 6];
2941 		uint32_t ent_cnt;
2942 
2943 		ent_cnt = (uint32_t)(atio[1]);
2944 		if (ent_cnt > total_ent) {
2945 			break;
2946 		}
2947 		switch ((uint8_t)(atio[0])) {
2948 		case 0x0d:	/* INOT */
2949 			qlt_handle_inot(qlt, atio);
2950 			break;
2951 		case 0x06:	/* ATIO */
2952 			qlt_handle_atio(qlt, atio);
2953 			break;
2954 		default:
2955 			EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2956 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2957 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2958 			break;
2959 		}
2960 		qlt->atio_ndx_to_fw = (uint16_t)(
2961 		    (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2962 		total_ent -= ent_cnt;
2963 	} while (total_ent > 0);
2964 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2965 }
2966 
2967 uint32_t
2968 qlt_sync_resp_queue(qlt_state_t *qlt)
2969 {
2970 	uint32_t total_ent;
2971 
2972 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2973 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2974 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2975 		    RESPONSE_QUEUE_OFFSET
2976 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2977 		    DDI_DMA_SYNC_FORCPU);
2978 	} else {
2979 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2980 		    qlt->resp_ndx_from_fw;
2981 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2982 		    RESPONSE_QUEUE_OFFSET
2983 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2984 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2985 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2986 		    RESPONSE_QUEUE_OFFSET,
2987 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2988 	}
2989 	return (total_ent);
2990 }
2991 
2992 void
2993 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2994 {
2995 	uint32_t total_ent;
2996 	uint8_t c;
2997 
2998 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2999 		return;
3000 
3001 	total_ent = qlt_sync_resp_queue(qlt);
3002 
3003 	do {
3004 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
3005 		uint32_t ent_cnt;
3006 
3007 		ent_cnt = (uint32_t)(resp[0] == 0x51 ? resp[1] : 1);
3008 		if (ent_cnt > total_ent) {
3009 			break;
3010 		}
3011 		switch ((uint8_t)(resp[0])) {
3012 		case 0x12:	/* CTIO completion */
3013 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
3014 			break;
3015 		case 0x0e:	/* NACK */
3016 			/* Do Nothing */
3017 			break;
3018 		case 0x1b:	/* Verify FW */
3019 			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
3020 			break;
3021 		case 0x29:	/* CT PassThrough */
3022 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
3023 			break;
3024 		case 0x33:	/* Abort IO IOCB completion */
3025 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
3026 			break;
3027 		case 0x51:	/* PUREX */
3028 			qlt_handle_purex(qlt, (uint8_t *)resp);
3029 			break;
3030 		case 0x52:
3031 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
3032 			break;
3033 		case 0x53:	/* ELS passthrough */
3034 			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
3035 			if (c == 0) {
3036 				qlt_handle_sol_els_completion(qlt,
3037 				    (uint8_t *)resp);
3038 			} else if (c == 3) {
3039 				qlt_handle_unsol_els_abort_completion(qlt,
3040 				    (uint8_t *)resp);
3041 			} else {
3042 				qlt_handle_unsol_els_completion(qlt,
3043 				    (uint8_t *)resp);
3044 			}
3045 			break;
3046 		case 0x54:	/* ABTS received */
3047 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
3048 			break;
3049 		case 0x55:	/* ABTS completion */
3050 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
3051 			break;
3052 		default:
3053 			EL(qlt, "response entry=%xh\n", resp[0]);
3054 			break;
3055 		}
3056 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
3057 		    (RESPONSE_QUEUE_ENTRIES - 1);
3058 		total_ent -= ent_cnt;
3059 	} while (total_ent > 0);
3060 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
3061 }
3062 
3063 fct_status_t
3064 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
3065 				uint16_t *ret_handle)
3066 {
3067 	fct_status_t ret;
3068 	mbox_cmd_t *mcp;
3069 	uint16_t n;
3070 	uint16_t h;
3071 	uint32_t ent_id;
3072 	uint8_t *p;
3073 	int found = 0;
3074 
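	/*
	 * Fetch the firmware's port database (room for 2048 eight-byte
	 * entries, each a 24-bit port ID followed by a 16-bit handle) and
	 * search it for the given port ID.
	 */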
3075 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
3076 	if (mcp == NULL) {
3077 		return (STMF_ALLOC_FAILURE);
3078 	}
3079 	mcp->to_fw[0] = MBC_GET_ID_LIST;
3080 	mcp->to_fw[8] = 2048 * 8;
3081 	mcp->to_fw[9] = 0;
3082 	mcp->to_fw_mask |= BIT_9 | BIT_8;
3083 	mcp->from_fw_mask |= BIT_1 | BIT_2;
3084 
3085 	ret = qlt_mailbox_command(qlt, mcp);
3086 	if (ret != QLT_SUCCESS) {
3087 		EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
3088 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
3089 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
3090 		    mcp->from_fw[1], mcp->from_fw[2]);
3091 		qlt_free_mailbox_command(qlt, mcp);
3092 		return (ret);
3093 	}
3094 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
3095 	p = mcp->dbuf->db_sglist[0].seg_addr;
3096 	for (n = 0; n < mcp->from_fw[1]; n++) {
3097 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
3098 		h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
3099 		if (ent_id == id) {
3100 			found = 1;
3101 			*ret_handle = h;
3102 			if ((cmd_handle != FCT_HANDLE_NONE) &&
3103 			    (cmd_handle != h)) {
3104 				cmn_err(CE_WARN, "login for portid %x came in "
3105 				    "with handle %x, while the portid was "
3106 				    "already using a different handle %x",
3107 				    id, cmd_handle, h);
3108 				qlt_free_mailbox_command(qlt, mcp);
3109 				return (QLT_FAILURE);
3110 			}
3111 			break;
3112 		}
3113 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
3114 			cmn_err(CE_WARN, "login for portid %x came in with "
3115 			    "handle %x, while the handle was already in use "
3116 			    "for portid %x", id, cmd_handle, ent_id);
3117 			qlt_free_mailbox_command(qlt, mcp);
3118 			return (QLT_FAILURE);
3119 		}
3120 		p += 8;
3121 	}
3122 	if (!found) {
3123 		*ret_handle = cmd_handle;
3124 	}
3125 	qlt_free_mailbox_command(qlt, mcp);
3126 	return (FCT_SUCCESS);
3127 }
3128 
3129 /* ARGSUSED */
3130 fct_status_t
3131 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3132 				fct_cmd_t *login)
3133 {
3134 	uint8_t *p;
3135 
3136 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3137 	p[0] = ELS_OP_PLOGI;
3138 	*((uint16_t *)(&p[4])) = 0x2020;
3139 	p[7] = 3;
3140 	p[8] = 0x88;
3141 	p[10] = 8;
3142 	p[13] = 0xff; p[15] = 0x1f;
3143 	p[18] = 7; p[19] = 0xd0;
3144 
3145 	bcopy(port->port_pwwn, p + 20, 8);
3146 	bcopy(port->port_nwwn, p + 28, 8);
3147 
3148 	p[68] = 0x80;
3149 	p[74] = 8;
3150 	p[77] = 0xff;
3151 	p[81] = 1;
3152 
3153 	return (FCT_SUCCESS);
3154 }
3155 
3156 /* ARGSUSED */
3157 fct_status_t
3158 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3159 				fct_cmd_t *login)
3160 {
3161 	return (FCT_SUCCESS);
3162 }
3163 
3164 fct_status_t
3165 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3166     fct_cmd_t *login)
3167 {
3168 	uint16_t h;
3169 	fct_status_t ret;
3170 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3171 
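	/* Well-known fabric addresses map to fixed firmware handles. */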
3172 	switch (rp->rp_id) {
3173 	case 0xFFFFFC:	h = 0x7FC; break;
3174 	case 0xFFFFFD:	h = 0x7FD; break;
3175 	case 0xFFFFFE:	h = 0x7FE; break;
3176 	case 0xFFFFFF:	h = 0x7FF; break;
3177 	default:
3178 		ret = qlt_portid_to_handle(qlt, rp->rp_id,
3179 		    login->cmd_rp_handle, &h);
3180 		if (ret != FCT_SUCCESS) {
3181 			EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3182 			return (ret);
3183 		}
3184 	}
3185 
3186 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
3187 		ret = qlt_fill_plogi_req(port, rp, login);
3188 	} else {
3189 		ret = qlt_fill_plogi_resp(port, rp, login);
3190 	}
3191 
3192 	if (ret != FCT_SUCCESS) {
3193 		EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3194 		return (ret);
3195 	}
3196 
3197 	if (h == FCT_HANDLE_NONE)
3198 		return (FCT_SUCCESS);
3199 
3200 	if (rp->rp_handle == FCT_HANDLE_NONE) {
3201 		rp->rp_handle = h;
3202 		return (FCT_SUCCESS);
3203 	}
3204 
3205 	if (rp->rp_handle == h)
3206 		return (FCT_SUCCESS);
3207 
3208 	EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3209 	return (FCT_FAILURE);
3210 }
3211 /* invoked in single thread */
3212 fct_status_t
3213 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3214 {
3215 	uint8_t *req;
3216 	qlt_state_t *qlt;
3217 	clock_t	dereg_req_timer;
3218 	fct_status_t ret;
3219 
3220 	qlt = (qlt_state_t *)port->port_fca_private;
3221 
3222 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3223 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
3224 		return (FCT_SUCCESS);
3225 	ASSERT(qlt->rp_id_in_dereg == 0);
3226 
3227 	mutex_enter(&qlt->preq_lock);
3228 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3229 	if (req == NULL) {
3230 		mutex_exit(&qlt->preq_lock);
3231 		return (FCT_BUSY);
3232 	}
3233 	bzero(req, IOCB_SIZE);
3234 	req[0] = 0x52; req[1] = 1;
3235 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
3236 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3237 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3238 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3239 	qlt->rp_id_in_dereg = rp->rp_id;
3240 	qlt_submit_preq_entries(qlt, 1);
3241 
3242 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3243 	if (cv_timedwait(&qlt->rp_dereg_cv,
3244 	    &qlt->preq_lock, dereg_req_timer) > 0) {
3245 		ret = qlt->rp_dereg_status;
3246 	} else {
3247 		ret = FCT_BUSY;
3248 	}
3249 	qlt->rp_dereg_status = 0;
3250 	qlt->rp_id_in_dereg = 0;
3251 	mutex_exit(&qlt->preq_lock);
3252 	return (ret);
3253 }
3254 
3255 /*
3256  * Pass received ELS up to framework.
3257  */
3258 static void
3259 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3260 {
3261 	fct_cmd_t		*cmd;
3262 	fct_els_t		*els;
3263 	qlt_cmd_t		*qcmd;
3264 	uint32_t		payload_size;
3265 	uint32_t		remote_portid;
3266 	uint8_t			*pldptr, *bndrptr;
3267 	int			i, off;
3268 	uint16_t		iocb_flags;
3269 	char			info[160];
3270 
3271 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3272 	    ((uint32_t)(resp[0x1A])) << 16;
3273 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3274 	if (iocb_flags & BIT_15) {
3275 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3276 	} else {
3277 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3278 	}
3279 
3280 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
3281 		EL(qlt, "payload is too large = %xh\n", payload_size);
3282 		cmn_err(CE_WARN, "handle_purex: payload is too large");
3283 		goto cmd_null;
3284 	}
3285 
3286 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3287 	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3288 	if (cmd == NULL) {
3289 		EL(qlt, "fct_alloc cmd==NULL\n");
3290 cmd_null:;
3291 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
3292 		    "allocate space for fct_cmd", (void *)qlt);
3293 		info[159] = 0;
3294 		(void) fct_port_shutdown(qlt->qlt_port,
3295 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3296 		return;
3297 	}
3298 
3299 	cmd->cmd_port = qlt->qlt_port;
3300 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3301 	if (cmd->cmd_rp_handle == 0xFFFF) {
3302 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3303 	}
3304 
3305 	els = (fct_els_t *)cmd->cmd_specific;
3306 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3307 	els->els_req_size = (uint16_t)payload_size;
3308 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3309 	    GET_STRUCT_SIZE(qlt_cmd_t));
3310 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3311 	cmd->cmd_rportid = remote_portid;
3312 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3313 	    ((uint32_t)(resp[0x16])) << 16;
3314 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3315 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3316 	pldptr = &resp[0x2C];
3317 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
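	/*
	 * The payload may continue into the response-queue entries that
	 * follow this IOCB; the copy below undoes the firmware's per-word
	 * byte swap and wraps around at the end of the response ring.
	 */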
3318 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3319 		/* Take care of fw's swapping of payload */
3320 		els->els_req_payload[i] = pldptr[3];
3321 		els->els_req_payload[i+1] = pldptr[2];
3322 		els->els_req_payload[i+2] = pldptr[1];
3323 		els->els_req_payload[i+3] = pldptr[0];
3324 		pldptr += 4;
3325 		if (pldptr == bndrptr)
3326 			pldptr = (uint8_t *)qlt->resp_ptr;
3327 		off += 4;
3328 		if (off >= IOCB_SIZE) {
3329 			off = 4;
3330 			pldptr += 4;
3331 		}
3332 	}
3333 	fct_post_rcvd_cmd(cmd, 0);
3334 }
3335 
3336 fct_status_t
3337 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3338 {
3339 	qlt_state_t	*qlt;
3340 	char		info[160];
3341 
3342 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3343 
3344 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3345 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3346 			EL(qlt, "ioflags = %xh\n", ioflags);
3347 			goto fatal_panic;
3348 		} else {
3349 			return (qlt_send_status(qlt, cmd));
3350 		}
3351 	}
3352 
3353 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3354 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3355 			goto fatal_panic;
3356 		} else {
3357 			return (qlt_send_els_response(qlt, cmd));
3358 		}
3359 	}
3360 
3361 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3362 		cmd->cmd_handle = 0;
3363 	}
3364 
3365 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3366 		return (qlt_send_abts_response(qlt, cmd, 0));
3367 	} else {
3368 		EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3369 		ASSERT(0);
3370 		return (FCT_FAILURE);
3371 	}
3372 
3373 fatal_panic:;
3374 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
3375 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3376 	    ioflags);
3377 	info[159] = 0;
3378 	(void) fct_port_shutdown(qlt->qlt_port,
3379 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3380 	return (FCT_FAILURE);
3381 }
3382 
3383 /* ARGSUSED */
3384 fct_status_t
3385 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
3386 {
3387 	qlt_dmem_bctl_t	*bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
3388 	qlt_state_t	*qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3389 	qlt_cmd_t	*qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3390 	uint8_t		*req, rcnt;
3391 	uint16_t	flags;
3392 	uint16_t	cookie_count;
3393 
3394 	if (dbuf->db_handle == 0)
3395 		qcmd->dbuf = dbuf;
3396 	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3397 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
3398 		flags = (uint16_t)(flags | 2);
3399 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
3400 	} else {
3401 		flags = (uint16_t)(flags | 1);
3402 	}
3403 
3404 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
3405 		flags = (uint16_t)(flags | BIT_15);
3406 
3407 	if (dbuf->db_flags & DB_LU_DATA_BUF) {
3408 		/*
3409 		 * Data bufs from LU are in scatter/gather list format.
3410 		 */
3411 		cookie_count = qlt_get_cookie_count(dbuf);
3412 		rcnt = qlt_get_iocb_count(cookie_count);
3413 	} else {
3414 		cookie_count = 1;
3415 		rcnt = 1;
3416 	}
3417 	mutex_enter(&qlt->req_lock);
3418 	req = (uint8_t *)qlt_get_req_entries(qlt, rcnt);
3419 	if (req == NULL) {
3420 		mutex_exit(&qlt->req_lock);
3421 		return (FCT_BUSY);
3422 	}
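	/*
	 * Build a CTIO Type 7 (entry type 0x12) to move the data for this
	 * command: IOCB handle, remote port handle, a 60 second firmware
	 * timeout, the DSD (cookie) count, relative offset and transfer
	 * length. The direction and "send status" bits were folded into
	 * 'flags' above.
	 */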
3423 	bzero(req, IOCB_SIZE);	/* XXX needed ? */
3424 	req[0] = 0x12;
3425 	req[1] = rcnt;
3426 	req[2] = dbuf->db_handle;
3427 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
3428 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
3429 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
3430 	QMEM_WR16(qlt, req+12, cookie_count);
3431 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
3432 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
3433 	QMEM_WR16(qlt, req+0x1A, flags);
3434 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
3435 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
3436 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
3437 	if (dbuf->db_flags & DB_LU_DATA_BUF) {
3438 		uint8_t			*qptr;	/* qlt continuation segs */
3439 		uint16_t		cookie_resid;
3440 		uint16_t		cont_segs;
3441 		ddi_dma_cookie_t	cookie, *ckp;
3442 
3443 		/*
3444 		 * See if the dma cookies are in simple array format.
3445 		 */
3446 		ckp = qlt_get_cookie_array(dbuf);
3447 
3448 		/*
3449 		 * Program the first segment into main record.
3450 		 */
3451 		if (ckp) {
3452 			ASSERT(ckp->dmac_size);
3453 			QMEM_WR64(qlt, req+0x34, ckp->dmac_laddress);
3454 			QMEM_WR32(qlt, req+0x3c, ckp->dmac_size);
3455 		} else {
3456 			qlt_ddi_dma_nextcookie(dbuf, &cookie);
3457 			ASSERT(cookie.dmac_size);
3458 			QMEM_WR64(qlt, req+0x34, cookie.dmac_laddress);
3459 			QMEM_WR32(qlt, req+0x3c, cookie.dmac_size);
3460 		}
3461 		cookie_resid = cookie_count-1;
3462 
3463 		/*
3464 		 * Program remaining segments into continuation records.
3465 		 */
3466 		while (cookie_resid) {
3467 			req += IOCB_SIZE;
3468 			if (req >= (uint8_t *)qlt->resp_ptr) {
3469 				req = (uint8_t *)qlt->req_ptr;
3470 			}
3471 			req[0] = 0x0a;
3472 			req[1] = 1;
3473 			req[2] = req[3] = 0;	/* tidy */
3474 			qptr = &req[4];
3475 			for (cont_segs = CONT_A64_DATA_SEGMENTS;
3476 			    cont_segs && cookie_resid; cont_segs--) {
3477 
3478 				if (ckp) {
3479 					++ckp;		/* next cookie */
3480 					ASSERT(ckp->dmac_size != 0);
3481 					QMEM_WR64(qlt, qptr,
3482 					    ckp->dmac_laddress);
3483 					qptr += 8;	/* skip over laddress */
3484 					QMEM_WR32(qlt, qptr, ckp->dmac_size);
3485 					qptr += 4;	/* skip over size */
3486 				} else {
3487 					qlt_ddi_dma_nextcookie(dbuf, &cookie);
3488 					ASSERT(cookie.dmac_size != 0);
3489 					QMEM_WR64(qlt, qptr,
3490 					    cookie.dmac_laddress);
3491 					qptr += 8;	/* skip over laddress */
3492 					QMEM_WR32(qlt, qptr, cookie.dmac_size);
3493 					qptr += 4;	/* skip over size */
3494 				}
3495 				cookie_resid--;
3496 			}
3497 			/*
3498 			 * zero unused remainder of IOCB
3499 			 */
3500 			if (cont_segs) {
3501 				size_t resid;
3502 				resid = (size_t)((uintptr_t)(req+IOCB_SIZE) -
3503 				    (uintptr_t)qptr);
3504 				ASSERT(resid < IOCB_SIZE);
3505 				bzero(qptr, resid);
3506 			}
3507 		}
3508 	} else {
3509 		/* Single, contiguous buffer */
3510 		QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
3511 		QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
3512 	}
3513 
3514 	qlt_submit_req_entries(qlt, rcnt);
3515 	mutex_exit(&qlt->req_lock);
3516 
3517 	return (STMF_SUCCESS);
3518 }
3519 
3520 /*
3521  * We must construct a proper FCP_RSP IU now. Here we only focus on
3522  * the handling of FCP_SNS_INFO. Any protocol failures (FCP_RSP_INFO)
3523  * should have been caught before we got here.
3524  */
3525 fct_status_t
3526 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3527 {
3528 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
3529 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
3530 	qlt_dmem_bctl_t *bctl;
3531 	uint32_t size;
3532 	uint8_t *req, *fcp_rsp_iu;
3533 	uint8_t *psd, sensbuf[24];		/* sense data */
3534 	uint16_t flags;
3535 	uint16_t scsi_status;
3536 	int use_mode2;
3537 	int ndx;
3538 
3539 	/*
3540 	 * Take the fast path for non CHECK CONDITION status
3541 	 */
3542 	if (task->task_scsi_status != STATUS_CHECK) {
3543 		/*
3544 		 * We will use mode1
3545 		 */
3546 		flags = (uint16_t)(BIT_6 | BIT_15 |
3547 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3548 		scsi_status = (uint16_t)task->task_scsi_status;
3549 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3550 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3551 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3552 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3553 		}
3554 		qcmd->dbuf_rsp_iu = NULL;
3555 
3556 		/*
3557 		 * Fillout CTIO type 7 IOCB
3558 		 */
3559 		mutex_enter(&qlt->req_lock);
3560 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3561 		if (req == NULL) {
3562 			mutex_exit(&qlt->req_lock);
3563 			return (FCT_BUSY);
3564 		}
3565 
3566 		/*
3567 		 * Common fields
3568 		 */
3569 		bzero(req, IOCB_SIZE);
3570 		req[0x00] = 0x12;
3571 		req[0x01] = 0x1;
3572 	req[0x02] = BIT_7;	/* indicate that it's a pure status req */
3573 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3574 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3575 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3576 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3577 
3578 		/*
3579 		 * Mode-specific fields
3580 		 */
3581 		QMEM_WR16(qlt, req + 0x1A, flags);
3582 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3583 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3584 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3585 
3586 		/*
3587 		 * Trigger FW to send SCSI status out
3588 		 */
3589 		qlt_submit_req_entries(qlt, 1);
3590 		mutex_exit(&qlt->req_lock);
3591 		return (STMF_SUCCESS);
3592 	}
3593 
3594 	ASSERT(task->task_scsi_status == STATUS_CHECK);
3595 	/*
3596 	 * Decide the SCSI status mode that should be used
3597 	 */
3598 	use_mode2 = (task->task_sense_length > 24);
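	/*
	 * Mode 2 builds a complete FCP_RSP IU (sense data appended after
	 * the 24-byte header) in a separate DMA buffer, since more than
	 * 24 bytes of sense data cannot be carried inline in the CTIO.
	 * Mode 1 places the SCSI status and up to 24 bytes of sense data
	 * directly in the IOCB.
	 */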
3599 
3600 	/*
3601 	 * Prepare required information per the SCSI status mode
3602 	 */
3603 	flags = (uint16_t)(BIT_15 |
3604 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3605 	if (use_mode2) {
3606 		flags = (uint16_t)(flags | BIT_7);
3607 
3608 		size = task->task_sense_length;
3609 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3610 		    task->task_sense_length, &size, 0);
3611 		if (!qcmd->dbuf_rsp_iu) {
3612 			return (FCT_ALLOC_FAILURE);
3613 		}
3614 
3615 		/*
3616 		 * Start to construct FCP_RSP IU
3617 		 */
3618 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3619 		bzero(fcp_rsp_iu, 24);
3620 
3621 		/*
3622 		 * FCP_RSP IU flags, byte10
3623 		 */
3624 		fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3625 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3626 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3627 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3628 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3629 		}
3630 
3631 		/*
3632 		 * SCSI status code, byte11
3633 		 */
3634 		fcp_rsp_iu[11] = task->task_scsi_status;
3635 
3636 		/*
3637 		 * FCP_RESID (Overrun or underrun)
3638 		 */
3639 		fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3640 		fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3641 		fcp_rsp_iu[14] = (uint8_t)((task->task_resid >>  8) & 0xFF);
3642 		fcp_rsp_iu[15] = (uint8_t)((task->task_resid >>  0) & 0xFF);
3643 
3644 		/*
3645 		 * FCP_SNS_LEN
3646 		 */
3647 		fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3648 		    0xFF);
3649 		fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3650 		    0xFF);
3651 
3652 		/*
3653 		 * FCP_RSP_LEN
3654 		 */
3655 		/*
3656 		 * no FCP_RSP_INFO
3657 		 */
3658 		/*
3659 		 * FCP_SNS_INFO
3660 		 */
3661 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3662 		    task->task_sense_length);
3663 
3664 		/*
3665 		 * Ensure dma data consistency
3666 		 */
3667 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3668 	} else {
3669 		flags = (uint16_t)(flags | BIT_6);
3670 
3671 		scsi_status = (uint16_t)task->task_scsi_status;
3672 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3673 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3674 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3675 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3676 		}
3677 		if (task->task_sense_length) {
3678 			scsi_status = (uint16_t)(scsi_status | BIT_9);
3679 		}
3680 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3681 		qcmd->dbuf_rsp_iu = NULL;
3682 	}
3683 
3684 	/*
3685 	 * Fillout CTIO type 7 IOCB
3686 	 */
3687 	mutex_enter(&qlt->req_lock);
3688 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3689 	if (req == NULL) {
3690 		mutex_exit(&qlt->req_lock);
3691 		if (use_mode2) {
3692 			qlt_dmem_free(cmd->cmd_port->port_fds,
3693 			    qcmd->dbuf_rsp_iu);
3694 			qcmd->dbuf_rsp_iu = NULL;
3695 		}
3696 		return (FCT_BUSY);
3697 	}
3698 
3699 	/*
3700 	 * Common fields
3701 	 */
3702 	bzero(req, IOCB_SIZE);
3703 	req[0x00] = 0x12;
3704 	req[0x01] = 0x1;
3705 	req[0x02] = BIT_7;	/* to indicate that it's a pure status req */
3706 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3707 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3708 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3709 	if (use_mode2) {
3710 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3711 	}
3712 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3713 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3714 
3715 	/*
3716 	 * Mode-specific fields
3717 	 */
3718 	if (!use_mode2) {
3719 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3720 	}
3721 	QMEM_WR16(qlt, req + 0x1A, flags);
3722 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3723 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3724 	if (use_mode2) {
3725 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3726 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3727 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3728 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3729 	} else {
3730 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3731 		psd = req+0x28;
3732 
3733 		/*
3734 		 * Data in sense buf is always big-endian, data in IOCB
3735 		 * should always be little-endian, so we must do swapping.
3736 		 */
3737 		size = ((task->task_sense_length + 3) & (~3));
3738 		for (ndx = 0; ndx < size; ndx += 4) {
3739 			psd[ndx + 0] = sensbuf[ndx + 3];
3740 			psd[ndx + 1] = sensbuf[ndx + 2];
3741 			psd[ndx + 2] = sensbuf[ndx + 1];
3742 			psd[ndx + 3] = sensbuf[ndx + 0];
3743 		}
3744 	}
3745 
3746 	/*
3747 	 * Trigger FW to send SCSI status out
3748 	 */
3749 	qlt_submit_req_entries(qlt, 1);
3750 	mutex_exit(&qlt->req_lock);
3751 
3752 	return (STMF_SUCCESS);
3753 }
3754 
3755 fct_status_t
3756 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3757 {
3758 	qlt_cmd_t	*qcmd;
3759 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3760 	uint8_t *req, *addr;
3761 	qlt_dmem_bctl_t *bctl;
3762 	uint32_t minsize;
3763 	uint8_t elsop, req1f;
3764 
3765 	addr = els->els_resp_payload;
3766 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3767 
3768 	minsize = els->els_resp_size;
3769 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3770 	if (qcmd->dbuf == NULL)
3771 		return (FCT_BUSY);
3772 
3773 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3774 
3775 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3776 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3777 
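	/*
	 * The first byte of the response payload is the ELS command code:
	 * 0x02 is LS_ACC, anything else is treated as a reject. The
	 * control flags of the ELS pass-through IOCB are set accordingly,
	 * with an extra bit for responses that complete a PRLI, PRLO,
	 * TPRLO or LOGO exchange.
	 */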
3778 	if (addr[0] == 0x02) {	/* ACC */
3779 		req1f = BIT_5;
3780 	} else {
3781 		req1f = BIT_6;
3782 	}
3783 	elsop = els->els_req_payload[0];
3784 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3785 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3786 		req1f = (uint8_t)(req1f | BIT_4);
3787 	}
3788 
3789 	mutex_enter(&qlt->req_lock);
3790 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3791 	if (req == NULL) {
3792 		mutex_exit(&qlt->req_lock);
3793 		qlt_dmem_free(NULL, qcmd->dbuf);
3794 		qcmd->dbuf = NULL;
3795 		return (FCT_BUSY);
3796 	}
3797 	bzero(req, IOCB_SIZE);
3798 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3799 	req[0x16] = elsop; req[0x1f] = req1f;
3800 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3801 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3802 	QMEM_WR16(qlt, (&req[0xC]), 1);
3803 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3804 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3805 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3806 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
3807 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
3808 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
3809 	}
3810 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3811 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3812 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3813 	qlt_submit_req_entries(qlt, 1);
3814 	mutex_exit(&qlt->req_lock);
3815 
3816 	return (FCT_SUCCESS);
3817 }
3818 
3819 fct_status_t
3820 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3821 {
3822 	qlt_abts_cmd_t *qcmd;
3823 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3824 	uint8_t *req;
3825 	uint32_t lportid;
3826 	uint32_t fctl;
3827 	int i;
3828 
3829 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3830 
3831 	mutex_enter(&qlt->req_lock);
3832 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3833 	if (req == NULL) {
3834 		mutex_exit(&qlt->req_lock);
3835 		return (FCT_BUSY);
3836 	}
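	/*
	 * Start from the saved inbound ABTS IOCB and turn it into an ABTS
	 * response (entry type 0x55): swap the local and remote port IDs,
	 * flip the exchange-context bit in F_CTL (and, as we read it,
	 * hand sequence initiative back to the initiator), then copy in
	 * the byte-swapped BA_ACC/BA_RJT payload supplied by the FCT
	 * layer.
	 */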
3837 	bcopy(qcmd->buf, req, IOCB_SIZE);
3838 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3839 	fctl = QMEM_RD32(qlt, req+0x1C);
3840 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3841 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3842 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3843 	if (cmd->cmd_rp)
3844 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3845 	else
3846 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3847 	if (terminate) {
3848 		QMEM_WR16(qlt, (&req[0xC]), 1);
3849 	}
3850 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3851 	req[0x17] = abts->abts_resp_rctl;
3852 	QMEM_WR32(qlt, req+0x18, lportid);
3853 	QMEM_WR32(qlt, req+0x1C, fctl);
3854 	req[0x23]++;
3855 	for (i = 0; i < 12; i += 4) {
3856 		/* Take care of firmware's LE requirement */
3857 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3858 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3859 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3860 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3861 	}
3862 	qlt_submit_req_entries(qlt, 1);
3863 	mutex_exit(&qlt->req_lock);
3864 
3865 	return (FCT_SUCCESS);
3866 }
3867 
3868 static void
3869 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3870 {
3871 	int i;
3872 	uint32_t d;
3873 	caddr_t req;
3874 	/* Just put it on the request queue */
3875 	mutex_enter(&qlt->req_lock);
3876 	req = qlt_get_req_entries(qlt, 1);
3877 	if (req == NULL) {
3878 		mutex_exit(&qlt->req_lock);
3879 		/* XXX handle this */
3880 		return;
3881 	}
3882 	for (i = 0; i < 16; i++) {
3883 		d = QMEM_RD32(qlt, inot);
3884 		inot += 4;
3885 		QMEM_WR32(qlt, req, d);
3886 		req += 4;
3887 	}
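	/*
	 * Rewind to the start of the copied entry and change its entry
	 * type to 0x0e (notify acknowledge) so the firmware acknowledges
	 * the immediate notify we just received.
	 */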
3888 	req -= 64;
3889 	req[0] = 0x0e;
3890 	qlt_submit_req_entries(qlt, 1);
3891 	mutex_exit(&qlt->req_lock);
3892 }
3893 
3894 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3895 static void
3896 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3897 {
3898 	fct_cmd_t	*cmd;
3899 	scsi_task_t	*task;
3900 	qlt_cmd_t	*qcmd;
3901 	uint32_t	rportid, fw_xchg_addr;
3902 	uint8_t		*p, *q, *req, tm;
3903 	uint16_t	cdb_size, flags, oxid;
3904 	char		info[160];
3905 
3906 	/*
3907 	 * If either a bidirectional xfer is requested or there is an
3908 	 * extended CDB, atio[0x20 + 11] will be greater than or equal to 3.
3909 	 */
3910 	cdb_size = 16;
3911 	if (atio[0x20 + 11] >= 3) {
3912 		uint8_t b = atio[0x20 + 11];
3913 		uint16_t b1;
3914 		if ((b & 3) == 3) {
3915 			EL(qlt, "bidirectional I/O not supported\n");
3916 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3917 			    "received, dropping the cmd as bidirectional "
3918 			    "transfers are not yet supported", qlt->instance);
3919 			/* XXX abort the I/O */
3920 			return;
3921 		}
3922 		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3923 		/*
3924 		 * Verify that we have enough entries. Without an additional CDB,
3925 		 * everything fits nicely within the same 64 bytes, so the
3926 		 * additional CDB size is essentially the number of additional
3927 		 * bytes we need.
3928 		 */
3929 		b1 = (uint16_t)b;
3930 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3931 			EL(qlt, "extended cdb received\n");
3932 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3933 			    "cdb (cdb size = %d bytes), however the firmware "
3934 			    "did not DMA the entire FCP_CMD IU, entry count "
3935 			    "is %d while it should be %d", qlt->instance,
3936 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3937 			/* XXX abort the I/O */
3938 			return;
3939 		}
3940 	}
3941 
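	/*
	 * The received FC frame header is embedded at offset 8 of the
	 * ATIO, so the remote port ID is its S_ID field (bytes 5-7), the
	 * local port ID its D_ID (bytes 1-3), and OX_ID/RX_ID are at
	 * bytes 16-19. The FCP_CMND IU itself starts at offset 0x20.
	 */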
3942 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3943 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3944 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3945 	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3946 
3947 	if (fw_xchg_addr == 0xFFFFFFFF) {
3948 		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3949 		cmd = NULL;
3950 	} else {
3951 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3952 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3953 		if (cmd == NULL) {
3954 			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3955 		}
3956 	}
3957 	if (cmd == NULL) {
3958 		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3959 		/* Abort this IO */
3960 		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3961 
3962 		mutex_enter(&qlt->req_lock);
3963 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3964 		if (req == NULL) {
3965 			mutex_exit(&qlt->req_lock);
3966 
3967 			(void) snprintf(info, 160,
3968 			    "qlt_handle_atio: qlt-%p, can't "
3969 			    "allocate space for scsi_task", (void *)qlt);
3970 			info[159] = 0;
3971 			(void) fct_port_shutdown(qlt->qlt_port,
3972 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3973 			return;
3974 		}
3975 		bzero(req, IOCB_SIZE);
3976 		req[0] = 0x12; req[1] = 0x1;
3977 		QMEM_WR32(qlt, req+4, 0);
3978 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3979 		    rportid));
3980 		QMEM_WR16(qlt, req+10, 60);
3981 		QMEM_WR32(qlt, req+0x10, rportid);
3982 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3983 		QMEM_WR16(qlt, req+0x1A, flags);
3984 		QMEM_WR16(qlt, req+0x20, oxid);
3985 		qlt_submit_req_entries(qlt, 1);
3986 		mutex_exit(&qlt->req_lock);
3987 
3988 		return;
3989 	}
3990 
3991 	task = (scsi_task_t *)cmd->cmd_specific;
3992 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3993 	qcmd->fw_xchg_addr = fw_xchg_addr;
3994 	qcmd->param.atio_byte3 = atio[3];
3995 	cmd->cmd_oxid = oxid;
3996 	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3997 	    atio[8+19]);
3998 	cmd->cmd_rportid = rportid;
3999 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
4000 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
4001 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4002 	/* Don't do a 64 byte read as this is IOMMU */
4003 	q = atio+0x28;
4004 	/* XXX Handle fcp_cntl */
4005 	task->task_cmd_seq_no = (uint32_t)(*q++);
4006 	task->task_csn_size = 8;
4007 	task->task_flags = qlt_task_flags[(*q++) & 7];
4008 	tm = *q++;
4009 	if (tm) {
4010 		if (tm & BIT_1)
4011 			task->task_mgmt_function = TM_ABORT_TASK_SET;
4012 		else if (tm & BIT_2)
4013 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
4014 		else if (tm & BIT_4)
4015 			task->task_mgmt_function = TM_LUN_RESET;
4016 		else if (tm & BIT_5)
4017 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
4018 		else if (tm & BIT_6)
4019 			task->task_mgmt_function = TM_CLEAR_ACA;
4020 		else
4021 			task->task_mgmt_function = TM_ABORT_TASK;
4022 	}
4023 	task->task_max_nbufs = STMF_BUFS_MAX;
4024 	task->task_csn_size = 8;
4025 	task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
4026 	p = task->task_cdb;
4027 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4028 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4029 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4030 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4031 	if (cdb_size > 16) {
4032 		uint16_t xtra = (uint16_t)(cdb_size - 16);
4033 		uint16_t i;
4034 		uint8_t cb[4];
4035 
4036 		while (xtra) {
4037 			*p++ = *q++;
4038 			xtra--;
4039 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
4040 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4041 				q = (uint8_t *)qlt->queue_mem_ptr +
4042 				    ATIO_QUEUE_OFFSET;
4043 			}
4044 		}
4045 		for (i = 0; i < 4; i++) {
4046 			cb[i] = *q++;
4047 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
4048 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4049 				q = (uint8_t *)qlt->queue_mem_ptr +
4050 				    ATIO_QUEUE_OFFSET;
4051 			}
4052 		}
4053 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
4054 		    (((uint32_t)cb[1]) << 16) |
4055 		    (((uint32_t)cb[2]) << 8) | cb[3];
4056 	} else {
4057 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
4058 		    (((uint32_t)q[1]) << 16) |
4059 		    (((uint32_t)q[2]) << 8) | q[3];
4060 	}
4061 	fct_post_rcvd_cmd(cmd, 0);
4062 }
4063 
4064 static void
4065 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
4066 {
4067 	uint16_t status;
4068 	uint32_t portid;
4069 	uint32_t subcode1, subcode2;
4070 
4071 	status = QMEM_RD16(qlt, rsp+8);
4072 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
4073 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
4074 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
4075 
4076 	mutex_enter(&qlt->preq_lock);
4077 	if (portid != qlt->rp_id_in_dereg) {
4078 		int instance = ddi_get_instance(qlt->dip);
4079 
4080 		EL(qlt, "implicit logout received portid = %xh\n", portid);
4081 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
4082 		    " received when driver wasn't waiting for it",
4083 		    instance, portid);
4084 		mutex_exit(&qlt->preq_lock);
4085 		return;
4086 	}
4087 
4088 	if (status != 0) {
4089 		EL(qlt, "implicit logout completed for %xh with status %xh, "
4090 		    "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
4091 		    subcode2);
4092 		if (status == 0x31 && subcode1 == 0x0a) {
4093 			qlt->rp_dereg_status = FCT_SUCCESS;
4094 		} else {
4095 			EL(qlt, "implicit logout portid=%xh, status=%xh, "
4096 			    "subcode1=%xh, subcode2=%xh\n", portid, status,
4097 			    subcode1, subcode2);
4098 			qlt->rp_dereg_status =
4099 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
4100 		}
4101 	} else {
4102 		qlt->rp_dereg_status = FCT_SUCCESS;
4103 	}
4104 	cv_signal(&qlt->rp_dereg_cv);
4105 	mutex_exit(&qlt->preq_lock);
4106 }
4107 
4108 /*
4109  * Note that when an ELS is aborted, the regular or aborted completion
4110  * (if any) gets posted before the abort IOCB comes back on response queue.
4111  */
4112 static void
4113 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4114 {
4115 	char		info[160];
4116 	fct_cmd_t	*cmd;
4117 	qlt_cmd_t	*qcmd;
4118 	uint32_t	hndl;
4119 	uint32_t	subcode1, subcode2;
4120 	uint16_t	status;
4121 
4122 	hndl = QMEM_RD32(qlt, rsp+4);
4123 	status = QMEM_RD16(qlt, rsp+8);
4124 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4125 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4126 
4127 	if (!CMD_HANDLE_VALID(hndl)) {
4128 		EL(qlt, "handle = %xh\n", hndl);
4129 		/*
4130 		 * This cannot happen for unsol els completion. This can
4131 		 * only happen when abort for an unsol els completes.
4132 		 * This condition indicates a firmware bug.
4133 		 */
4134 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
4135 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4136 		    hndl, status, subcode1, subcode2, (void *)rsp);
4137 		info[159] = 0;
4138 		(void) fct_port_shutdown(qlt->qlt_port,
4139 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4140 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4141 		return;
4142 	}
4143 
4144 	if (status == 5) {
4145 		/*
4146 		 * When an unsolicited ELS is aborted, the abort is done
4147 		 * by an ELSPT IOCB with abort control. This completion is for
4148 		 * the aborted IOCB (the abortee), not the aborter. We will do
4149 		 * the cleanup when the IOCB which caused the abort returns.
4150 		 */
4151 		EL(qlt, "status = %xh\n", status);
4152 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
4153 		return;
4154 	}
4155 
4156 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4157 	if (cmd == NULL) {
4158 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4159 		/*
4160 		 * Now why would this happen ???
4161 		 */
4162 		(void) snprintf(info, 160,
4163 		    "qlt_handle_unsol_els_completion: can not "
4164 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4165 		    (void *)rsp);
4166 		info[159] = 0;
4167 		(void) fct_port_shutdown(qlt->qlt_port,
4168 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4169 
4170 		return;
4171 	}
4172 
4173 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4174 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4175 	if (qcmd->flags & QLT_CMD_ABORTING) {
4176 		/*
4177 		 * This is the same case as "if (status == 5)" above. The
4178 		 * only difference is that in this case the firmware actually
4179 		 * finished sending the response. So the abort attempt will
4180 		 * come back with status ?. We will handle it there.
4181 		 */
4182 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
4183 		    "abort it");
4184 		return;
4185 	}
4186 
4187 	if (qcmd->dbuf != NULL) {
4188 		qlt_dmem_free(NULL, qcmd->dbuf);
4189 		qcmd->dbuf = NULL;
4190 	}
4191 
4192 	if (status == 0) {
4193 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4194 	} else {
4195 		fct_send_response_done(cmd,
4196 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4197 	}
4198 }
4199 
4200 static void
4201 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4202 {
4203 	char		info[160];
4204 	fct_cmd_t	*cmd;
4205 	qlt_cmd_t	*qcmd;
4206 	uint32_t	hndl;
4207 	uint32_t	subcode1, subcode2;
4208 	uint16_t	status;
4209 
4210 	hndl = QMEM_RD32(qlt, rsp+4);
4211 	status = QMEM_RD16(qlt, rsp+8);
4212 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4213 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4214 
4215 	if (!CMD_HANDLE_VALID(hndl)) {
4216 		EL(qlt, "handle = %xh\n", hndl);
4217 		ASSERT(hndl == 0);
4218 		/*
4219 		 * Someone has requested to abort it, but no one is waiting for
4220 		 * this completion.
4221 		 */
4222 		if ((status != 0) && (status != 8)) {
4223 			EL(qlt, "status = %xh\n", status);
4224 			/*
4225 			 * There could be exchange resource leakage, so
4226 			 * throw HBA fatal error event now
4227 			 */
4228 			(void) snprintf(info, 160,
4229 			    "qlt_handle_unsol_els_abort_completion: "
4230 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4231 			    hndl, status, subcode1, subcode2, (void *)rsp);
4232 			info[159] = 0;
4233 			(void) fct_port_shutdown(qlt->qlt_port,
4234 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4235 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4236 			return;
4237 		}
4238 
4239 		return;
4240 	}
4241 
4242 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4243 	if (cmd == NULL) {
4244 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4245 		/*
4246 		 * Why would this happen ??
4247 		 */
4248 		(void) snprintf(info, 160,
4249 		    "qlt_handle_unsol_els_abort_completion: can not get "
4250 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4251 		    (void *)rsp);
4252 		info[159] = 0;
4253 		(void) fct_port_shutdown(qlt->qlt_port,
4254 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4255 
4256 		return;
4257 	}
4258 
4259 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4260 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4261 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4262 
4263 	if (qcmd->dbuf != NULL) {
4264 		qlt_dmem_free(NULL, qcmd->dbuf);
4265 		qcmd->dbuf = NULL;
4266 	}
4267 
4268 	if (status == 0) {
4269 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4270 	} else if (status == 8) {
4271 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4272 	} else {
4273 		fct_cmd_fca_aborted(cmd,
4274 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4275 	}
4276 }
4277 
4278 static void
4279 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4280 {
4281 	char		info[160];
4282 	fct_cmd_t	*cmd;
4283 	fct_els_t	*els;
4284 	qlt_cmd_t	*qcmd;
4285 	uint32_t	hndl;
4286 	uint32_t	subcode1, subcode2;
4287 	uint16_t	status;
4288 
4289 	hndl = QMEM_RD32(qlt, rsp+4);
4290 	status = QMEM_RD16(qlt, rsp+8);
4291 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4292 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4293 
4294 	if (!CMD_HANDLE_VALID(hndl)) {
4295 		EL(qlt, "handle = %xh\n", hndl);
4296 		/*
4297 		 * This cannot happen for sol els completion.
4298 		 */
4299 		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
4300 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4301 		    hndl, status, subcode1, subcode2, (void *)rsp);
4302 		info[159] = 0;
4303 		(void) fct_port_shutdown(qlt->qlt_port,
4304 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4305 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4306 		return;
4307 	}
4308 
4309 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4310 	if (cmd == NULL) {
4311 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4312 		(void) snprintf(info, 160,
4313 		    "qlt_handle_sol_els_completion: can not "
4314 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4315 		    (void *)rsp);
4316 		info[159] = 0;
4317 		(void) fct_port_shutdown(qlt->qlt_port,
4318 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4319 
4320 		return;
4321 	}
4322 
4323 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
4324 	els = (fct_els_t *)cmd->cmd_specific;
4325 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4326 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
4327 
4328 	if (qcmd->flags & QLT_CMD_ABORTING) {
4329 		/*
4330 		 * We will handle it when the ABORT IO IOCB returns.
4331 		 */
4332 		return;
4333 	}
4334 
4335 	if (qcmd->dbuf != NULL) {
4336 		if (status == 0) {
4337 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4338 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4339 			    qcmd->param.resp_offset,
4340 			    els->els_resp_payload, els->els_resp_size);
4341 		}
4342 		qlt_dmem_free(NULL, qcmd->dbuf);
4343 		qcmd->dbuf = NULL;
4344 	}
4345 
4346 	if (status == 0) {
4347 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4348 	} else {
4349 		fct_send_cmd_done(cmd,
4350 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4351 	}
4352 }
4353 
4354 static void
4355 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
4356 {
4357 	fct_cmd_t	*cmd;
4358 	fct_sol_ct_t	*ct;
4359 	qlt_cmd_t	*qcmd;
4360 	uint32_t	 hndl;
4361 	uint16_t	 status;
4362 	char		 info[160];
4363 
4364 	hndl = QMEM_RD32(qlt, rsp+4);
4365 	status = QMEM_RD16(qlt, rsp+8);
4366 
4367 	if (!CMD_HANDLE_VALID(hndl)) {
4368 		EL(qlt, "handle = %xh\n", hndl);
4369 		/*
4370 		 * Solicited commands will always have a valid handle.
4371 		 */
4372 		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
4373 		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4374 		info[159] = 0;
4375 		(void) fct_port_shutdown(qlt->qlt_port,
4376 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4377 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4378 		return;
4379 	}
4380 
4381 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4382 	if (cmd == NULL) {
4383 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4384 		(void) snprintf(info, 160,
4385 		    "qlt_handle_ct_completion: cannot find "
4386 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4387 		    (void *)rsp);
4388 		info[159] = 0;
4389 		(void) fct_port_shutdown(qlt->qlt_port,
4390 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4391 
4392 		return;
4393 	}
4394 
4395 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4396 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4397 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
4398 
4399 	if (qcmd->flags & QLT_CMD_ABORTING) {
4400 		/*
4401 		 * We will handle it when ABORT IO IOCB returns;
4402 		 */
4403 		return;
4404 	}
4405 
4406 	ASSERT(qcmd->dbuf);
4407 	if (status == 0) {
4408 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4409 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4410 		    qcmd->param.resp_offset,
4411 		    ct->ct_resp_payload, ct->ct_resp_size);
4412 	}
4413 	qlt_dmem_free(NULL, qcmd->dbuf);
4414 	qcmd->dbuf = NULL;
4415 
4416 	if (status == 0) {
4417 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4418 	} else {
4419 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4420 	}
4421 }
4422 
4423 static void
4424 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
4425 {
4426 	fct_cmd_t	*cmd;
4427 	scsi_task_t	*task;
4428 	qlt_cmd_t	*qcmd;
4429 	stmf_data_buf_t	*dbuf;
4430 	fct_status_t	fc_st;
4431 	uint32_t	iof = 0;
4432 	uint32_t	hndl;
4433 	uint16_t	status;
4434 	uint16_t	flags;
4435 	uint8_t		abort_req;
4436 	uint8_t		n;
4437 	char		info[160];
4438 
4439 	/* XXX: Check validity of the IOCB by checking 4th byte. */
4440 	hndl = QMEM_RD32(qlt, rsp+4);
4441 	status = QMEM_RD16(qlt, rsp+8);
4442 	flags = QMEM_RD16(qlt, rsp+0x1a);
4443 	n = rsp[2];
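	/*
	 * Byte 2 of the CTIO completion echoes what we placed in the
	 * request: the dbuf handle for a data-transfer CTIO, or BIT_7 for
	 * a pure status CTIO (see qlt_send_status()). It is used below to
	 * tell a data completion apart from a status completion.
	 */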
4444 
4445 	if (!CMD_HANDLE_VALID(hndl)) {
4446 		EL(qlt, "handle = %xh\n", hndl);
4447 		ASSERT(hndl == 0);
4448 		/*
4449 		 * Someone has requested to abort it, but no one is waiting for
4450 		 * this completion.
4451 		 */
4452 		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
4453 		    (void *)rsp);
4454 		if ((status != 1) && (status != 2)) {
4455 			EL(qlt, "status = %xh\n", status);
4456 			/*
4457 			 * There could be exchange resource leakage, so
4458 			 * throw HBA fatal error event now
4459 			 */
4460 			(void) snprintf(info, 160,
4461 			    "qlt_handle_ctio_completion: hndl-"
4462 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4463 			info[159] = 0;
4464 			(void) fct_port_shutdown(qlt->qlt_port,
4465 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4466 
4467 		}
4468 
4469 		return;
4470 	}
4471 
4472 	if (flags & BIT_14) {
4473 		abort_req = 1;
4474 		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
4475 		    (void *)rsp);
4476 	} else {
4477 		abort_req = 0;
4478 	}
4479 
4480 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4481 	if (cmd == NULL) {
4482 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4483 		(void) snprintf(info, 160,
4484 		    "qlt_handle_ctio_completion: cannot find "
4485 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4486 		    (void *)rsp);
4487 		info[159] = 0;
4488 		(void) fct_port_shutdown(qlt->qlt_port,
4489 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4490 
4491 		return;
4492 	}
4493 
4494 	task = (scsi_task_t *)cmd->cmd_specific;
4495 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4496 	if (qcmd->dbuf_rsp_iu) {
4497 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
4498 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
4499 		qcmd->dbuf_rsp_iu = NULL;
4500 	}
4501 
4502 	if ((status == 1) || (status == 2)) {
4503 		if (abort_req) {
4504 			fc_st = FCT_ABORT_SUCCESS;
4505 			iof = FCT_IOF_FCA_DONE;
4506 		} else {
4507 			fc_st = FCT_SUCCESS;
4508 			if (flags & BIT_15) {
4509 				iof = FCT_IOF_FCA_DONE;
4510 			}
4511 		}
4512 	} else {
4513 		EL(qlt, "status = %xh\n", status);
4514 		if ((status == 8) && abort_req) {
4515 			fc_st = FCT_NOT_FOUND;
4516 			iof = FCT_IOF_FCA_DONE;
4517 		} else {
4518 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
4519 		}
4520 	}
4521 	dbuf = NULL;
4522 	if (((n & BIT_7) == 0) && (!abort_req)) {
4523 		/* A completion of data xfer */
4524 		if (n == 0) {
4525 			dbuf = qcmd->dbuf;
4526 		} else {
4527 			dbuf = stmf_handle_to_buf(task, n);
4528 		}
4529 
4530 		ASSERT(dbuf != NULL);
4531 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
4532 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
4533 		if (flags & BIT_15) {
4534 			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
4535 			    DB_STATUS_GOOD_SENT);
4536 		}
4537 
4538 		dbuf->db_xfer_status = fc_st;
4539 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
4540 		return;
4541 	}
4542 	if (!abort_req) {
4543 		/*
4544 		 * This was just a pure status xfer.
4545 		 */
4546 		fct_send_response_done(cmd, fc_st, iof);
4547 		return;
4548 	}
4549 
4550 	fct_cmd_fca_aborted(cmd, fc_st, iof);
4551 }
4552 
4553 static void
4554 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4555 {
4556 	char		info[80];
4557 	fct_cmd_t	*cmd;
4558 	qlt_cmd_t	*qcmd;
4559 	uint32_t	h;
4560 	uint16_t	status;
4561 
4562 	h = QMEM_RD32(qlt, rsp+4);
4563 	status = QMEM_RD16(qlt, rsp+8);
4564 
4565 	if (!CMD_HANDLE_VALID(h)) {
4566 		EL(qlt, "handle = %xh\n", h);
4567 		/*
4568 		 * Solicited commands always have a valid handle.
4569 		 */
4570 		(void) snprintf(info, 80,
4571 		    "qlt_handle_sol_abort_completion: hndl-"
4572 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
4573 		info[79] = 0;
4574 		(void) fct_port_shutdown(qlt->qlt_port,
4575 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4576 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4577 		return;
4578 	}
4579 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4580 	if (cmd == NULL) {
4581 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
4582 		/*
4583 		 * What happened to the cmd ??
4584 		 */
4585 		(void) snprintf(info, 80,
4586 		    "qlt_handle_sol_abort_completion: cannot "
4587 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4588 		    (void *)rsp);
4589 		info[79] = 0;
4590 		(void) fct_port_shutdown(qlt->qlt_port,
4591 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4592 
4593 		return;
4594 	}
4595 
4596 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4597 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
4598 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4599 	if (qcmd->dbuf != NULL) {
4600 		qlt_dmem_free(NULL, qcmd->dbuf);
4601 		qcmd->dbuf = NULL;
4602 	}
4603 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4604 	if (status == 0) {
4605 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4606 	} else if (status == 0x31) {
4607 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4608 	} else {
4609 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4610 	}
4611 }
4612 
4613 static void
4614 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
4615 {
4616 	qlt_abts_cmd_t	*qcmd;
4617 	fct_cmd_t	*cmd;
4618 	uint32_t	remote_portid;
4619 	char		info[160];
4620 
4621 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4622 	    ((uint32_t)(resp[0x1A])) << 16;
4623 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4624 	    sizeof (qlt_abts_cmd_t), 0);
4625 	if (cmd == NULL) {
4626 		EL(qlt, "fct_alloc cmd==NULL\n");
4627 		(void) snprintf(info, 160,
4628 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
4629 		    "allocate space for fct_cmd", (void *)qlt);
4630 		info[159] = 0;
4631 		(void) fct_port_shutdown(qlt->qlt_port,
4632 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4633 		return;
4634 	}
4635 
4636 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
4637 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
4638 	bcopy(resp, qcmd->buf, IOCB_SIZE);
4639 	cmd->cmd_port = qlt->qlt_port;
4640 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4641 	if (cmd->cmd_rp_handle == 0xFFFF)
4642 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4643 
4644 	cmd->cmd_rportid = remote_portid;
4645 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4646 	    ((uint32_t)(resp[0x16])) << 16;
4647 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4648 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
4649 	fct_post_rcvd_cmd(cmd, 0);
4650 }
4651 
4652 static void
4653 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4654 {
4655 	uint16_t status;
4656 	char	info[80];
4657 
4658 	status = QMEM_RD16(qlt, resp+8);
4659 
4660 	if ((status == 0) || (status == 5)) {
4661 		return;
4662 	}
4663 	EL(qlt, "status = %xh\n", status);
4664 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4665 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4666 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4667 	info[79] = 0;
4668 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4669 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4670 }
4671 
4672 #ifdef	DEBUG
4673 uint32_t qlt_drop_abort_counter = 0;
4674 #endif
4675 
4676 fct_status_t
4677 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4678 {
4679 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4680 
4681 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4682 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4683 		return (FCT_NOT_FOUND);
4684 	}
4685 
4686 #ifdef DEBUG
4687 	if (qlt_drop_abort_counter > 0) {
4688 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4689 			return (FCT_SUCCESS);
4690 	}
4691 #endif
4692 
4693 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4694 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4695 	}
4696 
4697 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4698 		cmd->cmd_handle = 0;
4699 	}
4700 
4701 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4702 		return (qlt_send_abts_response(qlt, cmd, 1));
4703 	}
4704 
4705 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4706 		return (qlt_abort_purex(qlt, cmd));
4707 	}
4708 
4709 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4710 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4711 		return (qlt_abort_sol_cmd(qlt, cmd));
4712 	}
4713 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4714 
4715 	ASSERT(0);
4716 	return (FCT_FAILURE);
4717 }
4718 
4719 fct_status_t
4720 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4721 {
4722 	uint8_t *req;
4723 	qlt_cmd_t *qcmd;
4724 
4725 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4726 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4727 	EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
4728 
4729 	mutex_enter(&qlt->req_lock);
4730 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4731 	if (req == NULL) {
4732 		mutex_exit(&qlt->req_lock);
4733 
4734 		return (FCT_BUSY);
4735 	}
4736 	bzero(req, IOCB_SIZE);
4737 	req[0] = 0x33; req[1] = 1;
4738 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4739 	if (cmd->cmd_rp) {
4740 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4741 	} else {
4742 		QMEM_WR16(qlt, req+8, 0xFFFF);
4743 	}
4744 
4745 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4746 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4747 	qlt_submit_req_entries(qlt, 1);
4748 	mutex_exit(&qlt->req_lock);
4749 
4750 	return (FCT_SUCCESS);
4751 }
4752 
4753 fct_status_t
4754 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4755 {
4756 	uint8_t *req;
4757 	qlt_cmd_t *qcmd;
4758 	fct_els_t *els;
4759 	uint8_t elsop, req1f;
4760 
4761 	els = (fct_els_t *)cmd->cmd_specific;
4762 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4763 	elsop = els->els_req_payload[0];
4764 	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
4765 	    elsop);
4766 	req1f = 0x60;	/* Terminate xchg */
4767 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4768 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4769 		req1f = (uint8_t)(req1f | BIT_4);
4770 	}
4771 
4772 	mutex_enter(&qlt->req_lock);
4773 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4774 	if (req == NULL) {
4775 		mutex_exit(&qlt->req_lock);
4776 
4777 		return (FCT_BUSY);
4778 	}
4779 
4780 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4781 	bzero(req, IOCB_SIZE);
4782 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4783 	req[0x16] = elsop; req[0x1f] = req1f;
4784 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4785 	if (cmd->cmd_rp) {
4786 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4787 		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
4788 	} else {
4789 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4790 		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
4791 	}
4792 
4793 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4794 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4795 	qlt_submit_req_entries(qlt, 1);
4796 	mutex_exit(&qlt->req_lock);
4797 
4798 	return (FCT_SUCCESS);
4799 }
4800 
4801 fct_status_t
4802 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4803 {
4804 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4805 	uint8_t *req;
4806 	uint16_t flags;
4807 
4808 	flags = (uint16_t)(BIT_14 |
4809 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
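	/*
	 * BIT_14 in the CTIO flags asks the firmware to terminate the
	 * exchange instead of transferring data; the completion is then
	 * treated as an abort by qlt_handle_ctio_completion().
	 */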
4810 	EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4811 
4812 	mutex_enter(&qlt->req_lock);
4813 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4814 	if (req == NULL) {
4815 		mutex_exit(&qlt->req_lock);
4816 
4817 		return (FCT_BUSY);
4818 	}
4819 
4820 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4821 	bzero(req, IOCB_SIZE);
4822 	req[0] = 0x12; req[1] = 0x1;
4823 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4824 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4825 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4826 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4827 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4828 	QMEM_WR16(qlt, req+0x1A, flags);
4829 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4830 	qlt_submit_req_entries(qlt, 1);
4831 	mutex_exit(&qlt->req_lock);
4832 
4833 	return (FCT_SUCCESS);
4834 }
4835 
4836 fct_status_t
4837 qlt_send_cmd(fct_cmd_t *cmd)
4838 {
4839 	qlt_state_t *qlt;
4840 
4841 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4842 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4843 		return (qlt_send_els(qlt, cmd));
4844 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4845 		return (qlt_send_ct(qlt, cmd));
4846 	}
4847 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4848 
4849 	ASSERT(0);
4850 	return (FCT_FAILURE);
4851 }
4852 
4853 fct_status_t
4854 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4855 {
4856 	uint8_t *req;
4857 	fct_els_t *els;
4858 	qlt_cmd_t *qcmd;
4859 	stmf_data_buf_t *buf;
4860 	qlt_dmem_bctl_t *bctl;
4861 	uint32_t sz, minsz;
4862 
4863 	els = (fct_els_t *)cmd->cmd_specific;
4864 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4865 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
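	/*
	 * A single DMA buffer carries both directions: the request
	 * payload at offset 0 and the response at the next 8-byte
	 * aligned offset. The ELS pass-through IOCB below gets one DSD
	 * for each half.
	 */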
4866 	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
4867 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4868 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4869 	if (buf == NULL) {
4870 		return (FCT_BUSY);
4871 	}
4872 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4873 
4874 	qcmd->dbuf = buf;
4875 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4876 	    els->els_req_size);
4877 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4878 
4879 	mutex_enter(&qlt->req_lock);
4880 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4881 	if (req == NULL) {
4882 		qlt_dmem_free(NULL, buf);
4883 		mutex_exit(&qlt->req_lock);
4884 		return (FCT_BUSY);
4885 	}
4886 	bzero(req, IOCB_SIZE);
4887 	req[0] = 0x53; req[1] = 1;
4888 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4889 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4890 	QMEM_WR16(qlt, (&req[0xC]), 1);
4891 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4892 	QMEM_WR16(qlt, (&req[0x14]), 1);
4893 	req[0x16] = els->els_req_payload[0];
4894 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4895 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
4896 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
4897 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
4898 	}
4899 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4900 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4901 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4902 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4903 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4904 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4905 	    qcmd->param.resp_offset));
4906 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4907 	qlt_submit_req_entries(qlt, 1);
4908 	mutex_exit(&qlt->req_lock);
4909 
4910 	return (FCT_SUCCESS);
4911 }
4912 
4913 fct_status_t
4914 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4915 {
4916 	uint8_t *req;
4917 	fct_sol_ct_t *ct;
4918 	qlt_cmd_t *qcmd;
4919 	stmf_data_buf_t *buf;
4920 	qlt_dmem_bctl_t *bctl;
4921 	uint32_t sz, minsz;
4922 
4923 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4924 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4925 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
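	/*
	 * Same single-buffer layout as qlt_send_els(): CT request at
	 * offset 0, response at the next 8-byte aligned offset, one DSD
	 * for each in the CT pass-through IOCB.
	 */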
4926 	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
4927 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4928 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4929 	if (buf == NULL) {
4930 		return (FCT_BUSY);
4931 	}
4932 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4933 
4934 	qcmd->dbuf = buf;
4935 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4936 	    ct->ct_req_size);
4937 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4938 
4939 	mutex_enter(&qlt->req_lock);
4940 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4941 	if (req == NULL) {
4942 		qlt_dmem_free(NULL, buf);
4943 		mutex_exit(&qlt->req_lock);
4944 		return (FCT_BUSY);
4945 	}
4946 	bzero(req, IOCB_SIZE);
4947 	req[0] = 0x29; req[1] = 1;
4948 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4949 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4950 	QMEM_WR16(qlt, (&req[0xC]), 1);
4951 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4952 	QMEM_WR16(qlt, (&req[0x14]), 1);
4953 
4954 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4955 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4956 
4957 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4958 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4959 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4960 	    qcmd->param.resp_offset));		/* RESPONSE DSD */
4961 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4962 
4963 	qlt_submit_req_entries(qlt, 1);
4964 	mutex_exit(&qlt->req_lock);
4965 
4966 	return (FCT_SUCCESS);
4967 }
4968 
4969 
4970 /*
4971  * All QLT_FIRMWARE_* errors are mainly handled in this function.
4972  * It cannot be called in interrupt context.
4973  *
4974  * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4975  * and qlt_ioctl_lock
4976  */
4977 static fct_status_t
4978 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4979 {
4980 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4981 	int		i;
4982 	int		retries, n;
4983 	uint_t		size_left;
4984 	char		c = ' ';
4985 	uint32_t	addr, endaddr, words_to_read;
4986 	caddr_t		buf;
4987 	fct_status_t	ret;
4988 
4989 	mutex_enter(&qlt->qlt_ioctl_lock);
4990 	/*
4991 	 * To make sure that there's no outstanding dumping task
4992 	 */
4993 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4994 		mutex_exit(&qlt->qlt_ioctl_lock);
4995 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4996 		    qlt->qlt_ioctl_flags);
4997 		EL(qlt, "outstanding\n");
4998 		return (FCT_FAILURE);
4999 	}
5000 
5001 	/*
5002 	 * To make sure not to overwrite existing dump
5003 	 */
5004 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
5005 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
5006 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
5007 		/*
5008 		 * If we already have one dump, but it wasn't triggered by the
5009 		 * user and the user hasn't fetched it, we shouldn't dump again.
5010 		 */
5011 		mutex_exit(&qlt->qlt_ioctl_lock);
5012 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
5013 		    qlt->qlt_ioctl_flags);
5014 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
5015 		    "is one already outstanding.", qlt->instance);
5016 		return (FCT_FAILURE);
5017 	}
5018 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
5019 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
5020 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
5021 	} else {
5022 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
5023 	}
5024 	mutex_exit(&qlt->qlt_ioctl_lock);
5025 
5026 	size_left = QLT_FWDUMP_BUFSIZE;
5027 	if (!qlt->qlt_fwdump_buf) {
5028 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
5029 		/*
5030 		 * It's the only place that we allocate buf for dumping. After
5031 		 * it's allocated, we will use it until the port is detached.
5032 		 */
5033 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
5034 	}
5035 
5036 	/*
5037 	 * Start to dump firmware
5038 	 */
5039 	buf = (caddr_t)qlt->qlt_fwdump_buf;
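	/*
	 * The dump is an ASCII report: each step below selects a register
	 * window, reads a block of registers via qlt_fwdump_dump_regs()
	 * and appends the formatted values, with size_left tracking the
	 * space remaining in the dump buffer.
	 */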
5040 
5041 	/*
5042 	 * Print the ISP firmware revision number and attributes information
5043 	 * Read the RISC to Host Status register
5044 	 */
5045 	n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
5046 	    "Attributes %04x\n\nR2H Status Register\n%08x",
5047 	    qlt->fw_major, qlt->fw_minor,
5048 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
5049 	buf += n; size_left -= n;
5050 
5051 	/*
5052 	 * Before pausing the RISC, make sure no mailbox can execute
5053 	 */
5054 	mutex_enter(&qlt->mbox_lock);
5055 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
5056 		/*
5057 		 * Wait to grab the mailboxes
5058 		 */
5059 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
5060 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
5061 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
5062 			    ddi_get_lbolt() + drv_usectohz(1000000));
5063 			if (retries > 5) {
5064 				mutex_exit(&qlt->mbox_lock);
5065 				EL(qlt, "can't drain out mailbox commands\n");
5066 				goto dump_fail;
5067 			}
5068 		}
5069 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
5070 		cv_broadcast(&qlt->mbox_cv);
5071 	}
5072 	mutex_exit(&qlt->mbox_lock);
5073 
5074 	/*
5075 	 * Pause the RISC processor
5076 	 */
5077 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
5078 
5079 	/*
5080 	 * Wait for the RISC processor to pause
5081 	 */
5082 	for (i = 0; i < 200; i++) {
5083 		if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
5084 			break;
5085 		}
5086 		drv_usecwait(1000);
5087 	}
5088 	if (i == 200) {
5089 		EL(qlt, "can't pause\n");
5090 		return (FCT_FAILURE);
5091 	}
5092 
5093 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
5094 		goto over_25xx_specific_dump;
5095 	}
5096 	n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
5097 	buf += n; size_left -= n;
5098 	REG_WR32(qlt, 0x54, 0x7000);
5099 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5100 	buf += n; size_left -= n;
5101 	REG_WR32(qlt, 0x54, 0x7010);
5102 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5103 	buf += n; size_left -= n;
5104 	REG_WR32(qlt, 0x54, 0x7C00);
5105 
5106 	n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
5107 	buf += n; size_left -= n;
5108 	REG_WR32(qlt, 0xC0, 0x1);
5109 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
5110 	buf += n; size_left -= n;
5111 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
5112 	buf += n; size_left -= n;
5113 	REG_WR32(qlt, 0xC0, 0x0);
5114 
5115 over_25xx_specific_dump:;
5116 	n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
5117 	buf += n; size_left -= n;
5118 	/*
5119 	 * Capture data from 32 registers
5120 	 */
5121 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
5122 	buf += n; size_left -= n;
5123 
5124 	/*
5125 	 * Disable interrupts
5126 	 */
5127 	REG_WR32(qlt, 0xc, 0);
5128 
5129 	/*
5130 	 * Shadow registers
5131 	 */
5132 	n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
5133 	buf += n; size_left -= n;
5134 
5135 	REG_WR32(qlt, 0x54, 0xF70);
5136 	addr = 0xb0000000;
5137 	for (i = 0; i < 0xb; i++) {
5138 		if ((!qlt->qlt_25xx_chip) &&
5139 		    (!qlt->qlt_81xx_chip) &&
5140 		    (i >= 7)) {
5141 			break;
5142 		}
5143 		if (i && ((i & 7) == 0)) {
5144 			n = (int)snprintf(buf, size_left, "\n");
5145 			buf += n; size_left -= n;
5146 		}
5147 		REG_WR32(qlt, 0xF0, addr);
5148 		n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
5149 		buf += n; size_left -= n;
5150 		addr += 0x100000;
5151 	}
5152 
5153 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5154 		REG_WR32(qlt, 0x54, 0x10);
5155 		n = (int)snprintf(buf, size_left,
5156 		    "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
5157 		buf += n; size_left -= n;
5158 	}
5159 
5160 	/*
5161 	 * Mailbox registers
5162 	 */
5163 	n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
5164 	buf += n; size_left -= n;
5165 	for (i = 0; i < 32; i += 2) {
5166 		if ((i + 2) & 15) {
5167 			c = ' ';
5168 		} else {
5169 			c = '\n';
5170 		}
5171 		n = (int)snprintf(buf, size_left, "%04x %04x%c",
5172 		    REG_RD16(qlt, 0x80 + (i << 1)),
5173 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
5174 		buf += n; size_left -= n;
5175 	}
5176 
5177 	/*
5178 	 * Transfer sequence registers
5179 	 */
5180 	n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
5181 	buf += n; size_left -= n;
5182 
5183 	REG_WR32(qlt, 0x54, 0xBF00);
5184 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5185 	buf += n; size_left -= n;
5186 	REG_WR32(qlt, 0x54, 0xBF10);
5187 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5188 	buf += n; size_left -= n;
5189 	REG_WR32(qlt, 0x54, 0xBF20);
5190 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5191 	buf += n; size_left -= n;
5192 	REG_WR32(qlt, 0x54, 0xBF30);
5193 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5194 	buf += n; size_left -= n;
5195 	REG_WR32(qlt, 0x54, 0xBF40);
5196 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5197 	buf += n; size_left -= n;
5198 	REG_WR32(qlt, 0x54, 0xBF50);
5199 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5200 	buf += n; size_left -= n;
5201 	REG_WR32(qlt, 0x54, 0xBF60);
5202 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5203 	buf += n; size_left -= n;
5204 	REG_WR32(qlt, 0x54, 0xBF70);
5205 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5206 	buf += n; size_left -= n;
5207 	n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5208 	buf += n; size_left -= n;
5209 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5210 		REG_WR32(qlt, 0x54, 0xBFC0);
5211 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5212 		buf += n; size_left -= n;
5213 		REG_WR32(qlt, 0x54, 0xBFD0);
5214 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5215 		buf += n; size_left -= n;
5216 	}
5217 	REG_WR32(qlt, 0x54, 0xBFE0);
5218 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5219 	buf += n; size_left -= n;
5220 	n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5221 	buf += n; size_left -= n;
5222 	REG_WR32(qlt, 0x54, 0xBFF0);
5223 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5224 	buf += n; size_left -= n;
5225 
5226 	/*
5227 	 * Receive sequence registers
5228 	 */
5229 	n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5230 	buf += n; size_left -= n;
5231 	REG_WR32(qlt, 0x54, 0xFF00);
5232 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5233 	buf += n; size_left -= n;
5234 	REG_WR32(qlt, 0x54, 0xFF10);
5235 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5236 	buf += n; size_left -= n;
5237 	REG_WR32(qlt, 0x54, 0xFF20);
5238 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5239 	buf += n; size_left -= n;
5240 	REG_WR32(qlt, 0x54, 0xFF30);
5241 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5242 	buf += n; size_left -= n;
5243 	REG_WR32(qlt, 0x54, 0xFF40);
5244 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5245 	buf += n; size_left -= n;
5246 	REG_WR32(qlt, 0x54, 0xFF50);
5247 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5248 	buf += n; size_left -= n;
5249 	REG_WR32(qlt, 0x54, 0xFF60);
5250 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5251 	buf += n; size_left -= n;
5252 	REG_WR32(qlt, 0x54, 0xFF70);
5253 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5254 	buf += n; size_left -= n;
5255 	n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5256 	buf += n; size_left -= n;
5257 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5258 		REG_WR32(qlt, 0x54, 0xFFC0);
5259 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5260 		buf += n; size_left -= n;
5261 	}
5262 	REG_WR32(qlt, 0x54, 0xFFD0);
5263 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5264 	buf += n; size_left -= n;
5265 	n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5266 	buf += n; size_left -= n;
5267 	REG_WR32(qlt, 0x54, 0xFFE0);
5268 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5269 	buf += n; size_left -= n;
5270 	n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5271 	buf += n; size_left -= n;
5272 	REG_WR32(qlt, 0x54, 0xFFF0);
5273 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5274 	buf += n; size_left -= n;
5275 
5276 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5277 		goto over_aseq_regs;
5278 
5279 	/*
5280 	 * Auxiliary sequencer registers
5281 	 */
5282 	n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5283 	buf += n; size_left -= n;
5284 	REG_WR32(qlt, 0x54, 0xB000);
5285 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5286 	buf += n; size_left -= n;
5287 	REG_WR32(qlt, 0x54, 0xB010);
5288 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5289 	buf += n; size_left -= n;
5290 	REG_WR32(qlt, 0x54, 0xB020);
5291 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5292 	buf += n; size_left -= n;
5293 	REG_WR32(qlt, 0x54, 0xB030);
5294 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5295 	buf += n; size_left -= n;
5296 	REG_WR32(qlt, 0x54, 0xB040);
5297 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5298 	buf += n; size_left -= n;
5299 	REG_WR32(qlt, 0x54, 0xB050);
5300 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5301 	buf += n; size_left -= n;
5302 	REG_WR32(qlt, 0x54, 0xB060);
5303 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5304 	buf += n; size_left -= n;
5305 	REG_WR32(qlt, 0x54, 0xB070);
5306 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5307 	buf += n; size_left -= n;
5308 	n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5309 	buf += n; size_left -= n;
5310 	REG_WR32(qlt, 0x54, 0xB0C0);
5311 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5312 	buf += n; size_left -= n;
5313 	REG_WR32(qlt, 0x54, 0xB0D0);
5314 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5315 	buf += n; size_left -= n;
5316 	n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5317 	buf += n; size_left -= n;
5318 	REG_WR32(qlt, 0x54, 0xB0E0);
5319 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5320 	buf += n; size_left -= n;
5321 	n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5322 	buf += n; size_left -= n;
5323 	REG_WR32(qlt, 0x54, 0xB0F0);
5324 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5325 	buf += n; size_left -= n;
5326 
5327 over_aseq_regs:;
5328 
5329 	/*
5330 	 * Command DMA registers
5331 	 */
5332 	n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5333 	buf += n; size_left -= n;
5334 	REG_WR32(qlt, 0x54, 0x7100);
5335 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5336 	buf += n; size_left -= n;
5337 
5338 	/*
5339 	 * Queues
5340 	 */
5341 	n = (int)snprintf(buf, size_left,
5342 	    "\nRequest0 Queue DMA Channel registers\n");
5343 	buf += n; size_left -= n;
5344 	REG_WR32(qlt, 0x54, 0x7200);
5345 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5346 	buf += n; size_left -= n;
5347 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5348 	buf += n; size_left -= n;
5349 
5350 	n = (int)snprintf(buf, size_left,
5351 	    "\n\nResponse0 Queue DMA Channel registers\n");
5352 	buf += n; size_left -= n;
5353 	REG_WR32(qlt, 0x54, 0x7300);
5354 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5355 	buf += n; size_left -= n;
5356 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5357 	buf += n; size_left -= n;
5358 
5359 	n = (int)snprintf(buf, size_left,
5360 	    "\n\nRequest1 Queue DMA Channel registers\n");
5361 	buf += n; size_left -= n;
5362 	REG_WR32(qlt, 0x54, 0x7400);
5363 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5364 	buf += n; size_left -= n;
5365 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5366 	buf += n; size_left -= n;
5367 
5368 	/*
5369 	 * Transmit DMA registers
5370 	 */
5371 	n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5372 	buf += n; size_left -= n;
5373 	REG_WR32(qlt, 0x54, 0x7600);
5374 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5375 	buf += n; size_left -= n;
5376 	REG_WR32(qlt, 0x54, 0x7610);
5377 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5378 	buf += n; size_left -= n;
5379 	n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5380 	buf += n; size_left -= n;
5381 	REG_WR32(qlt, 0x54, 0x7620);
5382 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5383 	buf += n; size_left -= n;
5384 	REG_WR32(qlt, 0x54, 0x7630);
5385 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5386 	buf += n; size_left -= n;
5387 	n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5388 	buf += n; size_left -= n;
5389 	REG_WR32(qlt, 0x54, 0x7640);
5390 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5391 	buf += n; size_left -= n;
5392 	REG_WR32(qlt, 0x54, 0x7650);
5393 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5394 	buf += n; size_left -= n;
5395 	n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5396 	buf += n; size_left -= n;
5397 	REG_WR32(qlt, 0x54, 0x7660);
5398 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5399 	buf += n; size_left -= n;
5400 	REG_WR32(qlt, 0x54, 0x7670);
5401 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5402 	buf += n; size_left -= n;
5403 	n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5404 	buf += n; size_left -= n;
5405 	REG_WR32(qlt, 0x54, 0x7680);
5406 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5407 	buf += n; size_left -= n;
5408 	REG_WR32(qlt, 0x54, 0x7690);
5409 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5410 	buf += n; size_left -= n;
5411 	n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5412 	buf += n; size_left -= n;
5413 	REG_WR32(qlt, 0x54, 0x76A0);
5414 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5415 	buf += n; size_left -= n;
5416 
5417 	/*
5418 	 * Receive DMA registers
5419 	 */
5420 	n = (int)snprintf(buf, size_left,
5421 	    "\nRCV Thread 0 Data DMA registers\n");
5422 	buf += n; size_left -= n;
5423 	REG_WR32(qlt, 0x54, 0x7700);
5424 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5425 	buf += n; size_left -= n;
5426 	REG_WR32(qlt, 0x54, 0x7710);
5427 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5428 	buf += n; size_left -= n;
5429 	n = (int)snprintf(buf, size_left,
5430 	    "\nRCV Thread 1 Data DMA registers\n");
5431 	buf += n; size_left -= n;
5432 	REG_WR32(qlt, 0x54, 0x7720);
5433 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5434 	buf += n; size_left -= n;
5435 	REG_WR32(qlt, 0x54, 0x7730);
5436 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5437 	buf += n; size_left -= n;
5438 
5439 	/*
5440 	 * RISC registers
5441 	 */
5442 	n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5443 	buf += n; size_left -= n;
5444 	REG_WR32(qlt, 0x54, 0x0F00);
5445 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5446 	buf += n; size_left -= n;
5447 	REG_WR32(qlt, 0x54, 0x0F10);
5448 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5449 	buf += n; size_left -= n;
5450 	REG_WR32(qlt, 0x54, 0x0F20);
5451 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5452 	buf += n; size_left -= n;
5453 	REG_WR32(qlt, 0x54, 0x0F30);
5454 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5455 	buf += n; size_left -= n;
5456 	REG_WR32(qlt, 0x54, 0x0F40);
5457 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5458 	buf += n; size_left -= n;
5459 	REG_WR32(qlt, 0x54, 0x0F50);
5460 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5461 	buf += n; size_left -= n;
5462 	REG_WR32(qlt, 0x54, 0x0F60);
5463 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5464 	buf += n; size_left -= n;
5465 	REG_WR32(qlt, 0x54, 0x0F70);
5466 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5467 	buf += n; size_left -= n;
5468 
5469 	/*
5470 	 * Local memory controller registers
5471 	 */
5472 	n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5473 	buf += n; size_left -= n;
5474 	REG_WR32(qlt, 0x54, 0x3000);
5475 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5476 	buf += n; size_left -= n;
5477 	REG_WR32(qlt, 0x54, 0x3010);
5478 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5479 	buf += n; size_left -= n;
5480 	REG_WR32(qlt, 0x54, 0x3020);
5481 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5482 	buf += n; size_left -= n;
5483 	REG_WR32(qlt, 0x54, 0x3030);
5484 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5485 	buf += n; size_left -= n;
5486 	REG_WR32(qlt, 0x54, 0x3040);
5487 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5488 	buf += n; size_left -= n;
5489 	REG_WR32(qlt, 0x54, 0x3050);
5490 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5491 	buf += n; size_left -= n;
5492 	REG_WR32(qlt, 0x54, 0x3060);
5493 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5494 	buf += n; size_left -= n;
5495 
5496 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5497 		REG_WR32(qlt, 0x54, 0x3070);
5498 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5499 		buf += n; size_left -= n;
5500 	}
5501 
5502 	/*
5503 	 * Fibre protocol module registers
5504 	 */
5505 	n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5506 	buf += n; size_left -= n;
5507 	REG_WR32(qlt, 0x54, 0x4000);
5508 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5509 	buf += n; size_left -= n;
5510 	REG_WR32(qlt, 0x54, 0x4010);
5511 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5512 	buf += n; size_left -= n;
5513 	REG_WR32(qlt, 0x54, 0x4020);
5514 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5515 	buf += n; size_left -= n;
5516 	REG_WR32(qlt, 0x54, 0x4030);
5517 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5518 	buf += n; size_left -= n;
5519 	REG_WR32(qlt, 0x54, 0x4040);
5520 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5521 	buf += n; size_left -= n;
5522 	REG_WR32(qlt, 0x54, 0x4050);
5523 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5524 	buf += n; size_left -= n;
5525 	REG_WR32(qlt, 0x54, 0x4060);
5526 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5527 	buf += n; size_left -= n;
5528 	REG_WR32(qlt, 0x54, 0x4070);
5529 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5530 	buf += n; size_left -= n;
5531 	REG_WR32(qlt, 0x54, 0x4080);
5532 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5533 	buf += n; size_left -= n;
5534 	REG_WR32(qlt, 0x54, 0x4090);
5535 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5536 	buf += n; size_left -= n;
5537 	REG_WR32(qlt, 0x54, 0x40A0);
5538 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5539 	buf += n; size_left -= n;
5540 	REG_WR32(qlt, 0x54, 0x40B0);
5541 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5542 	buf += n; size_left -= n;
5543 	if (qlt->qlt_81xx_chip) {
5544 		REG_WR32(qlt, 0x54, 0x40C0);
5545 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5546 		buf += n; size_left -= n;
5547 		REG_WR32(qlt, 0x54, 0x40D0);
5548 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5549 		buf += n; size_left -= n;
5550 	}
5551 
5552 	/*
5553 	 * Fibre buffer registers
5554 	 */
5555 	n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5556 	buf += n; size_left -= n;
5557 	REG_WR32(qlt, 0x54, 0x6000);
5558 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5559 	buf += n; size_left -= n;
5560 	REG_WR32(qlt, 0x54, 0x6010);
5561 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5562 	buf += n; size_left -= n;
5563 	REG_WR32(qlt, 0x54, 0x6020);
5564 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5565 	buf += n; size_left -= n;
5566 	REG_WR32(qlt, 0x54, 0x6030);
5567 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5568 	buf += n; size_left -= n;
5569 	REG_WR32(qlt, 0x54, 0x6040);
5570 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5571 	buf += n; size_left -= n;
5572 	REG_WR32(qlt, 0x54, 0x6100);
5573 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5574 	buf += n; size_left -= n;
5575 	REG_WR32(qlt, 0x54, 0x6130);
5576 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5577 	buf += n; size_left -= n;
5578 	REG_WR32(qlt, 0x54, 0x6150);
5579 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5580 	buf += n; size_left -= n;
5581 	REG_WR32(qlt, 0x54, 0x6170);
5582 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5583 	buf += n; size_left -= n;
5584 	REG_WR32(qlt, 0x54, 0x6190);
5585 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5586 	buf += n; size_left -= n;
5587 	REG_WR32(qlt, 0x54, 0x61B0);
5588 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5589 	buf += n; size_left -= n;
5590 	if (qlt->qlt_81xx_chip) {
5591 		REG_WR32(qlt, 0x54, 0x61C0);
5592 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5593 		buf += n; size_left -= n;
5594 	}
5595 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5596 		REG_WR32(qlt, 0x54, 0x6F00);
5597 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5598 		buf += n; size_left -= n;
5599 	}
5600 
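	/*
	 * Reset the chip before dumping RISC RAM through raw mailbox
	 * commands.  intr_sneak_counter is raised around the reset so
	 * that the interrupt handler discards any stray interrupts the
	 * reset generates (see qlt_isr()).
	 */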
5601 	qlt->intr_sneak_counter = 10;
5602 	mutex_enter(&qlt->intr_lock);
5603 	(void) qlt_reset_chip(qlt);
5604 	drv_usecwait(20);
5605 	qlt->intr_sneak_counter = 0;
5606 	mutex_exit(&qlt->intr_lock);
5607 
5608 	/*
5609 	 * Memory
5610 	 */
5611 	n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5612 	buf += n; size_left -= n;
5613 
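	/*
	 * Code RAM occupies RISC addresses 0x20000-0x21fff.  It is read
	 * in MBOX_DMA_MEM_SIZE/4-word chunks through the mailbox DMA
	 * scratch buffer and formatted into the dump.
	 */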
5614 	addr = 0x20000;
5615 	endaddr = 0x22000;
5616 	words_to_read = 0;
5617 	while (addr < endaddr) {
5618 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5619 		if ((words_to_read + addr) > endaddr) {
5620 			words_to_read = endaddr - addr;
5621 		}
5622 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5623 		    QLT_SUCCESS) {
5624 			EL(qlt, "Error reading risc ram - CODE RAM status="
5625 			    "%llxh\n", ret);
5626 			goto dump_fail;
5627 		}
5628 
5629 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5630 		buf += n; size_left -= n;
5631 
5632 		if (size_left < 100000) {
5633 			EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5634 			    size_left);
5635 			goto dump_ok;
5636 		}
5637 		addr += words_to_read;
5638 	}
5639 
5640 	n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5641 	buf += n; size_left -= n;
5642 
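	/*
	 * External memory holds the firmware image, from RISC address
	 * 0x100000 up to fw_endaddrhi:fw_endaddrlo (rounded up to a
	 * multiple of eight), captured in the same chunked fashion as
	 * Code RAM above.
	 */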
5643 	addr = 0x100000;
5644 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5645 	endaddr++;
5646 	if (endaddr & 7) {
5647 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
5648 	}
5649 
5650 	words_to_read = 0;
5651 	while (addr < endaddr) {
5652 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5653 		if ((words_to_read + addr) > endaddr) {
5654 			words_to_read = endaddr - addr;
5655 		}
5656 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5657 		    QLT_SUCCESS) {
5658 			EL(qlt, "Error reading risc ram - EXT RAM status="
5659 			    "%llxh\n", ret);
5660 			goto dump_fail;
5661 		}
5662 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5663 		buf += n; size_left -= n;
5664 		if (size_left < 100000) {
5665 			EL(qlt, "run out of space - EXT RAM\n");
5666 			goto dump_ok;
5667 		}
5668 		addr += words_to_read;
5669 	}
5670 
5671 	/*
5672 	 * Label the end tag
5673 	 */
5674 	n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5675 	buf += n; size_left -= n;
5676 
5677 	/*
5678 	 * Queue dumping
5679 	 */
5680 	n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5681 	buf += n; size_left -= n;
5682 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5683 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
5684 	buf += n; size_left -= n;
5685 
5686 	n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5687 	buf += n; size_left -= n;
5688 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5689 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5690 	buf += n; size_left -= n;
5691 
5692 	n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5693 	buf += n; size_left -= n;
5694 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5695 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5696 	buf += n; size_left -= n;
5697 
5698 	n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5699 	buf += n; size_left -= n;
5700 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5701 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5702 	buf += n; size_left -= n;
5703 
5704 	/*
5705 	 * Label dump reason
5706 	 */
5707 	n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5708 	    qlt->qlt_port_alias, ssci->st_additional_info);
5709 	buf += n; size_left -= n;
5710 
5711 dump_ok:
5712 	EL(qlt, "left-%d\n", size_left);
5713 
5714 	mutex_enter(&qlt->qlt_ioctl_lock);
5715 	qlt->qlt_ioctl_flags &=
5716 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5717 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5718 	mutex_exit(&qlt->qlt_ioctl_lock);
5719 	return (FCT_SUCCESS);
5720 
5721 dump_fail:
5722 	EL(qlt, "dump not done\n");
5723 	mutex_enter(&qlt->qlt_ioctl_lock);
5724 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5725 	mutex_exit(&qlt->qlt_ioctl_lock);
5726 	return (FCT_FAILURE);
5727 }
5728 
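/*
 * qlt_fwdump_dump_regs
 *	Reads "count" 32-bit registers starting at register offset
 *	"startaddr" and formats them into "buf", eight values per line.
 *
 * Returns:	number of bytes written to "buf".
 */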
5729 static int
5730 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5731     uint_t size_left)
5732 {
5733 	int		i;
5734 	int		n;
5735 	char		c = ' ';
5736 
5737 	for (i = 0, n = 0; i < count; i++) {
5738 		if ((i + 1) & 7) {
5739 			c = ' ';
5740 		} else {
5741 			c = '\n';
5742 		}
5743 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5744 		    "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5745 	}
5746 	return (n);
5747 }
5748 
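/*
 * qlt_dump_risc_ram
 *	Formats "words" 32-bit words, previously copied by
 *	qlt_read_risc_ram() into the mailbox DMA area of queue memory,
 *	as "address: value ..." lines, eight words per line.
 *
 * Returns:	number of bytes written to "buf".
 */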
5749 static int
5750 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5751     caddr_t buf, uint_t size_left)
5752 {
5753 	int		i;
5754 	int		n;
5755 	char		c = ' ';
5756 	uint32_t	*ptr;
5757 
5758 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5759 	for (i = 0, n = 0; i < words; i++) {
5760 		if ((i & 7) == 0) {
5761 			n = (int)(n + (int)snprintf(&buf[n],
5762 			    (uint_t)(size_left - n), "%08x: ", addr + i));
5763 		}
5764 		if ((i + 1) & 7) {
5765 			c = ' ';
5766 		} else {
5767 			c = '\n';
5768 		}
5769 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5770 		    "%08x%c", ptr[i], c));
5771 	}
5772 	return (n);
5773 }
5774 
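/*
 * qlt_dump_queue
 *	Dumps "entries" queue entries (32 16-bit words, i.e. 64 bytes,
 *	per entry) from queue memory at "qadr", reading each word through
 *	QMEM_RD16() and formatting eight words per line.
 *
 * Returns:	number of bytes written to "buf".
 */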
5775 static int
5776 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5777     uint_t size_left)
5778 {
5779 	int		i;
5780 	int		n;
5781 	char		c = ' ';
5782 	int		words;
5783 	uint16_t	*ptr;
5784 	uint16_t	w;
5785 
5786 	words = entries * 32;
5787 	ptr = (uint16_t *)qadr;
5788 	for (i = 0, n = 0; i < words; i++) {
5789 		if ((i & 7) == 0) {
5790 			n = (int)(n + (int)snprintf(&buf[n],
5791 			    (uint_t)(size_left - n), "%05x: ", i));
5792 		}
5793 		if ((i + 1) & 7) {
5794 			c = ' ';
5795 		} else {
5796 			c = '\n';
5797 		}
5798 		w = QMEM_RD16(qlt, &ptr[i]);
5799 		n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5800 		    w, c));
5801 	}
5802 	return (n);
5803 }
5804 
5805 /*
5806  * Only called by debug dump. Interrupts are disabled and the mailboxes,
5807  * along with the mailbox RAM, are available.
5808  * Copy data from RISC RAM to system memory
5809  */
5810 static fct_status_t
5811 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5812 {
5813 	uint64_t	da;
5814 	fct_status_t	ret;
5815 
5816 	REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
5817 	da = qlt->queue_mem_cookie.dmac_laddress;
5818 	da += MBOX_DMA_MEM_OFFSET;
5819 
5820 	/* System destination address */
5821 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
5822 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
5823 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
5824 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
5825 
5826 	/* Length */
5827 	REG_WR16(qlt, REG_MBOX(5), LSW(words));
5828 	REG_WR16(qlt, REG_MBOX(4), MSW(words));
5829 
5830 	/* RISC source address */
5831 	REG_WR16(qlt, REG_MBOX(1), LSW(addr));
5832 	REG_WR16(qlt, REG_MBOX(8), MSW(addr));
5833 
5834 	ret = qlt_raw_mailbox_command(qlt);
5835 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
5836 	if (ret == QLT_SUCCESS) {
5837 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5838 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5839 	} else {
5840 		EL(qlt, "qlt_raw_mailbox_command=ch status=%llxh\n", ret);
5841 	}
5842 	return (ret);
5843 }
5844 
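/*
 * qlt_verify_fw
 *	Hand-builds a single verify-firmware IOCB (entry type 0x1b,
 *	entry count 1) and places it on the request queue; the response
 *	is processed by qlt_handle_verify_fw_completion().
 */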
5845 static void
5846 qlt_verify_fw(qlt_state_t *qlt)
5847 {
5848 	caddr_t req;
5849 	/* Just put it on the request queue */
5850 	mutex_enter(&qlt->req_lock);
5851 	req = qlt_get_req_entries(qlt, 1);
5852 	if (req == NULL) {
5853 		mutex_exit(&qlt->req_lock);
5854 		/* XXX handle this */
5855 		return;
5856 	}
5857 
5858 	bzero(req, IOCB_SIZE);
5859 
5860 	req[0] = 0x1b;
5861 	req[1] = 1;
5862 
5863 	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
5864 	QMEM_WR16(qlt, (&req[0x8]), 1);    /*  options - don't update */
5865 	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
5866 
5867 	qlt_submit_req_entries(qlt, 1);
5868 	mutex_exit(&qlt->req_lock);
5869 }
5870 
5871 static void
5872 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5873 {
5874 	uint16_t	status;
5875 	char		info[80];
5876 
5877 	status = QMEM_RD16(qlt, rsp+8);
5878 	if (status != 0) {
5879 		(void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
5880 		    "status:%x, rsp:%p", status, (void *)rsp);
5881 		if (status == 3) {
5882 			uint16_t error_code;
5883 
5884 			error_code = QMEM_RD16(qlt, rsp+0xA);
5885 			(void) snprintf(info, 80, "qlt_handle_verify_fw_"
5886 			    "completion: error code:%x", error_code);
5887 		}
5888 	}
5889 }
5890 
5891 /*
5892  * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5893  *
5894  * Input:	Pointer to the adapter state structure.
5895  * Returns:	Success or Failure.
5896  * Context:	Kernel context.
5897  */
5898 static int
5899 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5900 {
5901 	int	rval = DDI_SUCCESS;
5902 
5903 	qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5904 	    kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5905 
5906 	if (qlt->el_trace_desc == NULL) {
5907 		cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5908 		    qlt->instance);
5909 		rval = DDI_FAILURE;
5910 	} else {
5911 		qlt->el_trace_desc->next = 0;
5912 		qlt->el_trace_desc->trace_buffer =
5913 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5914 
5915 		if (qlt->el_trace_desc->trace_buffer == NULL) {
5916 			cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5917 			    qlt->instance);
5918 			kmem_free(qlt->el_trace_desc,
5919 			    sizeof (qlt_el_trace_desc_t));
5920 			qlt->el_trace_desc = NULL;
5921 			rval = DDI_FAILURE;
5922 		} else {
5923 			qlt->el_trace_desc->trace_buffer_size =
5924 			    EL_TRACE_BUF_SIZE;
5925 			mutex_init(&qlt->el_trace_desc->mutex, NULL,
5926 			    MUTEX_DRIVER, NULL);
5927 		}
5928 	}
5929 
5930 	return (rval);
5931 }
5932 
5933 /*
5934  * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5935  *
5936  * Input:	Pointer to the adapter state structure.
5937  * Returns:	Success or Failure.
5938  * Context:	Kernel context.
5939  */
5940 static int
5941 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5942 {
5943 	int	rval = DDI_SUCCESS;
5944 
5945 	if (qlt->el_trace_desc == NULL) {
5946 		cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5947 		    qlt->instance);
5948 		rval = DDI_FAILURE;
5949 	} else {
5950 		if (qlt->el_trace_desc->trace_buffer != NULL) {
5951 			kmem_free(qlt->el_trace_desc->trace_buffer,
5952 			    qlt->el_trace_desc->trace_buffer_size);
5953 		}
5954 		mutex_destroy(&qlt->el_trace_desc->mutex);
5955 		kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5956 		qlt->el_trace_desc = NULL;
5957 	}
5958 
5959 	return (rval);
5960 }
5961 
5962 /*
5963  * qlt_el_msg
5964  *	Extended logging message
5965  *
5966  * Input:
5967  *	qlt:	adapter state pointer.
5968  *	fn:	function name.
5969  *	ce:	level
5970  *	...:	Variable argument list.
5971  *
5972  * Context:
5973  *	Kernel/Interrupt context.
5974  */
5975 void
5976 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5977 {
5978 	char		*s, *fmt = 0, *fmt1 = 0;
5979 	char		fmt2[EL_BUFFER_RESERVE];
5980 	int		rval, tmp;
5981 	int		tracing = 0;
5982 	va_list		vl;
5983 
5984 	/* Tracing is the default but it can be disabled. */
5985 	if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5986 		tracing = 1;
5987 
5988 		mutex_enter(&qlt->el_trace_desc->mutex);
5989 
5990 		/*
5991 		 * Ensure enough space for the string. Wrap to
5992 		 * start when default message allocation size
5993 		 * would overrun the end.
5994 		 */
5995 		if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5996 		    qlt->el_trace_desc->trace_buffer_size) {
5997 			fmt = qlt->el_trace_desc->trace_buffer;
5998 			qlt->el_trace_desc->next = 0;
5999 		} else {
6000 			fmt = qlt->el_trace_desc->trace_buffer +
6001 			    qlt->el_trace_desc->next;
6002 		}
6003 	}
6004 
6005 	/* if no buffer use the stack */
6006 	if (fmt == NULL) {
6007 		fmt = fmt2;
6008 	}
6009 
6010 	va_start(vl, ce);
6011 
6012 	s = va_arg(vl, char *);
6013 
6014 	rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
6015 	    "QEL qlt(%d): %s, ", qlt->instance, fn);
6016 	fmt1 = fmt + rval;
6017 	tmp = (int)vsnprintf(fmt1,
6018 	    (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
6019 	rval += tmp;
6020 
6021 	/*
6022 	 * Calculate the offset where the next message will go,
6023 	 * skipping the NULL.
6024 	 */
6025 	if (tracing) {
6026 		uint16_t next = (uint16_t)(rval += 1);
6027 		qlt->el_trace_desc->next += next;
6028 		mutex_exit(&qlt->el_trace_desc->mutex);
6029 	}
6030 
6031 	if (enable_extended_logging) {
6032 		cmn_err(ce, fmt);
6033 	}
6034 
6035 	va_end(vl);
6036 }
6037 
6038 /*
6039  * qlt_dump_el_trace_buffer
6040  *	 Outputs extended logging trace buffer.
6041  *
6042  * Input:
6043  *	qlt:	adapter state pointer.
6044  */
6045 void
6046 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
6047 {
6048 	char		*dump_start = NULL;
6049 	char		*dump_current = NULL;
6050 	char		*trace_start;
6051 	char		*trace_end;
6052 	int		wrapped = 0;
6053 	int		rval;
6054 
6055 	mutex_enter(&qlt->el_trace_desc->mutex);
6056 
6057 	rval = qlt_validate_trace_desc(qlt);
6058 	if (rval != DDI_SUCCESS) {
6059 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
6060 		    qlt->instance);
6061 	} else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
6062 		dump_current = dump_start;
6063 		trace_start = qlt->el_trace_desc->trace_buffer;
6064 		trace_end = trace_start +
6065 		    qlt->el_trace_desc->trace_buffer_size;
6066 
6067 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
6068 		    qlt->instance,
6069 		    (void *)dump_start, (void *)trace_start);
6070 
6071 		while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
6072 		    (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
6073 			/* Show it... */
6074 			cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
6075 			    dump_current);
6076 			/* Make the next the current */
6077 			dump_current += (strlen(dump_current) + 1);
6078 			/* check for wrap */
6079 			if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
6080 				dump_current = trace_start;
6081 				wrapped = 1;
6082 			} else if (wrapped) {
6083 				/* Don't go past next. */
6084 				if ((trace_start + qlt->el_trace_desc->next) <=
6085 				    dump_current) {
6086 					break;
6087 				}
6088 			} else if (*dump_current == '\0') {
6089 				break;
6090 			}
6091 		}
6092 	}
6093 	mutex_exit(&qlt->el_trace_desc->mutex);
6094 }
6095 
6096 /*
6097  * qlt_validate_trace_desc
6098  *	 Ensures the extended logging trace descriptor is good.
6099  *
6100  * Input:
6101  *	qlt:	adapter state pointer.
6102  *
6103  * Returns:
6104  *	DDI_SUCCESS if the trace descriptor and its buffer exist, else DDI_FAILURE.
6105  */
6106 static int
6107 qlt_validate_trace_desc(qlt_state_t *qlt)
6108 {
6109 	int	rval = DDI_SUCCESS;
6110 
6111 	if (qlt->el_trace_desc == NULL) {
6112 		rval = DDI_FAILURE;
6113 	} else if (qlt->el_trace_desc->trace_buffer == NULL) {
6114 		rval = DDI_FAILURE;
6115 	}
6116 	return (rval);
6117 }
6118 
6119 /*
6120  * qlt_find_trace_start
6121  *	 Locate the oldest extended logging trace entry.
6122  *
6123  * Input:
6124  *	qlt:	adapter state pointer.
6125  *
6126  * Returns:
6127  *	Pointer to a string.
6128  *
6129  * Context:
6130  *	Kernel/Interrupt context.
6131  */
6132 static char *
6133 qlt_find_trace_start(qlt_state_t *qlt)
6134 {
6135 	char	*trace_start = 0;
6136 	char	*trace_next  = 0;
6137 
6138 	trace_next = qlt->el_trace_desc->trace_buffer +
6139 	    qlt->el_trace_desc->next;
6140 
6141 	/*
6142 	 * If the buffer has not wrapped, next points at a null, so the
6143 	 * start is the beginning of the buffer.  If next points into the
6144 	 * middle of a string, skip past that (partially overwritten) entry
6145 	 * to the character after its terminating null; that is the oldest
6146 	 * whole entry in the buffer and therefore the start.
6147 	 */
6148 
6149 	if ((trace_next + EL_BUFFER_RESERVE) >=
6150 	    (qlt->el_trace_desc->trace_buffer +
6151 	    qlt->el_trace_desc->trace_buffer_size)) {
6152 		trace_start = qlt->el_trace_desc->trace_buffer;
6153 	} else if (*trace_next != '\0') {
6154 		trace_start = trace_next + (strlen(trace_next) + 1);
6155 	} else {
6156 		trace_start = qlt->el_trace_desc->trace_buffer;
6157 	}
6158 	return (trace_start);
6159 }
6160 
6161 
6162 static int
6163 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
6164 {
6165 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
6166 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
6167 }
6168 
6169 static int
6170 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6171 {
6172 	return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
6173 	    DDI_PROP_DONTPASS, prop, prop_val));
6174 }
6175 
6176 static int
6177 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
6178 {
6179 	char		inst_prop[256];
6180 	int		val;
6181 
6182 	/*
6183 	 * Get adapter instance specific parameters. If the instance
6184 	 * specific parameter isn't there, try the global parameter.
6185 	 */
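	/*
	 * For example, with instance 2 and prop "bucketcnt2k" this looks
	 * for "hba2-bucketcnt2k" first and falls back to "bucketcnt2k".
	 */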
6186 
6187 	(void) sprintf(inst_prop, "hba%d-%s", qlt->instance, prop);
6188 
6189 	if ((val = qlt_read_int_prop(qlt, inst_prop, defval)) == defval) {
6190 		val = qlt_read_int_prop(qlt, prop, defval);
6191 	}
6192 
6193 	return (val);
6194 }
6195 
6196 static int
6197 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6198 {
6199 	char		instance_prop[256];
6200 
6201 	/* Get adapter instance specific parameter. */
6202 	(void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
6203 	return (qlt_read_string_prop(qlt, instance_prop, prop_val));
6204 }
6205 
6206 static int
6207 qlt_convert_string_to_ull(char *prop, int radix,
6208     u_longlong_t *result)
6209 {
6210 	return (ddi_strtoull((const char *)prop, 0, radix, result));
6211 }
6212 
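/*
 * qlt_wwn_overload_prop
 *	Looks for node/port name overrides via the instance-specific
 *	"hba<N>-adapter-wwnn" and "hba<N>-adapter-wwpn" properties, parses
 *	them as hex strings and, only if both are present and valid,
 *	copies them over the NVRAM node/port names.  A hypothetical
 *	driver .conf entry would look like
 *	hba0-adapter-wwpn="210000xxxxxxxxxx" (placeholder value only).
 *
 * Returns:	TRUE if the NVRAM copy was overloaded, else FALSE.
 */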
6213 static boolean_t
6214 qlt_wwn_overload_prop(qlt_state_t *qlt)
6215 {
6216 	char		*prop_val = 0;
6217 	int		rval;
6218 	int		radix;
6219 	u_longlong_t	wwnn = 0, wwpn = 0;
6220 	boolean_t	overloaded = FALSE;
6221 
6222 	radix = 16;
6223 
6224 	rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
6225 	if (rval == DDI_PROP_SUCCESS) {
6226 		rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
6227 	}
6228 	if (rval == DDI_PROP_SUCCESS) {
6229 		rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
6230 		    &prop_val);
6231 		if (rval == DDI_PROP_SUCCESS) {
6232 			rval = qlt_convert_string_to_ull(prop_val, radix,
6233 			    &wwpn);
6234 		}
6235 	}
6236 	if (rval == DDI_PROP_SUCCESS) {
6237 		overloaded = TRUE;
6238 		/* Overload the current node/port name nvram copy */
6239 		bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
6240 		BIG_ENDIAN_64(qlt->nvram->node_name);
6241 		bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
6242 		BIG_ENDIAN_64(qlt->nvram->port_name);
6243 	}
6244 	return (overloaded);
6245 }
6246 
6247 /*
6248  * prop_text - Return a pointer to a string describing the status
6249  *
6250  * Input:	prop_status = the return status from a property function.
6251  * Returns:	pointer to a string.
6252  * Context:	Kernel context.
6253  */
6254 char *
6255 prop_text(int prop_status)
6256 {
6257 	string_table_t *entry = &prop_status_tbl[0];
6258 
6259 	return (value2string(entry, prop_status, 0xFFFF));
6260 }
6261 
6262 /*
6263  * value2string	Return a pointer to a string associated with the value
6264  *
6265  * Input:	entry = the value to string table
6266  *		value = the value
6267  * Returns:	pointer to a string.
6268  * Context:	Kernel context.
6269  */
6270 char *
6271 value2string(string_table_t *entry, int value, int delimiter)
6272 {
6273 	for (; entry->value != delimiter; entry++) {
6274 		if (entry->value == value) {
6275 			break;
6276 		}
6277 	}
6278 	return (entry->string);
6279 }
6280 
6281 /*
6282  * qlt_chg_endian Change endianness of byte array.
6283  *
6284  * Input:	buf = array pointer.
6285  *		size = size of array in bytes.
6286  *
6287  * Context:	Interrupt or Kernel context.
6288  */
6289 void
6290 qlt_chg_endian(uint8_t buf[], size_t size)
6291 {
6292 	uint8_t byte;
6293 	size_t  cnt1;
6294 	size_t  cnt;
6295 
6296 	cnt1 = size - 1;
6297 	for (cnt = 0; cnt < size / 2; cnt++) {
6298 		byte = buf[cnt1];
6299 		buf[cnt1] = buf[cnt];
6300 		buf[cnt] = byte;
6301 		cnt1--;
6302 	}
6303 }
6304 
6305 /*
6306  * qlt_mps_reset
6307  *	Reset MPS for FCoE functions.
6308  *
6309  * Input:
6310  *	qlt = adapter state pointer.
6311  *
6312  * Context:
6313  *	Kernel context.
6314  */
6315 static void
6316 qlt_mps_reset(qlt_state_t *qlt)
6317 {
6318 	uint32_t	data, dctl = 1000;
6319 
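	/*
	 * Acquire the semaphore at RISC RAM word 0x7c00: write 1 and poll
	 * until bit 0 reads back set, giving up after 1000 (dctl)
	 * attempts.  The semaphore is released by writing 0 before return.
	 */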
6320 	do {
6321 		if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
6322 		    QLT_SUCCESS) {
6323 			return;
6324 		}
6325 		if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
6326 		    QLT_SUCCESS) {
6327 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6328 			return;
6329 		}
6330 	} while (!(data & BIT_0));
6331 
6332 	if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
6333 		dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
6334 		if ((data & 0xe0) != (dctl & 0xe0)) {
6335 			data &= 0xff1f;
6336 			data |= dctl & 0xe0;
6337 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
6338 		}
6339 	}
6340 	(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6341 }
6342 
6343 /*
6344  * qlt_raw_wrt_risc_ram_word
6345  *	Write RISC RAM word.
6346  *
6347  * Input:	qlt:		adapter state pointer.
6348  *		risc_address:	risc ram word address.
6349  *		data:		data.
6350  *
6351  * Returns:	qlt local function return status code.
6352  *
6353  * Context:	Kernel context.
6354  */
6355 static fct_status_t
6356 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6357     uint32_t data)
6358 {
6359 	fct_status_t	ret;
6360 
6361 	REG_WR16(qlt, REG_MBOX(0), MBC_WRITE_RAM_EXTENDED);
6362 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6363 	REG_WR16(qlt, REG_MBOX(2), LSW(data));
6364 	REG_WR16(qlt, REG_MBOX(3), MSW(data));
6365 	REG_WR16(qlt, REG_MBOX(8), MSW(risc_address));
6366 	ret = qlt_raw_mailbox_command(qlt);
6367 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6368 	if (ret != QLT_SUCCESS) {
6369 		EL(qlt, "qlt_raw_mailbox_command=MBC_WRITE_RAM_EXTENDED status"
6370 		    "=%llxh\n", ret);
6371 	}
6372 	return (ret);
6373 }
6374 
6375 /*
6376  * qlt_raw_rd_risc_ram_word
6377  *	Read RISC RAM word.
6378  *
6379  * Input:	qlt:		adapter state pointer.
6380  *		risc_address:	risc ram word address.
6381  *		data:		data pointer.
6382  *
6383  * Returns:	qlt local function return status code.
6384  *
6385  * Context:	Kernel context.
6386  */
6387 static fct_status_t
6388 qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6389     uint32_t *data)
6390 {
6391 	fct_status_t	ret;
6392 
6393 	REG_WR16(qlt, REG_MBOX(0), MBC_READ_RAM_EXTENDED);
6394 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6395 	REG_WR16(qlt, REG_MBOX(2), MSW(risc_address));
6396 	ret = qlt_raw_mailbox_command(qlt);
6397 	*data = REG_RD16(qlt, REG_MBOX(2));
6398 	*data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
6399 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6400 	if (ret != QLT_SUCCESS) {
6401 		EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
6402 		    "=%llxh\n", ret);
6403 	}
6404 	return (ret);
6405 }
6406 
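/*
 * qlt_properties
 *	Applies optional driver .conf overrides: WWN overloads (see
 *	qlt_wwn_overload_prop()) and the per-size buffer-bucket count
 *	overrides stored in qlt_bucketcnt[] (bucketcnt2k, bucketcnt8k,
 *	bucketcnt64k, bucketcnt128k, bucketcnt256).  Instance-specific
 *	"hba<N>-" forms take precedence over the global property names.
 */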
6407 static void
6408 qlt_properties(qlt_state_t *qlt)
6409 {
6410 	int32_t		cnt = 0;
6411 	int32_t		defval = 0xffff;
6412 
6413 	if (qlt_wwn_overload_prop(qlt) == TRUE) {
6414 		EL(qlt, "wwnn overloaded.\n");
6415 	}
6416 
6417 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
6418 	    defval) {
6419 		qlt->qlt_bucketcnt[0] = cnt;
6420 		EL(qlt, "2k bucket o/l=%d\n", cnt);
6421 	}
6422 
6423 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
6424 	    defval) {
6425 		qlt->qlt_bucketcnt[1] = cnt;
6426 		EL(qlt, "8k bucket o/l=%d\n", cnt);
6427 	}
6428 
6429 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
6430 	    defval) {
6431 		qlt->qlt_bucketcnt[2] = cnt;
6432 		EL(qlt, "64k bucket o/l=%d\n", cnt);
6433 	}
6434 
6435 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
6436 	    defval) {
6437 		qlt->qlt_bucketcnt[3] = cnt;
6438 		EL(qlt, "128k bucket o/l=%d\n", cnt);
6439 	}
6440 
6441 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
6442 	    defval) {
6443 		qlt->qlt_bucketcnt[4] = cnt;
6444 		EL(qlt, "256k bucket o/l=%d\n", cnt);
6445 	}
6446 }
6447