xref: /illumos-gate/usr/src/uts/common/io/comstar/port/qlt/qlt.c (revision 0ff6bfafbd510fac2721570482eceb0d24afe291)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
29  * Use is subject to license terms.
30  */
31 
32 #include <sys/conf.h>
33 #include <sys/ddi.h>
34 #include <sys/stat.h>
35 #include <sys/pci.h>
36 #include <sys/sunddi.h>
37 #include <sys/modctl.h>
38 #include <sys/file.h>
39 #include <sys/cred.h>
40 #include <sys/byteorder.h>
41 #include <sys/atomic.h>
42 #include <sys/scsi/scsi.h>
43 
44 #include <stmf_defines.h>
45 #include <fct_defines.h>
46 #include <stmf.h>
47 #include <portif.h>
48 #include <fct.h>
49 #include <qlt.h>
50 #include <qlt_dma.h>
51 #include <qlt_ioctl.h>
52 #include <qlt_open.h>
53 #include <stmf_ioctl.h>
54 
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
58     int reset_only);
59 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
60     uint32_t word_count, uint32_t risc_addr);
61 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
62 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
63 					uint32_t dma_size);
64 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
65 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
66 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
67 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
68     stmf_state_change_info_t *ssci);
69 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
70 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
71 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
72 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
73 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
74 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
77     uint8_t *rsp);
78 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
79 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
80 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
81 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
82 static void qlt_verify_fw(qlt_state_t *qlt);
83 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
84 fct_status_t qlt_port_start(caddr_t arg);
85 fct_status_t qlt_port_stop(caddr_t arg);
86 fct_status_t qlt_port_online(qlt_state_t *qlt);
87 fct_status_t qlt_port_offline(qlt_state_t *qlt);
88 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
89     fct_link_info_t *li);
90 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
91 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
92 						fct_flogi_xchg_t *fx);
93 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
94 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
95 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
96     fct_remote_port_t *rp, fct_cmd_t *login);
97 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
98     fct_remote_port_t *rp);
99 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
100 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
101 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
102     fct_cmd_t *cmd, int terminate);
103 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
104 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
105 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
106     fct_cmd_t *cmd, uint32_t flags);
107 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
108 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
109 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
110 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
111 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
113 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
114     stmf_data_buf_t *dbuf, uint32_t ioflags);
115 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
116 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
117 static void qlt_release_intr(qlt_state_t *qlt);
118 static int qlt_setup_interrupts(qlt_state_t *qlt);
119 static void qlt_destroy_mutex(qlt_state_t *qlt);
120 
121 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
122     uint32_t words);
123 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
124     caddr_t buf, uint_t size_left);
125 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
126     caddr_t buf, uint_t size_left);
127 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
128     int count, uint_t size_left);
129 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
130     cred_t *credp, int *rval);
131 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
132 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
133 
134 #if defined(__sparc)
135 static int qlt_setup_msi(qlt_state_t *qlt);
136 static int qlt_setup_msix(qlt_state_t *qlt);
137 #endif
138 
139 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
140 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
141 static int qlt_validate_trace_desc(qlt_state_t *qlt);
142 static char *qlt_find_trace_start(qlt_state_t *qlt);
143 
144 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
145 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
146 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
147     char **prop_val);
148 static int qlt_convert_string_to_ull(char *prop, int radix,
149     u_longlong_t *result);
150 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
151 static int qlt_quiesce(dev_info_t *dip);
152 
153 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
154 	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
155 
156 int qlt_enable_msix = 0;
157 
158 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
159 
160 /* Array to quickly calculate next free buf index to use */
161 #if 0
162 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
163 #endif
164 
165 static struct cb_ops qlt_cb_ops = {
166 	qlt_open,
167 	qlt_close,
168 	nodev,
169 	nodev,
170 	nodev,
171 	nodev,
172 	nodev,
173 	qlt_ioctl,
174 	nodev,
175 	nodev,
176 	nodev,
177 	nochpoll,
178 	ddi_prop_op,
179 	0,
180 	D_MP | D_NEW
181 };
182 
183 static struct dev_ops qlt_ops = {
184 	DEVO_REV,
185 	0,
186 	nodev,
187 	nulldev,
188 	nulldev,
189 	qlt_attach,
190 	qlt_detach,
191 	nodev,
192 	&qlt_cb_ops,
193 	NULL,
194 	ddi_power,
195 	qlt_quiesce
196 };
197 
198 #ifndef	PORT_SPEED_10G
199 #define	PORT_SPEED_10G		16
200 #endif
201 
202 static struct modldrv modldrv = {
203 	&mod_driverops,
204 	QLT_NAME" "QLT_VERSION,
205 	&qlt_ops,
206 };
207 
208 static struct modlinkage modlinkage = {
209 	MODREV_1, &modldrv, NULL
210 };
211 
212 void *qlt_state = NULL;
213 kmutex_t qlt_global_lock;
214 static uint32_t qlt_loaded_counter = 0;
215 
216 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
217 			"-X Mode 1 133", "--Invalid--",
218 			"-X Mode 2 66", "-X Mode 2 100",
219 			"-X Mode 2 133", " 66" };
220 
221 /* Always use 64 bit DMA. */
222 static ddi_dma_attr_t qlt_queue_dma_attr = {
223 	DMA_ATTR_V0,		/* dma_attr_version */
224 	0,			/* low DMA address range */
225 	0xffffffffffffffff,	/* high DMA address range */
226 	0xffffffff,		/* DMA counter register */
227 	64,			/* DMA address alignment */
228 	0xff,			/* DMA burstsizes */
229 	1,			/* min effective DMA size */
230 	0xffffffff,		/* max DMA xfer size */
231 	0xffffffff,		/* segment boundary */
232 	1,			/* s/g list length */
233 	1,			/* granularity of device */
234 	0			/* DMA transfer flags */
235 };
236 
237 /* qlogic logging */
238 int enable_extended_logging = 0;
239 
240 static char qlt_provider_name[] = "qlt";
241 static struct stmf_port_provider *qlt_pp;
242 
243 int
244 _init(void)
245 {
246 	int ret;
247 
248 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
249 	if (ret == 0) {
250 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
251 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
252 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
253 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
254 		qlt_pp->pp_name = qlt_provider_name;
255 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
256 			stmf_free(qlt_pp);
257 			mutex_destroy(&qlt_global_lock);
258 			ddi_soft_state_fini(&qlt_state);
259 			return (EIO);
260 		}
261 		ret = mod_install(&modlinkage);
262 		if (ret != 0) {
263 			(void) stmf_deregister_port_provider(qlt_pp);
264 			stmf_free(qlt_pp);
265 			mutex_destroy(&qlt_global_lock);
266 			ddi_soft_state_fini(&qlt_state);
267 		}
268 	}
269 	return (ret);
270 }
271 
272 int
273 _fini(void)
274 {
275 	int ret;
276 
277 	if (qlt_loaded_counter)
278 		return (EBUSY);
279 	ret = mod_remove(&modlinkage);
280 	if (ret == 0) {
281 		(void) stmf_deregister_port_provider(qlt_pp);
282 		stmf_free(qlt_pp);
283 		mutex_destroy(&qlt_global_lock);
284 		ddi_soft_state_fini(&qlt_state);
285 	}
286 	return (ret);
287 }
288 
289 int
290 _info(struct modinfo *modinfop)
291 {
292 	return (mod_info(&modlinkage, modinfop));
293 }
294 
295 
296 static int
297 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
298 {
299 	int		instance;
300 	qlt_state_t	*qlt;
301 	ddi_device_acc_attr_t	dev_acc_attr;
302 	uint16_t	did;
303 	uint16_t	val;
304 	uint16_t	mr;
305 	size_t		discard;
306 	uint_t		ncookies;
307 	int		max_read_size;
308 	int		max_payload_size;
309 	fct_status_t	ret;
310 
311 	/* No support for suspend resume yet */
312 	if (cmd != DDI_ATTACH)
313 		return (DDI_FAILURE);
314 	instance = ddi_get_instance(dip);
315 
316 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
317 		return (DDI_FAILURE);
318 	}
319 
320 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
321 	    NULL) {
322 		goto attach_fail_1;
323 	}
324 	qlt->instance = instance;
325 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
326 	qlt->dip = dip;
327 
328 	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
329 		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
330 		goto attach_fail_1;
331 	}
332 
333 	EL(qlt, "instance=%d\n", instance);
334 
335 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
336 		goto attach_fail_2;
337 	}
338 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
339 	if ((did != 0x2422) && (did != 0x2432) &&
340 	    (did != 0x8432) && (did != 0x2532) &&
341 	    (did != 0x8001)) {
342 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
343 		    instance, did);
344 		goto attach_fail_4;
345 	}
346 
347 	if ((did & 0xFF00) == 0x8000)
348 		qlt->qlt_81xx_chip = 1;
349 	else if ((did & 0xFF00) == 0x2500)
350 		qlt->qlt_25xx_chip = 1;
351 
352 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
353 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
354 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
355 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
356 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
357 		goto attach_fail_4;
358 	}
359 	if (did == 0x2422) {
360 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
361 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
362 		pci_bits >>= 8;
363 		pci_bits &= 0xf;
364 		if ((pci_bits == 3) || (pci_bits == 7)) {
365 			cmn_err(CE_NOTE,
366 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
367 			    instance, pci_speeds[pci_bits], pci_bits);
368 		} else {
369 			cmn_err(CE_WARN,
370 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
371 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
372 			    "(Invalid)", ((pci_bits == 0) ||
373 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
374 			    "32 bit slot ") : "", pci_bits);
375 		}
376 	}
377 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
378 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
379 		    (unsigned long long)ret);
380 		goto attach_fail_5;
381 	}
382 	if (qlt_wwn_overload_prop(qlt) == TRUE) {
383 		EL(qlt, "wwnn overloaded.\n", instance);
384 	}
385 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
386 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
387 		goto attach_fail_5;
388 	}
389 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
390 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
391 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
392 	    DDI_SUCCESS) {
393 		goto attach_fail_6;
394 	}
395 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
396 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
397 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
398 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
399 		goto attach_fail_7;
400 	}
401 	if (ncookies != 1)
402 		goto attach_fail_8;
403 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
404 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
405 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
406 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
407 
408 	/* mutex are inited in this function */
409 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
410 		goto attach_fail_8;
411 
412 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
413 	    "qlt%d", instance);
414 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
415 	    "%s,0", qlt->qlt_minor_name);
416 
417 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
418 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
419 		goto attach_fail_9;
420 	}
421 
422 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
423 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
424 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
425 
426 	/* Setup PCI cfg space registers */
427 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
428 	if (max_read_size == 11)
429 		goto over_max_read_xfer_setting;
430 	if (did == 0x2422) {
431 		if (max_read_size == 512)
432 			val = 0;
433 		else if (max_read_size == 1024)
434 			val = 1;
435 		else if (max_read_size == 2048)
436 			val = 2;
437 		else if (max_read_size == 4096)
438 			val = 3;
439 		else {
440 			cmn_err(CE_WARN, "qlt(%d) malformed "
441 			    "pci-max-read-request in qlt.conf. Valid values "
442 			    "for this HBA are 512/1024/2048/4096", instance);
443 			goto over_max_read_xfer_setting;
444 		}
445 		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
446 		mr = (uint16_t)(mr & 0xfff3);
447 		mr = (uint16_t)(mr | (val << 2));
448 		PCICFG_WR16(qlt, 0x4E, mr);
449 	} else if ((did == 0x2432) || (did == 0x8432) ||
450 	    (did == 0x2532) || (did == 0x8001)) {
451 		if (max_read_size == 128)
452 			val = 0;
453 		else if (max_read_size == 256)
454 			val = 1;
455 		else if (max_read_size == 512)
456 			val = 2;
457 		else if (max_read_size == 1024)
458 			val = 3;
459 		else if (max_read_size == 2048)
460 			val = 4;
461 		else if (max_read_size == 4096)
462 			val = 5;
463 		else {
464 			cmn_err(CE_WARN, "qlt(%d) malformed "
465 			    "pci-max-read-request in qlt.conf. Valid values "
466 			    "for this HBA are 128/256/512/1024/2048/4096",
467 			    instance);
468 			goto over_max_read_xfer_setting;
469 		}
470 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
471 		mr = (uint16_t)(mr & 0x8fff);
472 		mr = (uint16_t)(mr | (val << 12));
473 		PCICFG_WR16(qlt, 0x54, mr);
474 	} else {
475 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
476 		    "pci-max-read-request for this device (%x)",
477 		    instance, did);
478 	}
479 over_max_read_xfer_setting:;
480 
481 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
482 	if (max_payload_size == 11)
483 		goto over_max_payload_setting;
484 	if ((did == 0x2432) || (did == 0x8432) ||
485 	    (did == 0x2532) || (did == 0x8001)) {
486 		if (max_payload_size == 128)
487 			val = 0;
488 		else if (max_payload_size == 256)
489 			val = 1;
490 		else if (max_payload_size == 512)
491 			val = 2;
492 		else if (max_payload_size == 1024)
493 			val = 3;
494 		else {
495 			cmn_err(CE_WARN, "qlt(%d) malformed "
496 			    "pcie-max-payload-size in qlt.conf. Valid values "
497 			    "for this HBA are 128/256/512/1024",
498 			    instance);
499 			goto over_max_payload_setting;
500 		}
501 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
502 		mr = (uint16_t)(mr & 0xff1f);
503 		mr = (uint16_t)(mr | (val << 5));
504 		PCICFG_WR16(qlt, 0x54, mr);
505 	} else {
506 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
507 		    "pcie-max-payload-size for this device (%x)",
508 		    instance, did);
509 	}
510 
511 over_max_payload_setting:;
512 
513 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
514 		goto attach_fail_10;
515 
516 	ddi_report_dev(dip);
517 	return (DDI_SUCCESS);
518 
519 attach_fail_10:;
520 	mutex_destroy(&qlt->qlt_ioctl_lock);
521 	cv_destroy(&qlt->mbox_cv);
522 	cv_destroy(&qlt->rp_dereg_cv);
523 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
524 attach_fail_9:;
525 	qlt_destroy_mutex(qlt);
526 	qlt_release_intr(qlt);
527 attach_fail_8:;
528 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
529 attach_fail_7:;
530 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
531 attach_fail_6:;
532 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
533 attach_fail_5:;
534 	ddi_regs_map_free(&qlt->regs_acc_handle);
535 attach_fail_4:;
536 	pci_config_teardown(&qlt->pcicfg_acc_handle);
537 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
538 	(void) qlt_el_trace_desc_dtor(qlt);
539 attach_fail_2:;
540 attach_fail_1:;
541 	ddi_soft_state_free(qlt_state, instance);
542 	return (DDI_FAILURE);
543 }
544 
545 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
546 
547 /* ARGSUSED */
548 static int
549 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
550 {
551 	qlt_state_t *qlt;
552 
553 	int instance;
554 
555 	instance = ddi_get_instance(dip);
556 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
557 	    NULL) {
558 		return (DDI_FAILURE);
559 	}
560 
561 	if (qlt->fw_code01) {
562 		return (DDI_FAILURE);
563 	}
564 
565 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
566 	    qlt->qlt_state_not_acked) {
567 		return (DDI_FAILURE);
568 	}
569 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS)
570 		return (DDI_FAILURE);
571 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
572 	qlt_destroy_mutex(qlt);
573 	qlt_release_intr(qlt);
574 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
575 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
576 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
577 	ddi_regs_map_free(&qlt->regs_acc_handle);
578 	pci_config_teardown(&qlt->pcicfg_acc_handle);
579 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
580 	cv_destroy(&qlt->mbox_cv);
581 	cv_destroy(&qlt->rp_dereg_cv);
582 	(void) qlt_el_trace_desc_dtor(qlt);
583 	ddi_soft_state_free(qlt_state, instance);
584 
585 	return (DDI_SUCCESS);
586 }
587 
588 /*
589  * qlt_quiesce	quiesce a device attached to the system.
590  */
591 static int
592 qlt_quiesce(dev_info_t *dip)
593 {
594 	qlt_state_t	*qlt;
595 	uint32_t	timer;
596 	uint32_t	stat;
597 
598 	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
599 	if (qlt == NULL) {
600 		/* Oh well.... */
601 		return (DDI_SUCCESS);
602 	}
603 
604 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_HOST_TO_RISC_INTR);
605 	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
606 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
607 	for (timer = 0; timer < 30000; timer++) {
608 		stat = REG_RD32(qlt, REG_RISC_STATUS);
609 		if (stat & RISC_HOST_INTR_REQUEST) {
610 			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
611 				REG_WR32(qlt, REG_HCCR,
612 				    HCCR_CMD_CLEAR_RISC_PAUSE);
613 				break;
614 			}
615 			REG_WR32(qlt, REG_HCCR,
616 			    HCCR_CMD_CLEAR_HOST_TO_RISC_INTR);
617 		}
618 		drv_usecwait(100);
619 	}
620 	/* Reset the chip. */
621 	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
622 	    PCI_X_XFER_CTRL);
623 	drv_usecwait(100);
624 
625 	return (DDI_SUCCESS);
626 }
627 
628 static void
629 qlt_enable_intr(qlt_state_t *qlt)
630 {
631 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
632 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
633 	} else {
634 		int i;
635 		for (i = 0; i < qlt->intr_cnt; i++)
636 			(void) ddi_intr_enable(qlt->htable[i]);
637 	}
638 }
639 
640 static void
641 qlt_disable_intr(qlt_state_t *qlt)
642 {
643 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
644 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
645 	} else {
646 		int i;
647 		for (i = 0; i < qlt->intr_cnt; i++)
648 			(void) ddi_intr_disable(qlt->htable[i]);
649 	}
650 }
651 
652 static void
653 qlt_release_intr(qlt_state_t *qlt)
654 {
655 	if (qlt->htable) {
656 		int i;
657 		for (i = 0; i < qlt->intr_cnt; i++) {
658 			(void) ddi_intr_remove_handler(qlt->htable[i]);
659 			(void) ddi_intr_free(qlt->htable[i]);
660 		}
661 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
662 	}
663 	qlt->htable = NULL;
664 	qlt->intr_pri = 0;
665 	qlt->intr_cnt = 0;
666 	qlt->intr_size = 0;
667 	qlt->intr_cap = 0;
668 }
669 
670 
671 static void
672 qlt_init_mutex(qlt_state_t *qlt)
673 {
674 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
675 	    INT2PTR(qlt->intr_pri, void *));
676 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
677 	    INT2PTR(qlt->intr_pri, void *));
678 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
679 	    INT2PTR(qlt->intr_pri, void *));
680 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
681 	    INT2PTR(qlt->intr_pri, void *));
682 }
683 
684 static void
685 qlt_destroy_mutex(qlt_state_t *qlt)
686 {
687 	mutex_destroy(&qlt->req_lock);
688 	mutex_destroy(&qlt->preq_lock);
689 	mutex_destroy(&qlt->mbox_lock);
690 	mutex_destroy(&qlt->intr_lock);
691 }
692 
693 
694 #if defined(__sparc)
695 static int
696 qlt_setup_msix(qlt_state_t *qlt)
697 {
698 	int count, avail, actual;
699 	int ret;
700 	int itype = DDI_INTR_TYPE_MSIX;
701 	int i;
702 
703 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
704 	if (ret != DDI_SUCCESS || count == 0) {
705 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
706 		    count);
707 		return (DDI_FAILURE);
708 	}
709 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
710 	if (ret != DDI_SUCCESS || avail == 0) {
711 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
712 		    avail);
713 		return (DDI_FAILURE);
714 	}
715 	if (avail < count) {
716 		stmf_trace(qlt->qlt_port_alias,
717 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
718 	}
719 
720 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
721 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
722 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
723 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
724 	/* we need at least 2 interrupt vectors */
725 	if (ret != DDI_SUCCESS || actual < 2) {
726 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
727 		    actual);
728 		ret = DDI_FAILURE;
729 		goto release_intr;
730 	}
731 	if (actual < count) {
732 		EL(qlt, "requested: %d, received: %d\n", count, actual);
733 	}
734 
735 	qlt->intr_cnt = actual;
736 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
737 	if (ret != DDI_SUCCESS) {
738 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
739 		ret = DDI_FAILURE;
740 		goto release_intr;
741 	}
742 	qlt_init_mutex(qlt);
743 	for (i = 0; i < actual; i++) {
744 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
745 		    qlt, INT2PTR((uint_t)i, void *));
746 		if (ret != DDI_SUCCESS) {
747 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
748 			goto release_mutex;
749 		}
750 	}
751 
752 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
753 	qlt->intr_flags |= QLT_INTR_MSIX;
754 	return (DDI_SUCCESS);
755 
756 release_mutex:
757 	qlt_destroy_mutex(qlt);
758 release_intr:
759 	for (i = 0; i < actual; i++)
760 		(void) ddi_intr_free(qlt->htable[i]);
761 #if 0
762 free_mem:
763 #endif
764 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
765 	qlt->htable = NULL;
766 	qlt_release_intr(qlt);
767 	return (ret);
768 }
769 
770 
771 static int
772 qlt_setup_msi(qlt_state_t *qlt)
773 {
774 	int count, avail, actual;
775 	int itype = DDI_INTR_TYPE_MSI;
776 	int ret;
777 	int i;
778 
779 	/* get the # of interrupts */
780 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
781 	if (ret != DDI_SUCCESS || count == 0) {
782 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
783 		    count);
784 		return (DDI_FAILURE);
785 	}
786 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
787 	if (ret != DDI_SUCCESS || avail == 0) {
788 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
789 		    avail);
790 		return (DDI_FAILURE);
791 	}
792 	if (avail < count) {
793 		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
794 	}
795 	/* MSI requires only 1 interrupt. */
796 	count = 1;
797 
798 	/* allocate interrupt */
799 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
800 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
801 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
802 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
803 	if (ret != DDI_SUCCESS || actual == 0) {
804 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
805 		    actual);
806 		ret = DDI_FAILURE;
807 		goto free_mem;
808 	}
809 	if (actual < count) {
810 		EL(qlt, "requested: %d, received: %d\n", count, actual);
811 	}
812 	qlt->intr_cnt = actual;
813 
814 	/*
815 	 * Get priority for first msi, assume remaining are all the same.
816 	 */
817 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
818 	if (ret != DDI_SUCCESS) {
819 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
820 		ret = DDI_FAILURE;
821 		goto release_intr;
822 	}
823 	qlt_init_mutex(qlt);
824 
825 	/* add handler */
826 	for (i = 0; i < actual; i++) {
827 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
828 		    qlt, INT2PTR((uint_t)i, void *));
829 		if (ret != DDI_SUCCESS) {
830 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
831 			goto release_mutex;
832 		}
833 	}
834 
835 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
836 	qlt->intr_flags |= QLT_INTR_MSI;
837 	return (DDI_SUCCESS);
838 
839 release_mutex:
840 	qlt_destroy_mutex(qlt);
841 release_intr:
842 	for (i = 0; i < actual; i++)
843 		(void) ddi_intr_free(qlt->htable[i]);
844 free_mem:
845 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
846 	qlt->htable = NULL;
847 	qlt_release_intr(qlt);
848 	return (ret);
849 }
850 #endif
851 
852 static int
853 qlt_setup_fixed(qlt_state_t *qlt)
854 {
855 	int count;
856 	int actual;
857 	int ret;
858 	int itype = DDI_INTR_TYPE_FIXED;
859 
860 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
861 	/* Fixed interrupts can only have one interrupt. */
862 	if (ret != DDI_SUCCESS || count != 1) {
863 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
864 		    count);
865 		return (DDI_FAILURE);
866 	}
867 
868 	qlt->intr_size = sizeof (ddi_intr_handle_t);
869 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
870 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
871 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
872 	if (ret != DDI_SUCCESS || actual != 1) {
873 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
874 		    actual);
875 		ret = DDI_FAILURE;
876 		goto free_mem;
877 	}
878 
879 	qlt->intr_cnt = actual;
880 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
881 	if (ret != DDI_SUCCESS) {
882 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
883 		ret = DDI_FAILURE;
884 		goto release_intr;
885 	}
886 	qlt_init_mutex(qlt);
887 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
888 	if (ret != DDI_SUCCESS) {
889 		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
890 		goto release_mutex;
891 	}
892 
893 	qlt->intr_flags |= QLT_INTR_FIXED;
894 	return (DDI_SUCCESS);
895 
896 release_mutex:
897 	qlt_destroy_mutex(qlt);
898 release_intr:
899 	(void) ddi_intr_free(qlt->htable[0]);
900 free_mem:
901 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
902 	qlt->htable = NULL;
903 	qlt_release_intr(qlt);
904 	return (ret);
905 }
906 
907 
908 static int
909 qlt_setup_interrupts(qlt_state_t *qlt)
910 {
911 #if defined(__sparc)
912 	int itypes = 0;
913 #endif
914 
915 /*
916  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198). So use
917  * MSI for sparc only for now.
918  */
919 #if defined(__sparc)
920 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
921 		itypes = DDI_INTR_TYPE_FIXED;
922 	}
923 
924 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
925 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
926 			return (DDI_SUCCESS);
927 	}
928 	if (itypes & DDI_INTR_TYPE_MSI) {
929 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
930 			return (DDI_SUCCESS);
931 	}
932 #endif
933 	return (qlt_setup_fixed(qlt));
934 }
935 
936 /*
937  * Filling the hba attributes
938  */
939 void
940 qlt_populate_hba_fru_details(struct fct_local_port *port,
941     struct fct_port_attrs *port_attrs)
942 {
943 	caddr_t	bufp;
944 	int len;
945 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
946 
947 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
948 	    "QLogic Corp.");
949 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
950 	    "%s", QLT_NAME);
951 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
952 	    "%s", QLT_VERSION);
953 	port_attrs->serial_number[0] = '\0';
954 	port_attrs->hardware_version[0] = '\0';
955 
956 	(void) snprintf(port_attrs->firmware_version,
957 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
958 	    qlt->fw_minor, qlt->fw_subminor);
959 
960 	/* Get FCode version */
961 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
962 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
963 	    (int *)&len) == DDI_PROP_SUCCESS) {
964 		(void) snprintf(port_attrs->option_rom_version,
965 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
966 		kmem_free(bufp, (uint_t)len);
967 		bufp = NULL;
968 	} else {
969 #ifdef __sparc
970 		(void) snprintf(port_attrs->option_rom_version,
971 		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
972 #else
973 		(void) snprintf(port_attrs->option_rom_version,
974 		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
975 #endif
976 	}
977 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
978 	    qlt->nvram->subsystem_vendor_id[1] << 8;
979 
980 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
981 	    qlt->nvram->max_frame_length[0];
982 
983 	port_attrs->supported_cos = 0x10000000;
984 	port_attrs->supported_speed = PORT_SPEED_1G |
985 	    PORT_SPEED_2G | PORT_SPEED_4G;
986 	if (qlt->qlt_25xx_chip)
987 		port_attrs->supported_speed |= PORT_SPEED_8G;
988 	if (qlt->qlt_81xx_chip)
989 		port_attrs->supported_speed = PORT_SPEED_10G;
990 
991 	/* limit string length to nvr model_name length */
992 	len = (qlt->qlt_81xx_chip) ? 16 : 8;
993 	(void) snprintf(port_attrs->model,
994 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
995 	    "%s", qlt->nvram->model_name);
996 
997 	(void) snprintf(port_attrs->model_description,
998 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
999 	    FCHBA_MODEL_DESCRIPTION_LEN),
1000 	    "%s", qlt->nvram->model_name);
1001 }
1002 
1003 /* ARGSUSED */
1004 fct_status_t
1005 qlt_info(uint32_t cmd, fct_local_port_t *port,
1006     void *arg, uint8_t *buf, uint32_t *bufsizep)
1007 {
1008 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
1009 	mbox_cmd_t	*mcp;
1010 	fct_status_t	ret = FCT_SUCCESS;
1011 	uint8_t		*p;
1012 	fct_port_link_status_t	*link_status;
1013 
1014 	switch (cmd) {
1015 	case FC_TGT_PORT_RLS:
1016 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1017 			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1018 			    "fct_port_link_status_t=%xh\n", *bufsizep,
1019 			    sizeof (fct_port_link_status_t));
1020 			ret = FCT_FAILURE;
1021 			break;
1022 		}
1023 		/* send mailbox command to get link status */
1024 		mcp = qlt_alloc_mailbox_command(qlt, 156);
1025 		if (mcp == NULL) {
1026 			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1027 			ret = FCT_ALLOC_FAILURE;
1028 			break;
1029 		}
1030 
1031 		/* GET LINK STATUS count */
1032 		mcp->to_fw[0] = 0x6d;
1033 		mcp->to_fw[8] = 156/4;
1034 		mcp->to_fw_mask |= BIT_1 | BIT_8;
1035 		mcp->from_fw_mask |= BIT_1 | BIT_2;
1036 
1037 		ret = qlt_mailbox_command(qlt, mcp);
1038 		if (ret != QLT_SUCCESS) {
1039 			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1040 			qlt_free_mailbox_command(qlt, mcp);
1041 			break;
1042 		}
1043 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1044 
1045 		p = mcp->dbuf->db_sglist[0].seg_addr;
1046 		link_status = (fct_port_link_status_t *)buf;
1047 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1048 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1049 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1050 		link_status->PrimitiveSeqProtocolErrorCount =
1051 		    LE_32(*((uint32_t *)(p + 12)));
1052 		link_status->InvalidTransmissionWordCount =
1053 		    LE_32(*((uint32_t *)(p + 16)));
1054 		link_status->InvalidCRCCount =
1055 		    LE_32(*((uint32_t *)(p + 20)));
1056 
1057 		qlt_free_mailbox_command(qlt, mcp);
1058 		break;
1059 	default:
1060 		EL(qlt, "Unknown cmd=%xh\n", cmd);
1061 		ret = FCT_FAILURE;
1062 		break;
1063 	}
1064 	return (ret);
1065 }
1066 
1067 fct_status_t
1068 qlt_port_start(caddr_t arg)
1069 {
1070 	qlt_state_t *qlt = (qlt_state_t *)arg;
1071 	fct_local_port_t *port;
1072 	fct_dbuf_store_t *fds;
1073 	fct_status_t ret;
1074 
1075 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1076 		return (FCT_FAILURE);
1077 	}
1078 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1079 	if (port == NULL) {
1080 		goto qlt_pstart_fail_1;
1081 	}
1082 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1083 	if (fds == NULL) {
1084 		goto qlt_pstart_fail_2;
1085 	}
1086 	qlt->qlt_port = port;
1087 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
1088 	fds->fds_free_data_buf = qlt_dmem_free;
1089 	fds->fds_fca_private = (void *)qlt;
1090 	/*
1091 	 * Since we keep everything in the state struct and dont allocate any
1092 	 * port private area, just use that pointer to point to the
1093 	 * state struct.
1094 	 */
1095 	port->port_fca_private = qlt;
1096 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
1097 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1098 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1099 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1100 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1101 	port->port_default_alias = qlt->qlt_port_alias;
1102 	port->port_pp = qlt_pp;
1103 	port->port_fds = fds;
1104 	port->port_max_logins = QLT_MAX_LOGINS;
1105 	port->port_max_xchges = QLT_MAX_XCHGES;
1106 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1107 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1108 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1109 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1110 	port->port_get_link_info = qlt_get_link_info;
1111 	port->port_register_remote_port = qlt_register_remote_port;
1112 	port->port_deregister_remote_port = qlt_deregister_remote_port;
1113 	port->port_send_cmd = qlt_send_cmd;
1114 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1115 	port->port_send_cmd_response = qlt_send_cmd_response;
1116 	port->port_abort_cmd = qlt_abort_cmd;
1117 	port->port_ctl = qlt_ctl;
1118 	port->port_flogi_xchg = qlt_do_flogi;
1119 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1120 	port->port_info = qlt_info;
1121 
1122 	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1123 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1124 		goto qlt_pstart_fail_2_5;
1125 	}
1126 
1127 	return (QLT_SUCCESS);
1128 #if 0
1129 qlt_pstart_fail_3:
1130 	(void) fct_deregister_local_port(port);
1131 #endif
1132 qlt_pstart_fail_2_5:
1133 	fct_free(fds);
1134 qlt_pstart_fail_2:
1135 	fct_free(port);
1136 	qlt->qlt_port = NULL;
1137 qlt_pstart_fail_1:
1138 	qlt_dmem_fini(qlt);
1139 	return (QLT_FAILURE);
1140 }
1141 
1142 fct_status_t
1143 qlt_port_stop(caddr_t arg)
1144 {
1145 	qlt_state_t *qlt = (qlt_state_t *)arg;
1146 	fct_status_t ret;
1147 
1148 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1149 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1150 		return (QLT_FAILURE);
1151 	}
1152 	fct_free(qlt->qlt_port->port_fds);
1153 	fct_free(qlt->qlt_port);
1154 	qlt->qlt_port = NULL;
1155 	qlt_dmem_fini(qlt);
1156 	return (QLT_SUCCESS);
1157 }
1158 
1159 /*
1160  * Called by framework to init the HBA.
1161  * Can be called in the middle of I/O. (Why ??)
1162  * Should make sure sane state both before and after the initialization
1163  */
1164 fct_status_t
1165 qlt_port_online(qlt_state_t *qlt)
1166 {
1167 	uint64_t	da;
1168 	int		instance;
1169 	fct_status_t	ret;
1170 	uint16_t	rcount;
1171 	caddr_t		icb;
1172 	mbox_cmd_t	*mcp;
1173 	uint8_t		*elsbmp;
1174 
1175 	instance = ddi_get_instance(qlt->dip);
1176 
1177 	/* XXX Make sure a sane state */
1178 
1179 	if ((ret = qlt_reset_chip_and_download_fw(qlt, 0)) != QLT_SUCCESS) {
1180 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1181 		return (ret);
1182 	}
1183 
1184 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1185 
1186 	/* Get resource count */
1187 	REG_WR16(qlt, REG_MBOX(0), 0x42);
1188 	ret = qlt_raw_mailbox_command(qlt);
1189 	rcount = REG_RD16(qlt, REG_MBOX(3));
1190 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1191 	if (ret != QLT_SUCCESS) {
1192 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1193 		return (ret);
1194 	}
1195 
1196 	/* Enable PUREX */
1197 	REG_WR16(qlt, REG_MBOX(0), 0x38);
1198 	REG_WR16(qlt, REG_MBOX(1), 0x0400);
1199 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1200 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1201 	ret = qlt_raw_mailbox_command(qlt);
1202 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1203 	if (ret != QLT_SUCCESS) {
1204 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1205 		cmn_err(CE_NOTE, "Enable PUREX failed");
1206 		return (ret);
1207 	}
1208 
1209 	/* Pass ELS bitmap to fw */
1210 	REG_WR16(qlt, REG_MBOX(0), 0x59);
1211 	REG_WR16(qlt, REG_MBOX(1), 0x0500);
1212 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1213 	bzero(elsbmp, 32);
1214 	da = qlt->queue_mem_cookie.dmac_laddress;
1215 	da += MBOX_DMA_MEM_OFFSET;
1216 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1217 	da >>= 16;
1218 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1219 	da >>= 16;
1220 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1221 	da >>= 16;
1222 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1223 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1224 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1225 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1226 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1227 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1228 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1229 	SETELSBIT(elsbmp, ELS_OP_SCN);
1230 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1231 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1232 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1233 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1234 	SETELSBIT(elsbmp, ELS_OP_RNID);
1235 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1236 	    DDI_DMA_SYNC_FORDEV);
1237 	ret = qlt_raw_mailbox_command(qlt);
1238 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1239 	if (ret != QLT_SUCCESS) {
1240 		EL(qlt, "qlt_raw_mailbox_command=59h status=llxh\n", ret);
1241 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1242 		    "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
1243 		    elsbmp[1]);
1244 		return (ret);
1245 	}
1246 
1247 	/* Init queue pointers */
1248 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1249 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1250 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1251 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1252 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1253 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1254 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1255 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1256 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1257 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1258 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1259 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1260 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1261 
1262 	/*
1263 	 * XXX support for tunables. Also should we cache icb ?
1264 	 */
1265 	if (qlt->qlt_81xx_chip) {
1266 	    /* allocate extra 64 bytes for Extended init control block */
1267 		mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1268 	} else {
1269 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1270 	}
1271 	if (mcp == NULL) {
1272 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1273 		return (STMF_ALLOC_FAILURE);
1274 	}
1275 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1276 	if (qlt->qlt_81xx_chip) {
1277 		bzero(icb, 0xC0);
1278 	} else {
1279 		bzero(icb, 0x80);
1280 	}
1281 	da = qlt->queue_mem_cookie.dmac_laddress;
1282 	DMEM_WR16(qlt, icb, 1);		/* Version */
1283 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1284 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1285 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1286 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1287 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1288 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1289 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1290 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1291 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1292 	if (!qlt->qlt_81xx_chip) {
1293 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1294 	}
1295 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1296 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1297 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1298 	DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1299 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1300 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1301 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1302 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1303 	if (qlt->qlt_81xx_chip) {
1304 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1305 
1306 		DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1307 		DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1308 		DMEM_WR32(qlt, icb+0x70,
1309 		    qlt81nvr->enode_mac[0] |
1310 		    (qlt81nvr->enode_mac[1] << 8) |
1311 		    (qlt81nvr->enode_mac[2] << 16) |
1312 		    (qlt81nvr->enode_mac[3] << 24));
1313 		DMEM_WR16(qlt, icb+0x74,
1314 		    qlt81nvr->enode_mac[4] |
1315 		    (qlt81nvr->enode_mac[5] << 8));
1316 	} else {
1317 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1318 		    BIT_2 | BIT_1 | BIT_0);
1319 		DMEM_WR32(qlt, icb+0x60, BIT_5);
1320 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1321 		    BIT_4);
1322 	}
1323 
1324 	if (qlt->qlt_81xx_chip) {
1325 		qlt_dmem_bctl_t		*bctl;
1326 		uint32_t		index;
1327 		caddr_t			src;
1328 		caddr_t			dst;
1329 		qlt_nvram_81xx_t	*qlt81nvr;
1330 
1331 		dst = icb+0x80;
1332 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1333 		src = (caddr_t)&qlt81nvr->ext_blk;
1334 		index = sizeof (qlt_ext_icb_81xx_t);
1335 
1336 		/* Use defaults for cases where we find nothing in NVR */
1337 		if (*src == 0) {
1338 			EL(qlt, "nvram eicb=null\n");
1339 			cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1340 			    instance);
1341 			qlt81nvr->ext_blk.version[0] = 1;
1342 /*
1343  * not yet, for !FIP firmware at least
1344  *
1345  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1346  */
1347 #ifdef _LITTLE_ENDIAN
1348 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1349 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1350 #else
1351 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1352 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1353 #endif
1354 		}
1355 
1356 		while (index--) {
1357 			*dst++ = *src++;
1358 		}
1359 
1360 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1361 		da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1362 
1363 		mcp->to_fw[11] = (uint16_t)(da & 0xffff);
1364 		da >>= 16;
1365 		mcp->to_fw[10] = (uint16_t)(da & 0xffff);
1366 		da >>= 16;
1367 		mcp->to_fw[13] = (uint16_t)(da & 0xffff);
1368 		da >>= 16;
1369 		mcp->to_fw[12] = (uint16_t)(da & 0xffff);
1370 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1371 		    0xffff);
1372 
1373 		/* eicb enable */
1374 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1375 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1376 		    BIT_1;
1377 	}
1378 
1379 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1380 	mcp->to_fw[0] = 0x60;
1381 
1382 	/*
1383 	 * This is the 1st command after adapter initialize which will
1384 	 * use interrupts and regular mailbox interface.
1385 	 */
1386 	qlt->mbox_io_state = MBOX_STATE_READY;
1387 	qlt_enable_intr(qlt);
1388 	qlt->qlt_intr_enabled = 1;
1389 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1390 	/* Issue mailbox to firmware */
1391 	ret = qlt_mailbox_command(qlt, mcp);
1392 	if (ret != QLT_SUCCESS) {
1393 		EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1394 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1395 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1396 	}
1397 
1398 	mcp->to_fw_mask = BIT_0;
1399 	mcp->from_fw_mask = BIT_0 | BIT_1;
1400 	mcp->to_fw[0] = 0x28;
1401 	ret = qlt_mailbox_command(qlt, mcp);
1402 	if (ret != QLT_SUCCESS) {
1403 		EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1404 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1405 		    (long long)ret);
1406 	}
1407 
1408 	/*
1409 	 * Report FW versions for 81xx - MPI rev is useful
1410 	 */
1411 	if (qlt->qlt_81xx_chip) {
1412 		mcp->to_fw_mask = BIT_0;
1413 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1414 		    BIT_0;
1415 		mcp->to_fw[0] = 0x8;
1416 		ret = qlt_mailbox_command(qlt, mcp);
1417 		if (ret != QLT_SUCCESS) {
1418 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
1419 		} else {
1420 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1421 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1422 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1423 			    mcp->from_fw[11] & 0xff);
1424 		}
1425 	}
1426 
1427 	qlt_free_mailbox_command(qlt, mcp);
1428 	if (ret != QLT_SUCCESS)
1429 		return (ret);
1430 	return (FCT_SUCCESS);
1431 }
1432 
1433 fct_status_t
1434 qlt_port_offline(qlt_state_t *qlt)
1435 {
1436 	int		retries;
1437 
1438 	mutex_enter(&qlt->mbox_lock);
1439 
1440 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1441 		mutex_exit(&qlt->mbox_lock);
1442 		goto poff_mbox_done;
1443 	}
1444 
1445 	/* Wait to grab the mailboxes */
1446 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1447 	    retries++) {
1448 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1449 		if ((retries > 5) ||
1450 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1451 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1452 			mutex_exit(&qlt->mbox_lock);
1453 			goto poff_mbox_done;
1454 		}
1455 	}
1456 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1457 	mutex_exit(&qlt->mbox_lock);
1458 poff_mbox_done:;
1459 	qlt->intr_sneak_counter = 10;
1460 	qlt_disable_intr(qlt);
1461 	mutex_enter(&qlt->intr_lock);
1462 	qlt->qlt_intr_enabled = 0;
1463 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
1464 	drv_usecwait(20);
1465 	qlt->intr_sneak_counter = 0;
1466 	mutex_exit(&qlt->intr_lock);
1467 
1468 	return (FCT_SUCCESS);
1469 }
1470 
1471 static fct_status_t
1472 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1473 {
1474 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1475 	mbox_cmd_t *mcp;
1476 	fct_status_t fc_ret;
1477 	fct_status_t ret;
1478 	clock_t et;
1479 
1480 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1481 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1482 link_info_retry:
1483 	mcp->to_fw[0] = 0x20;
1484 	mcp->to_fw[9] = 0;
1485 	mcp->to_fw_mask |= BIT_0 | BIT_9;
1486 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1487 	/* Issue mailbox to firmware */
1488 	ret = qlt_mailbox_command(qlt, mcp);
1489 	if (ret != QLT_SUCCESS) {
1490 		EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1491 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1492 			/* Firmware is not ready */
1493 			if (ddi_get_lbolt() < et) {
1494 				delay(drv_usectohz(50000));
1495 				goto link_info_retry;
1496 			}
1497 		}
1498 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1499 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1500 		fc_ret = FCT_FAILURE;
1501 	} else {
1502 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1503 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1504 
1505 		li->port_speed = qlt->link_speed;
1506 		switch (mcp->from_fw[6]) {
1507 		case 1:
1508 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1509 			li->port_fca_flogi_done = 1;
1510 			break;
1511 		case 0:
1512 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1513 			li->port_no_fct_flogi = 1;
1514 			break;
1515 		case 3:
1516 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1517 			li->port_fca_flogi_done = 1;
1518 			break;
1519 		case 2: /*FALLTHROUGH*/
1520 		case 4:
1521 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1522 			li->port_fca_flogi_done = 1;
1523 			break;
1524 		default:
1525 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1526 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1527 		}
1528 		qlt->cur_topology = li->port_topology;
1529 		fc_ret = FCT_SUCCESS;
1530 	}
1531 	qlt_free_mailbox_command(qlt, mcp);
1532 
1533 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1534 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1535 		mcp->to_fw[0] = 0x64;
1536 		mcp->to_fw[1] = 0x7FE;
1537 		mcp->to_fw[9] = 0;
1538 		mcp->to_fw[10] = 0;
1539 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1540 		fc_ret = qlt_mailbox_command(qlt, mcp);
1541 		if (fc_ret != QLT_SUCCESS) {
1542 			EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1543 			    fc_ret);
1544 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1545 			    "database for F_port failed, ret = %llx", fc_ret);
1546 		} else {
1547 			uint8_t *p;
1548 
1549 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1550 			p = mcp->dbuf->db_sglist[0].seg_addr;
1551 			bcopy(p + 0x18, li->port_rpwwn, 8);
1552 			bcopy(p + 0x20, li->port_rnwwn, 8);
1553 		}
1554 		qlt_free_mailbox_command(qlt, mcp);
1555 	}
1556 	return (fc_ret);
1557 }
1558 
1559 static int
1560 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1561 {
1562 	int		instance;
1563 	qlt_state_t	*qlt;
1564 
1565 	if (otype != OTYP_CHR) {
1566 		return (EINVAL);
1567 	}
1568 
1569 	/*
1570 	 * Since this is for debugging only, only allow root to issue ioctl now
1571 	 */
1572 	if (drv_priv(credp)) {
1573 		return (EPERM);
1574 	}
1575 
1576 	instance = (int)getminor(*devp);
1577 	qlt = ddi_get_soft_state(qlt_state, instance);
1578 	if (qlt == NULL) {
1579 		return (ENXIO);
1580 	}
1581 
1582 	mutex_enter(&qlt->qlt_ioctl_lock);
1583 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1584 		/*
1585 		 * It is already open for exclusive access.
1586 		 * So shut the door on this caller.
1587 		 */
1588 		mutex_exit(&qlt->qlt_ioctl_lock);
1589 		return (EBUSY);
1590 	}
1591 
1592 	if (flag & FEXCL) {
1593 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1594 			/*
1595 			 * Exclusive operation not possible
1596 			 * as it is already opened
1597 			 */
1598 			mutex_exit(&qlt->qlt_ioctl_lock);
1599 			return (EBUSY);
1600 		}
1601 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1602 	}
1603 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1604 	mutex_exit(&qlt->qlt_ioctl_lock);
1605 
1606 	return (0);
1607 }
1608 
1609 /* ARGSUSED */
1610 static int
1611 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1612 {
1613 	int		instance;
1614 	qlt_state_t	*qlt;
1615 
1616 	if (otype != OTYP_CHR) {
1617 		return (EINVAL);
1618 	}
1619 
1620 	instance = (int)getminor(dev);
1621 	qlt = ddi_get_soft_state(qlt_state, instance);
1622 	if (qlt == NULL) {
1623 		return (ENXIO);
1624 	}
1625 
1626 	mutex_enter(&qlt->qlt_ioctl_lock);
1627 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1628 		mutex_exit(&qlt->qlt_ioctl_lock);
1629 		return (ENODEV);
1630 	}
1631 
1632 	/*
1633 	 * It looks there's one hole here, maybe there could several concurrent
1634 	 * shareed open session, but we never check this case.
1635 	 * But it will not hurt too much, disregard it now.
1636 	 */
1637 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1638 	mutex_exit(&qlt->qlt_ioctl_lock);
1639 
1640 	return (0);
1641 }
1642 
1643 /*
1644  * All of these ioctls are unstable interfaces which are meant to be used
1645  * in a controlled lab env. No formal testing will be (or needs to be) done
1646  * for these ioctls. Specially note that running with an additional
1647  * uploaded firmware is not supported and is provided here for test
1648  * purposes only.
1649  */
1650 /* ARGSUSED */
1651 static int
1652 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1653     cred_t *credp, int *rval)
1654 {
1655 	qlt_state_t	*qlt;
1656 	int		ret = 0;
1657 #ifdef _LITTLE_ENDIAN
1658 	int		i;
1659 #endif
1660 	stmf_iocdata_t	*iocd;
1661 	void		*ibuf = NULL;
1662 	void		*obuf = NULL;
1663 	uint32_t	*intp;
1664 	qlt_fw_info_t	*fwi;
1665 	mbox_cmd_t	*mcp;
1666 	fct_status_t	st;
1667 	char		info[80];
1668 	fct_status_t	ret2;
1669 
1670 	if (drv_priv(credp) != 0)
1671 		return (EPERM);
1672 
1673 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1674 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1675 	if (ret)
1676 		return (ret);
1677 	iocd->stmf_error = 0;
1678 
1679 	switch (cmd) {
1680 	case QLT_IOCTL_FETCH_FWDUMP:
1681 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1682 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1683 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1684 			ret = EINVAL;
1685 			break;
1686 		}
1687 		mutex_enter(&qlt->qlt_ioctl_lock);
1688 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1689 			mutex_exit(&qlt->qlt_ioctl_lock);
1690 			ret = ENODATA;
1691 			EL(qlt, "no fwdump\n");
1692 			iocd->stmf_error = QLTIO_NO_DUMP;
1693 			break;
1694 		}
1695 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1696 			mutex_exit(&qlt->qlt_ioctl_lock);
1697 			ret = EBUSY;
1698 			EL(qlt, "fwdump inprogress\n");
1699 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1700 			break;
1701 		}
1702 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1703 			mutex_exit(&qlt->qlt_ioctl_lock);
1704 			ret = EEXIST;
1705 			EL(qlt, "fwdump already fetched\n");
1706 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1707 			break;
1708 		}
1709 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1710 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1711 		mutex_exit(&qlt->qlt_ioctl_lock);
1712 
1713 		break;
1714 
1715 	case QLT_IOCTL_TRIGGER_FWDUMP:
1716 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1717 			ret = EACCES;
1718 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1719 			break;
1720 		}
1721 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1722 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1723 		info[79] = 0;
1724 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1725 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1726 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1727 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1728 			    "%llxh\n", ret2);
1729 			ret = EIO;
1730 		}
1731 		break;
1732 	case QLT_IOCTL_UPLOAD_FW:
1733 		if ((iocd->stmf_ibuf_size < 1024) ||
1734 		    (iocd->stmf_ibuf_size & 3)) {
1735 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1736 			    iocd->stmf_ibuf_size);
1737 			ret = EINVAL;
1738 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1739 			break;
1740 		}
1741 		intp = (uint32_t *)ibuf;
1742 #ifdef _LITTLE_ENDIAN
1743 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1744 			intp[i] = BSWAP_32(intp[i]);
1745 		}
1746 #endif
1747 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1748 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1749 		    iocd->stmf_ibuf_size)) {
1750 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1751 			    iocd->stmf_ibuf_size);
1752 			ret = EINVAL;
1753 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1754 			break;
1755 		}
1756 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1757 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1758 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1759 		    ((intp[8] & 3) == 0))) {
1760 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1761 			ret = EACCES;
1762 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1763 			break;
1764 		}
1765 
1766 		/* Everything looks ok, lets copy this firmware */
1767 		if (qlt->fw_code01) {
1768 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1769 			    qlt->fw_length02) << 2);
1770 			qlt->fw_code01 = NULL;
1771 		} else {
1772 			atomic_add_32(&qlt_loaded_counter, 1);
1773 		}
1774 		qlt->fw_length01 = intp[3];
1775 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1776 		    KM_SLEEP);
1777 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1778 		qlt->fw_addr01 = intp[2];
1779 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1780 		qlt->fw_addr02 = qlt->fw_code02[2];
1781 		qlt->fw_length02 = qlt->fw_code02[3];
1782 		break;
1783 
1784 	case QLT_IOCTL_CLEAR_FW:
1785 		if (qlt->fw_code01) {
1786 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1787 			    qlt->fw_length02) << 2);
1788 			qlt->fw_code01 = NULL;
1789 			atomic_add_32(&qlt_loaded_counter, -1);
1790 		}
1791 		break;
1792 
1793 	case QLT_IOCTL_GET_FW_INFO:
1794 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1795 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1796 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1797 			ret = EINVAL;
1798 			break;
1799 		}
1800 		fwi = (qlt_fw_info_t *)obuf;
1801 		if (qlt->qlt_stay_offline) {
1802 			fwi->fwi_stay_offline = 1;
1803 		}
1804 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1805 			fwi->fwi_port_active = 1;
1806 		}
1807 		fwi->fwi_active_major = qlt->fw_major;
1808 		fwi->fwi_active_minor = qlt->fw_minor;
1809 		fwi->fwi_active_subminor = qlt->fw_subminor;
1810 		fwi->fwi_active_attr = qlt->fw_attr;
1811 		if (qlt->fw_code01) {
1812 			fwi->fwi_fw_uploaded = 1;
1813 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1814 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1815 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1816 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1817 		}
1818 		if (qlt->qlt_81xx_chip) {
1819 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1820 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1821 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1822 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1823 		} else if (qlt->qlt_25xx_chip) {
1824 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1825 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1826 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1827 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1828 		} else {
1829 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1830 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1831 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1832 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1833 		}
1834 		break;
1835 
1836 	case QLT_IOCTL_STAY_OFFLINE:
1837 		if (!iocd->stmf_ibuf_size) {
1838 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1839 			    iocd->stmf_ibuf_size);
1840 			ret = EINVAL;
1841 			break;
1842 		}
1843 		if (*((char *)ibuf)) {
1844 			qlt->qlt_stay_offline = 1;
1845 		} else {
1846 			qlt->qlt_stay_offline = 0;
1847 		}
1848 		break;
1849 
1850 	case QLT_IOCTL_MBOX:
1851 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1852 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1853 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1854 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1855 			ret = EINVAL;
1856 			break;
1857 		}
1858 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1859 		if (mcp == NULL) {
1860 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1861 			ret = ENOMEM;
1862 			break;
1863 		}
1864 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1865 		st = qlt_mailbox_command(qlt, mcp);
1866 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1867 		qlt_free_mailbox_command(qlt, mcp);
1868 		if (st != QLT_SUCCESS) {
1869 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1870 				st = QLT_SUCCESS;
1871 		}
1872 		if (st != QLT_SUCCESS) {
1873 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1874 			ret = EIO;
1875 			switch (st) {
1876 			case QLT_MBOX_NOT_INITIALIZED:
1877 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1878 				break;
1879 			case QLT_MBOX_BUSY:
1880 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1881 				break;
1882 			case QLT_MBOX_TIMEOUT:
1883 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1884 				break;
1885 			case QLT_MBOX_ABORTED:
1886 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1887 				break;
1888 			}
1889 		}
1890 		break;
1891 
1892 	case QLT_IOCTL_ELOG:
1893 		qlt_dump_el_trace_buffer(qlt);
1894 		break;
1895 
1896 	default:
1897 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
1898 		ret = ENOTTY;
1899 	}
1900 
1901 	if (ret == 0) {
1902 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1903 	} else if (iocd->stmf_error) {
1904 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1905 	}
1906 	if (obuf) {
1907 		kmem_free(obuf, iocd->stmf_obuf_size);
1908 		obuf = NULL;
1909 	}
1910 	if (ibuf) {
1911 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1912 		ibuf = NULL;
1913 	}
1914 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1915 	return (ret);
1916 }
1917 
1918 static void
1919 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1920 {
1921 	stmf_change_status_t		st;
1922 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1923 	qlt_state_t			*qlt;
1924 	fct_status_t			ret;
1925 
1926 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1927 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1928 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1929 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1930 
1931 	qlt = (qlt_state_t *)port->port_fca_private;
1932 	st.st_completion_status = FCT_SUCCESS;
1933 	st.st_additional_info = NULL;
1934 
1935 	switch (cmd) {
1936 	case FCT_CMD_PORT_ONLINE:
1937 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1938 			st.st_completion_status = STMF_ALREADY;
1939 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1940 			st.st_completion_status = FCT_FAILURE;
1941 		if (st.st_completion_status == FCT_SUCCESS) {
1942 			qlt->qlt_state = FCT_STATE_ONLINING;
1943 			qlt->qlt_state_not_acked = 1;
1944 			st.st_completion_status = qlt_port_online(qlt);
1945 			if (st.st_completion_status != STMF_SUCCESS) {
1946 				EL(qlt, "PORT_ONLINE status=%xh\n",
1947 				    st.st_completion_status);
1948 				qlt->qlt_state = FCT_STATE_OFFLINE;
1949 				qlt->qlt_state_not_acked = 0;
1950 			} else {
1951 				qlt->qlt_state = FCT_STATE_ONLINE;
1952 			}
1953 		}
1954 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
1955 		qlt->qlt_change_state_flags = 0;
1956 		break;
1957 
1958 	case FCT_CMD_PORT_OFFLINE:
1959 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
1960 			st.st_completion_status = STMF_ALREADY;
1961 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
1962 			st.st_completion_status = FCT_FAILURE;
1963 		}
1964 		if (st.st_completion_status == FCT_SUCCESS) {
1965 			qlt->qlt_state = FCT_STATE_OFFLINING;
1966 			qlt->qlt_state_not_acked = 1;
1967 
1968 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
1969 				(void) qlt_firmware_dump(port, ssci);
1970 			}
1971 			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
1972 			st.st_completion_status = qlt_port_offline(qlt);
1973 			if (st.st_completion_status != STMF_SUCCESS) {
1974 				EL(qlt, "PORT_OFFLINE status=%xh\n",
1975 				    st.st_completion_status);
1976 				qlt->qlt_state = FCT_STATE_ONLINE;
1977 				qlt->qlt_state_not_acked = 0;
1978 			} else {
1979 				qlt->qlt_state = FCT_STATE_OFFLINE;
1980 			}
1981 		}
1982 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
1983 		break;
1984 
1985 	case FCT_ACK_PORT_ONLINE_COMPLETE:
1986 		qlt->qlt_state_not_acked = 0;
1987 		break;
1988 
1989 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
1990 		qlt->qlt_state_not_acked = 0;
1991 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
1992 		    (qlt->qlt_stay_offline == 0)) {
1993 			if ((ret = fct_port_initialize(port,
1994 			    qlt->qlt_change_state_flags,
1995 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
1996 			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
1997 				EL(qlt, "fct_port_initialize status=%llxh\n",
1998 				    ret);
1999 				cmn_err(CE_WARN, "qlt_ctl: "
2000 				    "fct_port_initialize failed, please use "
2001 				    "stmfstate to start the port-%s manualy",
2002 				    qlt->qlt_port_alias);
2003 			}
2004 		}
2005 		break;
2006 	}
2007 }
2008 
2009 /* ARGSUSED */
2010 static fct_status_t
2011 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2012 {
2013 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
2014 
2015 	EL(qlt, "FLOGI requested not supported\n");
2016 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2017 	return (FCT_FAILURE);
2018 }
2019 
2020 /*
2021  * Return a pointer to n entries in the request queue. Assumes that
2022  * request queue lock is held. Does a very short busy wait if
2023  * less/zero entries are available. Retuns NULL if it still cannot
2024  * fullfill the request.
2025  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2026  */
2027 caddr_t
2028 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
2029 {
2030 	int try = 0;
2031 
2032 	while (qlt->req_available < n) {
2033 		uint32_t val1, val2, val3;
2034 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2035 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2036 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2037 		if ((val1 != val2) || (val2 != val3))
2038 			continue;
2039 
2040 		qlt->req_ndx_from_fw = val1;
2041 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2042 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2043 		    (REQUEST_QUEUE_ENTRIES - 1));
2044 		if (qlt->req_available < n) {
2045 			if (try < 2) {
2046 				drv_usecwait(100);
2047 				try++;
2048 				continue;
2049 			} else {
2050 				stmf_trace(qlt->qlt_port_alias,
2051 				    "Req Q is full");
2052 				return (NULL);
2053 			}
2054 		}
2055 		break;
2056 	}
2057 	/* We dont change anything until the entries are sumitted */
2058 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2059 }
2060 
2061 /*
2062  * updates the req in ptr to fw. Assumes that req lock is held.
2063  */
2064 void
2065 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2066 {
2067 	ASSERT(n >= 1);
2068 	qlt->req_ndx_to_fw += n;
2069 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2070 	qlt->req_available -= n;
2071 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2072 }
2073 
2074 
2075 /*
2076  * Return a pointer to n entries in the priority request queue. Assumes that
2077  * priority request queue lock is held. Does a very short busy wait if
2078  * less/zero entries are available. Retuns NULL if it still cannot
2079  * fullfill the request.
2080  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2081  */
2082 caddr_t
2083 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2084 {
2085 	int try = 0;
2086 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2087 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2088 	    (PRIORITY_QUEUE_ENTRIES - 1));
2089 
2090 	while (req_available < n) {
2091 		uint32_t val1, val2, val3;
2092 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2093 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2094 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2095 		if ((val1 != val2) || (val2 != val3))
2096 			continue;
2097 
2098 		qlt->preq_ndx_from_fw = val1;
2099 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2100 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2101 		    (PRIORITY_QUEUE_ENTRIES - 1));
2102 		if (req_available < n) {
2103 			if (try < 2) {
2104 				drv_usecwait(100);
2105 				try++;
2106 				continue;
2107 			} else {
2108 				return (NULL);
2109 			}
2110 		}
2111 		break;
2112 	}
2113 	/* We dont change anything until the entries are sumitted */
2114 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2115 }
2116 
2117 /*
2118  * updates the req in ptr to fw. Assumes that req lock is held.
2119  */
2120 void
2121 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2122 {
2123 	ASSERT(n >= 1);
2124 	qlt->preq_ndx_to_fw += n;
2125 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2126 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2127 }
2128 
2129 /*
2130  * - Should not be called from Interrupt.
2131  * - A very hardware specific function. Does not touch driver state.
2132  * - Assumes that interrupts are disabled or not there.
2133  * - Expects that the caller makes sure that all activity has stopped
2134  *   and its ok now to go ahead and reset the chip. Also the caller
2135  *   takes care of post reset damage control.
2136  * - called by initialize adapter() and dump_fw(for reset only).
2137  * - During attach() nothing much is happening and during initialize_adapter()
2138  *   the function (caller) does all the housekeeping so that this function
2139  *   can execute in peace.
2140  * - Returns 0 on success.
2141  */
2142 static fct_status_t
2143 qlt_reset_chip_and_download_fw(qlt_state_t *qlt, int reset_only)
2144 {
2145 	int cntr;
2146 	uint32_t start_addr;
2147 	fct_status_t ret;
2148 
2149 	EL(qlt, "initiated, flags=%xh\n", reset_only);
2150 
2151 	/* XXX: Switch off LEDs */
2152 
2153 	/* Disable Interrupts */
2154 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2155 	(void) REG_RD32(qlt, REG_INTR_CTRL);
2156 	/* Stop DMA */
2157 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2158 
2159 	/* Wait for DMA to be stopped */
2160 	cntr = 0;
2161 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2162 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2163 		cntr++;
2164 		/* 3 sec should be more than enough */
2165 		if (cntr == 300)
2166 			return (QLT_DMA_STUCK);
2167 	}
2168 
2169 	/* Reset the Chip */
2170 	REG_WR32(qlt, REG_CTRL_STATUS,
2171 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2172 
2173 	qlt->qlt_link_up = 0;
2174 
2175 	drv_usecwait(100);
2176 
2177 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2178 	cntr = 0;
2179 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2180 		delay(drv_usectohz(10000));
2181 		cntr++;
2182 		/* 3 sec should be more than enough */
2183 		if (cntr == 300)
2184 			return (QLT_ROM_STUCK);
2185 	}
2186 	/* Disable Interrupts (Probably not needed) */
2187 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2188 	if (reset_only)
2189 		return (QLT_SUCCESS);
2190 
2191 	/* Load the two segments */
2192 	if (qlt->fw_code01 != NULL) {
2193 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2194 		    qlt->fw_addr01);
2195 		if (ret == QLT_SUCCESS) {
2196 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2197 			    qlt->fw_length02, qlt->fw_addr02);
2198 		}
2199 		start_addr = qlt->fw_addr01;
2200 	} else if (qlt->qlt_81xx_chip) {
2201 		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2202 		    fw8100_addr01);
2203 		if (ret == QLT_SUCCESS) {
2204 			ret = qlt_load_risc_ram(qlt, fw8100_code02,
2205 			    fw8100_length02, fw8100_addr02);
2206 		}
2207 		start_addr = fw8100_addr01;
2208 	} else if (qlt->qlt_25xx_chip) {
2209 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2210 		    fw2500_addr01);
2211 		if (ret == QLT_SUCCESS) {
2212 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
2213 			    fw2500_length02, fw2500_addr02);
2214 		}
2215 		start_addr = fw2500_addr01;
2216 	} else {
2217 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2218 		    fw2400_addr01);
2219 		if (ret == QLT_SUCCESS) {
2220 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
2221 			    fw2400_length02, fw2400_addr02);
2222 		}
2223 		start_addr = fw2400_addr01;
2224 	}
2225 	if (ret != QLT_SUCCESS) {
2226 		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2227 		return (ret);
2228 	}
2229 
2230 	/* Verify Checksum */
2231 	REG_WR16(qlt, REG_MBOX(0), 7);
2232 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
2233 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
2234 	ret = qlt_raw_mailbox_command(qlt);
2235 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2236 	if (ret != QLT_SUCCESS) {
2237 		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2238 		return (ret);
2239 	}
2240 
2241 	/* Execute firmware */
2242 	REG_WR16(qlt, REG_MBOX(0), 2);
2243 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
2244 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
2245 	REG_WR16(qlt, REG_MBOX(3), 0);
2246 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
2247 	ret = qlt_raw_mailbox_command(qlt);
2248 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2249 	if (ret != QLT_SUCCESS) {
2250 		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2251 		return (ret);
2252 	}
2253 
2254 	/* Get revisions (About Firmware) */
2255 	REG_WR16(qlt, REG_MBOX(0), 8);
2256 	ret = qlt_raw_mailbox_command(qlt);
2257 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2258 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2259 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2260 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2261 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2262 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2263 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2264 	if (ret != QLT_SUCCESS) {
2265 		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2266 		return (ret);
2267 	}
2268 
2269 	return (QLT_SUCCESS);
2270 }
2271 
2272 /*
2273  * Used only from qlt_reset_chip_and_download_fw().
2274  */
2275 static fct_status_t
2276 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2277 				uint32_t word_count, uint32_t risc_addr)
2278 {
2279 	uint32_t words_sent = 0;
2280 	uint32_t words_being_sent;
2281 	uint32_t *cur_host_addr;
2282 	uint32_t cur_risc_addr;
2283 	uint64_t da;
2284 	fct_status_t ret;
2285 
2286 	while (words_sent < word_count) {
2287 		cur_host_addr = &(host_addr[words_sent]);
2288 		cur_risc_addr = risc_addr + (words_sent << 2);
2289 		words_being_sent = min(word_count - words_sent,
2290 		    TOTAL_DMA_MEM_SIZE >> 2);
2291 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2292 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2293 		    DDI_DEV_AUTOINCR);
2294 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2295 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2296 		da = qlt->queue_mem_cookie.dmac_laddress;
2297 		REG_WR16(qlt, REG_MBOX(0), 0x0B);
2298 		REG_WR16(qlt, REG_MBOX(1), risc_addr & 0xffff);
2299 		REG_WR16(qlt, REG_MBOX(8), ((cur_risc_addr >> 16) & 0xffff));
2300 		REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
2301 		da >>= 16;
2302 		REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
2303 		da >>= 16;
2304 		REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
2305 		da >>= 16;
2306 		REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
2307 		REG_WR16(qlt, REG_MBOX(5), words_being_sent & 0xffff);
2308 		REG_WR16(qlt, REG_MBOX(4), (words_being_sent >> 16) & 0xffff);
2309 		ret = qlt_raw_mailbox_command(qlt);
2310 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2311 		if (ret != QLT_SUCCESS) {
2312 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2313 			    ret);
2314 			return (ret);
2315 		}
2316 		words_sent += words_being_sent;
2317 	}
2318 	return (QLT_SUCCESS);
2319 }
2320 
2321 /*
2322  * Not used during normal operation. Only during driver init.
2323  * Assumes that interrupts are disabled and mailboxes are loaded.
2324  * Just triggers the mailbox command an waits for the completion.
2325  * Also expects that There is nothing else going on and we will only
2326  * get back a mailbox completion from firmware.
2327  * ---DOES NOT CLEAR INTERRUPT---
2328  * Used only from the code path originating from
2329  * qlt_reset_chip_and_download_fw()
2330  */
2331 static fct_status_t
2332 qlt_raw_mailbox_command(qlt_state_t *qlt)
2333 {
2334 	int cntr = 0;
2335 	uint32_t status;
2336 
2337 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2338 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_INTR_REQUEST) == 0) {
2339 		cntr++;
2340 		if (cntr == 100)
2341 			return (QLT_MAILBOX_STUCK);
2342 		delay(drv_usectohz(10000));
2343 	}
2344 	status = (REG_RD32(qlt, REG_RISC_STATUS) & 0xff);
2345 	if ((status == 1) || (status == 2) ||
2346 	    (status == 0x10) || (status == 0x11)) {
2347 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2348 		if (mbox0 == 0x4000)
2349 			return (QLT_SUCCESS);
2350 		else
2351 			return (QLT_MBOX_FAILED | mbox0);
2352 	}
2353 	/* This is unexpected, dump a message */
2354 	cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx",
2355 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2356 	return (QLT_UNEXPECTED_RESPONSE);
2357 }
2358 
2359 static mbox_cmd_t *
2360 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2361 {
2362 	mbox_cmd_t *mcp;
2363 
2364 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2365 	if (dma_size) {
2366 		qlt_dmem_bctl_t *bctl;
2367 		uint64_t da;
2368 
2369 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2370 		if (mcp->dbuf == NULL) {
2371 			kmem_free(mcp, sizeof (*mcp));
2372 			return (NULL);
2373 		}
2374 		mcp->dbuf->db_data_size = dma_size;
2375 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2376 
2377 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2378 		da = bctl->bctl_dev_addr;
2379 		/* This is the most common initialization of dma ptrs */
2380 		mcp->to_fw[3] = (uint16_t)(da & 0xffff);
2381 		da >>= 16;
2382 		mcp->to_fw[2] = (uint16_t)(da & 0xffff);
2383 		da >>= 16;
2384 		mcp->to_fw[7] = (uint16_t)(da & 0xffff);
2385 		da >>= 16;
2386 		mcp->to_fw[6] = (uint16_t)(da & 0xffff);
2387 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2388 	}
2389 	mcp->to_fw_mask |= BIT_0;
2390 	mcp->from_fw_mask |= BIT_0;
2391 	return (mcp);
2392 }
2393 
2394 void
2395 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2396 {
2397 	if (mcp->dbuf)
2398 		qlt_i_dmem_free(qlt, mcp->dbuf);
2399 	kmem_free(mcp, sizeof (*mcp));
2400 }
2401 
2402 /*
2403  * This can sleep. Should never be called from interrupt context.
2404  */
2405 static fct_status_t
2406 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2407 {
2408 	int	retries;
2409 	int	i;
2410 	char	info[80];
2411 
2412 	if (curthread->t_flag & T_INTR_THREAD) {
2413 		ASSERT(0);
2414 		return (QLT_MBOX_FAILED);
2415 	}
2416 
2417 	mutex_enter(&qlt->mbox_lock);
2418 	/* See if mailboxes are still uninitialized */
2419 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2420 		mutex_exit(&qlt->mbox_lock);
2421 		return (QLT_MBOX_NOT_INITIALIZED);
2422 	}
2423 
2424 	/* Wait to grab the mailboxes */
2425 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2426 	    retries++) {
2427 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2428 		if ((retries > 5) ||
2429 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2430 			mutex_exit(&qlt->mbox_lock);
2431 			return (QLT_MBOX_BUSY);
2432 		}
2433 	}
2434 	/* Make sure we always ask for mailbox 0 */
2435 	mcp->from_fw_mask |= BIT_0;
2436 
2437 	/* Load mailboxes, set state and generate RISC interrupt */
2438 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2439 	qlt->mcp = mcp;
2440 	for (i = 0; i < MAX_MBOXES; i++) {
2441 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2442 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2443 	}
2444 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2445 
2446 qlt_mbox_wait_loop:;
2447 	/* Wait for mailbox command completion */
2448 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2449 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2450 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2451 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2452 		info[79] = 0;
2453 		qlt->mcp = NULL;
2454 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2455 		mutex_exit(&qlt->mbox_lock);
2456 
2457 		/*
2458 		 * XXX Throw HBA fatal error event
2459 		 */
2460 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2461 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2462 		return (QLT_MBOX_TIMEOUT);
2463 	}
2464 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2465 		goto qlt_mbox_wait_loop;
2466 
2467 	qlt->mcp = NULL;
2468 
2469 	/* Make sure its a completion */
2470 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2471 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2472 		mutex_exit(&qlt->mbox_lock);
2473 		return (QLT_MBOX_ABORTED);
2474 	}
2475 
2476 	/* MBox command completed. Clear state, retuen based on mbox 0 */
2477 	/* Mailboxes are already loaded by interrupt routine */
2478 	qlt->mbox_io_state = MBOX_STATE_READY;
2479 	mutex_exit(&qlt->mbox_lock);
2480 	if (mcp->from_fw[0] != 0x4000)
2481 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2482 
2483 	return (QLT_SUCCESS);
2484 }
2485 
2486 /*
2487  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2488  */
2489 /* ARGSUSED */
2490 static uint_t
2491 qlt_isr(caddr_t arg, caddr_t arg2)
2492 {
2493 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2494 	uint32_t	risc_status, intr_type;
2495 	int		i;
2496 	int		intr_loop_count;
2497 	char		info[80];
2498 
2499 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2500 	if (!mutex_tryenter(&qlt->intr_lock)) {
2501 		/*
2502 		 * Normally we will always get this lock. If tryenter is
2503 		 * failing then it means that driver is trying to do
2504 		 * some cleanup and is masking the intr but some intr
2505 		 * has sneaked in between. See if our device has generated
2506 		 * this intr. If so then wait a bit and return claimed.
2507 		 * If not then return claimed if this is the 1st instance
2508 		 * of a interrupt after driver has grabbed the lock.
2509 		 */
2510 		if (risc_status & BIT_15) {
2511 			drv_usecwait(10);
2512 			return (DDI_INTR_CLAIMED);
2513 		} else if (qlt->intr_sneak_counter) {
2514 			qlt->intr_sneak_counter--;
2515 			return (DDI_INTR_CLAIMED);
2516 		} else {
2517 			return (DDI_INTR_UNCLAIMED);
2518 		}
2519 	}
2520 	if (((risc_status & BIT_15) == 0) ||
2521 	    (qlt->qlt_intr_enabled == 0)) {
2522 		/*
2523 		 * This might be a pure coincedence that we are operating
2524 		 * in a interrupt disabled mode and another device
2525 		 * sharing the interrupt line has generated an interrupt
2526 		 * while an interrupt from our device might be pending. Just
2527 		 * ignore it and let the code handling the interrupt
2528 		 * disabled mode handle it.
2529 		 */
2530 		mutex_exit(&qlt->intr_lock);
2531 		return (DDI_INTR_UNCLAIMED);
2532 	}
2533 
2534 	/*
2535 	 * XXX take care for MSI case. disable intrs
2536 	 * Its gonna be complicated because of the max iterations.
2537 	 * as hba will have posted the intr which did not go on PCI
2538 	 * but we did not service it either because of max iterations.
2539 	 * Maybe offload the intr on a different thread.
2540 	 */
2541 	intr_loop_count = 0;
2542 
2543 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2544 
2545 intr_again:;
2546 
2547 	/* check for risc pause */
2548 	if (risc_status & BIT_8) {
2549 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
2550 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2551 		    qlt->instance, risc_status);
2552 		(void) snprintf(info, 80, "Risc Pause %08x", risc_status);
2553 		info[79] = 0;
2554 		(void) fct_port_shutdown(qlt->qlt_port,
2555 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2556 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2557 	}
2558 
2559 	/* First check for high performance path */
2560 	intr_type = risc_status & 0xff;
2561 	if (intr_type == 0x1D) {
2562 		qlt->atio_ndx_from_fw = (uint16_t)
2563 		    REG_RD32(qlt, REG_ATIO_IN_PTR);
2564 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2565 		qlt->resp_ndx_from_fw = risc_status >> 16;
2566 		qlt_handle_atio_queue_update(qlt);
2567 		qlt_handle_resp_queue_update(qlt);
2568 	} else if (intr_type == 0x1C) {
2569 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2570 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2571 		qlt_handle_atio_queue_update(qlt);
2572 	} else if (intr_type == 0x13) {
2573 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2574 		qlt->resp_ndx_from_fw = risc_status >> 16;
2575 		qlt_handle_resp_queue_update(qlt);
2576 	} else if (intr_type == 0x12) {
2577 		uint16_t code = (uint16_t)(risc_status >> 16);
2578 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2579 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2580 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2581 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2582 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2583 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2584 
2585 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2586 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2587 		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2588 		    mbox5, mbox6);
2589 		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
2590 		    " mb3=%x, mb5=%x, mb6=%x", qlt->instance, code, mbox1,
2591 		    mbox2, mbox3, mbox5, mbox6);
2592 
2593 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2594 			if (qlt->qlt_link_up) {
2595 				fct_handle_event(qlt->qlt_port,
2596 				    FCT_EVENT_LINK_RESET, 0, 0);
2597 			}
2598 		} else if (code == 0x8012) {
2599 			qlt->qlt_link_up = 0;
2600 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2601 			    0, 0);
2602 		} else if (code == 0x8011) {
2603 			switch (mbox1) {
2604 			case 0: qlt->link_speed = PORT_SPEED_1G;
2605 				break;
2606 			case 1: qlt->link_speed = PORT_SPEED_2G;
2607 				break;
2608 			case 3: qlt->link_speed = PORT_SPEED_4G;
2609 				break;
2610 			case 4: qlt->link_speed = PORT_SPEED_8G;
2611 				break;
2612 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
2613 				break;
2614 			default:
2615 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2616 			}
2617 			qlt->qlt_link_up = 1;
2618 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2619 			    0, 0);
2620 		} else if ((code == 0x8002) || (code == 0x8003) ||
2621 		    (code == 0x8004) || (code == 0x8005)) {
2622 			(void) snprintf(info, 80,
2623 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2624 			    code, mbox1, mbox2, mbox5, mbox6);
2625 			info[79] = 0;
2626 			(void) fct_port_shutdown(qlt->qlt_port,
2627 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2628 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2629 		} else if (code == 0x800F) {
2630 			(void) snprintf(info, 80,
2631 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
2632 			    mbox1, mbox2, mbox3);
2633 
2634 			if (mbox1 != 1) {
2635 				/* issue "verify fw" */
2636 				qlt_verify_fw(qlt);
2637 			}
2638 		} else if (code == 0x8101) {
2639 			(void) snprintf(info, 80,
2640 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2641 			    code, mbox1, mbox2, mbox3);
2642 			info[79] = 0;
2643 
2644 			/* check if "ACK" is required (timeout != 0) */
2645 			if (mbox1 & 0x0f00) {
2646 				caddr_t	req;
2647 
2648 				/*
2649 				 * Ack the request (queue work to do it?)
2650 				 * using a mailbox iocb
2651 				 */
2652 				mutex_enter(&qlt->req_lock);
2653 				req = qlt_get_req_entries(qlt, 1);
2654 				if (req) {
2655 					bzero(req, IOCB_SIZE);
2656 					req[0] = 0x39; req[1] = 1;
2657 					QMEM_WR16(qlt, req+8, 0x101);
2658 					QMEM_WR16(qlt, req+10, mbox1);
2659 					QMEM_WR16(qlt, req+12, mbox2);
2660 					QMEM_WR16(qlt, req+14, mbox3);
2661 					QMEM_WR16(qlt, req+16, mbox4);
2662 					QMEM_WR16(qlt, req+18, mbox5);
2663 					QMEM_WR16(qlt, req+20, mbox6);
2664 					qlt_submit_req_entries(qlt, 1);
2665 				} else {
2666 					(void) snprintf(info, 80,
2667 					    "IDC ACK failed");
2668 					info[79] = 0;
2669 				}
2670 				mutex_exit(&qlt->req_lock);
2671 			}
2672 		}
2673 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2674 		/* Handle mailbox completion */
2675 		mutex_enter(&qlt->mbox_lock);
2676 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2677 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2678 			    " when driver wasn't waiting for it %d",
2679 			    qlt->instance, qlt->mbox_io_state);
2680 		} else {
2681 			for (i = 0; i < MAX_MBOXES; i++) {
2682 				if (qlt->mcp->from_fw_mask &
2683 				    (((uint32_t)1) << i)) {
2684 					qlt->mcp->from_fw[i] =
2685 					    REG_RD16(qlt, REG_MBOX(i));
2686 				}
2687 			}
2688 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2689 		}
2690 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2691 		cv_broadcast(&qlt->mbox_cv);
2692 		mutex_exit(&qlt->mbox_lock);
2693 	} else {
2694 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2695 		    qlt->instance, intr_type);
2696 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2697 	}
2698 
2699 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2700 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2701 	if ((risc_status & BIT_15) &&
2702 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2703 		goto intr_again;
2704 	}
2705 
2706 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2707 
2708 	mutex_exit(&qlt->intr_lock);
2709 	return (DDI_INTR_CLAIMED);
2710 }
2711 
2712 /* **************** NVRAM Functions ********************** */
2713 
2714 fct_status_t
2715 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2716 {
2717 	uint32_t	timer;
2718 
2719 	/* Clear access error flag */
2720 	REG_WR32(qlt, REG_CTRL_STATUS,
2721 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2722 
2723 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2724 
2725 	/* Wait for READ cycle to complete. */
2726 	for (timer = 3000; timer; timer--) {
2727 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2728 			break;
2729 		}
2730 		drv_usecwait(10);
2731 	}
2732 	if (timer == 0) {
2733 		EL(qlt, "flash timeout\n");
2734 		return (QLT_FLASH_TIMEOUT);
2735 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2736 		EL(qlt, "flash access error\n");
2737 		return (QLT_FLASH_ACCESS_ERROR);
2738 	}
2739 
2740 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2741 
2742 	return (QLT_SUCCESS);
2743 }
2744 
2745 fct_status_t
2746 qlt_read_nvram(qlt_state_t *qlt)
2747 {
2748 	uint32_t		index, addr, chksum;
2749 	uint32_t		val, *ptr;
2750 	fct_status_t		ret;
2751 	qlt_nvram_t		*nv;
2752 	uint64_t		empty_node_name = 0;
2753 
2754 	if (qlt->qlt_81xx_chip) {
2755 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2756 		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2757 	} else if (qlt->qlt_25xx_chip) {
2758 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2759 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2760 	} else {
2761 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2762 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2763 	}
2764 	mutex_enter(&qlt_global_lock);
2765 
2766 	/* Pause RISC. */
2767 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
2768 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2769 
2770 	/* Get NVRAM data and calculate checksum. */
2771 	ptr = (uint32_t *)qlt->nvram;
2772 	chksum = 0;
2773 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2774 		ret = qlt_read_flash_word(qlt, addr++, &val);
2775 		if (ret != QLT_SUCCESS) {
2776 			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
2777 			mutex_exit(&qlt_global_lock);
2778 			return (ret);
2779 		}
2780 		chksum += val;
2781 		*ptr = LE_32(val);
2782 		ptr++;
2783 	}
2784 
2785 	/* Release RISC Pause */
2786 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_PAUSE);
2787 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2788 
2789 	mutex_exit(&qlt_global_lock);
2790 
2791 	/* Sanity check NVRAM Data */
2792 	nv = qlt->nvram;
2793 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2794 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2795 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2796 		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
2797 		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
2798 		    nv->nvram_version[1], nv->nvram_version[0]);
2799 		return (QLT_BAD_NVRAM_DATA);
2800 	}
2801 
2802 	/* If node name is zero, hand craft it from port name */
2803 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2804 		bcopy(nv->port_name, nv->node_name, 8);
2805 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
2806 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
2807 	}
2808 
2809 	return (QLT_SUCCESS);
2810 }
2811 
2812 uint32_t
2813 qlt_sync_atio_queue(qlt_state_t *qlt)
2814 {
2815 	uint32_t total_ent;
2816 
2817 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2818 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2819 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2820 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2821 		    DDI_DMA_SYNC_FORCPU);
2822 	} else {
2823 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2824 		    qlt->atio_ndx_from_fw;
2825 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2826 		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
2827 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2828 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2829 		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
2830 		    DDI_DMA_SYNC_FORCPU);
2831 	}
2832 	return (total_ent);
2833 }
2834 
2835 void
2836 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2837 {
2838 	uint32_t total_ent;
2839 
2840 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2841 		return;
2842 
2843 	total_ent = qlt_sync_atio_queue(qlt);
2844 
2845 	do {
2846 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2847 		    qlt->atio_ndx_to_fw << 6];
2848 		uint32_t ent_cnt;
2849 
2850 		ent_cnt = (uint32_t)(atio[1]);
2851 		if (ent_cnt > total_ent) {
2852 			break;
2853 		}
2854 		switch ((uint8_t)(atio[0])) {
2855 		case 0x0d:	/* INOT */
2856 			qlt_handle_inot(qlt, atio);
2857 			break;
2858 		case 0x06:	/* ATIO */
2859 			qlt_handle_atio(qlt, atio);
2860 			break;
2861 		default:
2862 			EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2863 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2864 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2865 			break;
2866 		}
2867 		qlt->atio_ndx_to_fw = (uint16_t)(
2868 		    (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2869 		total_ent -= ent_cnt;
2870 	} while (total_ent > 0);
2871 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2872 }
2873 
2874 uint32_t
2875 qlt_sync_resp_queue(qlt_state_t *qlt)
2876 {
2877 	uint32_t total_ent;
2878 
2879 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2880 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2881 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2882 		    RESPONSE_QUEUE_OFFSET
2883 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2884 		    DDI_DMA_SYNC_FORCPU);
2885 	} else {
2886 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2887 		    qlt->resp_ndx_from_fw;
2888 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2889 		    RESPONSE_QUEUE_OFFSET
2890 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2891 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2892 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2893 		    RESPONSE_QUEUE_OFFSET,
2894 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2895 	}
2896 	return (total_ent);
2897 }
2898 
2899 void
2900 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2901 {
2902 	uint32_t total_ent;
2903 	uint8_t c;
2904 
2905 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2906 		return;
2907 
2908 	total_ent = qlt_sync_resp_queue(qlt);
2909 
2910 	do {
2911 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
2912 		uint32_t ent_cnt;
2913 
2914 		ent_cnt = (uint32_t)(resp[1]);
2915 		if (ent_cnt > total_ent) {
2916 			break;
2917 		}
2918 		switch ((uint8_t)(resp[0])) {
2919 		case 0x12:	/* CTIO completion */
2920 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
2921 			break;
2922 		case 0x0e:	/* NACK */
2923 			/* Do Nothing */
2924 			break;
2925 		case 0x1b:	/* Verify FW */
2926 			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
2927 			break;
2928 		case 0x29:	/* CT PassThrough */
2929 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
2930 			break;
2931 		case 0x33:	/* Abort IO IOCB completion */
2932 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
2933 			break;
2934 		case 0x51:	/* PUREX */
2935 			qlt_handle_purex(qlt, (uint8_t *)resp);
2936 			break;
2937 		case 0x52:
2938 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
2939 			break;
2940 		case 0x53:	/* ELS passthrough */
2941 			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
2942 			if (c == 0) {
2943 				qlt_handle_sol_els_completion(qlt,
2944 				    (uint8_t *)resp);
2945 			} else if (c == 3) {
2946 				qlt_handle_unsol_els_abort_completion(qlt,
2947 				    (uint8_t *)resp);
2948 			} else {
2949 				qlt_handle_unsol_els_completion(qlt,
2950 				    (uint8_t *)resp);
2951 			}
2952 			break;
2953 		case 0x54:	/* ABTS received */
2954 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
2955 			break;
2956 		case 0x55:	/* ABTS completion */
2957 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
2958 			break;
2959 		default:
2960 			EL(qlt, "response entry=%xh\n", resp[0]);
2961 			break;
2962 		}
2963 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
2964 		    (RESPONSE_QUEUE_ENTRIES - 1);
2965 		total_ent -= ent_cnt;
2966 	} while (total_ent > 0);
2967 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
2968 }
2969 
2970 fct_status_t
2971 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
2972 				uint16_t *ret_handle)
2973 {
2974 	fct_status_t ret;
2975 	mbox_cmd_t *mcp;
2976 	uint16_t n;
2977 	uint16_t h;
2978 	uint32_t ent_id;
2979 	uint8_t *p;
2980 	int found = 0;
2981 
2982 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
2983 	if (mcp == NULL) {
2984 		return (STMF_ALLOC_FAILURE);
2985 	}
2986 	mcp->to_fw[0] = 0x7C;	/* GET ID LIST */
2987 	mcp->to_fw[8] = 2048 * 8;
2988 	mcp->to_fw[9] = 0;
2989 	mcp->to_fw_mask |= BIT_9 | BIT_8;
2990 	mcp->from_fw_mask |= BIT_1 | BIT_2;
2991 
2992 	ret = qlt_mailbox_command(qlt, mcp);
2993 	if (ret != QLT_SUCCESS) {
2994 		EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
2995 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
2996 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
2997 		    mcp->from_fw[1], mcp->from_fw[2]);
2998 		qlt_free_mailbox_command(qlt, mcp);
2999 		return (ret);
3000 	}
3001 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
3002 	p = mcp->dbuf->db_sglist[0].seg_addr;
3003 	for (n = 0; n < mcp->from_fw[1]; n++) {
3004 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
3005 		h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
3006 		if (ent_id == id) {
3007 			found = 1;
3008 			*ret_handle = h;
3009 			if ((cmd_handle != FCT_HANDLE_NONE) &&
3010 			    (cmd_handle != h)) {
3011 				cmn_err(CE_WARN, "login for portid %x came in "
3012 				    "with handle %x, while the portid was "
3013 				    "already using a different handle %x",
3014 				    id, cmd_handle, h);
3015 				qlt_free_mailbox_command(qlt, mcp);
3016 				return (QLT_FAILURE);
3017 			}
3018 			break;
3019 		}
3020 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
3021 			cmn_err(CE_WARN, "login for portid %x came in with "
3022 			    "handle %x, while the handle was already in use "
3023 			    "for portid %x", id, cmd_handle, ent_id);
3024 			qlt_free_mailbox_command(qlt, mcp);
3025 			return (QLT_FAILURE);
3026 		}
3027 		p += 8;
3028 	}
3029 	if (!found) {
3030 		*ret_handle = cmd_handle;
3031 	}
3032 	qlt_free_mailbox_command(qlt, mcp);
3033 	return (FCT_SUCCESS);
3034 }
3035 
3036 /* ARGSUSED */
3037 fct_status_t
3038 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3039 				fct_cmd_t *login)
3040 {
3041 	uint8_t *p;
3042 
3043 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3044 	p[0] = ELS_OP_PLOGI;
3045 	*((uint16_t *)(&p[4])) = 0x2020;
3046 	p[7] = 3;
3047 	p[8] = 0x88;
3048 	p[10] = 8;
3049 	p[13] = 0xff; p[15] = 0x1f;
3050 	p[18] = 7; p[19] = 0xd0;
3051 
3052 	bcopy(port->port_pwwn, p + 20, 8);
3053 	bcopy(port->port_nwwn, p + 28, 8);
3054 
3055 	p[68] = 0x80;
3056 	p[74] = 8;
3057 	p[77] = 0xff;
3058 	p[81] = 1;
3059 
3060 	return (FCT_SUCCESS);
3061 }
3062 
3063 /* ARGSUSED */
3064 fct_status_t
3065 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3066 				fct_cmd_t *login)
3067 {
3068 	return (FCT_SUCCESS);
3069 }
3070 
3071 fct_status_t
3072 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3073     fct_cmd_t *login)
3074 {
3075 	uint16_t h;
3076 	fct_status_t ret;
3077 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3078 
3079 	switch (rp->rp_id) {
3080 	case 0xFFFFFC:	h = 0x7FC; break;
3081 	case 0xFFFFFD:	h = 0x7FD; break;
3082 	case 0xFFFFFE:	h = 0x7FE; break;
3083 	case 0xFFFFFF:	h = 0x7FF; break;
3084 	default:
3085 		ret = qlt_portid_to_handle(qlt, rp->rp_id,
3086 		    login->cmd_rp_handle, &h);
3087 		if (ret != FCT_SUCCESS) {
3088 			EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3089 			return (ret);
3090 		}
3091 	}
3092 
3093 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
3094 		ret = qlt_fill_plogi_req(port, rp, login);
3095 	} else {
3096 		ret = qlt_fill_plogi_resp(port, rp, login);
3097 	}
3098 
3099 	if (ret != FCT_SUCCESS) {
3100 		EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3101 		return (ret);
3102 	}
3103 
3104 	if (h == FCT_HANDLE_NONE)
3105 		return (FCT_SUCCESS);
3106 
3107 	if (rp->rp_handle == FCT_HANDLE_NONE) {
3108 		rp->rp_handle = h;
3109 		return (FCT_SUCCESS);
3110 	}
3111 
3112 	if (rp->rp_handle == h)
3113 		return (FCT_SUCCESS);
3114 
3115 	EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3116 	return (FCT_FAILURE);
3117 }
3118 /* invoked in single thread */
3119 fct_status_t
3120 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3121 {
3122 	uint8_t *req;
3123 	qlt_state_t *qlt;
3124 	clock_t	dereg_req_timer;
3125 	fct_status_t ret;
3126 
3127 	qlt = (qlt_state_t *)port->port_fca_private;
3128 
3129 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3130 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
3131 		return (FCT_SUCCESS);
3132 	ASSERT(qlt->rp_id_in_dereg == 0);
3133 
3134 	mutex_enter(&qlt->preq_lock);
3135 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3136 	if (req == NULL) {
3137 		mutex_exit(&qlt->preq_lock);
3138 		return (FCT_BUSY);
3139 	}
3140 	bzero(req, IOCB_SIZE);
3141 	req[0] = 0x52; req[1] = 1;
3142 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
3143 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3144 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3145 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3146 	qlt->rp_id_in_dereg = rp->rp_id;
3147 	qlt_submit_preq_entries(qlt, 1);
3148 
3149 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3150 	if (cv_timedwait(&qlt->rp_dereg_cv,
3151 	    &qlt->preq_lock, dereg_req_timer) > 0) {
3152 		ret = qlt->rp_dereg_status;
3153 	} else {
3154 		ret = FCT_BUSY;
3155 	}
3156 	qlt->rp_dereg_status = 0;
3157 	qlt->rp_id_in_dereg = 0;
3158 	mutex_exit(&qlt->preq_lock);
3159 	return (ret);
3160 }
3161 
3162 /*
3163  * Pass received ELS up to framework.
3164  */
3165 static void
3166 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3167 {
3168 	fct_cmd_t		*cmd;
3169 	fct_els_t		*els;
3170 	qlt_cmd_t		*qcmd;
3171 	uint32_t		payload_size;
3172 	uint32_t		remote_portid;
3173 	uint8_t			*pldptr, *bndrptr;
3174 	int			i, off;
3175 	uint16_t		iocb_flags;
3176 	char			info[160];
3177 
3178 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3179 	    ((uint32_t)(resp[0x1A])) << 16;
3180 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3181 	if (iocb_flags & BIT_15) {
3182 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3183 	} else {
3184 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3185 	}
3186 
3187 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
3188 		EL(qlt, "payload is too large = %xh\n", payload_size);
3189 		cmn_err(CE_WARN, "handle_purex: payload is too large");
3190 		goto cmd_null;
3191 	}
3192 
3193 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3194 	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3195 	if (cmd == NULL) {
3196 		EL(qlt, "fct_alloc cmd==NULL\n");
3197 cmd_null:;
3198 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
3199 		    "allocate space for fct_cmd", (void *)qlt);
3200 		info[159] = 0;
3201 		(void) fct_port_shutdown(qlt->qlt_port,
3202 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3203 		return;
3204 	}
3205 
3206 	cmd->cmd_port = qlt->qlt_port;
3207 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3208 	if (cmd->cmd_rp_handle == 0xFFFF) {
3209 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3210 	}
3211 
3212 	els = (fct_els_t *)cmd->cmd_specific;
3213 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3214 	els->els_req_size = (uint16_t)payload_size;
3215 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3216 	    GET_STRUCT_SIZE(qlt_cmd_t));
3217 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3218 	cmd->cmd_rportid = remote_portid;
3219 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3220 	    ((uint32_t)(resp[0x16])) << 16;
3221 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3222 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3223 	pldptr = &resp[0x2C];
3224 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
3225 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3226 		/* Take care of fw's swapping of payload */
3227 		els->els_req_payload[i] = pldptr[3];
3228 		els->els_req_payload[i+1] = pldptr[2];
3229 		els->els_req_payload[i+2] = pldptr[1];
3230 		els->els_req_payload[i+3] = pldptr[0];
3231 		pldptr += 4;
3232 		if (pldptr == bndrptr)
3233 			pldptr = (uint8_t *)qlt->resp_ptr;
3234 		off += 4;
3235 		if (off >= IOCB_SIZE) {
3236 			off = 4;
3237 			pldptr += 4;
3238 		}
3239 	}
3240 	fct_post_rcvd_cmd(cmd, 0);
3241 }
3242 
3243 fct_status_t
3244 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3245 {
3246 	qlt_state_t	*qlt;
3247 	char		info[160];
3248 
3249 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3250 
3251 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3252 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3253 			EL(qlt, "ioflags = %xh\n", ioflags);
3254 			goto fatal_panic;
3255 		} else {
3256 			return (qlt_send_status(qlt, cmd));
3257 		}
3258 	}
3259 
3260 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3261 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3262 			goto fatal_panic;
3263 		} else {
3264 			return (qlt_send_els_response(qlt, cmd));
3265 		}
3266 	}
3267 
3268 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3269 		cmd->cmd_handle = 0;
3270 	}
3271 
3272 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3273 		return (qlt_send_abts_response(qlt, cmd, 0));
3274 	} else {
3275 		EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3276 		ASSERT(0);
3277 		return (FCT_FAILURE);
3278 	}
3279 
3280 fatal_panic:;
3281 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
3282 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3283 	    ioflags);
3284 	info[159] = 0;
3285 	(void) fct_port_shutdown(qlt->qlt_port,
3286 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3287 	return (FCT_FAILURE);
3288 }
3289 
3290 /* ARGSUSED */
3291 fct_status_t
3292 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
3293 {
3294 	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
3295 	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3296 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3297 	uint8_t *req;
3298 	uint16_t flags;
3299 
3300 	if (dbuf->db_handle == 0)
3301 		qcmd->dbuf = dbuf;
3302 	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3303 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
3304 		flags = (uint16_t)(flags | 2);
3305 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
3306 	} else {
3307 		flags = (uint16_t)(flags | 1);
3308 	}
3309 
3310 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
3311 		flags = (uint16_t)(flags | BIT_15);
3312 
3313 	mutex_enter(&qlt->req_lock);
3314 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3315 	if (req == NULL) {
3316 		mutex_exit(&qlt->req_lock);
3317 		return (FCT_BUSY);
3318 	}
3319 	bzero(req, IOCB_SIZE);
3320 	req[0] = 0x12; req[1] = 0x1;
3321 	req[2] = dbuf->db_handle;
3322 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
3323 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
3324 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
3325 	req[12] = 1;
3326 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
3327 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
3328 	QMEM_WR16(qlt, req+0x1A, flags);
3329 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
3330 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
3331 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
3332 	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
3333 	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
3334 	qlt_submit_req_entries(qlt, 1);
3335 	mutex_exit(&qlt->req_lock);
3336 
3337 	return (STMF_SUCCESS);
3338 }
3339 
3340 /*
3341  * We must construct proper FCP_RSP_IU now. Here we only focus on
3342  * the handling of FCP_SNS_INFO. If there's protocol failures (FCP_RSP_INFO),
3343  * we could have catched them before we enter here.
3344  */
3345 fct_status_t
3346 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3347 {
3348 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
3349 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
3350 	qlt_dmem_bctl_t *bctl;
3351 	uint32_t size;
3352 	uint8_t *req, *fcp_rsp_iu;
3353 	uint8_t *psd, sensbuf[24];		/* sense data */
3354 	uint16_t flags;
3355 	uint16_t scsi_status;
3356 	int use_mode2;
3357 	int ndx;
3358 
3359 	/*
3360 	 * Enter fast channel for non check condition
3361 	 */
3362 	if (task->task_scsi_status != STATUS_CHECK) {
3363 		/*
3364 		 * We will use mode1
3365 		 */
3366 		flags = (uint16_t)(BIT_6 | BIT_15 |
3367 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3368 		scsi_status = (uint16_t)task->task_scsi_status;
3369 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3370 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3371 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3372 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3373 		}
3374 		qcmd->dbuf_rsp_iu = NULL;
3375 
3376 		/*
3377 		 * Fillout CTIO type 7 IOCB
3378 		 */
3379 		mutex_enter(&qlt->req_lock);
3380 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3381 		if (req == NULL) {
3382 			mutex_exit(&qlt->req_lock);
3383 			return (FCT_BUSY);
3384 		}
3385 
3386 		/*
3387 		 * Common fields
3388 		 */
3389 		bzero(req, IOCB_SIZE);
3390 		req[0x00] = 0x12;
3391 		req[0x01] = 0x1;
3392 		req[0x02] = BIT_7;	/* indicate if it's a pure status req */
3393 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3394 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3395 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3396 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3397 
3398 		/*
3399 		 * Mode-specific fields
3400 		 */
3401 		QMEM_WR16(qlt, req + 0x1A, flags);
3402 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3403 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3404 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3405 
3406 		/*
3407 		 * Trigger FW to send SCSI status out
3408 		 */
3409 		qlt_submit_req_entries(qlt, 1);
3410 		mutex_exit(&qlt->req_lock);
3411 		return (STMF_SUCCESS);
3412 	}
3413 
3414 	ASSERT(task->task_scsi_status == STATUS_CHECK);
3415 	/*
3416 	 * Decide the SCSI status mode, that should be used
3417 	 */
3418 	use_mode2 = (task->task_sense_length > 24);
3419 
3420 	/*
3421 	 * Prepare required information per the SCSI status mode
3422 	 */
3423 	flags = (uint16_t)(BIT_15 |
3424 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3425 	if (use_mode2) {
3426 		flags = (uint16_t)(flags | BIT_7);
3427 
3428 		size = task->task_sense_length;
3429 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3430 		    task->task_sense_length, &size, 0);
3431 		if (!qcmd->dbuf_rsp_iu) {
3432 			return (FCT_ALLOC_FAILURE);
3433 		}
3434 
3435 		/*
3436 		 * Start to construct FCP_RSP IU
3437 		 */
3438 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3439 		bzero(fcp_rsp_iu, 24);
3440 
3441 		/*
3442 		 * FCP_RSP IU flags, byte10
3443 		 */
3444 		fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3445 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3446 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3447 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3448 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3449 		}
3450 
3451 		/*
3452 		 * SCSI status code, byte11
3453 		 */
3454 		fcp_rsp_iu[11] = task->task_scsi_status;
3455 
3456 		/*
3457 		 * FCP_RESID (Overrun or underrun)
3458 		 */
3459 		fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3460 		fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3461 		fcp_rsp_iu[14] = (uint8_t)((task->task_resid >>  8) & 0xFF);
3462 		fcp_rsp_iu[15] = (uint8_t)((task->task_resid >>  0) & 0xFF);
3463 
3464 		/*
3465 		 * FCP_SNS_LEN
3466 		 */
3467 		fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3468 		    0xFF);
3469 		fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3470 		    0xFF);
3471 
3472 		/*
3473 		 * FCP_RSP_LEN
3474 		 */
3475 		/*
3476 		 * no FCP_RSP_INFO
3477 		 */
3478 		/*
3479 		 * FCP_SNS_INFO
3480 		 */
3481 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3482 		    task->task_sense_length);
3483 
3484 		/*
3485 		 * Ensure dma data consistency
3486 		 */
3487 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3488 	} else {
3489 		flags = (uint16_t)(flags | BIT_6);
3490 
3491 		scsi_status = (uint16_t)task->task_scsi_status;
3492 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3493 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3494 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3495 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3496 		}
3497 		if (task->task_sense_length) {
3498 			scsi_status = (uint16_t)(scsi_status | BIT_9);
3499 		}
3500 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3501 		qcmd->dbuf_rsp_iu = NULL;
3502 	}
3503 
3504 	/*
3505 	 * Fillout CTIO type 7 IOCB
3506 	 */
3507 	mutex_enter(&qlt->req_lock);
3508 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3509 	if (req == NULL) {
3510 		mutex_exit(&qlt->req_lock);
3511 		if (use_mode2) {
3512 			qlt_dmem_free(cmd->cmd_port->port_fds,
3513 			    qcmd->dbuf_rsp_iu);
3514 			qcmd->dbuf_rsp_iu = NULL;
3515 		}
3516 		return (FCT_BUSY);
3517 	}
3518 
3519 	/*
3520 	 * Common fields
3521 	 */
3522 	bzero(req, IOCB_SIZE);
3523 	req[0x00] = 0x12;
3524 	req[0x01] = 0x1;
3525 	req[0x02] = BIT_7;	/* to indicate if it's a pure status req */
3526 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3527 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3528 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3529 	if (use_mode2) {
3530 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3531 	}
3532 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3533 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3534 
3535 	/*
3536 	 * Mode-specific fields
3537 	 */
3538 	if (!use_mode2) {
3539 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3540 	}
3541 	QMEM_WR16(qlt, req + 0x1A, flags);
3542 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3543 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3544 	if (use_mode2) {
3545 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3546 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3547 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3548 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3549 	} else {
3550 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3551 		psd = req+0x28;
3552 
3553 		/*
3554 		 * Data in sense buf is always big-endian, data in IOCB
3555 		 * should always be little-endian, so we must do swapping.
3556 		 */
3557 		size = ((task->task_sense_length + 3) & (~3));
3558 		for (ndx = 0; ndx < size; ndx += 4) {
3559 			psd[ndx + 0] = sensbuf[ndx + 3];
3560 			psd[ndx + 1] = sensbuf[ndx + 2];
3561 			psd[ndx + 2] = sensbuf[ndx + 1];
3562 			psd[ndx + 3] = sensbuf[ndx + 0];
3563 		}
3564 	}
3565 
3566 	/*
3567 	 * Trigger FW to send SCSI status out
3568 	 */
3569 	qlt_submit_req_entries(qlt, 1);
3570 	mutex_exit(&qlt->req_lock);
3571 
3572 	return (STMF_SUCCESS);
3573 }
3574 
3575 fct_status_t
3576 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3577 {
3578 	qlt_cmd_t	*qcmd;
3579 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3580 	uint8_t *req, *addr;
3581 	qlt_dmem_bctl_t *bctl;
3582 	uint32_t minsize;
3583 	uint8_t elsop, req1f;
3584 
3585 	addr = els->els_resp_payload;
3586 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3587 
3588 	minsize = els->els_resp_size;
3589 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3590 	if (qcmd->dbuf == NULL)
3591 		return (FCT_BUSY);
3592 
3593 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3594 
3595 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3596 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3597 
3598 	if (addr[0] == 0x02) {	/* ACC */
3599 		req1f = BIT_5;
3600 	} else {
3601 		req1f = BIT_6;
3602 	}
3603 	elsop = els->els_req_payload[0];
3604 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3605 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3606 		req1f = (uint8_t)(req1f | BIT_4);
3607 	}
3608 
3609 	mutex_enter(&qlt->req_lock);
3610 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3611 	if (req == NULL) {
3612 		mutex_exit(&qlt->req_lock);
3613 		qlt_dmem_free(NULL, qcmd->dbuf);
3614 		qcmd->dbuf = NULL;
3615 		return (FCT_BUSY);
3616 	}
3617 	bzero(req, IOCB_SIZE);
3618 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3619 	req[0x16] = elsop; req[0x1f] = req1f;
3620 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3621 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3622 	QMEM_WR16(qlt, (&req[0xC]), 1);
3623 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3624 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3625 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3626 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
3627 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
3628 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
3629 	}
3630 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3631 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3632 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3633 	qlt_submit_req_entries(qlt, 1);
3634 	mutex_exit(&qlt->req_lock);
3635 
3636 	return (FCT_SUCCESS);
3637 }
3638 
3639 fct_status_t
3640 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3641 {
3642 	qlt_abts_cmd_t *qcmd;
3643 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3644 	uint8_t *req;
3645 	uint32_t lportid;
3646 	uint32_t fctl;
3647 	int i;
3648 
3649 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3650 
3651 	mutex_enter(&qlt->req_lock);
3652 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3653 	if (req == NULL) {
3654 		mutex_exit(&qlt->req_lock);
3655 		return (FCT_BUSY);
3656 	}
3657 	bcopy(qcmd->buf, req, IOCB_SIZE);
3658 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3659 	fctl = QMEM_RD32(qlt, req+0x1C);
3660 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3661 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3662 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3663 	if (cmd->cmd_rp)
3664 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3665 	else
3666 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3667 	if (terminate) {
3668 		QMEM_WR16(qlt, (&req[0xC]), 1);
3669 	}
3670 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3671 	req[0x17] = abts->abts_resp_rctl;
3672 	QMEM_WR32(qlt, req+0x18, lportid);
3673 	QMEM_WR32(qlt, req+0x1C, fctl);
3674 	req[0x23]++;
3675 	for (i = 0; i < 12; i += 4) {
3676 		/* Take care of firmware's LE requirement */
3677 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3678 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3679 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3680 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3681 	}
3682 	qlt_submit_req_entries(qlt, 1);
3683 	mutex_exit(&qlt->req_lock);
3684 
3685 	return (FCT_SUCCESS);
3686 }
3687 
3688 static void
3689 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3690 {
3691 	int i;
3692 	uint32_t d;
3693 	caddr_t req;
3694 	/* Just put it on the request queue */
3695 	mutex_enter(&qlt->req_lock);
3696 	req = qlt_get_req_entries(qlt, 1);
3697 	if (req == NULL) {
3698 		mutex_exit(&qlt->req_lock);
3699 		/* XXX handle this */
3700 		return;
3701 	}
3702 	for (i = 0; i < 16; i++) {
3703 		d = QMEM_RD32(qlt, inot);
3704 		inot += 4;
3705 		QMEM_WR32(qlt, req, d);
3706 		req += 4;
3707 	}
3708 	req -= 64;
3709 	req[0] = 0x0e;
3710 	qlt_submit_req_entries(qlt, 1);
3711 	mutex_exit(&qlt->req_lock);
3712 }
3713 
3714 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3715 static void
3716 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3717 {
3718 	fct_cmd_t	*cmd;
3719 	scsi_task_t	*task;
3720 	qlt_cmd_t	*qcmd;
3721 	uint32_t	rportid, fw_xchg_addr;
3722 	uint8_t		*p, *q, *req, tm;
3723 	uint16_t	cdb_size, flags, oxid;
3724 	char		info[160];
3725 
3726 	/*
3727 	 * If either bidirection xfer is requested of there is extended
3728 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3729 	 */
3730 	cdb_size = 16;
3731 	if (atio[0x20 + 11] >= 3) {
3732 		uint8_t b = atio[0x20 + 11];
3733 		uint16_t b1;
3734 		if ((b & 3) == 3) {
3735 			EL(qlt, "bidirectional I/O not supported\n");
3736 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3737 			    "received, dropping the cmd as bidirectional "
3738 			    " transfers are not yet supported", qlt->instance);
3739 			/* XXX abort the I/O */
3740 			return;
3741 		}
3742 		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3743 		/*
3744 		 * Verify that we have enough entries. Without additional CDB
3745 		 * Everything will fit nicely within the same 64 bytes. So the
3746 		 * additional cdb size is essentially the # of additional bytes
3747 		 * we need.
3748 		 */
3749 		b1 = (uint16_t)b;
3750 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3751 			EL(qlt, "extended cdb received\n");
3752 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3753 			    " cdb (cdb size = %d bytes), however the firmware "
3754 			    " did not DMAed the entire FCP_CMD IU, entry count "
3755 			    " is %d while it should be %d", qlt->instance,
3756 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3757 			/* XXX abort the I/O */
3758 			return;
3759 		}
3760 	}
3761 
3762 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3763 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3764 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3765 	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3766 
3767 	if (fw_xchg_addr == 0xFFFFFFFF) {
3768 		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3769 		cmd = NULL;
3770 	} else {
3771 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3772 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3773 		if (cmd == NULL) {
3774 			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3775 		}
3776 	}
3777 	if (cmd == NULL) {
3778 		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3779 		/* Abort this IO */
3780 		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3781 
3782 		mutex_enter(&qlt->req_lock);
3783 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3784 		if (req == NULL) {
3785 			mutex_exit(&qlt->req_lock);
3786 
3787 			(void) snprintf(info, 160,
3788 			    "qlt_handle_atio: qlt-%p, can't "
3789 			    "allocate space for scsi_task", (void *)qlt);
3790 			info[159] = 0;
3791 			(void) fct_port_shutdown(qlt->qlt_port,
3792 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3793 			return;
3794 		}
3795 		bzero(req, IOCB_SIZE);
3796 		req[0] = 0x12; req[1] = 0x1;
3797 		QMEM_WR32(qlt, req+4, 0);
3798 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3799 		    rportid));
3800 		QMEM_WR16(qlt, req+10, 60);
3801 		QMEM_WR32(qlt, req+0x10, rportid);
3802 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3803 		QMEM_WR16(qlt, req+0x1A, flags);
3804 		QMEM_WR16(qlt, req+0x20, oxid);
3805 		qlt_submit_req_entries(qlt, 1);
3806 		mutex_exit(&qlt->req_lock);
3807 
3808 		return;
3809 	}
3810 
3811 	task = (scsi_task_t *)cmd->cmd_specific;
3812 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3813 	qcmd->fw_xchg_addr = fw_xchg_addr;
3814 	qcmd->param.atio_byte3 = atio[3];
3815 	cmd->cmd_oxid = oxid;
3816 	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3817 	    atio[8+19]);
3818 	cmd->cmd_rportid = rportid;
3819 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3820 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3821 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3822 	/* Dont do a 64 byte read as this is IOMMU */
3823 	q = atio+0x28;
3824 	/* XXX Handle fcp_cntl */
3825 	task->task_cmd_seq_no = (uint32_t)(*q++);
3826 	task->task_csn_size = 8;
3827 	task->task_flags = qlt_task_flags[(*q++) & 7];
3828 	tm = *q++;
3829 	if (tm) {
3830 		if (tm & BIT_1)
3831 			task->task_mgmt_function = TM_ABORT_TASK_SET;
3832 		else if (tm & BIT_2)
3833 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
3834 		else if (tm & BIT_4)
3835 			task->task_mgmt_function = TM_LUN_RESET;
3836 		else if (tm & BIT_5)
3837 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
3838 		else if (tm & BIT_6)
3839 			task->task_mgmt_function = TM_CLEAR_ACA;
3840 		else
3841 			task->task_mgmt_function = TM_ABORT_TASK;
3842 	}
3843 	task->task_max_nbufs = STMF_BUFS_MAX;
3844 	task->task_csn_size = 8;
3845 	task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
3846 	p = task->task_cdb;
3847 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3848 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3849 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3850 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3851 	if (cdb_size > 16) {
3852 		uint16_t xtra = (uint16_t)(cdb_size - 16);
3853 		uint16_t i;
3854 		uint8_t cb[4];
3855 
3856 		while (xtra) {
3857 			*p++ = *q++;
3858 			xtra--;
3859 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3860 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3861 				q = (uint8_t *)qlt->queue_mem_ptr +
3862 				    ATIO_QUEUE_OFFSET;
3863 			}
3864 		}
3865 		for (i = 0; i < 4; i++) {
3866 			cb[i] = *q++;
3867 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3868 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3869 				q = (uint8_t *)qlt->queue_mem_ptr +
3870 				    ATIO_QUEUE_OFFSET;
3871 			}
3872 		}
3873 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
3874 		    (((uint32_t)cb[1]) << 16) |
3875 		    (((uint32_t)cb[2]) << 8) | cb[3];
3876 	} else {
3877 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
3878 		    (((uint32_t)q[1]) << 16) |
3879 		    (((uint32_t)q[2]) << 8) | q[3];
3880 	}
3881 	fct_post_rcvd_cmd(cmd, 0);
3882 }
3883 
3884 static void
3885 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
3886 {
3887 	uint16_t status;
3888 	uint32_t portid;
3889 	uint32_t subcode1, subcode2;
3890 
3891 	status = QMEM_RD16(qlt, rsp+8);
3892 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
3893 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
3894 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
3895 
3896 	mutex_enter(&qlt->preq_lock);
3897 	if (portid != qlt->rp_id_in_dereg) {
3898 		int instance = ddi_get_instance(qlt->dip);
3899 
3900 		EL(qlt, "implicit logout reveived portid = %xh\n", portid);
3901 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
3902 		    " received when driver wasn't waiting for it",
3903 		    instance, portid);
3904 		mutex_exit(&qlt->preq_lock);
3905 		return;
3906 	}
3907 
3908 	if (status != 0) {
3909 		EL(qlt, "implicit logout completed for %xh with status %xh, "
3910 		    "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
3911 		    subcode2);
3912 		if (status == 0x31 && subcode1 == 0x0a) {
3913 			qlt->rp_dereg_status = FCT_SUCCESS;
3914 		} else {
3915 			EL(qlt, "implicit logout portid=%xh, status=%xh, "
3916 			    "subcode1=%xh, subcode2=%xh\n", portid, status,
3917 			    subcode1, subcode2);
3918 			qlt->rp_dereg_status =
3919 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
3920 		}
3921 	} else {
3922 		qlt->rp_dereg_status = FCT_SUCCESS;
3923 	}
3924 	cv_signal(&qlt->rp_dereg_cv);
3925 	mutex_exit(&qlt->preq_lock);
3926 }
3927 
3928 /*
3929  * Note that when an ELS is aborted, the regular or aborted completion
3930  * (if any) gets posted before the abort IOCB comes back on response queue.
3931  */
3932 static void
3933 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
3934 {
3935 	char		info[160];
3936 	fct_cmd_t	*cmd;
3937 	qlt_cmd_t	*qcmd;
3938 	uint32_t	hndl;
3939 	uint32_t	subcode1, subcode2;
3940 	uint16_t	status;
3941 
3942 	hndl = QMEM_RD32(qlt, rsp+4);
3943 	status = QMEM_RD16(qlt, rsp+8);
3944 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
3945 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
3946 
3947 	if (!CMD_HANDLE_VALID(hndl)) {
3948 		EL(qlt, "handle = %xh\n", hndl);
3949 		/*
3950 		 * This cannot happen for unsol els completion. This can
3951 		 * only happen when abort for an unsol els completes.
3952 		 * This condition indicates a firmware bug.
3953 		 */
3954 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
3955 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
3956 		    hndl, status, subcode1, subcode2, (void *)rsp);
3957 		info[159] = 0;
3958 		(void) fct_port_shutdown(qlt->qlt_port,
3959 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3960 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3961 		return;
3962 	}
3963 
3964 	if (status == 5) {
3965 		/*
3966 		 * When an unsolicited els is aborted, the abort is done
3967 		 * by a ELSPT iocb with abort control. This is the aborted IOCB
3968 		 * and not the abortee. We will do the cleanup when the
3969 		 * IOCB which caused the abort, returns.
3970 		 */
3971 		EL(qlt, "status = %xh\n", status);
3972 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
3973 		return;
3974 	}
3975 
3976 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
3977 	if (cmd == NULL) {
3978 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
3979 		/*
3980 		 * Now why would this happen ???
3981 		 */
3982 		(void) snprintf(info, 160,
3983 		    "qlt_handle_unsol_els_completion: can not "
3984 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
3985 		    (void *)rsp);
3986 		info[159] = 0;
3987 		(void) fct_port_shutdown(qlt->qlt_port,
3988 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3989 
3990 		return;
3991 	}
3992 
3993 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
3994 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3995 	if (qcmd->flags & QLT_CMD_ABORTING) {
3996 		/*
3997 		 * This is the same case as "if (status == 5)" above. The
3998 		 * only difference is that in this case the firmware actually
3999 		 * finished sending the response. So the abort attempt will
4000 		 * come back with status ?. We will handle it there.
4001 		 */
4002 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
4003 		    "abort it");
4004 		return;
4005 	}
4006 
4007 	if (qcmd->dbuf != NULL) {
4008 		qlt_dmem_free(NULL, qcmd->dbuf);
4009 		qcmd->dbuf = NULL;
4010 	}
4011 
4012 	if (status == 0) {
4013 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4014 	} else {
4015 		fct_send_response_done(cmd,
4016 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4017 	}
4018 }
4019 
4020 static void
4021 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4022 {
4023 	char		info[160];
4024 	fct_cmd_t	*cmd;
4025 	qlt_cmd_t	*qcmd;
4026 	uint32_t	hndl;
4027 	uint32_t	subcode1, subcode2;
4028 	uint16_t	status;
4029 
4030 	hndl = QMEM_RD32(qlt, rsp+4);
4031 	status = QMEM_RD16(qlt, rsp+8);
4032 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4033 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4034 
4035 	if (!CMD_HANDLE_VALID(hndl)) {
4036 		EL(qlt, "handle = %xh\n", hndl);
4037 		ASSERT(hndl == 0);
4038 		/*
4039 		 * Someone has requested to abort it, but no one is waiting for
4040 		 * this completion.
4041 		 */
4042 		if ((status != 0) && (status != 8)) {
4043 			EL(qlt, "status = %xh\n", status);
4044 			/*
4045 			 * There could be exchange resource leakage, so
4046 			 * throw HBA fatal error event now
4047 			 */
4048 			(void) snprintf(info, 160,
4049 			    "qlt_handle_unsol_els_abort_completion: "
4050 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4051 			    hndl, status, subcode1, subcode2, (void *)rsp);
4052 			info[159] = 0;
4053 			(void) fct_port_shutdown(qlt->qlt_port,
4054 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4055 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4056 			return;
4057 		}
4058 
4059 		return;
4060 	}
4061 
4062 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4063 	if (cmd == NULL) {
4064 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4065 		/*
4066 		 * Why would this happen ??
4067 		 */
4068 		(void) snprintf(info, 160,
4069 		    "qlt_handle_unsol_els_abort_completion: can not get "
4070 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4071 		    (void *)rsp);
4072 		info[159] = 0;
4073 		(void) fct_port_shutdown(qlt->qlt_port,
4074 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4075 
4076 		return;
4077 	}
4078 
4079 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4080 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4081 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4082 
4083 	if (qcmd->dbuf != NULL) {
4084 		qlt_dmem_free(NULL, qcmd->dbuf);
4085 		qcmd->dbuf = NULL;
4086 	}
4087 
4088 	if (status == 0) {
4089 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4090 	} else if (status == 8) {
4091 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4092 	} else {
4093 		fct_cmd_fca_aborted(cmd,
4094 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4095 	}
4096 }
4097 
4098 static void
4099 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4100 {
4101 	char		info[160];
4102 	fct_cmd_t	*cmd;
4103 	fct_els_t	*els;
4104 	qlt_cmd_t	*qcmd;
4105 	uint32_t	hndl;
4106 	uint32_t	subcode1, subcode2;
4107 	uint16_t	status;
4108 
4109 	hndl = QMEM_RD32(qlt, rsp+4);
4110 	status = QMEM_RD16(qlt, rsp+8);
4111 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4112 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4113 
4114 	if (!CMD_HANDLE_VALID(hndl)) {
4115 		EL(qlt, "handle = %xh\n", hndl);
4116 		/*
4117 		 * This cannot happen for sol els completion.
4118 		 */
4119 		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
4120 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4121 		    hndl, status, subcode1, subcode2, (void *)rsp);
4122 		info[159] = 0;
4123 		(void) fct_port_shutdown(qlt->qlt_port,
4124 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4125 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4126 		return;
4127 	}
4128 
4129 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4130 	if (cmd == NULL) {
4131 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4132 		(void) snprintf(info, 160,
4133 		    "qlt_handle_sol_els_completion: can not "
4134 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4135 		    (void *)rsp);
4136 		info[159] = 0;
4137 		(void) fct_port_shutdown(qlt->qlt_port,
4138 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4139 
4140 		return;
4141 	}
4142 
4143 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
4144 	els = (fct_els_t *)cmd->cmd_specific;
4145 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4146 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
4147 
4148 	if (qcmd->flags & QLT_CMD_ABORTING) {
4149 		/*
4150 		 * We will handle it when the ABORT IO IOCB returns.
4151 		 */
4152 		return;
4153 	}
4154 
4155 	if (qcmd->dbuf != NULL) {
4156 		if (status == 0) {
4157 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4158 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4159 			    qcmd->param.resp_offset,
4160 			    els->els_resp_payload, els->els_resp_size);
4161 		}
4162 		qlt_dmem_free(NULL, qcmd->dbuf);
4163 		qcmd->dbuf = NULL;
4164 	}
4165 
4166 	if (status == 0) {
4167 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4168 	} else {
4169 		fct_send_cmd_done(cmd,
4170 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4171 	}
4172 }
4173 
4174 static void
4175 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
4176 {
4177 	fct_cmd_t	*cmd;
4178 	fct_sol_ct_t	*ct;
4179 	qlt_cmd_t	*qcmd;
4180 	uint32_t	 hndl;
4181 	uint16_t	 status;
4182 	char		 info[160];
4183 
4184 	hndl = QMEM_RD32(qlt, rsp+4);
4185 	status = QMEM_RD16(qlt, rsp+8);
4186 
4187 	if (!CMD_HANDLE_VALID(hndl)) {
4188 		EL(qlt, "handle = %xh\n", hndl);
4189 		/*
4190 		 * Solicited commands will always have a valid handle.
4191 		 */
4192 		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
4193 		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4194 		info[159] = 0;
4195 		(void) fct_port_shutdown(qlt->qlt_port,
4196 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4197 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4198 		return;
4199 	}
4200 
4201 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4202 	if (cmd == NULL) {
4203 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4204 		(void) snprintf(info, 160,
4205 		    "qlt_handle_ct_completion: cannot find "
4206 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4207 		    (void *)rsp);
4208 		info[159] = 0;
4209 		(void) fct_port_shutdown(qlt->qlt_port,
4210 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4211 
4212 		return;
4213 	}
4214 
4215 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4216 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4217 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
4218 
4219 	if (qcmd->flags & QLT_CMD_ABORTING) {
4220 		/*
4221 		 * We will handle it when ABORT IO IOCB returns;
4222 		 */
4223 		return;
4224 	}
4225 
4226 	ASSERT(qcmd->dbuf);
4227 	if (status == 0) {
4228 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4229 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4230 		    qcmd->param.resp_offset,
4231 		    ct->ct_resp_payload, ct->ct_resp_size);
4232 	}
4233 	qlt_dmem_free(NULL, qcmd->dbuf);
4234 	qcmd->dbuf = NULL;
4235 
4236 	if (status == 0) {
4237 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4238 	} else {
4239 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4240 	}
4241 }
4242 
4243 static void
4244 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
4245 {
4246 	fct_cmd_t	*cmd;
4247 	scsi_task_t	*task;
4248 	qlt_cmd_t	*qcmd;
4249 	stmf_data_buf_t	*dbuf;
4250 	fct_status_t	fc_st;
4251 	uint32_t	iof = 0;
4252 	uint32_t	hndl;
4253 	uint16_t	status;
4254 	uint16_t	flags;
4255 	uint8_t		abort_req;
4256 	uint8_t		n;
4257 	char		info[160];
4258 
4259 	/* XXX: Check validity of the IOCB by checking 4th byte. */
4260 	hndl = QMEM_RD32(qlt, rsp+4);
4261 	status = QMEM_RD16(qlt, rsp+8);
4262 	flags = QMEM_RD16(qlt, rsp+0x1a);
4263 	n = rsp[2];
4264 
4265 	if (!CMD_HANDLE_VALID(hndl)) {
4266 		EL(qlt, "handle = %xh\n", hndl);
4267 		ASSERT(hndl == 0);
4268 		/*
4269 		 * Someone has requested to abort it, but no one is waiting for
4270 		 * this completion.
4271 		 */
4272 		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
4273 		    (void *)rsp);
4274 		if ((status != 1) && (status != 2)) {
4275 			EL(qlt, "status = %xh\n", status);
4276 			/*
4277 			 * There could be exchange resource leakage, so
4278 			 * throw HBA fatal error event now
4279 			 */
4280 			(void) snprintf(info, 160,
4281 			    "qlt_handle_ctio_completion: hndl-"
4282 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4283 			info[159] = 0;
4284 			(void) fct_port_shutdown(qlt->qlt_port,
4285 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4286 
4287 		}
4288 
4289 		return;
4290 	}
4291 
4292 	if (flags & BIT_14) {
4293 		abort_req = 1;
4294 		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
4295 		    (void *)rsp);
4296 	} else {
4297 		abort_req = 0;
4298 	}
4299 
4300 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4301 	if (cmd == NULL) {
4302 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4303 		(void) snprintf(info, 160,
4304 		    "qlt_handle_ctio_completion: cannot find "
4305 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4306 		    (void *)rsp);
4307 		info[159] = 0;
4308 		(void) fct_port_shutdown(qlt->qlt_port,
4309 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4310 
4311 		return;
4312 	}
4313 
4314 	task = (scsi_task_t *)cmd->cmd_specific;
4315 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4316 	if (qcmd->dbuf_rsp_iu) {
4317 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
4318 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
4319 		qcmd->dbuf_rsp_iu = NULL;
4320 	}
4321 
4322 	if ((status == 1) || (status == 2)) {
4323 		if (abort_req) {
4324 			fc_st = FCT_ABORT_SUCCESS;
4325 			iof = FCT_IOF_FCA_DONE;
4326 		} else {
4327 			fc_st = FCT_SUCCESS;
4328 			if (flags & BIT_15) {
4329 				iof = FCT_IOF_FCA_DONE;
4330 			}
4331 		}
4332 	} else {
4333 		EL(qlt, "status = %xh\n", status);
4334 		if ((status == 8) && abort_req) {
4335 			fc_st = FCT_NOT_FOUND;
4336 			iof = FCT_IOF_FCA_DONE;
4337 		} else {
4338 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
4339 		}
4340 	}
4341 	dbuf = NULL;
4342 	if (((n & BIT_7) == 0) && (!abort_req)) {
4343 		/* A completion of data xfer */
4344 		if (n == 0) {
4345 			dbuf = qcmd->dbuf;
4346 		} else {
4347 			dbuf = stmf_handle_to_buf(task, n);
4348 		}
4349 
4350 		ASSERT(dbuf != NULL);
4351 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
4352 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
4353 		if (flags & BIT_15) {
4354 			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
4355 			    DB_STATUS_GOOD_SENT);
4356 		}
4357 
4358 		dbuf->db_xfer_status = fc_st;
4359 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
4360 		return;
4361 	}
4362 	if (!abort_req) {
4363 		/*
4364 		 * This was just a pure status xfer.
4365 		 */
4366 		fct_send_response_done(cmd, fc_st, iof);
4367 		return;
4368 	}
4369 
4370 	fct_cmd_fca_aborted(cmd, fc_st, iof);
4371 }
4372 
4373 static void
4374 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4375 {
4376 	char		info[80];
4377 	fct_cmd_t	*cmd;
4378 	qlt_cmd_t	*qcmd;
4379 	uint32_t	h;
4380 	uint16_t	status;
4381 
4382 	h = QMEM_RD32(qlt, rsp+4);
4383 	status = QMEM_RD16(qlt, rsp+8);
4384 
4385 	if (!CMD_HANDLE_VALID(h)) {
4386 		EL(qlt, "handle = %xh\n", h);
4387 		/*
4388 		 * Solicited commands always have a valid handle.
4389 		 */
4390 		(void) snprintf(info, 80,
4391 		    "qlt_handle_sol_abort_completion: hndl-"
4392 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
4393 		info[79] = 0;
4394 		(void) fct_port_shutdown(qlt->qlt_port,
4395 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4396 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4397 		return;
4398 	}
4399 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4400 	if (cmd == NULL) {
4401 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
4402 		/*
4403 		 * What happened to the cmd ??
4404 		 */
4405 		(void) snprintf(info, 80,
4406 		    "qlt_handle_sol_abort_completion: cannot "
4407 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4408 		    (void *)rsp);
4409 		info[79] = 0;
4410 		(void) fct_port_shutdown(qlt->qlt_port,
4411 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4412 
4413 		return;
4414 	}
4415 
4416 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4417 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
4418 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4419 	if (qcmd->dbuf != NULL) {
4420 		qlt_dmem_free(NULL, qcmd->dbuf);
4421 		qcmd->dbuf = NULL;
4422 	}
4423 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4424 	if (status == 0) {
4425 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4426 	} else if (status == 0x31) {
4427 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4428 	} else {
4429 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4430 	}
4431 }
4432 
4433 static void
4434 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
4435 {
4436 	qlt_abts_cmd_t	*qcmd;
4437 	fct_cmd_t	*cmd;
4438 	uint32_t	remote_portid;
4439 	char		info[160];
4440 
4441 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4442 	    ((uint32_t)(resp[0x1A])) << 16;
4443 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4444 	    sizeof (qlt_abts_cmd_t), 0);
4445 	if (cmd == NULL) {
4446 		EL(qlt, "fct_alloc cmd==NULL\n");
4447 		(void) snprintf(info, 160,
4448 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
4449 		    "allocate space for fct_cmd", (void *)qlt);
4450 		info[159] = 0;
4451 		(void) fct_port_shutdown(qlt->qlt_port,
4452 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4453 		return;
4454 	}
4455 
4456 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
4457 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
4458 	bcopy(resp, qcmd->buf, IOCB_SIZE);
4459 	cmd->cmd_port = qlt->qlt_port;
4460 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4461 	if (cmd->cmd_rp_handle == 0xFFFF)
4462 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4463 
4464 	cmd->cmd_rportid = remote_portid;
4465 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4466 	    ((uint32_t)(resp[0x16])) << 16;
4467 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4468 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
4469 	fct_post_rcvd_cmd(cmd, 0);
4470 }
4471 
4472 static void
4473 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4474 {
4475 	uint16_t status;
4476 	char	info[80];
4477 
4478 	status = QMEM_RD16(qlt, resp+8);
4479 
4480 	if ((status == 0) || (status == 5)) {
4481 		return;
4482 	}
4483 	EL(qlt, "status = %xh\n", status);
4484 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4485 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4486 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4487 	info[79] = 0;
4488 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4489 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4490 }
4491 
4492 #ifdef	DEBUG
4493 uint32_t qlt_drop_abort_counter = 0;
4494 #endif
4495 
4496 fct_status_t
4497 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4498 {
4499 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4500 
4501 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4502 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4503 		return (FCT_NOT_FOUND);
4504 	}
4505 
4506 #ifdef DEBUG
4507 	if (qlt_drop_abort_counter > 0) {
4508 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4509 			return (FCT_SUCCESS);
4510 	}
4511 #endif
4512 
4513 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4514 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4515 	}
4516 
4517 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4518 		cmd->cmd_handle = 0;
4519 	}
4520 
4521 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4522 		return (qlt_send_abts_response(qlt, cmd, 1));
4523 	}
4524 
4525 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4526 		return (qlt_abort_purex(qlt, cmd));
4527 	}
4528 
4529 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4530 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4531 		return (qlt_abort_sol_cmd(qlt, cmd));
4532 	}
4533 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4534 
4535 	ASSERT(0);
4536 	return (FCT_FAILURE);
4537 }
4538 
4539 fct_status_t
4540 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4541 {
4542 	uint8_t *req;
4543 	qlt_cmd_t *qcmd;
4544 
4545 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4546 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4547 	EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
4548 
4549 	mutex_enter(&qlt->req_lock);
4550 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4551 	if (req == NULL) {
4552 		mutex_exit(&qlt->req_lock);
4553 
4554 		return (FCT_BUSY);
4555 	}
4556 	bzero(req, IOCB_SIZE);
4557 	req[0] = 0x33; req[1] = 1;
4558 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4559 	if (cmd->cmd_rp) {
4560 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4561 	} else {
4562 		QMEM_WR16(qlt, req+8, 0xFFFF);
4563 	}
4564 
4565 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4566 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4567 	qlt_submit_req_entries(qlt, 1);
4568 	mutex_exit(&qlt->req_lock);
4569 
4570 	return (FCT_SUCCESS);
4571 }
4572 
4573 fct_status_t
4574 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4575 {
4576 	uint8_t *req;
4577 	qlt_cmd_t *qcmd;
4578 	fct_els_t *els;
4579 	uint8_t elsop, req1f;
4580 
4581 	els = (fct_els_t *)cmd->cmd_specific;
4582 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4583 	elsop = els->els_req_payload[0];
4584 	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
4585 	    elsop);
4586 	req1f = 0x60;	/* Terminate xchg */
4587 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4588 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4589 		req1f = (uint8_t)(req1f | BIT_4);
4590 	}
4591 
4592 	mutex_enter(&qlt->req_lock);
4593 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4594 	if (req == NULL) {
4595 		mutex_exit(&qlt->req_lock);
4596 
4597 		return (FCT_BUSY);
4598 	}
4599 
4600 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4601 	bzero(req, IOCB_SIZE);
4602 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4603 	req[0x16] = elsop; req[0x1f] = req1f;
4604 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4605 	if (cmd->cmd_rp) {
4606 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4607 		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
4608 	} else {
4609 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4610 		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
4611 	}
4612 
4613 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4614 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4615 	qlt_submit_req_entries(qlt, 1);
4616 	mutex_exit(&qlt->req_lock);
4617 
4618 	return (FCT_SUCCESS);
4619 }
4620 
4621 fct_status_t
4622 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4623 {
4624 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4625 	uint8_t *req;
4626 	uint16_t flags;
4627 
4628 	flags = (uint16_t)(BIT_14 |
4629 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
4630 	EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4631 
4632 	mutex_enter(&qlt->req_lock);
4633 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4634 	if (req == NULL) {
4635 		mutex_exit(&qlt->req_lock);
4636 
4637 		return (FCT_BUSY);
4638 	}
4639 
4640 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4641 	bzero(req, IOCB_SIZE);
4642 	req[0] = 0x12; req[1] = 0x1;
4643 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4644 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4645 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4646 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4647 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4648 	QMEM_WR16(qlt, req+0x1A, flags);
4649 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4650 	qlt_submit_req_entries(qlt, 1);
4651 	mutex_exit(&qlt->req_lock);
4652 
4653 	return (FCT_SUCCESS);
4654 }
4655 
4656 fct_status_t
4657 qlt_send_cmd(fct_cmd_t *cmd)
4658 {
4659 	qlt_state_t *qlt;
4660 
4661 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4662 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4663 		return (qlt_send_els(qlt, cmd));
4664 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4665 		return (qlt_send_ct(qlt, cmd));
4666 	}
4667 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4668 
4669 	ASSERT(0);
4670 	return (FCT_FAILURE);
4671 }
4672 
4673 fct_status_t
4674 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4675 {
4676 	uint8_t *req;
4677 	fct_els_t *els;
4678 	qlt_cmd_t *qcmd;
4679 	stmf_data_buf_t *buf;
4680 	qlt_dmem_bctl_t *bctl;
4681 	uint32_t sz, minsz;
4682 
4683 	els = (fct_els_t *)cmd->cmd_specific;
4684 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4685 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4686 	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
4687 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4688 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4689 	if (buf == NULL) {
4690 		return (FCT_BUSY);
4691 	}
4692 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4693 
4694 	qcmd->dbuf = buf;
4695 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4696 	    els->els_req_size);
4697 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4698 
4699 	mutex_enter(&qlt->req_lock);
4700 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4701 	if (req == NULL) {
4702 		qlt_dmem_free(NULL, buf);
4703 		mutex_exit(&qlt->req_lock);
4704 		return (FCT_BUSY);
4705 	}
4706 	bzero(req, IOCB_SIZE);
4707 	req[0] = 0x53; req[1] = 1;
4708 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4709 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4710 	QMEM_WR16(qlt, (&req[0xC]), 1);
4711 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4712 	QMEM_WR16(qlt, (&req[0x14]), 1);
4713 	req[0x16] = els->els_req_payload[0];
4714 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4715 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
4716 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
4717 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
4718 	}
4719 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4720 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4721 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4722 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4723 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4724 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4725 	    qcmd->param.resp_offset));
4726 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4727 	qlt_submit_req_entries(qlt, 1);
4728 	mutex_exit(&qlt->req_lock);
4729 
4730 	return (FCT_SUCCESS);
4731 }
4732 
4733 fct_status_t
4734 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4735 {
4736 	uint8_t *req;
4737 	fct_sol_ct_t *ct;
4738 	qlt_cmd_t *qcmd;
4739 	stmf_data_buf_t *buf;
4740 	qlt_dmem_bctl_t *bctl;
4741 	uint32_t sz, minsz;
4742 
4743 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4744 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4745 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4746 	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
4747 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4748 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4749 	if (buf == NULL) {
4750 		return (FCT_BUSY);
4751 	}
4752 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4753 
4754 	qcmd->dbuf = buf;
4755 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4756 	    ct->ct_req_size);
4757 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4758 
4759 	mutex_enter(&qlt->req_lock);
4760 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4761 	if (req == NULL) {
4762 		qlt_dmem_free(NULL, buf);
4763 		mutex_exit(&qlt->req_lock);
4764 		return (FCT_BUSY);
4765 	}
4766 	bzero(req, IOCB_SIZE);
4767 	req[0] = 0x29; req[1] = 1;
4768 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4769 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4770 	QMEM_WR16(qlt, (&req[0xC]), 1);
4771 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4772 	QMEM_WR16(qlt, (&req[0x14]), 1);
4773 
4774 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4775 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4776 
4777 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4778 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4779 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4780 	    qcmd->param.resp_offset));		/* RESPONSE DSD */
4781 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4782 
4783 	qlt_submit_req_entries(qlt, 1);
4784 	mutex_exit(&qlt->req_lock);
4785 
4786 	return (FCT_SUCCESS);
4787 }
4788 
4789 
4790 /*
4791  * All QLT_FIRMWARE_* will mainly be handled in this function
4792  * It can not be called in interrupt context
4793  *
4794  * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4795  * and qlt_ioctl_lock
4796  */
4797 static fct_status_t
4798 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4799 {
4800 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4801 	int		i;
4802 	int		retries, n;
4803 	uint_t		size_left;
4804 	char		c = ' ';
4805 	uint32_t	addr, endaddr, words_to_read;
4806 	caddr_t		buf;
4807 	fct_status_t	ret;
4808 
4809 	mutex_enter(&qlt->qlt_ioctl_lock);
4810 	/*
4811 	 * To make sure that there's no outstanding dumping task
4812 	 */
4813 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4814 		mutex_exit(&qlt->qlt_ioctl_lock);
4815 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4816 		    qlt->qlt_ioctl_flags);
4817 		EL(qlt, "outstanding\n");
4818 		return (FCT_FAILURE);
4819 	}
4820 
4821 	/*
4822 	 * To make sure not to overwrite existing dump
4823 	 */
4824 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4825 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4826 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4827 		/*
4828 		 * If we have alreay one dump, but it's not triggered by user
4829 		 * and the user hasn't fetched it, we shouldn't dump again.
4830 		 */
4831 		mutex_exit(&qlt->qlt_ioctl_lock);
4832 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4833 		    qlt->qlt_ioctl_flags);
4834 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
4835 		    "is one already outstanding.", qlt->instance);
4836 		return (FCT_FAILURE);
4837 	}
4838 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
4839 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
4840 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
4841 	} else {
4842 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
4843 	}
4844 	mutex_exit(&qlt->qlt_ioctl_lock);
4845 
4846 	size_left = QLT_FWDUMP_BUFSIZE;
4847 	if (!qlt->qlt_fwdump_buf) {
4848 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
4849 		/*
4850 		 * It's the only place that we allocate buf for dumping. After
4851 		 * it's allocated, we will use it until the port is detached.
4852 		 */
4853 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
4854 	}
4855 
4856 	/*
4857 	 * Start to dump firmware
4858 	 */
4859 	buf = (caddr_t)qlt->qlt_fwdump_buf;
4860 
4861 	/*
4862 	 * Print the ISP firmware revision number and attributes information
4863 	 * Read the RISC to Host Status register
4864 	 */
4865 	n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
4866 	    "Attributes %04x\n\nR2H Status Register\n%08x",
4867 	    qlt->fw_major, qlt->fw_minor,
4868 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
4869 	buf += n; size_left -= n;
4870 
4871 	/*
4872 	 * Before pausing the RISC, make sure no mailbox can execute
4873 	 */
4874 	mutex_enter(&qlt->mbox_lock);
4875 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
4876 		/*
4877 		 * Wait to grab the mailboxes
4878 		 */
4879 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
4880 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
4881 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
4882 			    ddi_get_lbolt() + drv_usectohz(1000000));
4883 			if (retries > 5) {
4884 				mutex_exit(&qlt->mbox_lock);
4885 				EL(qlt, "can't drain out mailbox commands\n");
4886 				goto dump_fail;
4887 			}
4888 		}
4889 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
4890 		cv_broadcast(&qlt->mbox_cv);
4891 	}
4892 	mutex_exit(&qlt->mbox_lock);
4893 
4894 	/*
4895 	 * Pause the RISC processor
4896 	 */
4897 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
4898 
4899 	/*
4900 	 * Wait for the RISC processor to pause
4901 	 */
4902 	for (i = 0; i < 200; i++) {
4903 		if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
4904 			break;
4905 		}
4906 		drv_usecwait(1000);
4907 	}
4908 	if (i == 200) {
4909 		EL(qlt, "can't pause\n");
4910 		return (FCT_FAILURE);
4911 	}
4912 
4913 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
4914 		goto over_25xx_specific_dump;
4915 	}
4916 	n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
4917 	buf += n; size_left -= n;
4918 	REG_WR32(qlt, 0x54, 0x7000);
4919 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4920 	buf += n; size_left -= n;
4921 	REG_WR32(qlt, 0x54, 0x7010);
4922 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4923 	buf += n; size_left -= n;
4924 	REG_WR32(qlt, 0x54, 0x7C00);
4925 
4926 	n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
4927 	buf += n; size_left -= n;
4928 	REG_WR32(qlt, 0xC0, 0x1);
4929 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
4930 	buf += n; size_left -= n;
4931 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
4932 	buf += n; size_left -= n;
4933 	REG_WR32(qlt, 0xC0, 0x0);
4934 
4935 over_25xx_specific_dump:;
4936 	n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
4937 	buf += n; size_left -= n;
4938 	/*
4939 	 * Capture data from 32 regsiters
4940 	 */
4941 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
4942 	buf += n; size_left -= n;
4943 
4944 	/*
4945 	 * Disable interrupts
4946 	 */
4947 	REG_WR32(qlt, 0xc, 0);
4948 
4949 	/*
4950 	 * Shadow registers
4951 	 */
4952 	n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
4953 	buf += n; size_left -= n;
4954 
4955 	REG_WR32(qlt, 0x54, 0xF70);
4956 	addr = 0xb0000000;
4957 	for (i = 0; i < 0xb; i++) {
4958 		if ((!qlt->qlt_25xx_chip) &&
4959 		    (!qlt->qlt_81xx_chip) &&
4960 		    (i >= 7)) {
4961 			break;
4962 		}
4963 		if (i && ((i & 7) == 0)) {
4964 			n = (int)snprintf(buf, size_left, "\n");
4965 			buf += n; size_left -= n;
4966 		}
4967 		REG_WR32(qlt, 0xF0, addr);
4968 		n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
4969 		buf += n; size_left -= n;
4970 		addr += 0x100000;
4971 	}
4972 
4973 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
4974 		REG_WR32(qlt, 0x54, 0x10);
4975 		n = (int)snprintf(buf, size_left,
4976 		    "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
4977 		buf += n; size_left -= n;
4978 	}
4979 
4980 	/*
4981 	 * Mailbox registers
4982 	 */
4983 	n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
4984 	buf += n; size_left -= n;
4985 	for (i = 0; i < 32; i += 2) {
4986 		if ((i + 2) & 15) {
4987 			c = ' ';
4988 		} else {
4989 			c = '\n';
4990 		}
4991 		n = (int)snprintf(buf, size_left, "%04x %04x%c",
4992 		    REG_RD16(qlt, 0x80 + (i << 1)),
4993 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
4994 		buf += n; size_left -= n;
4995 	}
4996 
4997 	/*
4998 	 * Transfer sequence registers
4999 	 */
5000 	n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
5001 	buf += n; size_left -= n;
5002 
5003 	REG_WR32(qlt, 0x54, 0xBF00);
5004 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5005 	buf += n; size_left -= n;
5006 	REG_WR32(qlt, 0x54, 0xBF10);
5007 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5008 	buf += n; size_left -= n;
5009 	REG_WR32(qlt, 0x54, 0xBF20);
5010 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5011 	buf += n; size_left -= n;
5012 	REG_WR32(qlt, 0x54, 0xBF30);
5013 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5014 	buf += n; size_left -= n;
5015 	REG_WR32(qlt, 0x54, 0xBF40);
5016 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5017 	buf += n; size_left -= n;
5018 	REG_WR32(qlt, 0x54, 0xBF50);
5019 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5020 	buf += n; size_left -= n;
5021 	REG_WR32(qlt, 0x54, 0xBF60);
5022 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5023 	buf += n; size_left -= n;
5024 	REG_WR32(qlt, 0x54, 0xBF70);
5025 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5026 	buf += n; size_left -= n;
5027 	n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5028 	buf += n; size_left -= n;
5029 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5030 		REG_WR32(qlt, 0x54, 0xBFC0);
5031 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5032 		buf += n; size_left -= n;
5033 		REG_WR32(qlt, 0x54, 0xBFD0);
5034 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5035 		buf += n; size_left -= n;
5036 	}
5037 	REG_WR32(qlt, 0x54, 0xBFE0);
5038 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5039 	buf += n; size_left -= n;
5040 	n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5041 	buf += n; size_left -= n;
5042 	REG_WR32(qlt, 0x54, 0xBFF0);
5043 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5044 	buf += n; size_left -= n;
5045 
5046 	/*
5047 	 * Receive sequence registers
5048 	 */
5049 	n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5050 	buf += n; size_left -= n;
5051 	REG_WR32(qlt, 0x54, 0xFF00);
5052 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5053 	buf += n; size_left -= n;
5054 	REG_WR32(qlt, 0x54, 0xFF10);
5055 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5056 	buf += n; size_left -= n;
5057 	REG_WR32(qlt, 0x54, 0xFF20);
5058 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5059 	buf += n; size_left -= n;
5060 	REG_WR32(qlt, 0x54, 0xFF30);
5061 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5062 	buf += n; size_left -= n;
5063 	REG_WR32(qlt, 0x54, 0xFF40);
5064 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5065 	buf += n; size_left -= n;
5066 	REG_WR32(qlt, 0x54, 0xFF50);
5067 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5068 	buf += n; size_left -= n;
5069 	REG_WR32(qlt, 0x54, 0xFF60);
5070 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5071 	buf += n; size_left -= n;
5072 	REG_WR32(qlt, 0x54, 0xFF70);
5073 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5074 	buf += n; size_left -= n;
5075 	n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5076 	buf += n; size_left -= n;
5077 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5078 		REG_WR32(qlt, 0x54, 0xFFC0);
5079 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5080 		buf += n; size_left -= n;
5081 	}
5082 	REG_WR32(qlt, 0x54, 0xFFD0);
5083 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5084 	buf += n; size_left -= n;
5085 	n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5086 	buf += n; size_left -= n;
5087 	REG_WR32(qlt, 0x54, 0xFFE0);
5088 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5089 	buf += n; size_left -= n;
5090 	n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5091 	buf += n; size_left -= n;
5092 	REG_WR32(qlt, 0x54, 0xFFF0);
5093 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5094 	buf += n; size_left -= n;
5095 
5096 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5097 		goto over_aseq_regs;
5098 
5099 	/*
5100 	 * Auxiliary sequencer registers
5101 	 */
5102 	n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5103 	buf += n; size_left -= n;
5104 	REG_WR32(qlt, 0x54, 0xB000);
5105 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5106 	buf += n; size_left -= n;
5107 	REG_WR32(qlt, 0x54, 0xB010);
5108 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5109 	buf += n; size_left -= n;
5110 	REG_WR32(qlt, 0x54, 0xB020);
5111 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5112 	buf += n; size_left -= n;
5113 	REG_WR32(qlt, 0x54, 0xB030);
5114 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5115 	buf += n; size_left -= n;
5116 	REG_WR32(qlt, 0x54, 0xB040);
5117 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5118 	buf += n; size_left -= n;
5119 	REG_WR32(qlt, 0x54, 0xB050);
5120 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5121 	buf += n; size_left -= n;
5122 	REG_WR32(qlt, 0x54, 0xB060);
5123 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5124 	buf += n; size_left -= n;
5125 	REG_WR32(qlt, 0x54, 0xB070);
5126 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5127 	buf += n; size_left -= n;
5128 	n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5129 	buf += n; size_left -= n;
5130 	REG_WR32(qlt, 0x54, 0xB0C0);
5131 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5132 	buf += n; size_left -= n;
5133 	REG_WR32(qlt, 0x54, 0xB0D0);
5134 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5135 	buf += n; size_left -= n;
5136 	n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5137 	buf += n; size_left -= n;
5138 	REG_WR32(qlt, 0x54, 0xB0E0);
5139 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5140 	buf += n; size_left -= n;
5141 	n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5142 	buf += n; size_left -= n;
5143 	REG_WR32(qlt, 0x54, 0xB0F0);
5144 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5145 	buf += n; size_left -= n;
5146 
5147 over_aseq_regs:;
5148 
5149 	/*
5150 	 * Command DMA registers
5151 	 */
5152 	n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5153 	buf += n; size_left -= n;
5154 	REG_WR32(qlt, 0x54, 0x7100);
5155 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5156 	buf += n; size_left -= n;
5157 
5158 	/*
5159 	 * Queues
5160 	 */
5161 	n = (int)snprintf(buf, size_left,
5162 	    "\nRequest0 Queue DMA Channel registers\n");
5163 	buf += n; size_left -= n;
5164 	REG_WR32(qlt, 0x54, 0x7200);
5165 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5166 	buf += n; size_left -= n;
5167 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5168 	buf += n; size_left -= n;
5169 
5170 	n = (int)snprintf(buf, size_left,
5171 	    "\n\nResponse0 Queue DMA Channel registers\n");
5172 	buf += n; size_left -= n;
5173 	REG_WR32(qlt, 0x54, 0x7300);
5174 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5175 	buf += n; size_left -= n;
5176 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5177 	buf += n; size_left -= n;
5178 
5179 	n = (int)snprintf(buf, size_left,
5180 	    "\n\nRequest1 Queue DMA Channel registers\n");
5181 	buf += n; size_left -= n;
5182 	REG_WR32(qlt, 0x54, 0x7400);
5183 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5184 	buf += n; size_left -= n;
5185 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5186 	buf += n; size_left -= n;
5187 
5188 	/*
5189 	 * Transmit DMA registers
5190 	 */
5191 	n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5192 	buf += n; size_left -= n;
5193 	REG_WR32(qlt, 0x54, 0x7600);
5194 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5195 	buf += n; size_left -= n;
5196 	REG_WR32(qlt, 0x54, 0x7610);
5197 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5198 	buf += n; size_left -= n;
5199 	n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5200 	buf += n; size_left -= n;
5201 	REG_WR32(qlt, 0x54, 0x7620);
5202 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5203 	buf += n; size_left -= n;
5204 	REG_WR32(qlt, 0x54, 0x7630);
5205 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5206 	buf += n; size_left -= n;
5207 	n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5208 	buf += n; size_left -= n;
5209 	REG_WR32(qlt, 0x54, 0x7640);
5210 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5211 	buf += n; size_left -= n;
5212 	REG_WR32(qlt, 0x54, 0x7650);
5213 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5214 	buf += n; size_left -= n;
5215 	n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5216 	buf += n; size_left -= n;
5217 	REG_WR32(qlt, 0x54, 0x7660);
5218 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5219 	buf += n; size_left -= n;
5220 	REG_WR32(qlt, 0x54, 0x7670);
5221 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5222 	buf += n; size_left -= n;
5223 	n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5224 	buf += n; size_left -= n;
5225 	REG_WR32(qlt, 0x54, 0x7680);
5226 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5227 	buf += n; size_left -= n;
5228 	REG_WR32(qlt, 0x54, 0x7690);
5229 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5230 	buf += n; size_left -= n;
5231 	n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5232 	buf += n; size_left -= n;
5233 	REG_WR32(qlt, 0x54, 0x76A0);
5234 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5235 	buf += n; size_left -= n;
5236 
5237 	/*
5238 	 * Receive DMA registers
5239 	 */
5240 	n = (int)snprintf(buf, size_left,
5241 	    "\nRCV Thread 0 Data DMA registers\n");
5242 	buf += n; size_left -= n;
5243 	REG_WR32(qlt, 0x54, 0x7700);
5244 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5245 	buf += n; size_left -= n;
5246 	REG_WR32(qlt, 0x54, 0x7710);
5247 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5248 	buf += n; size_left -= n;
5249 	n = (int)snprintf(buf, size_left,
5250 	    "\nRCV Thread 1 Data DMA registers\n");
5251 	buf += n; size_left -= n;
5252 	REG_WR32(qlt, 0x54, 0x7720);
5253 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5254 	buf += n; size_left -= n;
5255 	REG_WR32(qlt, 0x54, 0x7730);
5256 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5257 	buf += n; size_left -= n;
5258 
5259 	/*
5260 	 * RISC registers
5261 	 */
5262 	n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5263 	buf += n; size_left -= n;
5264 	REG_WR32(qlt, 0x54, 0x0F00);
5265 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5266 	buf += n; size_left -= n;
5267 	REG_WR32(qlt, 0x54, 0x0F10);
5268 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5269 	buf += n; size_left -= n;
5270 	REG_WR32(qlt, 0x54, 0x0F20);
5271 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5272 	buf += n; size_left -= n;
5273 	REG_WR32(qlt, 0x54, 0x0F30);
5274 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5275 	buf += n; size_left -= n;
5276 	REG_WR32(qlt, 0x54, 0x0F40);
5277 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5278 	buf += n; size_left -= n;
5279 	REG_WR32(qlt, 0x54, 0x0F50);
5280 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5281 	buf += n; size_left -= n;
5282 	REG_WR32(qlt, 0x54, 0x0F60);
5283 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5284 	buf += n; size_left -= n;
5285 	REG_WR32(qlt, 0x54, 0x0F70);
5286 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5287 	buf += n; size_left -= n;
5288 
5289 	/*
5290 	 * Local memory controller registers
5291 	 */
5292 	n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5293 	buf += n; size_left -= n;
5294 	REG_WR32(qlt, 0x54, 0x3000);
5295 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5296 	buf += n; size_left -= n;
5297 	REG_WR32(qlt, 0x54, 0x3010);
5298 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5299 	buf += n; size_left -= n;
5300 	REG_WR32(qlt, 0x54, 0x3020);
5301 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5302 	buf += n; size_left -= n;
5303 	REG_WR32(qlt, 0x54, 0x3030);
5304 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5305 	buf += n; size_left -= n;
5306 	REG_WR32(qlt, 0x54, 0x3040);
5307 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5308 	buf += n; size_left -= n;
5309 	REG_WR32(qlt, 0x54, 0x3050);
5310 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5311 	buf += n; size_left -= n;
5312 	REG_WR32(qlt, 0x54, 0x3060);
5313 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5314 	buf += n; size_left -= n;
5315 
5316 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5317 		REG_WR32(qlt, 0x54, 0x3070);
5318 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5319 		buf += n; size_left -= n;
5320 	}
5321 
5322 	/*
5323 	 * Fibre protocol module regsiters
5324 	 */
5325 	n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5326 	buf += n; size_left -= n;
5327 	REG_WR32(qlt, 0x54, 0x4000);
5328 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5329 	buf += n; size_left -= n;
5330 	REG_WR32(qlt, 0x54, 0x4010);
5331 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5332 	buf += n; size_left -= n;
5333 	REG_WR32(qlt, 0x54, 0x4020);
5334 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5335 	buf += n; size_left -= n;
5336 	REG_WR32(qlt, 0x54, 0x4030);
5337 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5338 	buf += n; size_left -= n;
5339 	REG_WR32(qlt, 0x54, 0x4040);
5340 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5341 	buf += n; size_left -= n;
5342 	REG_WR32(qlt, 0x54, 0x4050);
5343 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5344 	buf += n; size_left -= n;
5345 	REG_WR32(qlt, 0x54, 0x4060);
5346 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5347 	buf += n; size_left -= n;
5348 	REG_WR32(qlt, 0x54, 0x4070);
5349 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5350 	buf += n; size_left -= n;
5351 	REG_WR32(qlt, 0x54, 0x4080);
5352 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5353 	buf += n; size_left -= n;
5354 	REG_WR32(qlt, 0x54, 0x4090);
5355 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5356 	buf += n; size_left -= n;
5357 	REG_WR32(qlt, 0x54, 0x40A0);
5358 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5359 	buf += n; size_left -= n;
5360 	REG_WR32(qlt, 0x54, 0x40B0);
5361 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5362 	buf += n; size_left -= n;
5363 	if (qlt->qlt_81xx_chip) {
5364 		REG_WR32(qlt, 0x54, 0x40C0);
5365 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5366 		buf += n; size_left -= n;
5367 		REG_WR32(qlt, 0x54, 0x40D0);
5368 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5369 		buf += n; size_left -= n;
5370 	}
5371 
5372 	/*
5373 	 * Fibre buffer registers
5374 	 */
5375 	n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5376 	buf += n; size_left -= n;
5377 	REG_WR32(qlt, 0x54, 0x6000);
5378 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5379 	buf += n; size_left -= n;
5380 	REG_WR32(qlt, 0x54, 0x6010);
5381 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5382 	buf += n; size_left -= n;
5383 	REG_WR32(qlt, 0x54, 0x6020);
5384 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5385 	buf += n; size_left -= n;
5386 	REG_WR32(qlt, 0x54, 0x6030);
5387 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5388 	buf += n; size_left -= n;
5389 	REG_WR32(qlt, 0x54, 0x6040);
5390 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5391 	buf += n; size_left -= n;
5392 	REG_WR32(qlt, 0x54, 0x6100);
5393 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5394 	buf += n; size_left -= n;
5395 	REG_WR32(qlt, 0x54, 0x6130);
5396 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5397 	buf += n; size_left -= n;
5398 	REG_WR32(qlt, 0x54, 0x6150);
5399 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5400 	buf += n; size_left -= n;
5401 	REG_WR32(qlt, 0x54, 0x6170);
5402 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5403 	buf += n; size_left -= n;
5404 	REG_WR32(qlt, 0x54, 0x6190);
5405 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5406 	buf += n; size_left -= n;
5407 	REG_WR32(qlt, 0x54, 0x61B0);
5408 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5409 	buf += n; size_left -= n;
5410 	if (qlt->qlt_81xx_chip) {
5411 		REG_WR32(qlt, 0x54, 0x61C0);
5412 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5413 		buf += n; size_left -= n;
5414 	}
5415 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5416 		REG_WR32(qlt, 0x54, 0x6F00);
5417 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5418 		buf += n; size_left -= n;
5419 	}
5420 
5421 	qlt->intr_sneak_counter = 10;
5422 	qlt_disable_intr(qlt);
5423 	mutex_enter(&qlt->intr_lock);
5424 	qlt->qlt_intr_enabled = 0;
5425 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
5426 	drv_usecwait(20);
5427 	qlt->intr_sneak_counter = 0;
5428 	mutex_exit(&qlt->intr_lock);
5429 
5430 	/*
5431 	 * Memory
5432 	 */
5433 	n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5434 	buf += n; size_left -= n;
5435 
5436 	addr = 0x20000;
5437 	endaddr = 0x22000;
5438 	words_to_read = 0;
5439 	while (addr < endaddr) {
5440 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5441 		if ((words_to_read + addr) > endaddr) {
5442 			words_to_read = endaddr - addr;
5443 		}
5444 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5445 		    QLT_SUCCESS) {
5446 			EL(qlt, "Error reading risc ram - CODE RAM status="
5447 			    "%llxh\n", ret);
5448 			goto dump_fail;
5449 		}
5450 
5451 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5452 		buf += n; size_left -= n;
5453 
5454 		if (size_left < 100000) {
5455 			EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5456 			    size_left);
5457 			goto dump_ok;
5458 		}
5459 		addr += words_to_read;
5460 	}
5461 
5462 	n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5463 	buf += n; size_left -= n;
5464 
5465 	addr = 0x100000;
5466 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5467 	endaddr++;
5468 	if (endaddr & 7) {
5469 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
5470 	}
5471 
5472 	words_to_read = 0;
5473 	while (addr < endaddr) {
5474 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5475 		if ((words_to_read + addr) > endaddr) {
5476 			words_to_read = endaddr - addr;
5477 		}
5478 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5479 		    QLT_SUCCESS) {
5480 			EL(qlt, "Error reading risc ram - EXT RAM status="
5481 			    "%llxh\n", ret);
5482 			goto dump_fail;
5483 		}
5484 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5485 		buf += n; size_left -= n;
5486 		if (size_left < 100000) {
5487 			EL(qlt, "run out of space - EXT RAM\n");
5488 			goto dump_ok;
5489 		}
5490 		addr += words_to_read;
5491 	}
5492 
5493 	/*
5494 	 * Label the end tag
5495 	 */
5496 	n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5497 	buf += n; size_left -= n;
5498 
5499 	/*
5500 	 * Queue dumping
5501 	 */
5502 	n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5503 	buf += n; size_left -= n;
5504 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5505 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
5506 	buf += n; size_left -= n;
5507 
5508 	n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5509 	buf += n; size_left -= n;
5510 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5511 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5512 	buf += n; size_left -= n;
5513 
5514 	n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5515 	buf += n; size_left -= n;
5516 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5517 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5518 	buf += n; size_left -= n;
5519 
5520 	n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5521 	buf += n; size_left -= n;
5522 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5523 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5524 	buf += n; size_left -= n;
5525 
5526 	/*
5527 	 * Label dump reason
5528 	 */
5529 	n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5530 	    qlt->qlt_port_alias, ssci->st_additional_info);
5531 	buf += n; size_left -= n;
5532 
5533 dump_ok:
5534 	EL(qlt, "left-%d\n", size_left);
5535 
5536 	mutex_enter(&qlt->qlt_ioctl_lock);
5537 	qlt->qlt_ioctl_flags &=
5538 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5539 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5540 	mutex_exit(&qlt->qlt_ioctl_lock);
5541 	return (FCT_SUCCESS);
5542 
5543 dump_fail:
5544 	EL(qlt, "dump not done\n");
5545 	mutex_enter(&qlt->qlt_ioctl_lock);
5546 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5547 	mutex_exit(&qlt->qlt_ioctl_lock);
5548 	return (FCT_FAILURE);
5549 }
5550 
5551 static int
5552 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5553     uint_t size_left)
5554 {
5555 	int		i;
5556 	int		n;
5557 	char		c = ' ';
5558 
5559 	for (i = 0, n = 0; i < count; i++) {
5560 		if ((i + 1) & 7) {
5561 			c = ' ';
5562 		} else {
5563 			c = '\n';
5564 		}
5565 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5566 		    "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5567 	}
5568 	return (n);
5569 }
5570 
5571 static int
5572 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5573     caddr_t buf, uint_t size_left)
5574 {
5575 	int		i;
5576 	int		n;
5577 	char		c = ' ';
5578 	uint32_t	*ptr;
5579 
5580 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5581 	for (i = 0, n = 0; i < words; i++) {
5582 		if ((i & 7) == 0) {
5583 			n = (int)(n + (int)snprintf(&buf[n],
5584 			    (uint_t)(size_left - n), "%08x: ", addr + i));
5585 		}
5586 		if ((i + 1) & 7) {
5587 			c = ' ';
5588 		} else {
5589 			c = '\n';
5590 		}
5591 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5592 		    "%08x%c", ptr[i], c));
5593 	}
5594 	return (n);
5595 }
5596 
5597 static int
5598 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5599     uint_t size_left)
5600 {
5601 	int		i;
5602 	int		n;
5603 	char		c = ' ';
5604 	int		words;
5605 	uint16_t	*ptr;
5606 	uint16_t	w;
5607 
5608 	words = entries * 32;
5609 	ptr = (uint16_t *)qadr;
5610 	for (i = 0, n = 0; i < words; i++) {
5611 		if ((i & 7) == 0) {
5612 			n = (int)(n + (int)snprintf(&buf[n],
5613 			    (uint_t)(size_left - n), "%05x: ", i));
5614 		}
5615 		if ((i + 1) & 7) {
5616 			c = ' ';
5617 		} else {
5618 			c = '\n';
5619 		}
5620 		w = QMEM_RD16(qlt, &ptr[i]);
5621 		n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5622 		    w, c));
5623 	}
5624 	return (n);
5625 }
5626 
5627 /*
5628  * Only called by debug dump. Interrupts are disabled and mailboxes alongwith
5629  * mailbox ram is available.
5630  * Copy data from RISC RAM to system memory
5631  */
5632 static fct_status_t
5633 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5634 {
5635 	uint64_t	da;
5636 	fct_status_t	ret;
5637 
5638 	REG_WR16(qlt, REG_MBOX(0), 0xc);
5639 	da = qlt->queue_mem_cookie.dmac_laddress;
5640 	da += MBOX_DMA_MEM_OFFSET;
5641 
5642 	/*
5643 	 * System destination address
5644 	 */
5645 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
5646 	da >>= 16;
5647 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
5648 	da >>= 16;
5649 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
5650 	da >>= 16;
5651 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
5652 
5653 	/*
5654 	 * Length
5655 	 */
5656 	REG_WR16(qlt, REG_MBOX(5), words & 0xffff);
5657 	REG_WR16(qlt, REG_MBOX(4), ((words >> 16) & 0xffff));
5658 
5659 	/*
5660 	 * RISC source address
5661 	 */
5662 	REG_WR16(qlt, REG_MBOX(1), addr & 0xffff);
5663 	REG_WR16(qlt, REG_MBOX(8), ((addr >> 16) & 0xffff));
5664 
5665 	ret = qlt_raw_mailbox_command(qlt);
5666 	REG_WR32(qlt, REG_HCCR, 0xA0000000);
5667 	if (ret == QLT_SUCCESS) {
5668 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5669 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5670 	} else {
5671 		EL(qlt, "qlt_raw_mailbox_command=ch status=%llxh\n", ret);
5672 	}
5673 	return (ret);
5674 }
5675 
5676 static void
5677 qlt_verify_fw(qlt_state_t *qlt)
5678 {
5679 	caddr_t req;
5680 	/* Just put it on the request queue */
5681 	mutex_enter(&qlt->req_lock);
5682 	req = qlt_get_req_entries(qlt, 1);
5683 	if (req == NULL) {
5684 		mutex_exit(&qlt->req_lock);
5685 		/* XXX handle this */
5686 		return;
5687 	}
5688 
5689 	bzero(req, IOCB_SIZE);
5690 
5691 	req[0] = 0x1b;
5692 	req[1] = 1;
5693 
5694 	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
5695 	QMEM_WR16(qlt, (&req[0x8]), 1);    /*  options - don't update */
5696 	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
5697 
5698 	qlt_submit_req_entries(qlt, 1);
5699 	mutex_exit(&qlt->req_lock);
5700 }
5701 
5702 static void
5703 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5704 {
5705 	uint16_t	status;
5706 	char		info[80];
5707 
5708 	status = QMEM_RD16(qlt, rsp+8);
5709 	if (status != 0) {
5710 		(void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
5711 		    "status:%x, rsp:%p", status, (void *)rsp);
5712 		if (status == 3) {
5713 			uint16_t error_code;
5714 
5715 			error_code = QMEM_RD16(qlt, rsp+0xA);
5716 			(void) snprintf(info, 80, "qlt_handle_verify_fw_"
5717 			    "completion: error code:%x", error_code);
5718 		}
5719 	}
5720 }
5721 
5722 /*
5723  * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5724  *
5725  * Input:	Pointer to the adapter state structure.
5726  * Returns:	Success or Failure.
5727  * Context:	Kernel context.
5728  */
5729 static int
5730 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5731 {
5732 	int	rval = DDI_SUCCESS;
5733 
5734 	qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5735 	    kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5736 
5737 	if (qlt->el_trace_desc == NULL) {
5738 		cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5739 		    qlt->instance);
5740 		rval = DDI_FAILURE;
5741 	} else {
5742 		qlt->el_trace_desc->next = 0;
5743 		qlt->el_trace_desc->trace_buffer =
5744 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5745 
5746 		if (qlt->el_trace_desc->trace_buffer == NULL) {
5747 			cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5748 			    qlt->instance);
5749 			kmem_free(qlt->el_trace_desc,
5750 			    sizeof (qlt_el_trace_desc_t));
5751 			qlt->el_trace_desc = NULL;
5752 			rval = DDI_FAILURE;
5753 		} else {
5754 			qlt->el_trace_desc->trace_buffer_size =
5755 			    EL_TRACE_BUF_SIZE;
5756 			mutex_init(&qlt->el_trace_desc->mutex, NULL,
5757 			    MUTEX_DRIVER, NULL);
5758 		}
5759 	}
5760 
5761 	return (rval);
5762 }
5763 
5764 /*
5765  * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5766  *
5767  * Input:	Pointer to the adapter state structure.
5768  * Returns:	Success or Failure.
5769  * Context:	Kernel context.
5770  */
5771 static int
5772 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5773 {
5774 	int	rval = DDI_SUCCESS;
5775 
5776 	if (qlt->el_trace_desc == NULL) {
5777 		cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5778 		    qlt->instance);
5779 		rval = DDI_FAILURE;
5780 	} else {
5781 		if (qlt->el_trace_desc->trace_buffer != NULL) {
5782 			kmem_free(qlt->el_trace_desc->trace_buffer,
5783 			    qlt->el_trace_desc->trace_buffer_size);
5784 		}
5785 		mutex_destroy(&qlt->el_trace_desc->mutex);
5786 		kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5787 		qlt->el_trace_desc = NULL;
5788 	}
5789 
5790 	return (rval);
5791 }
5792 
5793 /*
5794  * qlt_el_msg
5795  *	Extended logging message
5796  *
5797  * Input:
5798  *	qlt:	adapter state pointer.
5799  *	fn:	function name.
5800  *	ce:	level
5801  *	...:	Variable argument list.
5802  *
5803  * Context:
5804  *	Kernel/Interrupt context.
5805  */
5806 void
5807 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5808 {
5809 	char		*s, *fmt = 0, *fmt1 = 0;
5810 	char		fmt2[EL_BUFFER_RESERVE];
5811 	int		rval, tmp;
5812 	int		tracing = 0;
5813 	va_list		vl;
5814 
5815 	/* Tracing is the default but it can be disabled. */
5816 	if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5817 		tracing = 1;
5818 
5819 		mutex_enter(&qlt->el_trace_desc->mutex);
5820 
5821 		/*
5822 		 * Ensure enough space for the string. Wrap to
5823 		 * start when default message allocation size
5824 		 * would overrun the end.
5825 		 */
5826 		if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5827 		    qlt->el_trace_desc->trace_buffer_size) {
5828 			fmt = qlt->el_trace_desc->trace_buffer;
5829 			qlt->el_trace_desc->next = 0;
5830 		} else {
5831 			fmt = qlt->el_trace_desc->trace_buffer +
5832 			    qlt->el_trace_desc->next;
5833 		}
5834 	}
5835 
5836 	/* if no buffer use the stack */
5837 	if (fmt == NULL) {
5838 		fmt = fmt2;
5839 	}
5840 
5841 	va_start(vl, ce);
5842 
5843 	s = va_arg(vl, char *);
5844 
5845 	rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
5846 	    "QEL qlt(%d): %s, ", qlt->instance, fn);
5847 	fmt1 = fmt + rval;
5848 	tmp = (int)vsnprintf(fmt1,
5849 	    (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
5850 	rval += tmp;
5851 
5852 	/*
5853 	 * Calculate the offset where the next message will go,
5854 	 * skipping the NULL.
5855 	 */
5856 	if (tracing) {
5857 		uint16_t next = (uint16_t)(rval += 1);
5858 		qlt->el_trace_desc->next += next;
5859 		mutex_exit(&qlt->el_trace_desc->mutex);
5860 	}
5861 
5862 	va_end(vl);
5863 }
5864 
5865 /*
5866  * qlt_dump_el_trace_buffer
5867  *	 Outputs extended logging trace buffer.
5868  *
5869  * Input:
5870  *	qlt:	adapter state pointer.
5871  */
5872 void
5873 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
5874 {
5875 	char		*dump_start = NULL;
5876 	char		*dump_current = NULL;
5877 	char		*trace_start;
5878 	char		*trace_end;
5879 	int		wrapped = 0;
5880 	int		rval;
5881 
5882 	mutex_enter(&qlt->el_trace_desc->mutex);
5883 
5884 	rval = qlt_validate_trace_desc(qlt);
5885 	if (rval != NULL) {
5886 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
5887 		    qlt->instance);
5888 	} else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
5889 		dump_current = dump_start;
5890 		trace_start = qlt->el_trace_desc->trace_buffer;
5891 		trace_end = trace_start +
5892 		    qlt->el_trace_desc->trace_buffer_size;
5893 
5894 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
5895 		    qlt->instance,
5896 		    (void *)dump_start, (void *)trace_start);
5897 
5898 		while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
5899 		    (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
5900 			/* Show it... */
5901 			cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
5902 			    dump_current);
5903 			/* Make the next the current */
5904 			dump_current += (strlen(dump_current) + 1);
5905 			/* check for wrap */
5906 			if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
5907 				dump_current = trace_start;
5908 				wrapped = 1;
5909 			} else if (wrapped) {
5910 				/* Don't go past next. */
5911 				if ((trace_start + qlt->el_trace_desc->next) <=
5912 				    dump_current) {
5913 					break;
5914 				}
5915 			} else if (*dump_current == NULL) {
5916 				break;
5917 			}
5918 		}
5919 	}
5920 	mutex_exit(&qlt->el_trace_desc->mutex);
5921 }
5922 
5923 /*
5924  * qlt_validate_trace_desc
5925  *	 Ensures the extended logging trace descriptor is good
5926  *
5927  * Input:
5928  *	qlt:	adapter state pointer.
5929  *
5930  * Returns:
5931  *	ql local function return status code.
5932  */
5933 static int
5934 qlt_validate_trace_desc(qlt_state_t *qlt)
5935 {
5936 	int	rval = DDI_SUCCESS;
5937 
5938 	if (qlt->el_trace_desc == NULL) {
5939 		rval = DDI_FAILURE;
5940 	} else if (qlt->el_trace_desc->trace_buffer == NULL) {
5941 		rval = DDI_FAILURE;
5942 	}
5943 	return (rval);
5944 }
5945 
5946 /*
5947  * qlt_find_trace_start
5948  *	 Locate the oldest extended logging trace entry.
5949  *
5950  * Input:
5951  *	qlt:	adapter state pointer.
5952  *
5953  * Returns:
5954  *	Pointer to a string.
5955  *
5956  * Context:
5957  *	Kernel/Interrupt context.
5958  */
5959 static char *
5960 qlt_find_trace_start(qlt_state_t *qlt)
5961 {
5962 	char	*trace_start = 0;
5963 	char	*trace_next  = 0;
5964 
5965 	trace_next = qlt->el_trace_desc->trace_buffer +
5966 	    qlt->el_trace_desc->next;
5967 
5968 	/*
5969 	 * If the buffer has not wrapped next will point at a null so
5970 	 * start is the beginning of the buffer.  If next points at a char
5971 	 * then we must traverse the buffer until a null is detected and
5972 	 * that will be the beginning of the oldest whole object in the buffer
5973 	 * which is the start.
5974 	 */
5975 
5976 	if ((trace_next + EL_BUFFER_RESERVE) >=
5977 	    (qlt->el_trace_desc->trace_buffer +
5978 	    qlt->el_trace_desc->trace_buffer_size)) {
5979 		trace_start = qlt->el_trace_desc->trace_buffer;
5980 	} else if (*trace_next != NULL) {
5981 		trace_start = trace_next + (strlen(trace_next) + 1);
5982 	} else {
5983 		trace_start = qlt->el_trace_desc->trace_buffer;
5984 	}
5985 	return (trace_start);
5986 }
5987 
5988 
5989 static int
5990 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
5991 {
5992 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
5993 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
5994 }
5995 
5996 static int
5997 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
5998 {
5999 	return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
6000 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, prop_val));
6001 }
6002 
6003 static int
6004 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6005 {
6006 	char		instance_prop[256];
6007 
6008 	/* Get adapter instance specific parameter. */
6009 	(void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
6010 	return (qlt_read_string_prop(qlt, instance_prop, prop_val));
6011 }
6012 
6013 static int
6014 qlt_convert_string_to_ull(char *prop, int radix,
6015     u_longlong_t *result)
6016 {
6017 	return (ddi_strtoull((const char *)prop, 0, radix, result));
6018 }
6019 
6020 static boolean_t
6021 qlt_wwn_overload_prop(qlt_state_t *qlt)
6022 {
6023 	char		*prop_val = 0;
6024 	int		rval;
6025 	int		radix;
6026 	u_longlong_t	wwnn = 0, wwpn = 0;
6027 	boolean_t	overloaded = FALSE;
6028 
6029 	radix = 16;
6030 
6031 	rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
6032 	if (rval == DDI_PROP_SUCCESS) {
6033 		rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
6034 	}
6035 	if (rval == DDI_PROP_SUCCESS) {
6036 		rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
6037 		    &prop_val);
6038 		if (rval == DDI_PROP_SUCCESS) {
6039 			rval = qlt_convert_string_to_ull(prop_val, radix,
6040 			    &wwpn);
6041 		}
6042 	}
6043 	if (rval == DDI_PROP_SUCCESS) {
6044 		overloaded = TRUE;
6045 		/* Overload the current node/port name nvram copy */
6046 		bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
6047 		BIG_ENDIAN_64(qlt->nvram->node_name);
6048 		bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
6049 		BIG_ENDIAN_64(qlt->nvram->port_name);
6050 	}
6051 	return (overloaded);
6052 }
6053 
6054 /*
6055  * prop_text - Return a pointer to a string describing the status
6056  *
6057  * Input:	prop_status = the return status from a property function.
6058  * Returns:	pointer to a string.
6059  * Context:	Kernel context.
6060  */
6061 char *
6062 prop_text(int prop_status)
6063 {
6064 	string_table_t *entry = &prop_status_tbl[0];
6065 
6066 	return (value2string(entry, prop_status, 0xFFFF));
6067 }
6068 
6069 /*
6070  * value2string	Return a pointer to a string associated with the value
6071  *
6072  * Input:	entry = the value to string table
6073  *		value = the value
6074  * Returns:	pointer to a string.
6075  * Context:	Kernel context.
6076  */
6077 char *
6078 value2string(string_table_t *entry, int value, int delimiter)
6079 {
6080 	for (; entry->value != delimiter; entry++) {
6081 		if (entry->value == value) {
6082 			break;
6083 		}
6084 	}
6085 	return (entry->string);
6086 }
6087 
6088 /*
6089  * qlt_chg_endian Change endianess of byte array.
6090  *
6091  * Input:	buf = array pointer.
6092  *		size = size of array in bytes.
6093  *
6094  * Context:	Interrupt or Kernel context.
6095  */
6096 void
6097 qlt_chg_endian(uint8_t buf[], size_t size)
6098 {
6099 	uint8_t byte;
6100 	size_t  cnt1;
6101 	size_t  cnt;
6102 
6103 	cnt1 = size - 1;
6104 	for (cnt = 0; cnt < size / 2; cnt++) {
6105 		byte = buf[cnt1];
6106 		buf[cnt1] = buf[cnt];
6107 		buf[cnt] = byte;
6108 		cnt1--;
6109 	}
6110 }
6111