1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
29  * Use is subject to license terms.
30  */
31 
32 #include <sys/conf.h>
33 #include <sys/ddi.h>
34 #include <sys/stat.h>
35 #include <sys/pci.h>
36 #include <sys/sunddi.h>
37 #include <sys/modctl.h>
38 #include <sys/file.h>
39 #include <sys/cred.h>
40 #include <sys/byteorder.h>
41 #include <sys/atomic.h>
42 #include <sys/scsi/scsi.h>
43 
44 #include <stmf_defines.h>
45 #include <fct_defines.h>
46 #include <stmf.h>
47 #include <portif.h>
48 #include <fct.h>
49 #include <qlt.h>
50 #include <qlt_dma.h>
51 #include <qlt_ioctl.h>
52 #include <qlt_open.h>
53 #include <stmf_ioctl.h>
54 
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static fct_status_t qlt_reset_chip_and_download_fw(qlt_state_t *qlt,
58     int reset_only);
59 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
60     uint32_t word_count, uint32_t risc_addr);
61 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
62 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
63 					uint32_t dma_size);
64 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
65 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
66 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
67 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
68     stmf_state_change_info_t *ssci);
69 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
70 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
71 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
72 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
73 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
74 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
77     uint8_t *rsp);
78 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
79 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
80 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
81 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
82 static void qlt_verify_fw(qlt_state_t *qlt);
83 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
84 fct_status_t qlt_port_start(caddr_t arg);
85 fct_status_t qlt_port_stop(caddr_t arg);
86 fct_status_t qlt_port_online(qlt_state_t *qlt);
87 fct_status_t qlt_port_offline(qlt_state_t *qlt);
88 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
89     fct_link_info_t *li);
90 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
91 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
92 						fct_flogi_xchg_t *fx);
93 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
94 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
95 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
96     fct_remote_port_t *rp, fct_cmd_t *login);
97 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
98     fct_remote_port_t *rp);
99 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
100 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
101 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
102     fct_cmd_t *cmd, int terminate);
103 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
104 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
105 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
106     fct_cmd_t *cmd, uint32_t flags);
107 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
108 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
109 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
110 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
111 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
113 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
114     stmf_data_buf_t *dbuf, uint32_t ioflags);
115 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
116 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
117 static void qlt_release_intr(qlt_state_t *qlt);
118 static int qlt_setup_interrupts(qlt_state_t *qlt);
119 static void qlt_destroy_mutex(qlt_state_t *qlt);
120 
121 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
122     uint32_t words);
123 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
124     caddr_t buf, uint_t size_left);
125 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
126     caddr_t buf, uint_t size_left);
127 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
128     int count, uint_t size_left);
129 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
130     cred_t *credp, int *rval);
131 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
132 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
133 
134 #if defined(__sparc)
135 static int qlt_setup_msi(qlt_state_t *qlt);
136 static int qlt_setup_msix(qlt_state_t *qlt);
137 #endif
138 
139 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
140 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
141 static int qlt_validate_trace_desc(qlt_state_t *qlt);
142 static char *qlt_find_trace_start(qlt_state_t *qlt);
143 
144 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
145 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
146 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
147     char **prop_val);
148 static int qlt_convert_string_to_ull(char *prop, int radix,
149     u_longlong_t *result);
150 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
151 static int qlt_quiesce(dev_info_t *dip);
152 
/*
 * Set the bit for ELS opcode 'els' in the ELS bitmap 'bmp':
 * byte index is (els >> 3) masked to a 32-byte map, bit is (els & 7).
 */
#define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
155 
156 int qlt_enable_msix = 0;
157 
158 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
159 
160 /* Array to quickly calculate next free buf index to use */
161 #if 0
162 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
163 #endif
164 
/* Character device entry points; only open/close/ioctl are implemented. */
static struct cb_ops qlt_cb_ops = {
	qlt_open,	/* cb_open */
	qlt_close,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	qlt_ioctl,	/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	0,		/* cb_str: not a STREAMS driver */
	D_MP | D_NEW	/* cb_flag: MT-safe */
};
182 
/* Device operations vector for this driver. */
static struct dev_ops qlt_ops = {
	DEVO_REV,	/* devo_rev */
	0,		/* devo_refcnt */
	nodev,		/* devo_getinfo */
	nulldev,	/* devo_identify */
	nulldev,	/* devo_probe */
	qlt_attach,	/* devo_attach */
	qlt_detach,	/* devo_detach */
	nodev,		/* devo_reset */
	&qlt_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	ddi_power,	/* devo_power */
	qlt_quiesce	/* devo_quiesce */
};
197 
198 #ifndef	PORT_SPEED_10G
199 #define	PORT_SPEED_10G		16
200 #endif
201 
/* Loadable-module linkage: this is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,
	QLT_NAME" "QLT_VERSION,	/* module name shown by modinfo */
	&qlt_ops,
};
207 
/* Single-linkage module list handed to mod_install()/mod_remove(). */
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
211 
212 void *qlt_state = NULL;
213 kmutex_t qlt_global_lock;
214 static uint32_t qlt_loaded_counter = 0;
215 
216 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
217 			"-X Mode 1 133", "--Invalid--",
218 			"-X Mode 2 66", "-X Mode 2 100",
219 			"-X Mode 2 133", " 66" };
220 
221 /* Always use 64 bit DMA. */
222 static ddi_dma_attr_t qlt_queue_dma_attr = {
223 	DMA_ATTR_V0,		/* dma_attr_version */
224 	0,			/* low DMA address range */
225 	0xffffffffffffffff,	/* high DMA address range */
226 	0xffffffff,		/* DMA counter register */
227 	64,			/* DMA address alignment */
228 	0xff,			/* DMA burstsizes */
229 	1,			/* min effective DMA size */
230 	0xffffffff,		/* max DMA xfer size */
231 	0xffffffff,		/* segment boundary */
232 	1,			/* s/g list length */
233 	1,			/* granularity of device */
234 	0			/* DMA transfer flags */
235 };
236 
237 /* qlogic logging */
238 int enable_extended_logging = 0;
239 
240 static char qlt_provider_name[] = "qlt";
241 static struct stmf_port_provider *qlt_pp;
242 
243 int
244 _init(void)
245 {
246 	int ret;
247 
248 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
249 	if (ret == 0) {
250 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
251 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
252 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
253 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
254 		qlt_pp->pp_name = qlt_provider_name;
255 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
256 			stmf_free(qlt_pp);
257 			mutex_destroy(&qlt_global_lock);
258 			ddi_soft_state_fini(&qlt_state);
259 			return (EIO);
260 		}
261 		ret = mod_install(&modlinkage);
262 		if (ret != 0) {
263 			(void) stmf_deregister_port_provider(qlt_pp);
264 			stmf_free(qlt_pp);
265 			mutex_destroy(&qlt_global_lock);
266 			ddi_soft_state_fini(&qlt_state);
267 		}
268 	}
269 	return (ret);
270 }
271 
272 int
273 _fini(void)
274 {
275 	int ret;
276 
277 	if (qlt_loaded_counter)
278 		return (EBUSY);
279 	ret = mod_remove(&modlinkage);
280 	if (ret == 0) {
281 		(void) stmf_deregister_port_provider(qlt_pp);
282 		stmf_free(qlt_pp);
283 		mutex_destroy(&qlt_global_lock);
284 		ddi_soft_state_fini(&qlt_state);
285 	}
286 	return (ret);
287 }
288 
289 int
290 _info(struct modinfo *modinfop)
291 {
292 	return (mod_info(&modlinkage, modinfop));
293 }
294 
295 
296 static int
297 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
298 {
299 	int		instance;
300 	qlt_state_t	*qlt;
301 	ddi_device_acc_attr_t	dev_acc_attr;
302 	uint16_t	did;
303 	uint16_t	val;
304 	uint16_t	mr;
305 	size_t		discard;
306 	uint_t		ncookies;
307 	int		max_read_size;
308 	int		max_payload_size;
309 	fct_status_t	ret;
310 
311 	/* No support for suspend resume yet */
312 	if (cmd != DDI_ATTACH)
313 		return (DDI_FAILURE);
314 	instance = ddi_get_instance(dip);
315 
316 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
317 		return (DDI_FAILURE);
318 	}
319 
320 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
321 	    NULL) {
322 		goto attach_fail_1;
323 	}
324 	qlt->instance = instance;
325 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
326 	qlt->dip = dip;
327 
328 	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
329 		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
330 		goto attach_fail_1;
331 	}
332 
333 	EL(qlt, "instance=%d\n", instance);
334 
335 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
336 		goto attach_fail_2;
337 	}
338 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
339 	if ((did != 0x2422) && (did != 0x2432) &&
340 	    (did != 0x8432) && (did != 0x2532) &&
341 	    (did != 0x8001)) {
342 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
343 		    instance, did);
344 		goto attach_fail_4;
345 	}
346 
347 	if ((did & 0xFF00) == 0x8000)
348 		qlt->qlt_81xx_chip = 1;
349 	else if ((did & 0xFF00) == 0x2500)
350 		qlt->qlt_25xx_chip = 1;
351 
352 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
353 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
354 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
355 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
356 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
357 		goto attach_fail_4;
358 	}
359 	if (did == 0x2422) {
360 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
361 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
362 		pci_bits >>= 8;
363 		pci_bits &= 0xf;
364 		if ((pci_bits == 3) || (pci_bits == 7)) {
365 			cmn_err(CE_NOTE,
366 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
367 			    instance, pci_speeds[pci_bits], pci_bits);
368 		} else {
369 			cmn_err(CE_WARN,
370 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
371 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
372 			    "(Invalid)", ((pci_bits == 0) ||
373 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
374 			    "32 bit slot ") : "", pci_bits);
375 		}
376 	}
377 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
378 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
379 		    (unsigned long long)ret);
380 		goto attach_fail_5;
381 	}
382 	if (qlt_wwn_overload_prop(qlt) == TRUE) {
383 		EL(qlt, "wwnn overloaded.\n", instance);
384 	}
385 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
386 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
387 		goto attach_fail_5;
388 	}
389 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
390 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
391 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
392 	    DDI_SUCCESS) {
393 		goto attach_fail_6;
394 	}
395 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
396 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
397 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
398 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
399 		goto attach_fail_7;
400 	}
401 	if (ncookies != 1)
402 		goto attach_fail_8;
403 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
404 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
405 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
406 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
407 
408 	/* mutex are inited in this function */
409 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
410 		goto attach_fail_8;
411 
412 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
413 	    "qlt%d", instance);
414 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
415 	    "%s,0", qlt->qlt_minor_name);
416 
417 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
418 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
419 		goto attach_fail_9;
420 	}
421 
422 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
423 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
424 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
425 
426 	/* Setup PCI cfg space registers */
427 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
428 	if (max_read_size == 11)
429 		goto over_max_read_xfer_setting;
430 	if (did == 0x2422) {
431 		if (max_read_size == 512)
432 			val = 0;
433 		else if (max_read_size == 1024)
434 			val = 1;
435 		else if (max_read_size == 2048)
436 			val = 2;
437 		else if (max_read_size == 4096)
438 			val = 3;
439 		else {
440 			cmn_err(CE_WARN, "qlt(%d) malformed "
441 			    "pci-max-read-request in qlt.conf. Valid values "
442 			    "for this HBA are 512/1024/2048/4096", instance);
443 			goto over_max_read_xfer_setting;
444 		}
445 		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
446 		mr = (uint16_t)(mr & 0xfff3);
447 		mr = (uint16_t)(mr | (val << 2));
448 		PCICFG_WR16(qlt, 0x4E, mr);
449 	} else if ((did == 0x2432) || (did == 0x8432) ||
450 	    (did == 0x2532) || (did == 0x8001)) {
451 		if (max_read_size == 128)
452 			val = 0;
453 		else if (max_read_size == 256)
454 			val = 1;
455 		else if (max_read_size == 512)
456 			val = 2;
457 		else if (max_read_size == 1024)
458 			val = 3;
459 		else if (max_read_size == 2048)
460 			val = 4;
461 		else if (max_read_size == 4096)
462 			val = 5;
463 		else {
464 			cmn_err(CE_WARN, "qlt(%d) malformed "
465 			    "pci-max-read-request in qlt.conf. Valid values "
466 			    "for this HBA are 128/256/512/1024/2048/4096",
467 			    instance);
468 			goto over_max_read_xfer_setting;
469 		}
470 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
471 		mr = (uint16_t)(mr & 0x8fff);
472 		mr = (uint16_t)(mr | (val << 12));
473 		PCICFG_WR16(qlt, 0x54, mr);
474 	} else {
475 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
476 		    "pci-max-read-request for this device (%x)",
477 		    instance, did);
478 	}
479 over_max_read_xfer_setting:;
480 
481 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
482 	if (max_payload_size == 11)
483 		goto over_max_payload_setting;
484 	if ((did == 0x2432) || (did == 0x8432) ||
485 	    (did == 0x2532) || (did == 0x8001)) {
486 		if (max_payload_size == 128)
487 			val = 0;
488 		else if (max_payload_size == 256)
489 			val = 1;
490 		else if (max_payload_size == 512)
491 			val = 2;
492 		else if (max_payload_size == 1024)
493 			val = 3;
494 		else {
495 			cmn_err(CE_WARN, "qlt(%d) malformed "
496 			    "pcie-max-payload-size in qlt.conf. Valid values "
497 			    "for this HBA are 128/256/512/1024",
498 			    instance);
499 			goto over_max_payload_setting;
500 		}
501 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
502 		mr = (uint16_t)(mr & 0xff1f);
503 		mr = (uint16_t)(mr | (val << 5));
504 		PCICFG_WR16(qlt, 0x54, mr);
505 	} else {
506 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
507 		    "pcie-max-payload-size for this device (%x)",
508 		    instance, did);
509 	}
510 
511 over_max_payload_setting:;
512 
513 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
514 		goto attach_fail_10;
515 
516 	ddi_report_dev(dip);
517 	return (DDI_SUCCESS);
518 
519 attach_fail_10:;
520 	mutex_destroy(&qlt->qlt_ioctl_lock);
521 	cv_destroy(&qlt->mbox_cv);
522 	cv_destroy(&qlt->rp_dereg_cv);
523 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
524 attach_fail_9:;
525 	qlt_destroy_mutex(qlt);
526 	qlt_release_intr(qlt);
527 attach_fail_8:;
528 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
529 attach_fail_7:;
530 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
531 attach_fail_6:;
532 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
533 attach_fail_5:;
534 	ddi_regs_map_free(&qlt->regs_acc_handle);
535 attach_fail_4:;
536 	pci_config_teardown(&qlt->pcicfg_acc_handle);
537 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
538 	(void) qlt_el_trace_desc_dtor(qlt);
539 attach_fail_2:;
540 attach_fail_1:;
541 	ddi_soft_state_free(qlt_state, instance);
542 	return (DDI_FAILURE);
543 }
544 
545 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
546 
547 /* ARGSUSED */
static int
qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	qlt_state_t *qlt;

	int instance;

	instance = ddi_get_instance(dip);
	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
	    NULL) {
		return (DDI_FAILURE);
	}

	/* Refuse to detach while a downloaded firmware image is held. */
	if (qlt->fw_code01) {
		return (DDI_FAILURE);
	}

	/*
	 * The port must already be offline and the offline state change
	 * acknowledged before the instance may be torn down.
	 */
	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
	    qlt->qlt_state_not_acked) {
		return (DDI_FAILURE);
	}
	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS)
		return (DDI_FAILURE);
	/*
	 * Teardown mirrors the setup order in qlt_attach() in reverse:
	 * minor node, interrupts/mutexes, DMA queue memory, register
	 * mapping, PCI config access, NVRAM buffer, CVs, tracing and
	 * finally the soft state itself.
	 */
	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
	qlt_destroy_mutex(qlt);
	qlt_release_intr(qlt);
	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
	ddi_regs_map_free(&qlt->regs_acc_handle);
	pci_config_teardown(&qlt->pcicfg_acc_handle);
	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
	cv_destroy(&qlt->mbox_cv);
	cv_destroy(&qlt->rp_dereg_cv);
	(void) qlt_el_trace_desc_dtor(qlt);
	ddi_soft_state_free(qlt_state, instance);

	return (DDI_SUCCESS);
}
587 
588 /*
589  * qlt_quiesce	quiesce a device attached to the system.
590  */
static int
qlt_quiesce(dev_info_t *dip)
{
	qlt_state_t	*qlt;
	uint32_t	timer;
	uint32_t	stat;

	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
	if (qlt == NULL) {
		/* Oh well.... */
		return (DDI_SUCCESS);
	}

	/* Issue a STOP_FIRMWARE mailbox command to the RISC. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_HOST_TO_RISC_INTR);
	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
	/* Poll up to ~3 seconds (30000 * 100us) for the command to finish. */
	for (timer = 0; timer < 30000; timer++) {
		stat = REG_RD32(qlt, REG_RISC_STATUS);
		if (stat & RISC_HOST_INTR_REQUEST) {
			/*
			 * Status codes below 0x12 appear to indicate the
			 * mailbox command completed — NOTE(review): confirm
			 * against the firmware interface specification.
			 */
			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
				REG_WR32(qlt, REG_HCCR,
				    HCCR_CMD_CLEAR_RISC_PAUSE);
				break;
			}
			/* Not ours; clear and keep polling. */
			REG_WR32(qlt, REG_HCCR,
			    HCCR_CMD_CLEAR_HOST_TO_RISC_INTR);
		}
		drv_usecwait(100);
	}
	/* Reset the chip. */
	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
	    PCI_X_XFER_CTRL);
	drv_usecwait(100);

	return (DDI_SUCCESS);
}
627 
628 static void
629 qlt_enable_intr(qlt_state_t *qlt)
630 {
631 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
632 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
633 	} else {
634 		int i;
635 		for (i = 0; i < qlt->intr_cnt; i++)
636 			(void) ddi_intr_enable(qlt->htable[i]);
637 	}
638 }
639 
640 static void
641 qlt_disable_intr(qlt_state_t *qlt)
642 {
643 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
644 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
645 	} else {
646 		int i;
647 		for (i = 0; i < qlt->intr_cnt; i++)
648 			(void) ddi_intr_disable(qlt->htable[i]);
649 	}
650 }
651 
652 static void
653 qlt_release_intr(qlt_state_t *qlt)
654 {
655 	if (qlt->htable) {
656 		int i;
657 		for (i = 0; i < qlt->intr_cnt; i++) {
658 			(void) ddi_intr_remove_handler(qlt->htable[i]);
659 			(void) ddi_intr_free(qlt->htable[i]);
660 		}
661 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
662 	}
663 	qlt->htable = NULL;
664 	qlt->intr_pri = 0;
665 	qlt->intr_cnt = 0;
666 	qlt->intr_size = 0;
667 	qlt->intr_cap = 0;
668 }
669 
670 
671 static void
672 qlt_init_mutex(qlt_state_t *qlt)
673 {
674 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
675 	    INT2PTR(qlt->intr_pri, void *));
676 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
677 	    INT2PTR(qlt->intr_pri, void *));
678 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
679 	    INT2PTR(qlt->intr_pri, void *));
680 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
681 	    INT2PTR(qlt->intr_pri, void *));
682 }
683 
684 static void
685 qlt_destroy_mutex(qlt_state_t *qlt)
686 {
687 	mutex_destroy(&qlt->req_lock);
688 	mutex_destroy(&qlt->preq_lock);
689 	mutex_destroy(&qlt->mbox_lock);
690 	mutex_destroy(&qlt->intr_lock);
691 }
692 
693 
694 #if defined(__sparc)
695 static int
696 qlt_setup_msix(qlt_state_t *qlt)
697 {
698 	int count, avail, actual;
699 	int ret;
700 	int itype = DDI_INTR_TYPE_MSIX;
701 	int i;
702 
703 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
704 	if (ret != DDI_SUCCESS || count == 0) {
705 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
706 		    count);
707 		return (DDI_FAILURE);
708 	}
709 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
710 	if (ret != DDI_SUCCESS || avail == 0) {
711 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
712 		    avail);
713 		return (DDI_FAILURE);
714 	}
715 	if (avail < count) {
716 		stmf_trace(qlt->qlt_port_alias,
717 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
718 	}
719 
720 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
721 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
722 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
723 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
724 	/* we need at least 2 interrupt vectors */
725 	if (ret != DDI_SUCCESS || actual < 2) {
726 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
727 		    actual);
728 		ret = DDI_FAILURE;
729 		goto release_intr;
730 	}
731 	if (actual < count) {
732 		EL(qlt, "requested: %d, received: %d\n", count, actual);
733 	}
734 
735 	qlt->intr_cnt = actual;
736 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
737 	if (ret != DDI_SUCCESS) {
738 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
739 		ret = DDI_FAILURE;
740 		goto release_intr;
741 	}
742 	qlt_init_mutex(qlt);
743 	for (i = 0; i < actual; i++) {
744 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
745 		    qlt, INT2PTR((uint_t)i, void *));
746 		if (ret != DDI_SUCCESS) {
747 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
748 			goto release_mutex;
749 		}
750 	}
751 
752 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
753 	qlt->intr_flags |= QLT_INTR_MSIX;
754 	return (DDI_SUCCESS);
755 
756 release_mutex:
757 	qlt_destroy_mutex(qlt);
758 release_intr:
759 	for (i = 0; i < actual; i++)
760 		(void) ddi_intr_free(qlt->htable[i]);
761 #if 0
762 free_mem:
763 #endif
764 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
765 	qlt->htable = NULL;
766 	qlt_release_intr(qlt);
767 	return (ret);
768 }
769 
770 
771 static int
772 qlt_setup_msi(qlt_state_t *qlt)
773 {
774 	int count, avail, actual;
775 	int itype = DDI_INTR_TYPE_MSI;
776 	int ret;
777 	int i;
778 
779 	/* get the # of interrupts */
780 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
781 	if (ret != DDI_SUCCESS || count == 0) {
782 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
783 		    count);
784 		return (DDI_FAILURE);
785 	}
786 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
787 	if (ret != DDI_SUCCESS || avail == 0) {
788 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
789 		    avail);
790 		return (DDI_FAILURE);
791 	}
792 	if (avail < count) {
793 		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
794 	}
795 	/* MSI requires only 1 interrupt. */
796 	count = 1;
797 
798 	/* allocate interrupt */
799 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
800 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
801 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
802 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
803 	if (ret != DDI_SUCCESS || actual == 0) {
804 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
805 		    actual);
806 		ret = DDI_FAILURE;
807 		goto free_mem;
808 	}
809 	if (actual < count) {
810 		EL(qlt, "requested: %d, received: %d\n", count, actual);
811 	}
812 	qlt->intr_cnt = actual;
813 
814 	/*
815 	 * Get priority for first msi, assume remaining are all the same.
816 	 */
817 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
818 	if (ret != DDI_SUCCESS) {
819 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
820 		ret = DDI_FAILURE;
821 		goto release_intr;
822 	}
823 	qlt_init_mutex(qlt);
824 
825 	/* add handler */
826 	for (i = 0; i < actual; i++) {
827 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
828 		    qlt, INT2PTR((uint_t)i, void *));
829 		if (ret != DDI_SUCCESS) {
830 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
831 			goto release_mutex;
832 		}
833 	}
834 
835 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
836 	qlt->intr_flags |= QLT_INTR_MSI;
837 	return (DDI_SUCCESS);
838 
839 release_mutex:
840 	qlt_destroy_mutex(qlt);
841 release_intr:
842 	for (i = 0; i < actual; i++)
843 		(void) ddi_intr_free(qlt->htable[i]);
844 free_mem:
845 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
846 	qlt->htable = NULL;
847 	qlt_release_intr(qlt);
848 	return (ret);
849 }
850 #endif
851 
852 static int
853 qlt_setup_fixed(qlt_state_t *qlt)
854 {
855 	int count;
856 	int actual;
857 	int ret;
858 	int itype = DDI_INTR_TYPE_FIXED;
859 
860 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
861 	/* Fixed interrupts can only have one interrupt. */
862 	if (ret != DDI_SUCCESS || count != 1) {
863 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
864 		    count);
865 		return (DDI_FAILURE);
866 	}
867 
868 	qlt->intr_size = sizeof (ddi_intr_handle_t);
869 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
870 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
871 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
872 	if (ret != DDI_SUCCESS || actual != 1) {
873 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
874 		    actual);
875 		ret = DDI_FAILURE;
876 		goto free_mem;
877 	}
878 
879 	qlt->intr_cnt = actual;
880 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
881 	if (ret != DDI_SUCCESS) {
882 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
883 		ret = DDI_FAILURE;
884 		goto release_intr;
885 	}
886 	qlt_init_mutex(qlt);
887 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
888 	if (ret != DDI_SUCCESS) {
889 		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
890 		goto release_mutex;
891 	}
892 
893 	qlt->intr_flags |= QLT_INTR_FIXED;
894 	return (DDI_SUCCESS);
895 
896 release_mutex:
897 	qlt_destroy_mutex(qlt);
898 release_intr:
899 	(void) ddi_intr_free(qlt->htable[0]);
900 free_mem:
901 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
902 	qlt->htable = NULL;
903 	qlt_release_intr(qlt);
904 	return (ret);
905 }
906 
907 
/*
 * qlt_setup_interrupts
 *	Pick the best available interrupt type for this HBA.  On sparc,
 *	MSI-X is tried first (only when qlt_enable_msix is set), then
 *	MSI; everywhere else, and as the final fallback, legacy fixed
 *	interrupts are used.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
qlt_setup_interrupts(qlt_state_t *qlt)
{
#if defined(__sparc)
	int itypes = 0;
#endif

/*
 * x86 has a bug in the ddi_intr_block_enable/disable area (6562198). So use
 * MSI for sparc only for now.
 */
#if defined(__sparc)
	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
		/* Could not query; assume only fixed is supported. */
		itypes = DDI_INTR_TYPE_FIXED;
	}

	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
	if (itypes & DDI_INTR_TYPE_MSI) {
		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
#endif
	return (qlt_setup_fixed(qlt));
}
935 
936 /*
937  * Filling the hba attributes
938  */
939 void
940 qlt_populate_hba_fru_details(struct fct_local_port *port,
941     struct fct_port_attrs *port_attrs)
942 {
943 	caddr_t	bufp;
944 	int len;
945 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
946 
947 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
948 	    "QLogic Corp.");
949 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
950 	    "%s", QLT_NAME);
951 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
952 	    "%s", QLT_VERSION);
953 	port_attrs->serial_number[0] = '\0';
954 	port_attrs->hardware_version[0] = '\0';
955 
956 	(void) snprintf(port_attrs->firmware_version,
957 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
958 	    qlt->fw_minor, qlt->fw_subminor);
959 
960 	/* Get FCode version */
961 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
962 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
963 	    (int *)&len) == DDI_PROP_SUCCESS) {
964 		(void) snprintf(port_attrs->option_rom_version,
965 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
966 		kmem_free(bufp, (uint_t)len);
967 		bufp = NULL;
968 	} else {
969 #ifdef __sparc
970 		(void) snprintf(port_attrs->option_rom_version,
971 		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
972 #else
973 		(void) snprintf(port_attrs->option_rom_version,
974 		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
975 #endif
976 	}
977 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
978 	    qlt->nvram->subsystem_vendor_id[1] << 8;
979 
980 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
981 	    qlt->nvram->max_frame_length[0];
982 
983 	port_attrs->supported_cos = 0x10000000;
984 	port_attrs->supported_speed = PORT_SPEED_1G |
985 	    PORT_SPEED_2G | PORT_SPEED_4G;
986 	if (qlt->qlt_25xx_chip)
987 		port_attrs->supported_speed |= PORT_SPEED_8G;
988 	if (qlt->qlt_81xx_chip)
989 		port_attrs->supported_speed = PORT_SPEED_10G;
990 
991 	/* limit string length to nvr model_name length */
992 	len = (qlt->qlt_81xx_chip) ? 16 : 8;
993 	(void) snprintf(port_attrs->model,
994 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
995 	    "%s", qlt->nvram->model_name);
996 
997 	(void) snprintf(port_attrs->model_description,
998 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
999 	    FCHBA_MODEL_DESCRIPTION_LEN),
1000 	    "%s", qlt->nvram->model_name);
1001 }
1002 
1003 /* ARGSUSED */
1004 fct_status_t
1005 qlt_info(uint32_t cmd, fct_local_port_t *port,
1006     void *arg, uint8_t *buf, uint32_t *bufsizep)
1007 {
1008 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
1009 	mbox_cmd_t	*mcp;
1010 	fct_status_t	ret = FCT_SUCCESS;
1011 	uint8_t		*p;
1012 	fct_port_link_status_t	*link_status;
1013 
1014 	switch (cmd) {
1015 	case FC_TGT_PORT_RLS:
1016 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1017 			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1018 			    "fct_port_link_status_t=%xh\n", *bufsizep,
1019 			    sizeof (fct_port_link_status_t));
1020 			ret = FCT_FAILURE;
1021 			break;
1022 		}
1023 		/* send mailbox command to get link status */
1024 		mcp = qlt_alloc_mailbox_command(qlt, 156);
1025 		if (mcp == NULL) {
1026 			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1027 			ret = FCT_ALLOC_FAILURE;
1028 			break;
1029 		}
1030 
1031 		/* GET LINK STATUS count */
1032 		mcp->to_fw[0] = 0x6d;
1033 		mcp->to_fw[8] = 156/4;
1034 		mcp->to_fw_mask |= BIT_1 | BIT_8;
1035 		mcp->from_fw_mask |= BIT_1 | BIT_2;
1036 
1037 		ret = qlt_mailbox_command(qlt, mcp);
1038 		if (ret != QLT_SUCCESS) {
1039 			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1040 			qlt_free_mailbox_command(qlt, mcp);
1041 			break;
1042 		}
1043 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1044 
1045 		p = mcp->dbuf->db_sglist[0].seg_addr;
1046 		link_status = (fct_port_link_status_t *)buf;
1047 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1048 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1049 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1050 		link_status->PrimitiveSeqProtocolErrorCount =
1051 		    LE_32(*((uint32_t *)(p + 12)));
1052 		link_status->InvalidTransmissionWordCount =
1053 		    LE_32(*((uint32_t *)(p + 16)));
1054 		link_status->InvalidCRCCount =
1055 		    LE_32(*((uint32_t *)(p + 20)));
1056 
1057 		qlt_free_mailbox_command(qlt, mcp);
1058 		break;
1059 	default:
1060 		EL(qlt, "Unknown cmd=%xh\n", cmd);
1061 		ret = FCT_FAILURE;
1062 		break;
1063 	}
1064 	return (ret);
1065 }
1066 
1067 fct_status_t
1068 qlt_port_start(caddr_t arg)
1069 {
1070 	qlt_state_t *qlt = (qlt_state_t *)arg;
1071 	fct_local_port_t *port;
1072 	fct_dbuf_store_t *fds;
1073 	fct_status_t ret;
1074 
1075 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1076 		return (FCT_FAILURE);
1077 	}
1078 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1079 	if (port == NULL) {
1080 		goto qlt_pstart_fail_1;
1081 	}
1082 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1083 	if (fds == NULL) {
1084 		goto qlt_pstart_fail_2;
1085 	}
1086 	qlt->qlt_port = port;
1087 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
1088 	fds->fds_free_data_buf = qlt_dmem_free;
1089 	fds->fds_fca_private = (void *)qlt;
1090 	/*
1091 	 * Since we keep everything in the state struct and dont allocate any
1092 	 * port private area, just use that pointer to point to the
1093 	 * state struct.
1094 	 */
1095 	port->port_fca_private = qlt;
1096 	port->port_fca_version = FCT_FCA_MODREV_1;
1097 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
1098 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1099 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1100 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1101 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1102 	port->port_default_alias = qlt->qlt_port_alias;
1103 	port->port_pp = qlt_pp;
1104 	port->port_fds = fds;
1105 	port->port_max_logins = QLT_MAX_LOGINS;
1106 	port->port_max_xchges = QLT_MAX_XCHGES;
1107 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1108 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1109 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1110 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1111 	port->port_get_link_info = qlt_get_link_info;
1112 	port->port_register_remote_port = qlt_register_remote_port;
1113 	port->port_deregister_remote_port = qlt_deregister_remote_port;
1114 	port->port_send_cmd = qlt_send_cmd;
1115 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1116 	port->port_send_cmd_response = qlt_send_cmd_response;
1117 	port->port_abort_cmd = qlt_abort_cmd;
1118 	port->port_ctl = qlt_ctl;
1119 	port->port_flogi_xchg = qlt_do_flogi;
1120 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1121 	port->port_info = qlt_info;
1122 
1123 	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1124 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1125 		goto qlt_pstart_fail_2_5;
1126 	}
1127 
1128 	return (QLT_SUCCESS);
1129 #if 0
1130 qlt_pstart_fail_3:
1131 	(void) fct_deregister_local_port(port);
1132 #endif
1133 qlt_pstart_fail_2_5:
1134 	fct_free(fds);
1135 qlt_pstart_fail_2:
1136 	fct_free(port);
1137 	qlt->qlt_port = NULL;
1138 qlt_pstart_fail_1:
1139 	qlt_dmem_fini(qlt);
1140 	return (QLT_FAILURE);
1141 }
1142 
1143 fct_status_t
1144 qlt_port_stop(caddr_t arg)
1145 {
1146 	qlt_state_t *qlt = (qlt_state_t *)arg;
1147 	fct_status_t ret;
1148 
1149 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1150 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1151 		return (QLT_FAILURE);
1152 	}
1153 	fct_free(qlt->qlt_port->port_fds);
1154 	fct_free(qlt->qlt_port);
1155 	qlt->qlt_port = NULL;
1156 	qlt_dmem_fini(qlt);
1157 	return (QLT_SUCCESS);
1158 }
1159 
1160 /*
1161  * Called by framework to init the HBA.
1162  * Can be called in the middle of I/O. (Why ??)
1163  * Should make sure sane state both before and after the initialization
1164  */
1165 fct_status_t
1166 qlt_port_online(qlt_state_t *qlt)
1167 {
1168 	uint64_t	da;
1169 	int		instance;
1170 	fct_status_t	ret;
1171 	uint16_t	rcount;
1172 	caddr_t		icb;
1173 	mbox_cmd_t	*mcp;
1174 	uint8_t		*elsbmp;
1175 
1176 	instance = ddi_get_instance(qlt->dip);
1177 
1178 	/* XXX Make sure a sane state */
1179 
1180 	if ((ret = qlt_reset_chip_and_download_fw(qlt, 0)) != QLT_SUCCESS) {
1181 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1182 		return (ret);
1183 	}
1184 
1185 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1186 
1187 	/* Get resource count */
1188 	REG_WR16(qlt, REG_MBOX(0), 0x42);
1189 	ret = qlt_raw_mailbox_command(qlt);
1190 	rcount = REG_RD16(qlt, REG_MBOX(3));
1191 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1192 	if (ret != QLT_SUCCESS) {
1193 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1194 		return (ret);
1195 	}
1196 
1197 	/* Enable PUREX */
1198 	REG_WR16(qlt, REG_MBOX(0), 0x38);
1199 	REG_WR16(qlt, REG_MBOX(1), 0x0400);
1200 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1201 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1202 	ret = qlt_raw_mailbox_command(qlt);
1203 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1204 	if (ret != QLT_SUCCESS) {
1205 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1206 		cmn_err(CE_NOTE, "Enable PUREX failed");
1207 		return (ret);
1208 	}
1209 
1210 	/* Pass ELS bitmap to fw */
1211 	REG_WR16(qlt, REG_MBOX(0), 0x59);
1212 	REG_WR16(qlt, REG_MBOX(1), 0x0500);
1213 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1214 	bzero(elsbmp, 32);
1215 	da = qlt->queue_mem_cookie.dmac_laddress;
1216 	da += MBOX_DMA_MEM_OFFSET;
1217 	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
1218 	da >>= 16;
1219 	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
1220 	da >>= 16;
1221 	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
1222 	da >>= 16;
1223 	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
1224 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1225 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1226 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1227 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1228 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1229 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1230 	SETELSBIT(elsbmp, ELS_OP_SCN);
1231 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1232 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1233 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1234 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1235 	SETELSBIT(elsbmp, ELS_OP_RNID);
1236 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1237 	    DDI_DMA_SYNC_FORDEV);
1238 	ret = qlt_raw_mailbox_command(qlt);
1239 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
1240 	if (ret != QLT_SUCCESS) {
1241 		EL(qlt, "qlt_raw_mailbox_command=59h status=llxh\n", ret);
1242 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1243 		    "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
1244 		    elsbmp[1]);
1245 		return (ret);
1246 	}
1247 
1248 	/* Init queue pointers */
1249 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1250 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1251 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1252 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1253 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1254 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1255 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1256 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1257 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1258 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1259 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1260 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1261 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1262 
1263 	/*
1264 	 * XXX support for tunables. Also should we cache icb ?
1265 	 */
1266 	if (qlt->qlt_81xx_chip) {
1267 	    /* allocate extra 64 bytes for Extended init control block */
1268 		mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1269 	} else {
1270 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1271 	}
1272 	if (mcp == NULL) {
1273 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1274 		return (STMF_ALLOC_FAILURE);
1275 	}
1276 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1277 	if (qlt->qlt_81xx_chip) {
1278 		bzero(icb, 0xC0);
1279 	} else {
1280 		bzero(icb, 0x80);
1281 	}
1282 	da = qlt->queue_mem_cookie.dmac_laddress;
1283 	DMEM_WR16(qlt, icb, 1);		/* Version */
1284 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1285 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1286 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1287 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1288 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1289 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1290 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1291 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1292 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1293 	if (!qlt->qlt_81xx_chip) {
1294 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1295 	}
1296 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1297 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1298 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1299 	DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1300 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1301 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1302 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1303 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1304 	if (qlt->qlt_81xx_chip) {
1305 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1306 
1307 		DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1308 		DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1309 		DMEM_WR32(qlt, icb+0x70,
1310 		    qlt81nvr->enode_mac[0] |
1311 		    (qlt81nvr->enode_mac[1] << 8) |
1312 		    (qlt81nvr->enode_mac[2] << 16) |
1313 		    (qlt81nvr->enode_mac[3] << 24));
1314 		DMEM_WR16(qlt, icb+0x74,
1315 		    qlt81nvr->enode_mac[4] |
1316 		    (qlt81nvr->enode_mac[5] << 8));
1317 	} else {
1318 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1319 		    BIT_2 | BIT_1 | BIT_0);
1320 		DMEM_WR32(qlt, icb+0x60, BIT_5);
1321 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1322 		    BIT_4);
1323 	}
1324 
1325 	if (qlt->qlt_81xx_chip) {
1326 		qlt_dmem_bctl_t		*bctl;
1327 		uint32_t		index;
1328 		caddr_t			src;
1329 		caddr_t			dst;
1330 		qlt_nvram_81xx_t	*qlt81nvr;
1331 
1332 		dst = icb+0x80;
1333 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1334 		src = (caddr_t)&qlt81nvr->ext_blk;
1335 		index = sizeof (qlt_ext_icb_81xx_t);
1336 
1337 		/* Use defaults for cases where we find nothing in NVR */
1338 		if (*src == 0) {
1339 			EL(qlt, "nvram eicb=null\n");
1340 			cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1341 			    instance);
1342 			qlt81nvr->ext_blk.version[0] = 1;
1343 /*
1344  * not yet, for !FIP firmware at least
1345  *
1346  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1347  */
1348 #ifdef _LITTLE_ENDIAN
1349 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1350 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1351 #else
1352 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1353 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1354 #endif
1355 		}
1356 
1357 		while (index--) {
1358 			*dst++ = *src++;
1359 		}
1360 
1361 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1362 		da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1363 
1364 		mcp->to_fw[11] = (uint16_t)(da & 0xffff);
1365 		da >>= 16;
1366 		mcp->to_fw[10] = (uint16_t)(da & 0xffff);
1367 		da >>= 16;
1368 		mcp->to_fw[13] = (uint16_t)(da & 0xffff);
1369 		da >>= 16;
1370 		mcp->to_fw[12] = (uint16_t)(da & 0xffff);
1371 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1372 		    0xffff);
1373 
1374 		/* eicb enable */
1375 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1376 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1377 		    BIT_1;
1378 	}
1379 
1380 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1381 	mcp->to_fw[0] = 0x60;
1382 
1383 	/*
1384 	 * This is the 1st command after adapter initialize which will
1385 	 * use interrupts and regular mailbox interface.
1386 	 */
1387 	qlt->mbox_io_state = MBOX_STATE_READY;
1388 	qlt_enable_intr(qlt);
1389 	qlt->qlt_intr_enabled = 1;
1390 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1391 	/* Issue mailbox to firmware */
1392 	ret = qlt_mailbox_command(qlt, mcp);
1393 	if (ret != QLT_SUCCESS) {
1394 		EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1395 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1396 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1397 	}
1398 
1399 	mcp->to_fw_mask = BIT_0;
1400 	mcp->from_fw_mask = BIT_0 | BIT_1;
1401 	mcp->to_fw[0] = 0x28;
1402 	ret = qlt_mailbox_command(qlt, mcp);
1403 	if (ret != QLT_SUCCESS) {
1404 		EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1405 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1406 		    (long long)ret);
1407 	}
1408 
1409 	/*
1410 	 * Report FW versions for 81xx - MPI rev is useful
1411 	 */
1412 	if (qlt->qlt_81xx_chip) {
1413 		mcp->to_fw_mask = BIT_0;
1414 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1415 		    BIT_0;
1416 		mcp->to_fw[0] = 0x8;
1417 		ret = qlt_mailbox_command(qlt, mcp);
1418 		if (ret != QLT_SUCCESS) {
1419 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
1420 		} else {
1421 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1422 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1423 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1424 			    mcp->from_fw[11] & 0xff);
1425 		}
1426 	}
1427 
1428 	qlt_free_mailbox_command(qlt, mcp);
1429 	if (ret != QLT_SUCCESS)
1430 		return (ret);
1431 	return (FCT_SUCCESS);
1432 }
1433 
/*
 * Take the port offline: quiesce the mailbox interface (waiting up to
 * a few cv wakeups for an in-flight command to finish), then disable
 * interrupts and reset the chip without re-downloading firmware.
 * Always returns FCT_SUCCESS.
 */
fct_status_t
qlt_port_offline(qlt_state_t *qlt)
{
	int		retries;

	mutex_enter(&qlt->mbox_lock);

	/* Mailbox interface already torn down; nothing to wait for. */
	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
		mutex_exit(&qlt->mbox_lock);
		goto poff_mbox_done;
	}

	/* Wait to grab the mailboxes */
	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
	    retries++) {
		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
		/* Give up after several wakeups or if state went unknown. */
		if ((retries > 5) ||
		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
			mutex_exit(&qlt->mbox_lock);
			goto poff_mbox_done;
		}
	}
	/* Mark mailboxes unusable so no further commands are issued. */
	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
	mutex_exit(&qlt->mbox_lock);
poff_mbox_done:;
	qlt->intr_sneak_counter = 10;
	qlt_disable_intr(qlt);
	mutex_enter(&qlt->intr_lock);
	qlt->qlt_intr_enabled = 0;
	/* reset_only=1: reset the chip, skip firmware download */
	(void) qlt_reset_chip_and_download_fw(qlt, 1);
	drv_usecwait(20);
	qlt->intr_sneak_counter = 0;
	mutex_exit(&qlt->intr_lock);

	return (FCT_SUCCESS);
}
1471 
1472 static fct_status_t
1473 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1474 {
1475 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1476 	mbox_cmd_t *mcp;
1477 	fct_status_t fc_ret;
1478 	fct_status_t ret;
1479 	clock_t et;
1480 
1481 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1482 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1483 link_info_retry:
1484 	mcp->to_fw[0] = 0x20;
1485 	mcp->to_fw[9] = 0;
1486 	mcp->to_fw_mask |= BIT_0 | BIT_9;
1487 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1488 	/* Issue mailbox to firmware */
1489 	ret = qlt_mailbox_command(qlt, mcp);
1490 	if (ret != QLT_SUCCESS) {
1491 		EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1492 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1493 			/* Firmware is not ready */
1494 			if (ddi_get_lbolt() < et) {
1495 				delay(drv_usectohz(50000));
1496 				goto link_info_retry;
1497 			}
1498 		}
1499 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1500 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1501 		fc_ret = FCT_FAILURE;
1502 	} else {
1503 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1504 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1505 
1506 		li->port_speed = qlt->link_speed;
1507 		switch (mcp->from_fw[6]) {
1508 		case 1:
1509 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1510 			li->port_fca_flogi_done = 1;
1511 			break;
1512 		case 0:
1513 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1514 			li->port_no_fct_flogi = 1;
1515 			break;
1516 		case 3:
1517 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1518 			li->port_fca_flogi_done = 1;
1519 			break;
1520 		case 2: /*FALLTHROUGH*/
1521 		case 4:
1522 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1523 			li->port_fca_flogi_done = 1;
1524 			break;
1525 		default:
1526 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1527 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1528 		}
1529 		qlt->cur_topology = li->port_topology;
1530 		fc_ret = FCT_SUCCESS;
1531 	}
1532 	qlt_free_mailbox_command(qlt, mcp);
1533 
1534 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1535 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1536 		mcp->to_fw[0] = 0x64;
1537 		mcp->to_fw[1] = 0x7FE;
1538 		mcp->to_fw[9] = 0;
1539 		mcp->to_fw[10] = 0;
1540 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1541 		fc_ret = qlt_mailbox_command(qlt, mcp);
1542 		if (fc_ret != QLT_SUCCESS) {
1543 			EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1544 			    fc_ret);
1545 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1546 			    "database for F_port failed, ret = %llx", fc_ret);
1547 		} else {
1548 			uint8_t *p;
1549 
1550 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1551 			p = mcp->dbuf->db_sglist[0].seg_addr;
1552 			bcopy(p + 0x18, li->port_rpwwn, 8);
1553 			bcopy(p + 0x20, li->port_rnwwn, 8);
1554 		}
1555 		qlt_free_mailbox_command(qlt, mcp);
1556 	}
1557 	return (fc_ret);
1558 }
1559 
1560 static int
1561 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1562 {
1563 	int		instance;
1564 	qlt_state_t	*qlt;
1565 
1566 	if (otype != OTYP_CHR) {
1567 		return (EINVAL);
1568 	}
1569 
1570 	/*
1571 	 * Since this is for debugging only, only allow root to issue ioctl now
1572 	 */
1573 	if (drv_priv(credp)) {
1574 		return (EPERM);
1575 	}
1576 
1577 	instance = (int)getminor(*devp);
1578 	qlt = ddi_get_soft_state(qlt_state, instance);
1579 	if (qlt == NULL) {
1580 		return (ENXIO);
1581 	}
1582 
1583 	mutex_enter(&qlt->qlt_ioctl_lock);
1584 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1585 		/*
1586 		 * It is already open for exclusive access.
1587 		 * So shut the door on this caller.
1588 		 */
1589 		mutex_exit(&qlt->qlt_ioctl_lock);
1590 		return (EBUSY);
1591 	}
1592 
1593 	if (flag & FEXCL) {
1594 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1595 			/*
1596 			 * Exclusive operation not possible
1597 			 * as it is already opened
1598 			 */
1599 			mutex_exit(&qlt->qlt_ioctl_lock);
1600 			return (EBUSY);
1601 		}
1602 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1603 	}
1604 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1605 	mutex_exit(&qlt->qlt_ioctl_lock);
1606 
1607 	return (0);
1608 }
1609 
1610 /* ARGSUSED */
1611 static int
1612 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1613 {
1614 	int		instance;
1615 	qlt_state_t	*qlt;
1616 
1617 	if (otype != OTYP_CHR) {
1618 		return (EINVAL);
1619 	}
1620 
1621 	instance = (int)getminor(dev);
1622 	qlt = ddi_get_soft_state(qlt_state, instance);
1623 	if (qlt == NULL) {
1624 		return (ENXIO);
1625 	}
1626 
1627 	mutex_enter(&qlt->qlt_ioctl_lock);
1628 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1629 		mutex_exit(&qlt->qlt_ioctl_lock);
1630 		return (ENODEV);
1631 	}
1632 
1633 	/*
1634 	 * It looks there's one hole here, maybe there could several concurrent
1635 	 * shareed open session, but we never check this case.
1636 	 * But it will not hurt too much, disregard it now.
1637 	 */
1638 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1639 	mutex_exit(&qlt->qlt_ioctl_lock);
1640 
1641 	return (0);
1642 }
1643 
1644 /*
1645  * All of these ioctls are unstable interfaces which are meant to be used
1646  * in a controlled lab env. No formal testing will be (or needs to be) done
1647  * for these ioctls. Specially note that running with an additional
1648  * uploaded firmware is not supported and is provided here for test
1649  * purposes only.
1650  */
1651 /* ARGSUSED */
1652 static int
1653 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1654     cred_t *credp, int *rval)
1655 {
1656 	qlt_state_t	*qlt;
1657 	int		ret = 0;
1658 #ifdef _LITTLE_ENDIAN
1659 	int		i;
1660 #endif
1661 	stmf_iocdata_t	*iocd;
1662 	void		*ibuf = NULL;
1663 	void		*obuf = NULL;
1664 	uint32_t	*intp;
1665 	qlt_fw_info_t	*fwi;
1666 	mbox_cmd_t	*mcp;
1667 	fct_status_t	st;
1668 	char		info[80];
1669 	fct_status_t	ret2;
1670 
1671 	if (drv_priv(credp) != 0)
1672 		return (EPERM);
1673 
1674 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1675 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1676 	if (ret)
1677 		return (ret);
1678 	iocd->stmf_error = 0;
1679 
1680 	switch (cmd) {
1681 	case QLT_IOCTL_FETCH_FWDUMP:
1682 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1683 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1684 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1685 			ret = EINVAL;
1686 			break;
1687 		}
1688 		mutex_enter(&qlt->qlt_ioctl_lock);
1689 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1690 			mutex_exit(&qlt->qlt_ioctl_lock);
1691 			ret = ENODATA;
1692 			EL(qlt, "no fwdump\n");
1693 			iocd->stmf_error = QLTIO_NO_DUMP;
1694 			break;
1695 		}
1696 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1697 			mutex_exit(&qlt->qlt_ioctl_lock);
1698 			ret = EBUSY;
1699 			EL(qlt, "fwdump inprogress\n");
1700 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1701 			break;
1702 		}
1703 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1704 			mutex_exit(&qlt->qlt_ioctl_lock);
1705 			ret = EEXIST;
1706 			EL(qlt, "fwdump already fetched\n");
1707 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1708 			break;
1709 		}
1710 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1711 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1712 		mutex_exit(&qlt->qlt_ioctl_lock);
1713 
1714 		break;
1715 
1716 	case QLT_IOCTL_TRIGGER_FWDUMP:
1717 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1718 			ret = EACCES;
1719 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1720 			break;
1721 		}
1722 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1723 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1724 		info[79] = 0;
1725 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1726 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1727 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1728 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1729 			    "%llxh\n", ret2);
1730 			ret = EIO;
1731 		}
1732 		break;
1733 	case QLT_IOCTL_UPLOAD_FW:
1734 		if ((iocd->stmf_ibuf_size < 1024) ||
1735 		    (iocd->stmf_ibuf_size & 3)) {
1736 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1737 			    iocd->stmf_ibuf_size);
1738 			ret = EINVAL;
1739 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1740 			break;
1741 		}
1742 		intp = (uint32_t *)ibuf;
1743 #ifdef _LITTLE_ENDIAN
1744 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1745 			intp[i] = BSWAP_32(intp[i]);
1746 		}
1747 #endif
1748 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1749 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1750 		    iocd->stmf_ibuf_size)) {
1751 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1752 			    iocd->stmf_ibuf_size);
1753 			ret = EINVAL;
1754 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1755 			break;
1756 		}
1757 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1758 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1759 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1760 		    ((intp[8] & 3) == 0))) {
1761 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1762 			ret = EACCES;
1763 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1764 			break;
1765 		}
1766 
1767 		/* Everything looks ok, lets copy this firmware */
1768 		if (qlt->fw_code01) {
1769 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1770 			    qlt->fw_length02) << 2);
1771 			qlt->fw_code01 = NULL;
1772 		} else {
1773 			atomic_add_32(&qlt_loaded_counter, 1);
1774 		}
1775 		qlt->fw_length01 = intp[3];
1776 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1777 		    KM_SLEEP);
1778 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1779 		qlt->fw_addr01 = intp[2];
1780 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1781 		qlt->fw_addr02 = qlt->fw_code02[2];
1782 		qlt->fw_length02 = qlt->fw_code02[3];
1783 		break;
1784 
1785 	case QLT_IOCTL_CLEAR_FW:
1786 		if (qlt->fw_code01) {
1787 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1788 			    qlt->fw_length02) << 2);
1789 			qlt->fw_code01 = NULL;
1790 			atomic_add_32(&qlt_loaded_counter, -1);
1791 		}
1792 		break;
1793 
1794 	case QLT_IOCTL_GET_FW_INFO:
1795 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1796 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1797 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1798 			ret = EINVAL;
1799 			break;
1800 		}
1801 		fwi = (qlt_fw_info_t *)obuf;
1802 		if (qlt->qlt_stay_offline) {
1803 			fwi->fwi_stay_offline = 1;
1804 		}
1805 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1806 			fwi->fwi_port_active = 1;
1807 		}
1808 		fwi->fwi_active_major = qlt->fw_major;
1809 		fwi->fwi_active_minor = qlt->fw_minor;
1810 		fwi->fwi_active_subminor = qlt->fw_subminor;
1811 		fwi->fwi_active_attr = qlt->fw_attr;
1812 		if (qlt->fw_code01) {
1813 			fwi->fwi_fw_uploaded = 1;
1814 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1815 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1816 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1817 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1818 		}
1819 		if (qlt->qlt_81xx_chip) {
1820 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1821 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1822 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1823 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1824 		} else if (qlt->qlt_25xx_chip) {
1825 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1826 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1827 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1828 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1829 		} else {
1830 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1831 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1832 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1833 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1834 		}
1835 		break;
1836 
1837 	case QLT_IOCTL_STAY_OFFLINE:
1838 		if (!iocd->stmf_ibuf_size) {
1839 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1840 			    iocd->stmf_ibuf_size);
1841 			ret = EINVAL;
1842 			break;
1843 		}
1844 		if (*((char *)ibuf)) {
1845 			qlt->qlt_stay_offline = 1;
1846 		} else {
1847 			qlt->qlt_stay_offline = 0;
1848 		}
1849 		break;
1850 
1851 	case QLT_IOCTL_MBOX:
1852 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1853 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1854 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1855 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1856 			ret = EINVAL;
1857 			break;
1858 		}
1859 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1860 		if (mcp == NULL) {
1861 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1862 			ret = ENOMEM;
1863 			break;
1864 		}
1865 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1866 		st = qlt_mailbox_command(qlt, mcp);
1867 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1868 		qlt_free_mailbox_command(qlt, mcp);
1869 		if (st != QLT_SUCCESS) {
1870 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1871 				st = QLT_SUCCESS;
1872 		}
1873 		if (st != QLT_SUCCESS) {
1874 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1875 			ret = EIO;
1876 			switch (st) {
1877 			case QLT_MBOX_NOT_INITIALIZED:
1878 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1879 				break;
1880 			case QLT_MBOX_BUSY:
1881 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1882 				break;
1883 			case QLT_MBOX_TIMEOUT:
1884 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1885 				break;
1886 			case QLT_MBOX_ABORTED:
1887 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1888 				break;
1889 			}
1890 		}
1891 		break;
1892 
1893 	case QLT_IOCTL_ELOG:
1894 		qlt_dump_el_trace_buffer(qlt);
1895 		break;
1896 
1897 	default:
1898 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
1899 		ret = ENOTTY;
1900 	}
1901 
1902 	if (ret == 0) {
1903 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1904 	} else if (iocd->stmf_error) {
1905 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1906 	}
1907 	if (obuf) {
1908 		kmem_free(obuf, iocd->stmf_obuf_size);
1909 		obuf = NULL;
1910 	}
1911 	if (ibuf) {
1912 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1913 		ibuf = NULL;
1914 	}
1915 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1916 	return (ret);
1917 }
1918 
/*
 * FCA control entry point invoked by the FCT framework to bring the port
 * online/offline and to acknowledge completion of those state changes.
 * cmd is one of FCT_CMD_PORT_ONLINE/OFFLINE or the corresponding
 * FCT_ACK_PORT_*_COMPLETE; for the offline path arg carries a
 * stmf_state_change_info_t describing the requested flags.
 */
static void
qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
{
	stmf_change_status_t		st;
	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
	qlt_state_t			*qlt;
	fct_status_t			ret;

	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
	    (cmd == FCT_CMD_PORT_OFFLINE) ||
	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));

	qlt = (qlt_state_t *)port->port_fca_private;
	st.st_completion_status = FCT_SUCCESS;
	st.st_additional_info = NULL;

	switch (cmd) {
	case FCT_CMD_PORT_ONLINE:
		/* Only a transition from OFFLINE to ONLINE is valid. */
		if (qlt->qlt_state == FCT_STATE_ONLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
			st.st_completion_status = FCT_FAILURE;
		if (st.st_completion_status == FCT_SUCCESS) {
			qlt->qlt_state = FCT_STATE_ONLINING;
			qlt->qlt_state_not_acked = 1;
			st.st_completion_status = qlt_port_online(qlt);
			if (st.st_completion_status != STMF_SUCCESS) {
				EL(qlt, "PORT_ONLINE status=%xh\n",
				    st.st_completion_status);
				/* Online failed; roll state back. */
				qlt->qlt_state = FCT_STATE_OFFLINE;
				qlt->qlt_state_not_acked = 0;
			} else {
				qlt->qlt_state = FCT_STATE_ONLINE;
			}
		}
		/* Report the outcome (success or failure) back to FCT. */
		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
		qlt->qlt_change_state_flags = 0;
		break;

	case FCT_CMD_PORT_OFFLINE:
		/* Only a transition from ONLINE to OFFLINE is valid. */
		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
			st.st_completion_status = STMF_ALREADY;
		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
			st.st_completion_status = FCT_FAILURE;
		}
		if (st.st_completion_status == FCT_SUCCESS) {
			qlt->qlt_state = FCT_STATE_OFFLINING;
			qlt->qlt_state_not_acked = 1;

			/* Capture a firmware dump first if one was asked. */
			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
				(void) qlt_firmware_dump(port, ssci);
			}
			/* Remembered for the OFFLINE_COMPLETE ack below. */
			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
			st.st_completion_status = qlt_port_offline(qlt);
			if (st.st_completion_status != STMF_SUCCESS) {
				EL(qlt, "PORT_OFFLINE status=%xh\n",
				    st.st_completion_status);
				/* Offline failed; roll state back. */
				qlt->qlt_state = FCT_STATE_ONLINE;
				qlt->qlt_state_not_acked = 0;
			} else {
				qlt->qlt_state = FCT_STATE_OFFLINE;
			}
		}
		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
		break;

	case FCT_ACK_PORT_ONLINE_COMPLETE:
		qlt->qlt_state_not_acked = 0;
		break;

	case FCT_ACK_PORT_OFFLINE_COMPLETE:
		qlt->qlt_state_not_acked = 0;
		/*
		 * If the offline was part of a reset request, kick off port
		 * re-initialization now (unless told to stay offline).
		 */
		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
		    (qlt->qlt_stay_offline == 0)) {
			if ((ret = fct_port_initialize(port,
			    qlt->qlt_change_state_flags,
			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
				EL(qlt, "fct_port_initialize status=%llxh\n",
				    ret);
				cmn_err(CE_WARN, "qlt_ctl: "
				    "fct_port_initialize failed, please use "
				    "stmfstate to start the port-%s manualy",
				    qlt->qlt_port_alias);
			}
		}
		break;
	}
}
2009 
2010 /* ARGSUSED */
2011 static fct_status_t
2012 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2013 {
2014 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
2015 
2016 	EL(qlt, "FLOGI requested not supported\n");
2017 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2018 	return (FCT_FAILURE);
2019 }
2020 
2021 /*
2022  * Return a pointer to n entries in the request queue. Assumes that
2023  * request queue lock is held. Does a very short busy wait if
2024  * less/zero entries are available. Retuns NULL if it still cannot
2025  * fullfill the request.
2026  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2027  */
2028 caddr_t
2029 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
2030 {
2031 	int try = 0;
2032 
2033 	while (qlt->req_available < n) {
2034 		uint32_t val1, val2, val3;
2035 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2036 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2037 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2038 		if ((val1 != val2) || (val2 != val3))
2039 			continue;
2040 
2041 		qlt->req_ndx_from_fw = val1;
2042 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2043 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2044 		    (REQUEST_QUEUE_ENTRIES - 1));
2045 		if (qlt->req_available < n) {
2046 			if (try < 2) {
2047 				drv_usecwait(100);
2048 				try++;
2049 				continue;
2050 			} else {
2051 				stmf_trace(qlt->qlt_port_alias,
2052 				    "Req Q is full");
2053 				return (NULL);
2054 			}
2055 		}
2056 		break;
2057 	}
2058 	/* We dont change anything until the entries are sumitted */
2059 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2060 }
2061 
2062 /*
2063  * updates the req in ptr to fw. Assumes that req lock is held.
2064  */
2065 void
2066 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2067 {
2068 	ASSERT(n >= 1);
2069 	qlt->req_ndx_to_fw += n;
2070 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2071 	qlt->req_available -= n;
2072 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2073 }
2074 
2075 
2076 /*
2077  * Return a pointer to n entries in the priority request queue. Assumes that
2078  * priority request queue lock is held. Does a very short busy wait if
2079  * less/zero entries are available. Retuns NULL if it still cannot
2080  * fullfill the request.
2081  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2082  */
2083 caddr_t
2084 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2085 {
2086 	int try = 0;
2087 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2088 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2089 	    (PRIORITY_QUEUE_ENTRIES - 1));
2090 
2091 	while (req_available < n) {
2092 		uint32_t val1, val2, val3;
2093 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2094 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2095 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2096 		if ((val1 != val2) || (val2 != val3))
2097 			continue;
2098 
2099 		qlt->preq_ndx_from_fw = val1;
2100 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2101 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2102 		    (PRIORITY_QUEUE_ENTRIES - 1));
2103 		if (req_available < n) {
2104 			if (try < 2) {
2105 				drv_usecwait(100);
2106 				try++;
2107 				continue;
2108 			} else {
2109 				return (NULL);
2110 			}
2111 		}
2112 		break;
2113 	}
2114 	/* We dont change anything until the entries are sumitted */
2115 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2116 }
2117 
2118 /*
2119  * updates the req in ptr to fw. Assumes that req lock is held.
2120  */
2121 void
2122 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2123 {
2124 	ASSERT(n >= 1);
2125 	qlt->preq_ndx_to_fw += n;
2126 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2127 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2128 }
2129 
2130 /*
2131  * - Should not be called from Interrupt.
2132  * - A very hardware specific function. Does not touch driver state.
2133  * - Assumes that interrupts are disabled or not there.
2134  * - Expects that the caller makes sure that all activity has stopped
2135  *   and its ok now to go ahead and reset the chip. Also the caller
2136  *   takes care of post reset damage control.
2137  * - called by initialize adapter() and dump_fw(for reset only).
2138  * - During attach() nothing much is happening and during initialize_adapter()
2139  *   the function (caller) does all the housekeeping so that this function
2140  *   can execute in peace.
2141  * - Returns 0 on success.
2142  */
2143 static fct_status_t
2144 qlt_reset_chip_and_download_fw(qlt_state_t *qlt, int reset_only)
2145 {
2146 	int cntr;
2147 	uint32_t start_addr;
2148 	fct_status_t ret;
2149 
2150 	EL(qlt, "initiated, flags=%xh\n", reset_only);
2151 
2152 	/* XXX: Switch off LEDs */
2153 
2154 	/* Disable Interrupts */
2155 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2156 	(void) REG_RD32(qlt, REG_INTR_CTRL);
2157 	/* Stop DMA */
2158 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2159 
2160 	/* Wait for DMA to be stopped */
2161 	cntr = 0;
2162 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2163 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2164 		cntr++;
2165 		/* 3 sec should be more than enough */
2166 		if (cntr == 300)
2167 			return (QLT_DMA_STUCK);
2168 	}
2169 
2170 	/* Reset the Chip */
2171 	REG_WR32(qlt, REG_CTRL_STATUS,
2172 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2173 
2174 	qlt->qlt_link_up = 0;
2175 
2176 	drv_usecwait(100);
2177 
2178 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2179 	cntr = 0;
2180 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2181 		delay(drv_usectohz(10000));
2182 		cntr++;
2183 		/* 3 sec should be more than enough */
2184 		if (cntr == 300)
2185 			return (QLT_ROM_STUCK);
2186 	}
2187 	/* Disable Interrupts (Probably not needed) */
2188 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2189 	if (reset_only)
2190 		return (QLT_SUCCESS);
2191 
2192 	/* Load the two segments */
2193 	if (qlt->fw_code01 != NULL) {
2194 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2195 		    qlt->fw_addr01);
2196 		if (ret == QLT_SUCCESS) {
2197 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2198 			    qlt->fw_length02, qlt->fw_addr02);
2199 		}
2200 		start_addr = qlt->fw_addr01;
2201 	} else if (qlt->qlt_81xx_chip) {
2202 		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2203 		    fw8100_addr01);
2204 		if (ret == QLT_SUCCESS) {
2205 			ret = qlt_load_risc_ram(qlt, fw8100_code02,
2206 			    fw8100_length02, fw8100_addr02);
2207 		}
2208 		start_addr = fw8100_addr01;
2209 	} else if (qlt->qlt_25xx_chip) {
2210 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2211 		    fw2500_addr01);
2212 		if (ret == QLT_SUCCESS) {
2213 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
2214 			    fw2500_length02, fw2500_addr02);
2215 		}
2216 		start_addr = fw2500_addr01;
2217 	} else {
2218 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2219 		    fw2400_addr01);
2220 		if (ret == QLT_SUCCESS) {
2221 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
2222 			    fw2400_length02, fw2400_addr02);
2223 		}
2224 		start_addr = fw2400_addr01;
2225 	}
2226 	if (ret != QLT_SUCCESS) {
2227 		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2228 		return (ret);
2229 	}
2230 
2231 	/* Verify Checksum */
2232 	REG_WR16(qlt, REG_MBOX(0), 7);
2233 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
2234 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
2235 	ret = qlt_raw_mailbox_command(qlt);
2236 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2237 	if (ret != QLT_SUCCESS) {
2238 		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2239 		return (ret);
2240 	}
2241 
2242 	/* Execute firmware */
2243 	REG_WR16(qlt, REG_MBOX(0), 2);
2244 	REG_WR16(qlt, REG_MBOX(1), (start_addr >> 16) & 0xffff);
2245 	REG_WR16(qlt, REG_MBOX(2),  start_addr & 0xffff);
2246 	REG_WR16(qlt, REG_MBOX(3), 0);
2247 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
2248 	ret = qlt_raw_mailbox_command(qlt);
2249 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2250 	if (ret != QLT_SUCCESS) {
2251 		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2252 		return (ret);
2253 	}
2254 
2255 	/* Get revisions (About Firmware) */
2256 	REG_WR16(qlt, REG_MBOX(0), 8);
2257 	ret = qlt_raw_mailbox_command(qlt);
2258 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2259 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2260 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2261 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2262 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2263 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2264 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2265 	if (ret != QLT_SUCCESS) {
2266 		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2267 		return (ret);
2268 	}
2269 
2270 	return (QLT_SUCCESS);
2271 }
2272 
2273 /*
2274  * Used only from qlt_reset_chip_and_download_fw().
2275  */
2276 static fct_status_t
2277 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2278 				uint32_t word_count, uint32_t risc_addr)
2279 {
2280 	uint32_t words_sent = 0;
2281 	uint32_t words_being_sent;
2282 	uint32_t *cur_host_addr;
2283 	uint32_t cur_risc_addr;
2284 	uint64_t da;
2285 	fct_status_t ret;
2286 
2287 	while (words_sent < word_count) {
2288 		cur_host_addr = &(host_addr[words_sent]);
2289 		cur_risc_addr = risc_addr + (words_sent << 2);
2290 		words_being_sent = min(word_count - words_sent,
2291 		    TOTAL_DMA_MEM_SIZE >> 2);
2292 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2293 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2294 		    DDI_DEV_AUTOINCR);
2295 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2296 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2297 		da = qlt->queue_mem_cookie.dmac_laddress;
2298 		REG_WR16(qlt, REG_MBOX(0), 0x0B);
2299 		REG_WR16(qlt, REG_MBOX(1), risc_addr & 0xffff);
2300 		REG_WR16(qlt, REG_MBOX(8), ((cur_risc_addr >> 16) & 0xffff));
2301 		REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
2302 		da >>= 16;
2303 		REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
2304 		da >>= 16;
2305 		REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
2306 		da >>= 16;
2307 		REG_WR16(qlt, REG_MBOX(6), da & 0xffff);
2308 		REG_WR16(qlt, REG_MBOX(5), words_being_sent & 0xffff);
2309 		REG_WR16(qlt, REG_MBOX(4), (words_being_sent >> 16) & 0xffff);
2310 		ret = qlt_raw_mailbox_command(qlt);
2311 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2312 		if (ret != QLT_SUCCESS) {
2313 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2314 			    ret);
2315 			return (ret);
2316 		}
2317 		words_sent += words_being_sent;
2318 	}
2319 	return (QLT_SUCCESS);
2320 }
2321 
2322 /*
2323  * Not used during normal operation. Only during driver init.
2324  * Assumes that interrupts are disabled and mailboxes are loaded.
2325  * Just triggers the mailbox command an waits for the completion.
2326  * Also expects that There is nothing else going on and we will only
2327  * get back a mailbox completion from firmware.
2328  * ---DOES NOT CLEAR INTERRUPT---
2329  * Used only from the code path originating from
2330  * qlt_reset_chip_and_download_fw()
2331  */
2332 static fct_status_t
2333 qlt_raw_mailbox_command(qlt_state_t *qlt)
2334 {
2335 	int cntr = 0;
2336 	uint32_t status;
2337 
2338 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2339 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_INTR_REQUEST) == 0) {
2340 		cntr++;
2341 		if (cntr == 100)
2342 			return (QLT_MAILBOX_STUCK);
2343 		delay(drv_usectohz(10000));
2344 	}
2345 	status = (REG_RD32(qlt, REG_RISC_STATUS) & 0xff);
2346 	if ((status == 1) || (status == 2) ||
2347 	    (status == 0x10) || (status == 0x11)) {
2348 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2349 		if (mbox0 == 0x4000)
2350 			return (QLT_SUCCESS);
2351 		else
2352 			return (QLT_MBOX_FAILED | mbox0);
2353 	}
2354 	/* This is unexpected, dump a message */
2355 	cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx",
2356 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2357 	return (QLT_UNEXPECTED_RESPONSE);
2358 }
2359 
/*
 * Allocate and zero a mailbox command structure. If dma_size is non-zero,
 * a DMA buffer is attached and its device address is pre-loaded into
 * mailboxes 2/3/6/7 (the common layout for buffer-based commands).
 * Returns NULL only if the DMA buffer allocation fails.
 */
static mbox_cmd_t *
qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
{
	mbox_cmd_t *mcp;

	/* KM_SLEEP: may block, never returns NULL. */
	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
	if (dma_size) {
		qlt_dmem_bctl_t *bctl;
		uint64_t da;

		/* Actual allocated size is returned through dma_size. */
		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
		if (mcp->dbuf == NULL) {
			kmem_free(mcp, sizeof (*mcp));
			return (NULL);
		}
		mcp->dbuf->db_data_size = dma_size;
		ASSERT(mcp->dbuf->db_sglist_length == 1);

		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
		da = bctl->bctl_dev_addr;
		/* This is the most common initialization of dma ptrs */
		/* 64-bit device address split as mb3, mb2, mb7, mb6. */
		mcp->to_fw[3] = (uint16_t)(da & 0xffff);
		da >>= 16;
		mcp->to_fw[2] = (uint16_t)(da & 0xffff);
		da >>= 16;
		mcp->to_fw[7] = (uint16_t)(da & 0xffff);
		da >>= 16;
		mcp->to_fw[6] = (uint16_t)(da & 0xffff);
		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
	}
	/* Mailbox 0 (command/status) is always transferred both ways. */
	mcp->to_fw_mask |= BIT_0;
	mcp->from_fw_mask |= BIT_0;
	return (mcp);
}
2394 
2395 void
2396 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2397 {
2398 	if (mcp->dbuf)
2399 		qlt_i_dmem_free(qlt, mcp->dbuf);
2400 	kmem_free(mcp, sizeof (*mcp));
2401 }
2402 
2403 /*
2404  * This can sleep. Should never be called from interrupt context.
2405  */
2406 static fct_status_t
2407 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2408 {
2409 	int	retries;
2410 	int	i;
2411 	char	info[80];
2412 
2413 	if (curthread->t_flag & T_INTR_THREAD) {
2414 		ASSERT(0);
2415 		return (QLT_MBOX_FAILED);
2416 	}
2417 
2418 	mutex_enter(&qlt->mbox_lock);
2419 	/* See if mailboxes are still uninitialized */
2420 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2421 		mutex_exit(&qlt->mbox_lock);
2422 		return (QLT_MBOX_NOT_INITIALIZED);
2423 	}
2424 
2425 	/* Wait to grab the mailboxes */
2426 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2427 	    retries++) {
2428 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2429 		if ((retries > 5) ||
2430 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2431 			mutex_exit(&qlt->mbox_lock);
2432 			return (QLT_MBOX_BUSY);
2433 		}
2434 	}
2435 	/* Make sure we always ask for mailbox 0 */
2436 	mcp->from_fw_mask |= BIT_0;
2437 
2438 	/* Load mailboxes, set state and generate RISC interrupt */
2439 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2440 	qlt->mcp = mcp;
2441 	for (i = 0; i < MAX_MBOXES; i++) {
2442 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2443 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2444 	}
2445 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_HOST_TO_RISC_INTR);
2446 
2447 qlt_mbox_wait_loop:;
2448 	/* Wait for mailbox command completion */
2449 	if (cv_reltimedwait(&qlt->mbox_cv, &qlt->mbox_lock,
2450 	    drv_usectohz(MBOX_TIMEOUT), TR_CLOCK_TICK) < 0) {
2451 		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2452 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2453 		info[79] = 0;
2454 		qlt->mcp = NULL;
2455 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2456 		mutex_exit(&qlt->mbox_lock);
2457 
2458 		/*
2459 		 * XXX Throw HBA fatal error event
2460 		 */
2461 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2462 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2463 		return (QLT_MBOX_TIMEOUT);
2464 	}
2465 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2466 		goto qlt_mbox_wait_loop;
2467 
2468 	qlt->mcp = NULL;
2469 
2470 	/* Make sure its a completion */
2471 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2472 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2473 		mutex_exit(&qlt->mbox_lock);
2474 		return (QLT_MBOX_ABORTED);
2475 	}
2476 
2477 	/* MBox command completed. Clear state, retuen based on mbox 0 */
2478 	/* Mailboxes are already loaded by interrupt routine */
2479 	qlt->mbox_io_state = MBOX_STATE_READY;
2480 	mutex_exit(&qlt->mbox_lock);
2481 	if (mcp->from_fw[0] != 0x4000)
2482 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2483 
2484 	return (QLT_SUCCESS);
2485 }
2486 
2487 /*
2488  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2489  */
2490 /* ARGSUSED */
2491 static uint_t
2492 qlt_isr(caddr_t arg, caddr_t arg2)
2493 {
2494 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2495 	uint32_t	risc_status, intr_type;
2496 	int		i;
2497 	int		intr_loop_count;
2498 	char		info[80];
2499 
2500 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2501 	if (!mutex_tryenter(&qlt->intr_lock)) {
2502 		/*
2503 		 * Normally we will always get this lock. If tryenter is
2504 		 * failing then it means that driver is trying to do
2505 		 * some cleanup and is masking the intr but some intr
2506 		 * has sneaked in between. See if our device has generated
2507 		 * this intr. If so then wait a bit and return claimed.
2508 		 * If not then return claimed if this is the 1st instance
2509 		 * of a interrupt after driver has grabbed the lock.
2510 		 */
2511 		if (risc_status & BIT_15) {
2512 			drv_usecwait(10);
2513 			return (DDI_INTR_CLAIMED);
2514 		} else if (qlt->intr_sneak_counter) {
2515 			qlt->intr_sneak_counter--;
2516 			return (DDI_INTR_CLAIMED);
2517 		} else {
2518 			return (DDI_INTR_UNCLAIMED);
2519 		}
2520 	}
2521 	if (((risc_status & BIT_15) == 0) ||
2522 	    (qlt->qlt_intr_enabled == 0)) {
2523 		/*
2524 		 * This might be a pure coincedence that we are operating
2525 		 * in a interrupt disabled mode and another device
2526 		 * sharing the interrupt line has generated an interrupt
2527 		 * while an interrupt from our device might be pending. Just
2528 		 * ignore it and let the code handling the interrupt
2529 		 * disabled mode handle it.
2530 		 */
2531 		mutex_exit(&qlt->intr_lock);
2532 		return (DDI_INTR_UNCLAIMED);
2533 	}
2534 
2535 	/*
2536 	 * XXX take care for MSI case. disable intrs
2537 	 * Its gonna be complicated because of the max iterations.
2538 	 * as hba will have posted the intr which did not go on PCI
2539 	 * but we did not service it either because of max iterations.
2540 	 * Maybe offload the intr on a different thread.
2541 	 */
2542 	intr_loop_count = 0;
2543 
2544 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2545 
2546 intr_again:;
2547 
2548 	/* check for risc pause */
2549 	if (risc_status & BIT_8) {
2550 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
2551 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2552 		    qlt->instance, risc_status);
2553 		(void) snprintf(info, 80, "Risc Pause %08x", risc_status);
2554 		info[79] = 0;
2555 		(void) fct_port_shutdown(qlt->qlt_port,
2556 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2557 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2558 	}
2559 
2560 	/* First check for high performance path */
2561 	intr_type = risc_status & 0xff;
2562 	if (intr_type == 0x1D) {
2563 		qlt->atio_ndx_from_fw = (uint16_t)
2564 		    REG_RD32(qlt, REG_ATIO_IN_PTR);
2565 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2566 		qlt->resp_ndx_from_fw = risc_status >> 16;
2567 		qlt_handle_atio_queue_update(qlt);
2568 		qlt_handle_resp_queue_update(qlt);
2569 	} else if (intr_type == 0x1C) {
2570 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2571 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2572 		qlt_handle_atio_queue_update(qlt);
2573 	} else if (intr_type == 0x13) {
2574 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2575 		qlt->resp_ndx_from_fw = risc_status >> 16;
2576 		qlt_handle_resp_queue_update(qlt);
2577 	} else if (intr_type == 0x12) {
2578 		uint16_t code = (uint16_t)(risc_status >> 16);
2579 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2580 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2581 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2582 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2583 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2584 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2585 
2586 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2587 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2588 		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2589 		    mbox5, mbox6);
2590 		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
2591 		    " mb3=%x, mb5=%x, mb6=%x", qlt->instance, code, mbox1,
2592 		    mbox2, mbox3, mbox5, mbox6);
2593 
2594 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2595 			if (qlt->qlt_link_up) {
2596 				fct_handle_event(qlt->qlt_port,
2597 				    FCT_EVENT_LINK_RESET, 0, 0);
2598 			}
2599 		} else if (code == 0x8012) {
2600 			qlt->qlt_link_up = 0;
2601 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2602 			    0, 0);
2603 		} else if (code == 0x8011) {
2604 			switch (mbox1) {
2605 			case 0: qlt->link_speed = PORT_SPEED_1G;
2606 				break;
2607 			case 1: qlt->link_speed = PORT_SPEED_2G;
2608 				break;
2609 			case 3: qlt->link_speed = PORT_SPEED_4G;
2610 				break;
2611 			case 4: qlt->link_speed = PORT_SPEED_8G;
2612 				break;
2613 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
2614 				break;
2615 			default:
2616 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2617 			}
2618 			qlt->qlt_link_up = 1;
2619 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2620 			    0, 0);
2621 		} else if ((code == 0x8002) || (code == 0x8003) ||
2622 		    (code == 0x8004) || (code == 0x8005)) {
2623 			(void) snprintf(info, 80,
2624 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2625 			    code, mbox1, mbox2, mbox5, mbox6);
2626 			info[79] = 0;
2627 			(void) fct_port_shutdown(qlt->qlt_port,
2628 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2629 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2630 		} else if (code == 0x800F) {
2631 			(void) snprintf(info, 80,
2632 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
2633 			    mbox1, mbox2, mbox3);
2634 
2635 			if (mbox1 != 1) {
2636 				/* issue "verify fw" */
2637 				qlt_verify_fw(qlt);
2638 			}
2639 		} else if (code == 0x8101) {
2640 			(void) snprintf(info, 80,
2641 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2642 			    code, mbox1, mbox2, mbox3);
2643 			info[79] = 0;
2644 
2645 			/* check if "ACK" is required (timeout != 0) */
2646 			if (mbox1 & 0x0f00) {
2647 				caddr_t	req;
2648 
2649 				/*
2650 				 * Ack the request (queue work to do it?)
2651 				 * using a mailbox iocb
2652 				 */
2653 				mutex_enter(&qlt->req_lock);
2654 				req = qlt_get_req_entries(qlt, 1);
2655 				if (req) {
2656 					bzero(req, IOCB_SIZE);
2657 					req[0] = 0x39; req[1] = 1;
2658 					QMEM_WR16(qlt, req+8, 0x101);
2659 					QMEM_WR16(qlt, req+10, mbox1);
2660 					QMEM_WR16(qlt, req+12, mbox2);
2661 					QMEM_WR16(qlt, req+14, mbox3);
2662 					QMEM_WR16(qlt, req+16, mbox4);
2663 					QMEM_WR16(qlt, req+18, mbox5);
2664 					QMEM_WR16(qlt, req+20, mbox6);
2665 					qlt_submit_req_entries(qlt, 1);
2666 				} else {
2667 					(void) snprintf(info, 80,
2668 					    "IDC ACK failed");
2669 					info[79] = 0;
2670 				}
2671 				mutex_exit(&qlt->req_lock);
2672 			}
2673 		}
2674 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2675 		/* Handle mailbox completion */
2676 		mutex_enter(&qlt->mbox_lock);
2677 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2678 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2679 			    " when driver wasn't waiting for it %d",
2680 			    qlt->instance, qlt->mbox_io_state);
2681 		} else {
2682 			for (i = 0; i < MAX_MBOXES; i++) {
2683 				if (qlt->mcp->from_fw_mask &
2684 				    (((uint32_t)1) << i)) {
2685 					qlt->mcp->from_fw[i] =
2686 					    REG_RD16(qlt, REG_MBOX(i));
2687 				}
2688 			}
2689 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2690 		}
2691 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2692 		cv_broadcast(&qlt->mbox_cv);
2693 		mutex_exit(&qlt->mbox_lock);
2694 	} else {
2695 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2696 		    qlt->instance, intr_type);
2697 		REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_TO_PCI_INTR);
2698 	}
2699 
2700 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2701 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2702 	if ((risc_status & BIT_15) &&
2703 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2704 		goto intr_again;
2705 	}
2706 
2707 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2708 
2709 	mutex_exit(&qlt->intr_lock);
2710 	return (DDI_INTR_CLAIMED);
2711 }
2712 
2713 /* **************** NVRAM Functions ********************** */
2714 
2715 fct_status_t
2716 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2717 {
2718 	uint32_t	timer;
2719 
2720 	/* Clear access error flag */
2721 	REG_WR32(qlt, REG_CTRL_STATUS,
2722 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2723 
2724 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2725 
2726 	/* Wait for READ cycle to complete. */
2727 	for (timer = 3000; timer; timer--) {
2728 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2729 			break;
2730 		}
2731 		drv_usecwait(10);
2732 	}
2733 	if (timer == 0) {
2734 		EL(qlt, "flash timeout\n");
2735 		return (QLT_FLASH_TIMEOUT);
2736 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2737 		EL(qlt, "flash access error\n");
2738 		return (QLT_FLASH_ACCESS_ERROR);
2739 	}
2740 
2741 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2742 
2743 	return (QLT_SUCCESS);
2744 }
2745 
/*
 * Read the adapter NVRAM image into qlt->nvram, validate its checksum and
 * signature, and hand-craft the node name from the port name if the node
 * name is all zeros. The flash base address depends on the chip family
 * and on which PCI function this instance is.
 */
fct_status_t
qlt_read_nvram(qlt_state_t *qlt)
{
	uint32_t		index, addr, chksum;
	uint32_t		val, *ptr;
	fct_status_t		ret;
	qlt_nvram_t		*nv;
	uint64_t		empty_node_name = 0;

	/* Select the NVRAM region for this chip type and PCI function. */
	if (qlt->qlt_81xx_chip) {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
	} else if (qlt->qlt_25xx_chip) {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
	} else {
		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
	}
	mutex_enter(&qlt_global_lock);

	/* Pause RISC. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */

	/* Get NVRAM data and calculate checksum. */
	ptr = (uint32_t *)qlt->nvram;
	chksum = 0;
	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
		ret = qlt_read_flash_word(qlt, addr++, &val);
		if (ret != QLT_SUCCESS) {
			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
			mutex_exit(&qlt_global_lock);
			return (ret);
		}
		chksum += val;
		/* NVRAM image is little-endian on flash. */
		*ptr = LE_32(val);
		ptr++;
	}

	/* Release RISC Pause */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD_CLEAR_RISC_PAUSE);
	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */

	mutex_exit(&qlt_global_lock);

	/* Sanity check NVRAM Data */
	/* Whole-image sum must be zero and the id must read "ISP ". */
	nv = qlt->nvram;
	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
		    nv->nvram_version[1], nv->nvram_version[0]);
		return (QLT_BAD_NVRAM_DATA);
	}

	/* If node name is zero, hand craft it from port name */
	/* NOTE(review): port_name[0] is rebuilt here from the adjusted */
	/* node_name[0] | BIT_0 — confirm this matches the WWN convention. */
	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
		bcopy(nv->port_name, nv->node_name, 8);
		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
	}

	return (QLT_SUCCESS);
}
2812 
/*
 * DMA-sync (for CPU) the range of the ATIO queue that the firmware has
 * produced, i.e. the entries between atio_ndx_to_fw (driver's consumer
 * index) and atio_ndx_from_fw (firmware's producer index), handling queue
 * wrap-around. Returns the number of 64-byte entries to be consumed.
 */
uint32_t
qlt_sync_atio_queue(qlt_state_t *qlt)
{
	uint32_t total_ent;

	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
		/* Contiguous region: one sync covers everything. */
		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
		    DDI_DMA_SYNC_FORCPU);
	} else {
		/* Wrapped region: sync tail of queue, then head. */
		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
		    qlt->atio_ndx_from_fw;
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
		    DDI_DMA_SYNC_FORCPU);
	}
	return (total_ent);
}
2835 
/*
 * Consume all new entries on the ATIO queue (INOTs and ATIOs posted by
 * the firmware), advance the driver's consumer index, and write it back
 * to the chip so the firmware can reuse the entries.
 */
void
qlt_handle_atio_queue_update(qlt_state_t *qlt)
{
	uint32_t total_ent;

	/* Nothing new from the firmware. */
	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
		return;

	total_ent = qlt_sync_atio_queue(qlt);

	do {
		/* Each queue entry is 64 bytes (<< 6). */
		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
		    qlt->atio_ndx_to_fw << 6];
		uint32_t ent_cnt;

		/* Byte 1 = number of 64-byte entries this IOCB spans. */
		ent_cnt = (uint32_t)(atio[1]);
		if (ent_cnt > total_ent) {
			/* Partially synced multi-entry IOCB; stop here. */
			break;
		}
		/* Byte 0 = IOCB entry type. */
		switch ((uint8_t)(atio[0])) {
		case 0x0d:	/* INOT */
			qlt_handle_inot(qlt, atio);
			break;
		case 0x06:	/* ATIO */
			qlt_handle_atio(qlt, atio);
			break;
		default:
			EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
			break;
		}
		qlt->atio_ndx_to_fw = (uint16_t)(
		    (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
		total_ent -= ent_cnt;
	} while (total_ent > 0);
	/* Tell the firmware how far we have consumed. */
	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
}
2874 
2875 uint32_t
2876 qlt_sync_resp_queue(qlt_state_t *qlt)
2877 {
2878 	uint32_t total_ent;
2879 
2880 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2881 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2882 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2883 		    RESPONSE_QUEUE_OFFSET
2884 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2885 		    DDI_DMA_SYNC_FORCPU);
2886 	} else {
2887 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2888 		    qlt->resp_ndx_from_fw;
2889 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2890 		    RESPONSE_QUEUE_OFFSET
2891 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2892 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2893 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2894 		    RESPONSE_QUEUE_OFFSET,
2895 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2896 	}
2897 	return (total_ent);
2898 }
2899 
/*
 * Walk the response queue and dispatch every IOCB the firmware has
 * posted since the last pass, then publish the new consumer index via
 * the out-pointer register so the firmware can reuse those entries.
 */
void
qlt_handle_resp_queue_update(qlt_state_t *qlt)
{
	uint32_t total_ent;
	uint8_t c;

	/* Nothing new posted by the firmware */
	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
		return;

	/* DMA-sync the newly produced entries for CPU access */
	total_ent = qlt_sync_resp_queue(qlt);

	do {
		/* Entries are 64 (1 << 6) bytes each */
		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
		uint32_t ent_cnt;

		/* Byte 1 of the IOCB holds its entry count */
		ent_cnt = (uint32_t)(resp[1]);
		if (ent_cnt > total_ent) {
			/* IOCB not fully synced yet; process it next time */
			break;
		}
		switch ((uint8_t)(resp[0])) {	/* entry type */
		case 0x12:	/* CTIO completion */
			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
			break;
		case 0x0e:	/* NACK */
			/* Do Nothing */
			break;
		case 0x1b:	/* Verify FW */
			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
			break;
		case 0x29:	/* CT PassThrough */
			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
			break;
		case 0x33:	/* Abort IO IOCB completion */
			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
			break;
		case 0x51:	/* PUREX */
			qlt_handle_purex(qlt, (uint8_t *)resp);
			break;
		case 0x52:	/* logout / dereg completion */
			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
			break;
		case 0x53:	/* ELS passthrough */
			/* Bits 7-5 of byte 0x1f distinguish the ELS flavor */
			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
			if (c == 0) {
				qlt_handle_sol_els_completion(qlt,
				    (uint8_t *)resp);
			} else if (c == 3) {
				qlt_handle_unsol_els_abort_completion(qlt,
				    (uint8_t *)resp);
			} else {
				qlt_handle_unsol_els_completion(qlt,
				    (uint8_t *)resp);
			}
			break;
		case 0x54:	/* ABTS received */
			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
			break;
		case 0x55:	/* ABTS completion */
			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
			break;
		default:
			EL(qlt, "response entry=%xh\n", resp[0]);
			break;
		}
		/* Advance consumer index; queue size is a power of two */
		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
		    (RESPONSE_QUEUE_ENTRIES - 1);
		total_ent -= ent_cnt;
	} while (total_ent > 0);
	/* Tell the firmware how far we have consumed */
	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
}
2970 
/*
 * Look up the firmware login handle for a given FC port id by issuing
 * a GET ID LIST (0x7C) mailbox command and scanning the returned
 * database.  Also cross-checks that neither the port id nor the
 * proposed handle (cmd_handle) is already paired differently in the
 * firmware; such conflicts fail with QLT_FAILURE.
 *
 * On success *ret_handle is the existing handle for 'id', or
 * cmd_handle if the port id was not found in the firmware database.
 */
fct_status_t
qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
				uint16_t *ret_handle)
{
	fct_status_t ret;
	mbox_cmd_t *mcp;
	uint16_t n;
	uint16_t h;
	uint32_t ent_id;
	uint8_t *p;
	int found = 0;

	/* Buffer for up to 2048 id-list entries of 8 bytes each */
	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
	if (mcp == NULL) {
		return (STMF_ALLOC_FAILURE);
	}
	mcp->to_fw[0] = 0x7C;	/* GET ID LIST */
	mcp->to_fw[8] = 2048 * 8;	/* matches the dbuf size above */
	mcp->to_fw[9] = 0;
	mcp->to_fw_mask |= BIT_9 | BIT_8;
	/* from_fw[1] = entry count (used below); from_fw[2] logged on error */
	mcp->from_fw_mask |= BIT_1 | BIT_2;

	ret = qlt_mailbox_command(qlt, mcp);
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
		    mcp->from_fw[1], mcp->from_fw[2]);
		qlt_free_mailbox_command(qlt, mcp);
		return (ret);
	}
	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
	p = mcp->dbuf->db_sglist[0].seg_addr;
	/* Each entry: little-endian 24-bit port id + 16-bit handle */
	for (n = 0; n < mcp->from_fw[1]; n++) {
		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
		h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
		if (ent_id == id) {
			found = 1;
			*ret_handle = h;
			/* Same port id logging in with a different handle */
			if ((cmd_handle != FCT_HANDLE_NONE) &&
			    (cmd_handle != h)) {
				cmn_err(CE_WARN, "login for portid %x came in "
				    "with handle %x, while the portid was "
				    "already using a different handle %x",
				    id, cmd_handle, h);
				qlt_free_mailbox_command(qlt, mcp);
				return (QLT_FAILURE);
			}
			break;
		}
		/* Proposed handle already assigned to some other port id */
		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
			cmn_err(CE_WARN, "login for portid %x came in with "
			    "handle %x, while the handle was already in use "
			    "for portid %x", id, cmd_handle, ent_id);
			qlt_free_mailbox_command(qlt, mcp);
			return (QLT_FAILURE);
		}
		p += 8;
	}
	if (!found) {
		/* Unknown to the firmware; let the caller's handle stand */
		*ret_handle = cmd_handle;
	}
	qlt_free_mailbox_command(qlt, mcp);
	return (FCT_SUCCESS);
}
3036 
3037 /* ARGSUSED */
3038 fct_status_t
3039 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3040 				fct_cmd_t *login)
3041 {
3042 	uint8_t *p;
3043 
3044 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3045 	p[0] = ELS_OP_PLOGI;
3046 	*((uint16_t *)(&p[4])) = 0x2020;
3047 	p[7] = 3;
3048 	p[8] = 0x88;
3049 	p[10] = 8;
3050 	p[13] = 0xff; p[15] = 0x1f;
3051 	p[18] = 7; p[19] = 0xd0;
3052 
3053 	bcopy(port->port_pwwn, p + 20, 8);
3054 	bcopy(port->port_nwwn, p + 28, 8);
3055 
3056 	p[68] = 0x80;
3057 	p[74] = 8;
3058 	p[77] = 0xff;
3059 	p[81] = 1;
3060 
3061 	return (FCT_SUCCESS);
3062 }
3063 
3064 /* ARGSUSED */
3065 fct_status_t
3066 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3067 				fct_cmd_t *login)
3068 {
3069 	return (FCT_SUCCESS);
3070 }
3071 
3072 fct_status_t
3073 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3074     fct_cmd_t *login)
3075 {
3076 	uint16_t h;
3077 	fct_status_t ret;
3078 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3079 
3080 	switch (rp->rp_id) {
3081 	case 0xFFFFFC:	h = 0x7FC; break;
3082 	case 0xFFFFFD:	h = 0x7FD; break;
3083 	case 0xFFFFFE:	h = 0x7FE; break;
3084 	case 0xFFFFFF:	h = 0x7FF; break;
3085 	default:
3086 		ret = qlt_portid_to_handle(qlt, rp->rp_id,
3087 		    login->cmd_rp_handle, &h);
3088 		if (ret != FCT_SUCCESS) {
3089 			EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3090 			return (ret);
3091 		}
3092 	}
3093 
3094 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
3095 		ret = qlt_fill_plogi_req(port, rp, login);
3096 	} else {
3097 		ret = qlt_fill_plogi_resp(port, rp, login);
3098 	}
3099 
3100 	if (ret != FCT_SUCCESS) {
3101 		EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3102 		return (ret);
3103 	}
3104 
3105 	if (h == FCT_HANDLE_NONE)
3106 		return (FCT_SUCCESS);
3107 
3108 	if (rp->rp_handle == FCT_HANDLE_NONE) {
3109 		rp->rp_handle = h;
3110 		return (FCT_SUCCESS);
3111 	}
3112 
3113 	if (rp->rp_handle == h)
3114 		return (FCT_SUCCESS);
3115 
3116 	EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3117 	return (FCT_FAILURE);
3118 }
/* invoked in single thread */
/*
 * Ask the firmware to implicitly log out a remote port by queueing a
 * 0x52 IOCB on the priority request queue, then wait (bounded by
 * DEREG_RP_TIMEOUT) for qlt_handle_dereg_completion() to post the
 * result through rp_dereg_cv.  Returns FCT_BUSY when no priority-queue
 * entry is available or when the wait times out.
 */
fct_status_t
qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
{
	uint8_t *req;
	qlt_state_t *qlt;
	clock_t	dereg_req_timer;
	fct_status_t ret;

	qlt = (qlt_state_t *)port->port_fca_private;

	/* Nothing to do if the port is offline or going offline */
	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
	    (qlt->qlt_state == FCT_STATE_OFFLINING))
		return (FCT_SUCCESS);
	/* Single-threaded caller: no dereg may already be outstanding */
	ASSERT(qlt->rp_id_in_dereg == 0);

	mutex_enter(&qlt->preq_lock);
	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->preq_lock);
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x52; req[1] = 1;	/* entry type 0x52, one entry */
	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
	/* The completion handler matches on this port id */
	qlt->rp_id_in_dereg = rp->rp_id;
	qlt_submit_preq_entries(qlt, 1);

	dereg_req_timer = drv_usectohz(DEREG_RP_TIMEOUT);
	/* cv_reltimedwait() returns > 0 when signalled before timeout */
	if (cv_reltimedwait(&qlt->rp_dereg_cv, &qlt->preq_lock,
	    dereg_req_timer, TR_CLOCK_TICK) > 0) {
		ret = qlt->rp_dereg_status;
	} else {
		ret = FCT_BUSY;
	}
	/* Clear the in-progress state for the next dereg */
	qlt->rp_dereg_status = 0;
	qlt->rp_id_in_dereg = 0;
	mutex_exit(&qlt->preq_lock);
	return (ret);
}
3162 
3163 /*
3164  * Pass received ELS up to framework.
3165  */
3166 static void
3167 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3168 {
3169 	fct_cmd_t		*cmd;
3170 	fct_els_t		*els;
3171 	qlt_cmd_t		*qcmd;
3172 	uint32_t		payload_size;
3173 	uint32_t		remote_portid;
3174 	uint8_t			*pldptr, *bndrptr;
3175 	int			i, off;
3176 	uint16_t		iocb_flags;
3177 	char			info[160];
3178 
3179 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3180 	    ((uint32_t)(resp[0x1A])) << 16;
3181 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3182 	if (iocb_flags & BIT_15) {
3183 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3184 	} else {
3185 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3186 	}
3187 
3188 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
3189 		EL(qlt, "payload is too large = %xh\n", payload_size);
3190 		cmn_err(CE_WARN, "handle_purex: payload is too large");
3191 		goto cmd_null;
3192 	}
3193 
3194 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3195 	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3196 	if (cmd == NULL) {
3197 		EL(qlt, "fct_alloc cmd==NULL\n");
3198 cmd_null:;
3199 		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
3200 		    "allocate space for fct_cmd", (void *)qlt);
3201 		info[159] = 0;
3202 		(void) fct_port_shutdown(qlt->qlt_port,
3203 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3204 		return;
3205 	}
3206 
3207 	cmd->cmd_port = qlt->qlt_port;
3208 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3209 	if (cmd->cmd_rp_handle == 0xFFFF) {
3210 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3211 	}
3212 
3213 	els = (fct_els_t *)cmd->cmd_specific;
3214 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3215 	els->els_req_size = (uint16_t)payload_size;
3216 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3217 	    GET_STRUCT_SIZE(qlt_cmd_t));
3218 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3219 	cmd->cmd_rportid = remote_portid;
3220 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3221 	    ((uint32_t)(resp[0x16])) << 16;
3222 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3223 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3224 	pldptr = &resp[0x2C];
3225 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
3226 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3227 		/* Take care of fw's swapping of payload */
3228 		els->els_req_payload[i] = pldptr[3];
3229 		els->els_req_payload[i+1] = pldptr[2];
3230 		els->els_req_payload[i+2] = pldptr[1];
3231 		els->els_req_payload[i+3] = pldptr[0];
3232 		pldptr += 4;
3233 		if (pldptr == bndrptr)
3234 			pldptr = (uint8_t *)qlt->resp_ptr;
3235 		off += 4;
3236 		if (off >= IOCB_SIZE) {
3237 			off = 4;
3238 			pldptr += 4;
3239 		}
3240 	}
3241 	fct_post_rcvd_cmd(cmd, 0);
3242 }
3243 
3244 fct_status_t
3245 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3246 {
3247 	qlt_state_t	*qlt;
3248 	char		info[160];
3249 
3250 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3251 
3252 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3253 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3254 			EL(qlt, "ioflags = %xh\n", ioflags);
3255 			goto fatal_panic;
3256 		} else {
3257 			return (qlt_send_status(qlt, cmd));
3258 		}
3259 	}
3260 
3261 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3262 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3263 			goto fatal_panic;
3264 		} else {
3265 			return (qlt_send_els_response(qlt, cmd));
3266 		}
3267 	}
3268 
3269 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3270 		cmd->cmd_handle = 0;
3271 	}
3272 
3273 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3274 		return (qlt_send_abts_response(qlt, cmd, 0));
3275 	} else {
3276 		EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3277 		ASSERT(0);
3278 		return (FCT_FAILURE);
3279 	}
3280 
3281 fatal_panic:;
3282 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
3283 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3284 	    ioflags);
3285 	info[159] = 0;
3286 	(void) fct_port_shutdown(qlt->qlt_port,
3287 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3288 	return (FCT_FAILURE);
3289 }
3290 
/* ARGSUSED */
/*
 * Start a data phase for an FCP exchange by building a CTIO type 7
 * (0x12) IOCB describing dbuf and submitting it to the firmware.
 * Direction comes from dbuf->db_flags; DB_SEND_STATUS_GOOD also
 * piggybacks a good-status completion onto the transfer (BIT_15).
 * Returns FCT_BUSY when no request-queue entry is available.
 */
fct_status_t
qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	uint8_t *req;
	uint16_t flags;

	/* Remember the first buffer of the exchange */
	if (dbuf->db_handle == 0)
		qcmd->dbuf = dbuf;
	/* Attribute bits saved from the original ATIO go into the flags */
	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		flags = (uint16_t)(flags | 2);	/* data toward remote port */
		/* Outgoing data: flush CPU writes to DMA memory */
		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
	} else {
		flags = (uint16_t)(flags | 1);	/* data from remote port */
	}

	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
		flags = (uint16_t)(flags | BIT_15);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	/* Build the CTIO7; offsets follow the firmware IOCB layout */
	bzero(req, IOCB_SIZE);
	req[0] = 0x12; req[1] = 0x1;
	req[2] = dbuf->db_handle;
	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
	req[12] = 1;	/* single data segment (address/length at 0x34) */
	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
	QMEM_WR16(qlt, req+0x1A, flags);
	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
	QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
	QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (STMF_SUCCESS);
}
3340 
3341 /*
3342  * We must construct proper FCP_RSP_IU now. Here we only focus on
3343  * the handling of FCP_SNS_INFO. If there's protocol failures (FCP_RSP_INFO),
3344  * we could have catched them before we enter here.
3345  */
3346 fct_status_t
3347 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3348 {
3349 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
3350 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
3351 	qlt_dmem_bctl_t *bctl;
3352 	uint32_t size;
3353 	uint8_t *req, *fcp_rsp_iu;
3354 	uint8_t *psd, sensbuf[24];		/* sense data */
3355 	uint16_t flags;
3356 	uint16_t scsi_status;
3357 	int use_mode2;
3358 	int ndx;
3359 
3360 	/*
3361 	 * Enter fast channel for non check condition
3362 	 */
3363 	if (task->task_scsi_status != STATUS_CHECK) {
3364 		/*
3365 		 * We will use mode1
3366 		 */
3367 		flags = (uint16_t)(BIT_6 | BIT_15 |
3368 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3369 		scsi_status = (uint16_t)task->task_scsi_status;
3370 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3371 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3372 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3373 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3374 		}
3375 		qcmd->dbuf_rsp_iu = NULL;
3376 
3377 		/*
3378 		 * Fillout CTIO type 7 IOCB
3379 		 */
3380 		mutex_enter(&qlt->req_lock);
3381 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3382 		if (req == NULL) {
3383 			mutex_exit(&qlt->req_lock);
3384 			return (FCT_BUSY);
3385 		}
3386 
3387 		/*
3388 		 * Common fields
3389 		 */
3390 		bzero(req, IOCB_SIZE);
3391 		req[0x00] = 0x12;
3392 		req[0x01] = 0x1;
3393 		req[0x02] = BIT_7;	/* indicate if it's a pure status req */
3394 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3395 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3396 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3397 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3398 
3399 		/*
3400 		 * Mode-specific fields
3401 		 */
3402 		QMEM_WR16(qlt, req + 0x1A, flags);
3403 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3404 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3405 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3406 
3407 		/*
3408 		 * Trigger FW to send SCSI status out
3409 		 */
3410 		qlt_submit_req_entries(qlt, 1);
3411 		mutex_exit(&qlt->req_lock);
3412 		return (STMF_SUCCESS);
3413 	}
3414 
3415 	ASSERT(task->task_scsi_status == STATUS_CHECK);
3416 	/*
3417 	 * Decide the SCSI status mode, that should be used
3418 	 */
3419 	use_mode2 = (task->task_sense_length > 24);
3420 
3421 	/*
3422 	 * Prepare required information per the SCSI status mode
3423 	 */
3424 	flags = (uint16_t)(BIT_15 |
3425 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3426 	if (use_mode2) {
3427 		flags = (uint16_t)(flags | BIT_7);
3428 
3429 		size = task->task_sense_length;
3430 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3431 		    task->task_sense_length, &size, 0);
3432 		if (!qcmd->dbuf_rsp_iu) {
3433 			return (FCT_ALLOC_FAILURE);
3434 		}
3435 
3436 		/*
3437 		 * Start to construct FCP_RSP IU
3438 		 */
3439 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3440 		bzero(fcp_rsp_iu, 24);
3441 
3442 		/*
3443 		 * FCP_RSP IU flags, byte10
3444 		 */
3445 		fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3446 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3447 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3448 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3449 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3450 		}
3451 
3452 		/*
3453 		 * SCSI status code, byte11
3454 		 */
3455 		fcp_rsp_iu[11] = task->task_scsi_status;
3456 
3457 		/*
3458 		 * FCP_RESID (Overrun or underrun)
3459 		 */
3460 		fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3461 		fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3462 		fcp_rsp_iu[14] = (uint8_t)((task->task_resid >>  8) & 0xFF);
3463 		fcp_rsp_iu[15] = (uint8_t)((task->task_resid >>  0) & 0xFF);
3464 
3465 		/*
3466 		 * FCP_SNS_LEN
3467 		 */
3468 		fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3469 		    0xFF);
3470 		fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3471 		    0xFF);
3472 
3473 		/*
3474 		 * FCP_RSP_LEN
3475 		 */
3476 		/*
3477 		 * no FCP_RSP_INFO
3478 		 */
3479 		/*
3480 		 * FCP_SNS_INFO
3481 		 */
3482 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3483 		    task->task_sense_length);
3484 
3485 		/*
3486 		 * Ensure dma data consistency
3487 		 */
3488 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3489 	} else {
3490 		flags = (uint16_t)(flags | BIT_6);
3491 
3492 		scsi_status = (uint16_t)task->task_scsi_status;
3493 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3494 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3495 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3496 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3497 		}
3498 		if (task->task_sense_length) {
3499 			scsi_status = (uint16_t)(scsi_status | BIT_9);
3500 		}
3501 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3502 		qcmd->dbuf_rsp_iu = NULL;
3503 	}
3504 
3505 	/*
3506 	 * Fillout CTIO type 7 IOCB
3507 	 */
3508 	mutex_enter(&qlt->req_lock);
3509 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3510 	if (req == NULL) {
3511 		mutex_exit(&qlt->req_lock);
3512 		if (use_mode2) {
3513 			qlt_dmem_free(cmd->cmd_port->port_fds,
3514 			    qcmd->dbuf_rsp_iu);
3515 			qcmd->dbuf_rsp_iu = NULL;
3516 		}
3517 		return (FCT_BUSY);
3518 	}
3519 
3520 	/*
3521 	 * Common fields
3522 	 */
3523 	bzero(req, IOCB_SIZE);
3524 	req[0x00] = 0x12;
3525 	req[0x01] = 0x1;
3526 	req[0x02] = BIT_7;	/* to indicate if it's a pure status req */
3527 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3528 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3529 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3530 	if (use_mode2) {
3531 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3532 	}
3533 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3534 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3535 
3536 	/*
3537 	 * Mode-specific fields
3538 	 */
3539 	if (!use_mode2) {
3540 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3541 	}
3542 	QMEM_WR16(qlt, req + 0x1A, flags);
3543 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3544 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3545 	if (use_mode2) {
3546 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3547 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3548 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3549 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3550 	} else {
3551 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3552 		psd = req+0x28;
3553 
3554 		/*
3555 		 * Data in sense buf is always big-endian, data in IOCB
3556 		 * should always be little-endian, so we must do swapping.
3557 		 */
3558 		size = ((task->task_sense_length + 3) & (~3));
3559 		for (ndx = 0; ndx < size; ndx += 4) {
3560 			psd[ndx + 0] = sensbuf[ndx + 3];
3561 			psd[ndx + 1] = sensbuf[ndx + 2];
3562 			psd[ndx + 2] = sensbuf[ndx + 1];
3563 			psd[ndx + 3] = sensbuf[ndx + 0];
3564 		}
3565 	}
3566 
3567 	/*
3568 	 * Trigger FW to send SCSI status out
3569 	 */
3570 	qlt_submit_req_entries(qlt, 1);
3571 	mutex_exit(&qlt->req_lock);
3572 
3573 	return (STMF_SUCCESS);
3574 }
3575 
/*
 * Send the response for a received ELS by building an ELS passthrough
 * (0x53) IOCB.  The response payload is staged into freshly allocated
 * DMA memory (qcmd->dbuf), which is freed later -- presumably by the
 * completion path; not visible here.  Returns FCT_BUSY when DMA memory
 * or a request-queue entry cannot be obtained.
 */
fct_status_t
qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t	*qcmd;
	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
	uint8_t *req, *addr;
	qlt_dmem_bctl_t *bctl;
	uint32_t minsize;
	uint8_t elsop, req1f;

	addr = els->els_resp_payload;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;

	minsize = els->els_resp_size;
	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
	if (qcmd->dbuf == NULL)
		return (FCT_BUSY);

	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;

	/* Stage the payload into DMA-able memory and flush it */
	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);

	if (addr[0] == 0x02) {	/* ACC */
		req1f = BIT_5;
	} else {
		req1f = BIT_6;	/* presumably LS_RJT -- confirm */
	}
	elsop = els->els_req_payload[0];
	/* These ELS types additionally need BIT_4 in the control byte */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		/* Release the staged payload before bailing out */
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
		return (FCT_BUSY);
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	/* In point-to-point topology also supply our own port id */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
3639 
/*
 * Respond to (or, with 'terminate' set, terminate) a received ABTS by
 * building an ABTS response (0x55) IOCB.  The saved inbound ABTS IOCB
 * (qcmd->buf) seeds the entry; the source/destination port ids are
 * swapped and the F_CTL bits adjusted so the frame flows back to the
 * initiator.  Returns FCT_BUSY when no request-queue entry is free.
 */
fct_status_t
qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
{
	qlt_abts_cmd_t *qcmd;
	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
	uint8_t *req;
	uint32_t lportid;
	uint32_t fctl;
	int i;

	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	/* Start from the saved inbound ABTS IOCB */
	bcopy(qcmd->buf, req, IOCB_SIZE);
	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
	fctl = QMEM_RD32(qlt, req+0x1C);
	/* Flip direction-related F_CTL bits for the response frame */
	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp)
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	else
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
	if (terminate) {
		QMEM_WR16(qlt, (&req[0xC]), 1);
	}
	/* Swap source/destination port ids for the reply direction */
	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
	req[0x17] = abts->abts_resp_rctl;
	QMEM_WR32(qlt, req+0x18, lportid);
	QMEM_WR32(qlt, req+0x1C, fctl);
	req[0x23]++;	/* bump SEQ_ID byte -- assumed; confirm vs FW spec */
	for (i = 0; i < 12; i += 4) {
		/* Take care of firmware's LE requirement */
		req[0x2C+i] = abts->abts_resp_payload[i+3];
		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
		req[0x2C+i+3] = abts->abts_resp_payload[i];
	}
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
3688 
3689 static void
3690 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3691 {
3692 	int i;
3693 	uint32_t d;
3694 	caddr_t req;
3695 	/* Just put it on the request queue */
3696 	mutex_enter(&qlt->req_lock);
3697 	req = qlt_get_req_entries(qlt, 1);
3698 	if (req == NULL) {
3699 		mutex_exit(&qlt->req_lock);
3700 		/* XXX handle this */
3701 		return;
3702 	}
3703 	for (i = 0; i < 16; i++) {
3704 		d = QMEM_RD32(qlt, inot);
3705 		inot += 4;
3706 		QMEM_WR32(qlt, req, d);
3707 		req += 4;
3708 	}
3709 	req -= 64;
3710 	req[0] = 0x0e;
3711 	qlt_submit_req_entries(qlt, 1);
3712 	mutex_exit(&qlt->req_lock);
3713 }
3714 
/*
 * Indexed by the low 3 bits of a byte from the incoming FCP_CMND IU
 * (see qlt_handle_atio) to produce the initial scsi_task task_flags
 * value -- presumably the FCP task attribute codes; confirm against
 * the FCP spec and the STMF task flag definitions.
 */
uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3716 static void
3717 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3718 {
3719 	fct_cmd_t	*cmd;
3720 	scsi_task_t	*task;
3721 	qlt_cmd_t	*qcmd;
3722 	uint32_t	rportid, fw_xchg_addr;
3723 	uint8_t		*p, *q, *req, tm;
3724 	uint16_t	cdb_size, flags, oxid;
3725 	char		info[160];
3726 
3727 	/*
3728 	 * If either bidirection xfer is requested of there is extended
3729 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3730 	 */
3731 	cdb_size = 16;
3732 	if (atio[0x20 + 11] >= 3) {
3733 		uint8_t b = atio[0x20 + 11];
3734 		uint16_t b1;
3735 		if ((b & 3) == 3) {
3736 			EL(qlt, "bidirectional I/O not supported\n");
3737 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3738 			    "received, dropping the cmd as bidirectional "
3739 			    " transfers are not yet supported", qlt->instance);
3740 			/* XXX abort the I/O */
3741 			return;
3742 		}
3743 		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3744 		/*
3745 		 * Verify that we have enough entries. Without additional CDB
3746 		 * Everything will fit nicely within the same 64 bytes. So the
3747 		 * additional cdb size is essentially the # of additional bytes
3748 		 * we need.
3749 		 */
3750 		b1 = (uint16_t)b;
3751 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3752 			EL(qlt, "extended cdb received\n");
3753 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3754 			    " cdb (cdb size = %d bytes), however the firmware "
3755 			    " did not DMAed the entire FCP_CMD IU, entry count "
3756 			    " is %d while it should be %d", qlt->instance,
3757 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3758 			/* XXX abort the I/O */
3759 			return;
3760 		}
3761 	}
3762 
3763 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3764 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3765 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3766 	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3767 
3768 	if (fw_xchg_addr == 0xFFFFFFFF) {
3769 		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3770 		cmd = NULL;
3771 	} else {
3772 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3773 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3774 		if (cmd == NULL) {
3775 			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3776 		}
3777 	}
3778 	if (cmd == NULL) {
3779 		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3780 		/* Abort this IO */
3781 		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3782 
3783 		mutex_enter(&qlt->req_lock);
3784 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3785 		if (req == NULL) {
3786 			mutex_exit(&qlt->req_lock);
3787 
3788 			(void) snprintf(info, 160,
3789 			    "qlt_handle_atio: qlt-%p, can't "
3790 			    "allocate space for scsi_task", (void *)qlt);
3791 			info[159] = 0;
3792 			(void) fct_port_shutdown(qlt->qlt_port,
3793 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3794 			return;
3795 		}
3796 		bzero(req, IOCB_SIZE);
3797 		req[0] = 0x12; req[1] = 0x1;
3798 		QMEM_WR32(qlt, req+4, 0);
3799 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3800 		    rportid));
3801 		QMEM_WR16(qlt, req+10, 60);
3802 		QMEM_WR32(qlt, req+0x10, rportid);
3803 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3804 		QMEM_WR16(qlt, req+0x1A, flags);
3805 		QMEM_WR16(qlt, req+0x20, oxid);
3806 		qlt_submit_req_entries(qlt, 1);
3807 		mutex_exit(&qlt->req_lock);
3808 
3809 		return;
3810 	}
3811 
3812 	task = (scsi_task_t *)cmd->cmd_specific;
3813 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3814 	qcmd->fw_xchg_addr = fw_xchg_addr;
3815 	qcmd->param.atio_byte3 = atio[3];
3816 	cmd->cmd_oxid = oxid;
3817 	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3818 	    atio[8+19]);
3819 	cmd->cmd_rportid = rportid;
3820 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3821 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3822 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3823 	/* Dont do a 64 byte read as this is IOMMU */
3824 	q = atio+0x28;
3825 	/* XXX Handle fcp_cntl */
3826 	task->task_cmd_seq_no = (uint32_t)(*q++);
3827 	task->task_csn_size = 8;
3828 	task->task_flags = qlt_task_flags[(*q++) & 7];
3829 	tm = *q++;
3830 	if (tm) {
3831 		if (tm & BIT_1)
3832 			task->task_mgmt_function = TM_ABORT_TASK_SET;
3833 		else if (tm & BIT_2)
3834 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
3835 		else if (tm & BIT_4)
3836 			task->task_mgmt_function = TM_LUN_RESET;
3837 		else if (tm & BIT_5)
3838 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
3839 		else if (tm & BIT_6)
3840 			task->task_mgmt_function = TM_CLEAR_ACA;
3841 		else
3842 			task->task_mgmt_function = TM_ABORT_TASK;
3843 	}
3844 	task->task_max_nbufs = STMF_BUFS_MAX;
3845 	task->task_csn_size = 8;
3846 	task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
3847 	p = task->task_cdb;
3848 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3849 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3850 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3851 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
3852 	if (cdb_size > 16) {
3853 		uint16_t xtra = (uint16_t)(cdb_size - 16);
3854 		uint16_t i;
3855 		uint8_t cb[4];
3856 
3857 		while (xtra) {
3858 			*p++ = *q++;
3859 			xtra--;
3860 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3861 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3862 				q = (uint8_t *)qlt->queue_mem_ptr +
3863 				    ATIO_QUEUE_OFFSET;
3864 			}
3865 		}
3866 		for (i = 0; i < 4; i++) {
3867 			cb[i] = *q++;
3868 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
3869 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
3870 				q = (uint8_t *)qlt->queue_mem_ptr +
3871 				    ATIO_QUEUE_OFFSET;
3872 			}
3873 		}
3874 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
3875 		    (((uint32_t)cb[1]) << 16) |
3876 		    (((uint32_t)cb[2]) << 8) | cb[3];
3877 	} else {
3878 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
3879 		    (((uint32_t)q[1]) << 16) |
3880 		    (((uint32_t)q[2]) << 8) | q[3];
3881 	}
3882 	fct_post_rcvd_cmd(cmd, 0);
3883 }
3884 
3885 static void
3886 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
3887 {
3888 	uint16_t status;
3889 	uint32_t portid;
3890 	uint32_t subcode1, subcode2;
3891 
3892 	status = QMEM_RD16(qlt, rsp+8);
3893 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
3894 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
3895 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
3896 
3897 	mutex_enter(&qlt->preq_lock);
3898 	if (portid != qlt->rp_id_in_dereg) {
3899 		int instance = ddi_get_instance(qlt->dip);
3900 
3901 		EL(qlt, "implicit logout reveived portid = %xh\n", portid);
3902 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
3903 		    " received when driver wasn't waiting for it",
3904 		    instance, portid);
3905 		mutex_exit(&qlt->preq_lock);
3906 		return;
3907 	}
3908 
3909 	if (status != 0) {
3910 		EL(qlt, "implicit logout completed for %xh with status %xh, "
3911 		    "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
3912 		    subcode2);
3913 		if (status == 0x31 && subcode1 == 0x0a) {
3914 			qlt->rp_dereg_status = FCT_SUCCESS;
3915 		} else {
3916 			EL(qlt, "implicit logout portid=%xh, status=%xh, "
3917 			    "subcode1=%xh, subcode2=%xh\n", portid, status,
3918 			    subcode1, subcode2);
3919 			qlt->rp_dereg_status =
3920 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
3921 		}
3922 	} else {
3923 		qlt->rp_dereg_status = FCT_SUCCESS;
3924 	}
3925 	cv_signal(&qlt->rp_dereg_cv);
3926 	mutex_exit(&qlt->preq_lock);
3927 }
3928 
3929 /*
3930  * Note that when an ELS is aborted, the regular or aborted completion
3931  * (if any) gets posted before the abort IOCB comes back on response queue.
3932  */
/*
 * Handle the completion IOCB for the response we sent to an unsolicited
 * ELS (see the ordering note above).  Depending on status, the command is
 * completed back to FCT via fct_send_response_done(); aborted ELSes are
 * cleaned up later, when the aborting IOCB itself completes.
 */
static void
qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char		info[160];
	fct_cmd_t	*cmd;
	qlt_cmd_t	*qcmd;
	uint32_t	hndl;
	uint32_t	subcode1, subcode2;
	uint16_t	status;

	/* Pull cmd handle, completion status and error subcodes off IOCB. */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * This cannot happen for unsol els completion. This can
		 * only happen when abort for an unsol els completes.
		 * This condition indicates a firmware bug.
		 */
		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	if (status == 5) {
		/*
		 * When an unsolicited els is aborted, the abort is done
		 * by a ELSPT iocb with abort control. This is the aborted IOCB
		 * and not the abortee. We will do the cleanup when the
		 * IOCB which caused the abort, returns.
		 */
		EL(qlt, "status = %xh\n", status);
		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		/*
		 * Now why would this happen ???
		 */
		(void) snprintf(info, 160,
		    "qlt_handle_unsol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * This is the same case as "if (status == 5)" above. The
		 * only difference is that in this case the firmware actually
		 * finished sending the response. So the abort attempt will
		 * come back with status ?. We will handle it there.
		 */
		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
		    "abort it");
		return;
	}

	/* Done with the response payload DMA buffer, if one was used. */
	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	/* Map firmware status to an FCT completion code. */
	if (status == 0) {
		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_response_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
4020 
/*
 * Handle the completion of an IOCB that was aborting an unsolicited ELS.
 * Status 0 means the abort succeeded, status 8 means the exchange was
 * not found (already completed/gone); anything else is reported to FCT
 * as a firmware error.  An invalid (zero) handle means nobody is waiting
 * for the abort; in that case only a failing status triggers a shutdown.
 */
static void
qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char		info[160];
	fct_cmd_t	*cmd;
	qlt_cmd_t	*qcmd;
	uint32_t	hndl;
	uint32_t	subcode1, subcode2;
	uint16_t	status;

	/* Pull cmd handle, completion status and error subcodes off IOCB. */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		if ((status != 0) && (status != 8)) {
			EL(qlt, "status = %xh\n", status);
			/*
			 * There could be exchange resource leakage, so
			 * throw HBA fatal error event now
			 */
			(void) snprintf(info, 160,
			    "qlt_handle_unsol_els_abort_completion: "
			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
			    hndl, status, subcode1, subcode2, (void *)rsp);
			info[159] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
			return;
		}

		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		/*
		 * Why would this happen ??
		 */
		(void) snprintf(info, 160,
		    "qlt_handle_unsol_els_abort_completion: can not get "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	ASSERT(qcmd->flags & QLT_CMD_ABORTING);

	/* Release the response payload DMA buffer, if one was allocated. */
	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	/* 0 => aborted, 8 => exchange not found, else firmware error. */
	if (status == 0) {
		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
	} else if (status == 8) {
		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
	} else {
		fct_cmd_fca_aborted(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
4098 
/*
 * Handle the completion of a solicited ELS issued via qlt_send_els().
 * On success the ELS response payload is copied out of the shared DMA
 * buffer (it lives at param.resp_offset, past the request payload) into
 * els->els_resp_payload before the command is completed back to FCT.
 * If an abort is pending for the command, cleanup is deferred until the
 * abort IOCB returns.
 */
static void
qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char		info[160];
	fct_cmd_t	*cmd;
	fct_els_t	*els;
	qlt_cmd_t	*qcmd;
	uint32_t	hndl;
	uint32_t	subcode1, subcode2;
	uint16_t	status;

	/* Pull cmd handle, completion status and error subcodes off IOCB. */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * This cannot happen for sol els completion.
		 */
		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_sol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* Save the firmware exchange address for a possible later abort. */
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when the ABORT IO IOCB returns.
		 */
		return;
	}

	if (qcmd->dbuf != NULL) {
		if (status == 0) {
			/* Sync the DMA buffer and copy the response out. */
			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
			    qcmd->param.resp_offset,
			    els->els_resp_payload, els->els_resp_size);
		}
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	/* Map firmware status to an FCT completion code. */
	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
4174 
/*
 * Handle the completion of a solicited CT pass-through issued via
 * qlt_send_ct().  Mirrors qlt_handle_sol_els_completion(): on success
 * the CT response is copied out of the shared DMA buffer (from
 * param.resp_offset) into ct->ct_resp_payload, then the command is
 * completed back to FCT.  Aborting commands are deferred to the abort
 * IOCB completion.
 */
static void
qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	fct_cmd_t	*cmd;
	fct_sol_ct_t	*ct;
	qlt_cmd_t	*qcmd;
	uint32_t	 hndl;
	uint16_t	 status;
	char		 info[160];

	/* Pull cmd handle and completion status off the IOCB. */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * Solicited commands will always have a valid handle.
		 */
		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_ct_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when ABORT IO IOCB returns;
		 */
		return;
	}

	ASSERT(qcmd->dbuf);
	if (status == 0) {
		/* Sync the DMA buffer and copy the CT response out. */
		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
		    qcmd->param.resp_offset,
		    ct->ct_resp_payload, ct->ct_resp_size);
	}
	qlt_dmem_free(NULL, qcmd->dbuf);
	qcmd->dbuf = NULL;

	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}
4243 
/*
 * Handle a CTIO completion for a SCSI task.  A CTIO completion can be
 * (a) the end of a data transfer (routed to fct_scsi_data_xfer_done()),
 * (b) a pure status transfer (fct_send_response_done()), or (c) the
 * completion of an abort request (fct_cmd_fca_aborted()).  BIT_14 in the
 * flags word marks an abort request; BIT_15 indicates SCSI status was
 * sent along with the data (DB_STATUS_GOOD_SENT).
 */
static void
qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	fct_cmd_t	*cmd;
	scsi_task_t	*task;
	qlt_cmd_t	*qcmd;
	stmf_data_buf_t	*dbuf;
	fct_status_t	fc_st;
	uint32_t	iof = 0;
	uint32_t	hndl;
	uint16_t	status;
	uint16_t	flags;
	uint8_t		abort_req;
	uint8_t		n;
	char		info[160];

	/* XXX: Check validity of the IOCB by checking 4th byte. */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	flags = QMEM_RD16(qlt, rsp+0x1a);
	/* rsp[2]: data buffer handle (BIT_7 set => not a data transfer). */
	n = rsp[2];

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
		    (void *)rsp);
		if ((status != 1) && (status != 2)) {
			EL(qlt, "status = %xh\n", status);
			/*
			 * There could be exchange resource leakage, so
			 * throw HBA fatal error event now
			 */
			(void) snprintf(info, 160,
			    "qlt_handle_ctio_completion: hndl-"
			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
			info[159] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		}

		return;
	}

	/* BIT_14 set means this CTIO carried an abort request. */
	if (flags & BIT_14) {
		abort_req = 1;
		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
		    (void *)rsp);
	} else {
		abort_req = 0;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_ctio_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* Free the response-IU buffer if one was attached to this CTIO. */
	if (qcmd->dbuf_rsp_iu) {
		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
		qcmd->dbuf_rsp_iu = NULL;
	}

	/* Status 1 and 2 are treated as successful completions here. */
	if ((status == 1) || (status == 2)) {
		if (abort_req) {
			fc_st = FCT_ABORT_SUCCESS;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = FCT_SUCCESS;
			if (flags & BIT_15) {
				iof = FCT_IOF_FCA_DONE;
			}
		}
	} else {
		EL(qlt, "status = %xh\n", status);
		/* Status 8 on an abort request => exchange not found. */
		if ((status == 8) && abort_req) {
			fc_st = FCT_NOT_FOUND;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
		}
	}
	dbuf = NULL;
	if (((n & BIT_7) == 0) && (!abort_req)) {
		/* A completion of data xfer */
		if (n == 0) {
			dbuf = qcmd->dbuf;
		} else {
			dbuf = stmf_handle_to_buf(task, n);
		}

		ASSERT(dbuf != NULL);
		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
		/* BIT_15: firmware also sent the (good) SCSI status. */
		if (flags & BIT_15) {
			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
			    DB_STATUS_GOOD_SENT);
		}

		dbuf->db_xfer_status = fc_st;
		fct_scsi_data_xfer_done(cmd, dbuf, iof);
		return;
	}
	if (!abort_req) {
		/*
		 * This was just a pure status xfer.
		 */
		fct_send_response_done(cmd, fc_st, iof);
		return;
	}

	fct_cmd_fca_aborted(cmd, fc_st, iof);
}
4373 
/*
 * Handle the completion of an Abort I/O IOCB issued for a solicited
 * ELS/CT command (see qlt_abort_sol_cmd()).  Status 0 means the abort
 * succeeded; 0x31 means the command was not found; anything else is
 * surfaced to FCT as a firmware error.
 */
static void
qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char		info[80];
	fct_cmd_t	*cmd;
	qlt_cmd_t	*qcmd;
	uint32_t	h;
	uint16_t	status;

	/* Pull cmd handle and completion status off the IOCB. */
	h = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(h)) {
		EL(qlt, "handle = %xh\n", h);
		/*
		 * Solicited commands always have a valid handle.
		 */
		(void) snprintf(info, 80,
		    "qlt_handle_sol_abort_completion: hndl-"
		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
		info[79] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}
	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
		/*
		 * What happened to the cmd ??
		 */
		(void) snprintf(info, 80,
		    "qlt_handle_sol_abort_completion: cannot "
		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
		    (void *)rsp);
		info[79] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
	    (cmd->cmd_type == FCT_CMD_SOL_CT));
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* Release the command's DMA buffer; it is no longer needed. */
	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}
	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
	if (status == 0) {
		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
	} else if (status == 0x31) {
		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
	} else {
		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}
4433 
/*
 * An ABTS frame was received from a remote port.  Allocate an FCT
 * command to represent it, save the raw IOCB (presumably used later by
 * qlt_send_abts_response() — confirm), extract the addressing fields
 * (port ids, OX_ID/RX_ID, rport handle) and post it to FCT.
 */
static void
qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
{
	qlt_abts_cmd_t	*qcmd;
	fct_cmd_t	*cmd;
	uint32_t	remote_portid;
	char		info[160];

	/* 24-bit remote port id: low 16 bits + high byte at 0x1A. */
	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
	    sizeof (qlt_abts_cmd_t), 0);
	if (cmd == NULL) {
		EL(qlt, "fct_alloc cmd==NULL\n");
		(void) snprintf(info, 160,
		    "qlt_handle_rcvd_abts: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	/* NOTE(review): bytes 0xC-0xE are cleared before the IOCB is saved;
	 * the purpose of these bytes is not evident from this file. */
	resp[0xC] = resp[0xD] = resp[0xE] = 0;
	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
	bcopy(resp, qcmd->buf, IOCB_SIZE);
	cmd->cmd_port = qlt->qlt_port;
	/* 0xFFFF means the firmware has no login handle for this port. */
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
	if (cmd->cmd_rp_handle == 0xFFFF)
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;

	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
	fct_post_rcvd_cmd(cmd, 0);
}
4472 
4473 static void
4474 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4475 {
4476 	uint16_t status;
4477 	char	info[80];
4478 
4479 	status = QMEM_RD16(qlt, resp+8);
4480 
4481 	if ((status == 0) || (status == 5)) {
4482 		return;
4483 	}
4484 	EL(qlt, "status = %xh\n", status);
4485 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4486 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4487 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4488 	info[79] = 0;
4489 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4490 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4491 }
4492 
#ifdef	DEBUG
/*
 * Debug-only fault-injection knob: while non-zero, qlt_abort_cmd()
 * decrements it and, on the final decrement, pretends the abort
 * succeeded without issuing it (see the DEBUG block there).
 */
uint32_t qlt_drop_abort_counter = 0;
#endif
4496 
4497 fct_status_t
4498 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4499 {
4500 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4501 
4502 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4503 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4504 		return (FCT_NOT_FOUND);
4505 	}
4506 
4507 #ifdef DEBUG
4508 	if (qlt_drop_abort_counter > 0) {
4509 		if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
4510 			return (FCT_SUCCESS);
4511 	}
4512 #endif
4513 
4514 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4515 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4516 	}
4517 
4518 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4519 		cmd->cmd_handle = 0;
4520 	}
4521 
4522 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4523 		return (qlt_send_abts_response(qlt, cmd, 1));
4524 	}
4525 
4526 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4527 		return (qlt_abort_purex(qlt, cmd));
4528 	}
4529 
4530 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4531 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4532 		return (qlt_abort_sol_cmd(qlt, cmd));
4533 	}
4534 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4535 
4536 	ASSERT(0);
4537 	return (FCT_FAILURE);
4538 }
4539 
/*
 * Abort a solicited ELS/CT command by queueing an Abort I/O IOCB
 * (entry type 0x33).  The result is delivered later through
 * qlt_handle_sol_abort_completion().  Returns FCT_BUSY if no request
 * queue entry is available.
 */
fct_status_t
qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	qlt_cmd_t *qcmd;

	/* Mark the command so its normal completion is deferred. */
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
	EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);

		return (FCT_BUSY);
	}
	/* Build the Abort I/O IOCB: type 0x33, count 1. */
	bzero(req, IOCB_SIZE);
	req[0] = 0x33; req[1] = 1;
	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
	/* 0xFFFF = no firmware login handle for the remote port. */
	if (cmd->cmd_rp) {
		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
	} else {
		QMEM_WR16(qlt, req+8, 0xFFFF);
	}

	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4573 
/*
 * Abort an unsolicited ELS using an ELS pass-through IOCB (entry type
 * 0x53) with abort control flags.  For PRLI/PRLO/TPRLO/LOGO the control
 * byte additionally sets BIT_4 (NOTE(review): exact firmware semantics
 * of BIT_4 are not evident from this file — confirm against the spec).
 * Returns FCT_BUSY if no request queue entry is available.
 */
fct_status_t
qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	qlt_cmd_t *qcmd;
	fct_els_t *els;
	uint8_t elsop, req1f;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	elsop = els->els_req_payload[0];
	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
	    elsop);
	req1f = 0x60;	/* Terminate xchg */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);

		return (FCT_BUSY);
	}

	/* Defer normal completion handling until the abort returns. */
	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp) {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
	} else {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
	}

	/* Identify the firmware exchange to be terminated. */
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4621 
/*
 * Abort an unsolicited SCSI exchange by queueing an IOCB of entry type
 * 0x12 with BIT_14 (abort/terminate) set in the flags word, reusing the
 * attribute bits saved from the original ATIO (param.atio_byte3).
 * Returns FCT_BUSY if no request queue entry is available.
 */
fct_status_t
qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	uint8_t *req;
	uint16_t flags;

	/* BIT_14 requests the abort; upper bits carry ATIO attributes. */
	flags = (uint16_t)(BIT_14 |
	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
	EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);

		return (FCT_BUSY);
	}

	/* Defer normal completion handling until the abort returns. */
	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
	bzero(req, IOCB_SIZE);
	req[0] = 0x12; req[1] = 0x1;
	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
	QMEM_WR16(qlt, req+0x1A, flags);
	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4656 
4657 fct_status_t
4658 qlt_send_cmd(fct_cmd_t *cmd)
4659 {
4660 	qlt_state_t *qlt;
4661 
4662 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4663 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4664 		return (qlt_send_els(qlt, cmd));
4665 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4666 		return (qlt_send_ct(qlt, cmd));
4667 	}
4668 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4669 
4670 	ASSERT(0);
4671 	return (FCT_FAILURE);
4672 }
4673 
/*
 * Send a solicited ELS using an ELS pass-through IOCB (entry type 0x53).
 * A single DMA buffer is used: the request payload is copied to offset 0
 * and space for the response is reserved at the 8-byte-aligned
 * param.resp_offset; qlt_handle_sol_els_completion() copies the response
 * back out on success.  Returns FCT_BUSY when the DMA buffer or a
 * request queue entry cannot be obtained.
 */
fct_status_t
qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area starts at the next 8-byte boundary past the req. */
	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	/* Stage the request payload and flush it to the device. */
	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
	    els->els_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	/* Build the ELS pass-through IOCB. */
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
	QMEM_WR16(qlt, (&req[0x14]), 1);
	req[0x16] = els->els_req_payload[0];
	/* In P2P topology, supply our local port id in the IOCB. */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
	/* Request DSD: address + length, then the response DSD. */
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset));
	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4733 
/*
 * Send a solicited CT pass-through (entry type 0x29).  Same DMA buffer
 * layout as qlt_send_els(): request at offset 0, response space at the
 * 8-byte-aligned param.resp_offset; qlt_handle_ct_completion() copies
 * the response back on success.  Returns FCT_BUSY when the DMA buffer
 * or a request queue entry cannot be obtained.
 */
fct_status_t
qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_sol_ct_t *ct;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area starts at the next 8-byte boundary past the req. */
	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	/* Stage the CT request payload and flush it to the device. */
	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
	    ct->ct_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->req_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);
	}
	/* Build the CT pass-through IOCB. */
	bzero(req, IOCB_SIZE);
	req[0] = 0x29; req[1] = 1;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
	QMEM_WR16(qlt, (&req[0x14]), 1);

	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);

	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset));		/* RESPONSE DSD */
	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);

	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);

	return (FCT_SUCCESS);
}
4789 
4790 
4791 /*
4792  * All QLT_FIRMWARE_* will mainly be handled in this function
4793  * It can not be called in interrupt context
4794  *
4795  * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4796  * and qlt_ioctl_lock
4797  */
4798 static fct_status_t
4799 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4800 {
4801 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4802 	int		i;
4803 	int		retries, n;
4804 	uint_t		size_left;
4805 	char		c = ' ';
4806 	uint32_t	addr, endaddr, words_to_read;
4807 	caddr_t		buf;
4808 	fct_status_t	ret;
4809 
4810 	mutex_enter(&qlt->qlt_ioctl_lock);
4811 	/*
4812 	 * To make sure that there's no outstanding dumping task
4813 	 */
4814 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4815 		mutex_exit(&qlt->qlt_ioctl_lock);
4816 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4817 		    qlt->qlt_ioctl_flags);
4818 		EL(qlt, "outstanding\n");
4819 		return (FCT_FAILURE);
4820 	}
4821 
4822 	/*
4823 	 * To make sure not to overwrite existing dump
4824 	 */
4825 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4826 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4827 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4828 		/*
4829 		 * If we have alreay one dump, but it's not triggered by user
4830 		 * and the user hasn't fetched it, we shouldn't dump again.
4831 		 */
4832 		mutex_exit(&qlt->qlt_ioctl_lock);
4833 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4834 		    qlt->qlt_ioctl_flags);
4835 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
4836 		    "is one already outstanding.", qlt->instance);
4837 		return (FCT_FAILURE);
4838 	}
4839 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
4840 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
4841 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
4842 	} else {
4843 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
4844 	}
4845 	mutex_exit(&qlt->qlt_ioctl_lock);
4846 
4847 	size_left = QLT_FWDUMP_BUFSIZE;
4848 	if (!qlt->qlt_fwdump_buf) {
4849 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
4850 		/*
4851 		 * It's the only place that we allocate buf for dumping. After
4852 		 * it's allocated, we will use it until the port is detached.
4853 		 */
4854 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
4855 	}
4856 
4857 	/*
4858 	 * Start to dump firmware
4859 	 */
4860 	buf = (caddr_t)qlt->qlt_fwdump_buf;
4861 
4862 	/*
4863 	 * Print the ISP firmware revision number and attributes information
4864 	 * Read the RISC to Host Status register
4865 	 */
4866 	n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
4867 	    "Attributes %04x\n\nR2H Status Register\n%08x",
4868 	    qlt->fw_major, qlt->fw_minor,
4869 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
4870 	buf += n; size_left -= n;
4871 
4872 	/*
4873 	 * Before pausing the RISC, make sure no mailbox can execute
4874 	 */
4875 	mutex_enter(&qlt->mbox_lock);
4876 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
4877 		clock_t timeout = drv_usectohz(1000000);
4878 		/*
4879 		 * Wait to grab the mailboxes
4880 		 */
4881 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
4882 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
4883 			(void) cv_reltimedwait(&qlt->mbox_cv, &qlt->mbox_lock,
4884 			    timeout, TR_CLOCK_TICK);
4885 			if (retries > 5) {
4886 				mutex_exit(&qlt->mbox_lock);
4887 				EL(qlt, "can't drain out mailbox commands\n");
4888 				goto dump_fail;
4889 			}
4890 		}
4891 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
4892 		cv_broadcast(&qlt->mbox_cv);
4893 	}
4894 	mutex_exit(&qlt->mbox_lock);
4895 
4896 	/*
4897 	 * Pause the RISC processor
4898 	 */
4899 	REG_WR32(qlt, REG_HCCR, HCCR_CMD_SET_RISC_PAUSE);
4900 
4901 	/*
4902 	 * Wait for the RISC processor to pause
4903 	 */
4904 	for (i = 0; i < 200; i++) {
4905 		if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
4906 			break;
4907 		}
4908 		drv_usecwait(1000);
4909 	}
4910 	if (i == 200) {
4911 		EL(qlt, "can't pause\n");
4912 		return (FCT_FAILURE);
4913 	}
4914 
4915 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
4916 		goto over_25xx_specific_dump;
4917 	}
4918 	n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
4919 	buf += n; size_left -= n;
4920 	REG_WR32(qlt, 0x54, 0x7000);
4921 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4922 	buf += n; size_left -= n;
4923 	REG_WR32(qlt, 0x54, 0x7010);
4924 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
4925 	buf += n; size_left -= n;
4926 	REG_WR32(qlt, 0x54, 0x7C00);
4927 
4928 	n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
4929 	buf += n; size_left -= n;
4930 	REG_WR32(qlt, 0xC0, 0x1);
4931 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
4932 	buf += n; size_left -= n;
4933 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
4934 	buf += n; size_left -= n;
4935 	REG_WR32(qlt, 0xC0, 0x0);
4936 
4937 over_25xx_specific_dump:;
4938 	n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
4939 	buf += n; size_left -= n;
4940 	/*
4941 	 * Capture data from 32 regsiters
4942 	 */
4943 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
4944 	buf += n; size_left -= n;
4945 
4946 	/*
4947 	 * Disable interrupts
4948 	 */
4949 	REG_WR32(qlt, 0xc, 0);
4950 
4951 	/*
4952 	 * Shadow registers
4953 	 */
4954 	n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
4955 	buf += n; size_left -= n;
4956 
4957 	REG_WR32(qlt, 0x54, 0xF70);
4958 	addr = 0xb0000000;
4959 	for (i = 0; i < 0xb; i++) {
4960 		if ((!qlt->qlt_25xx_chip) &&
4961 		    (!qlt->qlt_81xx_chip) &&
4962 		    (i >= 7)) {
4963 			break;
4964 		}
4965 		if (i && ((i & 7) == 0)) {
4966 			n = (int)snprintf(buf, size_left, "\n");
4967 			buf += n; size_left -= n;
4968 		}
4969 		REG_WR32(qlt, 0xF0, addr);
4970 		n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
4971 		buf += n; size_left -= n;
4972 		addr += 0x100000;
4973 	}
4974 
4975 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
4976 		REG_WR32(qlt, 0x54, 0x10);
4977 		n = (int)snprintf(buf, size_left,
4978 		    "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
4979 		buf += n; size_left -= n;
4980 	}
4981 
4982 	/*
4983 	 * Mailbox registers
4984 	 */
4985 	n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
4986 	buf += n; size_left -= n;
4987 	for (i = 0; i < 32; i += 2) {
4988 		if ((i + 2) & 15) {
4989 			c = ' ';
4990 		} else {
4991 			c = '\n';
4992 		}
4993 		n = (int)snprintf(buf, size_left, "%04x %04x%c",
4994 		    REG_RD16(qlt, 0x80 + (i << 1)),
4995 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
4996 		buf += n; size_left -= n;
4997 	}
4998 
4999 	/*
5000 	 * Transfer sequence registers
5001 	 */
5002 	n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
5003 	buf += n; size_left -= n;
5004 
5005 	REG_WR32(qlt, 0x54, 0xBF00);
5006 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5007 	buf += n; size_left -= n;
5008 	REG_WR32(qlt, 0x54, 0xBF10);
5009 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5010 	buf += n; size_left -= n;
5011 	REG_WR32(qlt, 0x54, 0xBF20);
5012 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5013 	buf += n; size_left -= n;
5014 	REG_WR32(qlt, 0x54, 0xBF30);
5015 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5016 	buf += n; size_left -= n;
5017 	REG_WR32(qlt, 0x54, 0xBF40);
5018 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5019 	buf += n; size_left -= n;
5020 	REG_WR32(qlt, 0x54, 0xBF50);
5021 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5022 	buf += n; size_left -= n;
5023 	REG_WR32(qlt, 0x54, 0xBF60);
5024 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5025 	buf += n; size_left -= n;
5026 	REG_WR32(qlt, 0x54, 0xBF70);
5027 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5028 	buf += n; size_left -= n;
5029 	n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5030 	buf += n; size_left -= n;
5031 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5032 		REG_WR32(qlt, 0x54, 0xBFC0);
5033 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5034 		buf += n; size_left -= n;
5035 		REG_WR32(qlt, 0x54, 0xBFD0);
5036 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5037 		buf += n; size_left -= n;
5038 	}
5039 	REG_WR32(qlt, 0x54, 0xBFE0);
5040 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5041 	buf += n; size_left -= n;
5042 	n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5043 	buf += n; size_left -= n;
5044 	REG_WR32(qlt, 0x54, 0xBFF0);
5045 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5046 	buf += n; size_left -= n;
5047 
5048 	/*
5049 	 * Receive sequence registers
5050 	 */
5051 	n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5052 	buf += n; size_left -= n;
5053 	REG_WR32(qlt, 0x54, 0xFF00);
5054 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5055 	buf += n; size_left -= n;
5056 	REG_WR32(qlt, 0x54, 0xFF10);
5057 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5058 	buf += n; size_left -= n;
5059 	REG_WR32(qlt, 0x54, 0xFF20);
5060 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5061 	buf += n; size_left -= n;
5062 	REG_WR32(qlt, 0x54, 0xFF30);
5063 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5064 	buf += n; size_left -= n;
5065 	REG_WR32(qlt, 0x54, 0xFF40);
5066 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5067 	buf += n; size_left -= n;
5068 	REG_WR32(qlt, 0x54, 0xFF50);
5069 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5070 	buf += n; size_left -= n;
5071 	REG_WR32(qlt, 0x54, 0xFF60);
5072 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5073 	buf += n; size_left -= n;
5074 	REG_WR32(qlt, 0x54, 0xFF70);
5075 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5076 	buf += n; size_left -= n;
5077 	n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5078 	buf += n; size_left -= n;
5079 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5080 		REG_WR32(qlt, 0x54, 0xFFC0);
5081 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5082 		buf += n; size_left -= n;
5083 	}
5084 	REG_WR32(qlt, 0x54, 0xFFD0);
5085 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5086 	buf += n; size_left -= n;
5087 	n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5088 	buf += n; size_left -= n;
5089 	REG_WR32(qlt, 0x54, 0xFFE0);
5090 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5091 	buf += n; size_left -= n;
5092 	n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5093 	buf += n; size_left -= n;
5094 	REG_WR32(qlt, 0x54, 0xFFF0);
5095 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5096 	buf += n; size_left -= n;
5097 
5098 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5099 		goto over_aseq_regs;
5100 
5101 	/*
5102 	 * Auxiliary sequencer registers
5103 	 */
5104 	n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5105 	buf += n; size_left -= n;
5106 	REG_WR32(qlt, 0x54, 0xB000);
5107 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5108 	buf += n; size_left -= n;
5109 	REG_WR32(qlt, 0x54, 0xB010);
5110 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5111 	buf += n; size_left -= n;
5112 	REG_WR32(qlt, 0x54, 0xB020);
5113 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5114 	buf += n; size_left -= n;
5115 	REG_WR32(qlt, 0x54, 0xB030);
5116 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5117 	buf += n; size_left -= n;
5118 	REG_WR32(qlt, 0x54, 0xB040);
5119 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5120 	buf += n; size_left -= n;
5121 	REG_WR32(qlt, 0x54, 0xB050);
5122 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5123 	buf += n; size_left -= n;
5124 	REG_WR32(qlt, 0x54, 0xB060);
5125 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5126 	buf += n; size_left -= n;
5127 	REG_WR32(qlt, 0x54, 0xB070);
5128 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5129 	buf += n; size_left -= n;
5130 	n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5131 	buf += n; size_left -= n;
5132 	REG_WR32(qlt, 0x54, 0xB0C0);
5133 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5134 	buf += n; size_left -= n;
5135 	REG_WR32(qlt, 0x54, 0xB0D0);
5136 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5137 	buf += n; size_left -= n;
5138 	n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5139 	buf += n; size_left -= n;
5140 	REG_WR32(qlt, 0x54, 0xB0E0);
5141 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5142 	buf += n; size_left -= n;
5143 	n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5144 	buf += n; size_left -= n;
5145 	REG_WR32(qlt, 0x54, 0xB0F0);
5146 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5147 	buf += n; size_left -= n;
5148 
5149 over_aseq_regs:;
5150 
5151 	/*
5152 	 * Command DMA registers
5153 	 */
5154 	n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5155 	buf += n; size_left -= n;
5156 	REG_WR32(qlt, 0x54, 0x7100);
5157 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5158 	buf += n; size_left -= n;
5159 
5160 	/*
5161 	 * Queues
5162 	 */
5163 	n = (int)snprintf(buf, size_left,
5164 	    "\nRequest0 Queue DMA Channel registers\n");
5165 	buf += n; size_left -= n;
5166 	REG_WR32(qlt, 0x54, 0x7200);
5167 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5168 	buf += n; size_left -= n;
5169 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5170 	buf += n; size_left -= n;
5171 
5172 	n = (int)snprintf(buf, size_left,
5173 	    "\n\nResponse0 Queue DMA Channel registers\n");
5174 	buf += n; size_left -= n;
5175 	REG_WR32(qlt, 0x54, 0x7300);
5176 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5177 	buf += n; size_left -= n;
5178 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5179 	buf += n; size_left -= n;
5180 
5181 	n = (int)snprintf(buf, size_left,
5182 	    "\n\nRequest1 Queue DMA Channel registers\n");
5183 	buf += n; size_left -= n;
5184 	REG_WR32(qlt, 0x54, 0x7400);
5185 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5186 	buf += n; size_left -= n;
5187 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5188 	buf += n; size_left -= n;
5189 
5190 	/*
5191 	 * Transmit DMA registers
5192 	 */
5193 	n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5194 	buf += n; size_left -= n;
5195 	REG_WR32(qlt, 0x54, 0x7600);
5196 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5197 	buf += n; size_left -= n;
5198 	REG_WR32(qlt, 0x54, 0x7610);
5199 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5200 	buf += n; size_left -= n;
5201 	n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5202 	buf += n; size_left -= n;
5203 	REG_WR32(qlt, 0x54, 0x7620);
5204 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5205 	buf += n; size_left -= n;
5206 	REG_WR32(qlt, 0x54, 0x7630);
5207 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5208 	buf += n; size_left -= n;
5209 	n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5210 	buf += n; size_left -= n;
5211 	REG_WR32(qlt, 0x54, 0x7640);
5212 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5213 	buf += n; size_left -= n;
5214 	REG_WR32(qlt, 0x54, 0x7650);
5215 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5216 	buf += n; size_left -= n;
5217 	n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5218 	buf += n; size_left -= n;
5219 	REG_WR32(qlt, 0x54, 0x7660);
5220 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5221 	buf += n; size_left -= n;
5222 	REG_WR32(qlt, 0x54, 0x7670);
5223 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5224 	buf += n; size_left -= n;
5225 	n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5226 	buf += n; size_left -= n;
5227 	REG_WR32(qlt, 0x54, 0x7680);
5228 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5229 	buf += n; size_left -= n;
5230 	REG_WR32(qlt, 0x54, 0x7690);
5231 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5232 	buf += n; size_left -= n;
5233 	n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5234 	buf += n; size_left -= n;
5235 	REG_WR32(qlt, 0x54, 0x76A0);
5236 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5237 	buf += n; size_left -= n;
5238 
5239 	/*
5240 	 * Receive DMA registers
5241 	 */
5242 	n = (int)snprintf(buf, size_left,
5243 	    "\nRCV Thread 0 Data DMA registers\n");
5244 	buf += n; size_left -= n;
5245 	REG_WR32(qlt, 0x54, 0x7700);
5246 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5247 	buf += n; size_left -= n;
5248 	REG_WR32(qlt, 0x54, 0x7710);
5249 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5250 	buf += n; size_left -= n;
5251 	n = (int)snprintf(buf, size_left,
5252 	    "\nRCV Thread 1 Data DMA registers\n");
5253 	buf += n; size_left -= n;
5254 	REG_WR32(qlt, 0x54, 0x7720);
5255 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5256 	buf += n; size_left -= n;
5257 	REG_WR32(qlt, 0x54, 0x7730);
5258 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5259 	buf += n; size_left -= n;
5260 
5261 	/*
5262 	 * RISC registers
5263 	 */
5264 	n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5265 	buf += n; size_left -= n;
5266 	REG_WR32(qlt, 0x54, 0x0F00);
5267 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5268 	buf += n; size_left -= n;
5269 	REG_WR32(qlt, 0x54, 0x0F10);
5270 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5271 	buf += n; size_left -= n;
5272 	REG_WR32(qlt, 0x54, 0x0F20);
5273 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5274 	buf += n; size_left -= n;
5275 	REG_WR32(qlt, 0x54, 0x0F30);
5276 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5277 	buf += n; size_left -= n;
5278 	REG_WR32(qlt, 0x54, 0x0F40);
5279 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5280 	buf += n; size_left -= n;
5281 	REG_WR32(qlt, 0x54, 0x0F50);
5282 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5283 	buf += n; size_left -= n;
5284 	REG_WR32(qlt, 0x54, 0x0F60);
5285 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5286 	buf += n; size_left -= n;
5287 	REG_WR32(qlt, 0x54, 0x0F70);
5288 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5289 	buf += n; size_left -= n;
5290 
5291 	/*
5292 	 * Local memory controller registers
5293 	 */
5294 	n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5295 	buf += n; size_left -= n;
5296 	REG_WR32(qlt, 0x54, 0x3000);
5297 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5298 	buf += n; size_left -= n;
5299 	REG_WR32(qlt, 0x54, 0x3010);
5300 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5301 	buf += n; size_left -= n;
5302 	REG_WR32(qlt, 0x54, 0x3020);
5303 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5304 	buf += n; size_left -= n;
5305 	REG_WR32(qlt, 0x54, 0x3030);
5306 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5307 	buf += n; size_left -= n;
5308 	REG_WR32(qlt, 0x54, 0x3040);
5309 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5310 	buf += n; size_left -= n;
5311 	REG_WR32(qlt, 0x54, 0x3050);
5312 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5313 	buf += n; size_left -= n;
5314 	REG_WR32(qlt, 0x54, 0x3060);
5315 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5316 	buf += n; size_left -= n;
5317 
5318 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5319 		REG_WR32(qlt, 0x54, 0x3070);
5320 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5321 		buf += n; size_left -= n;
5322 	}
5323 
5324 	/*
5325 	 * Fibre protocol module regsiters
5326 	 */
5327 	n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5328 	buf += n; size_left -= n;
5329 	REG_WR32(qlt, 0x54, 0x4000);
5330 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5331 	buf += n; size_left -= n;
5332 	REG_WR32(qlt, 0x54, 0x4010);
5333 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5334 	buf += n; size_left -= n;
5335 	REG_WR32(qlt, 0x54, 0x4020);
5336 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5337 	buf += n; size_left -= n;
5338 	REG_WR32(qlt, 0x54, 0x4030);
5339 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5340 	buf += n; size_left -= n;
5341 	REG_WR32(qlt, 0x54, 0x4040);
5342 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5343 	buf += n; size_left -= n;
5344 	REG_WR32(qlt, 0x54, 0x4050);
5345 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5346 	buf += n; size_left -= n;
5347 	REG_WR32(qlt, 0x54, 0x4060);
5348 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5349 	buf += n; size_left -= n;
5350 	REG_WR32(qlt, 0x54, 0x4070);
5351 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5352 	buf += n; size_left -= n;
5353 	REG_WR32(qlt, 0x54, 0x4080);
5354 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5355 	buf += n; size_left -= n;
5356 	REG_WR32(qlt, 0x54, 0x4090);
5357 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5358 	buf += n; size_left -= n;
5359 	REG_WR32(qlt, 0x54, 0x40A0);
5360 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5361 	buf += n; size_left -= n;
5362 	REG_WR32(qlt, 0x54, 0x40B0);
5363 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5364 	buf += n; size_left -= n;
5365 	if (qlt->qlt_81xx_chip) {
5366 		REG_WR32(qlt, 0x54, 0x40C0);
5367 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5368 		buf += n; size_left -= n;
5369 		REG_WR32(qlt, 0x54, 0x40D0);
5370 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5371 		buf += n; size_left -= n;
5372 	}
5373 
5374 	/*
5375 	 * Fibre buffer registers
5376 	 */
5377 	n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5378 	buf += n; size_left -= n;
5379 	REG_WR32(qlt, 0x54, 0x6000);
5380 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5381 	buf += n; size_left -= n;
5382 	REG_WR32(qlt, 0x54, 0x6010);
5383 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5384 	buf += n; size_left -= n;
5385 	REG_WR32(qlt, 0x54, 0x6020);
5386 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5387 	buf += n; size_left -= n;
5388 	REG_WR32(qlt, 0x54, 0x6030);
5389 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5390 	buf += n; size_left -= n;
5391 	REG_WR32(qlt, 0x54, 0x6040);
5392 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5393 	buf += n; size_left -= n;
5394 	REG_WR32(qlt, 0x54, 0x6100);
5395 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5396 	buf += n; size_left -= n;
5397 	REG_WR32(qlt, 0x54, 0x6130);
5398 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5399 	buf += n; size_left -= n;
5400 	REG_WR32(qlt, 0x54, 0x6150);
5401 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5402 	buf += n; size_left -= n;
5403 	REG_WR32(qlt, 0x54, 0x6170);
5404 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5405 	buf += n; size_left -= n;
5406 	REG_WR32(qlt, 0x54, 0x6190);
5407 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5408 	buf += n; size_left -= n;
5409 	REG_WR32(qlt, 0x54, 0x61B0);
5410 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5411 	buf += n; size_left -= n;
5412 	if (qlt->qlt_81xx_chip) {
5413 		REG_WR32(qlt, 0x54, 0x61C0);
5414 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5415 		buf += n; size_left -= n;
5416 	}
5417 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5418 		REG_WR32(qlt, 0x54, 0x6F00);
5419 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5420 		buf += n; size_left -= n;
5421 	}
5422 
5423 	qlt->intr_sneak_counter = 10;
5424 	qlt_disable_intr(qlt);
5425 	mutex_enter(&qlt->intr_lock);
5426 	qlt->qlt_intr_enabled = 0;
5427 	(void) qlt_reset_chip_and_download_fw(qlt, 1);
5428 	drv_usecwait(20);
5429 	qlt->intr_sneak_counter = 0;
5430 	mutex_exit(&qlt->intr_lock);
5431 
5432 	/*
5433 	 * Memory
5434 	 */
5435 	n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5436 	buf += n; size_left -= n;
5437 
5438 	addr = 0x20000;
5439 	endaddr = 0x22000;
5440 	words_to_read = 0;
5441 	while (addr < endaddr) {
5442 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5443 		if ((words_to_read + addr) > endaddr) {
5444 			words_to_read = endaddr - addr;
5445 		}
5446 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5447 		    QLT_SUCCESS) {
5448 			EL(qlt, "Error reading risc ram - CODE RAM status="
5449 			    "%llxh\n", ret);
5450 			goto dump_fail;
5451 		}
5452 
5453 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5454 		buf += n; size_left -= n;
5455 
5456 		if (size_left < 100000) {
5457 			EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5458 			    size_left);
5459 			goto dump_ok;
5460 		}
5461 		addr += words_to_read;
5462 	}
5463 
5464 	n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5465 	buf += n; size_left -= n;
5466 
5467 	addr = 0x100000;
5468 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5469 	endaddr++;
5470 	if (endaddr & 7) {
5471 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
5472 	}
5473 
5474 	words_to_read = 0;
5475 	while (addr < endaddr) {
5476 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5477 		if ((words_to_read + addr) > endaddr) {
5478 			words_to_read = endaddr - addr;
5479 		}
5480 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5481 		    QLT_SUCCESS) {
5482 			EL(qlt, "Error reading risc ram - EXT RAM status="
5483 			    "%llxh\n", ret);
5484 			goto dump_fail;
5485 		}
5486 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5487 		buf += n; size_left -= n;
5488 		if (size_left < 100000) {
5489 			EL(qlt, "run out of space - EXT RAM\n");
5490 			goto dump_ok;
5491 		}
5492 		addr += words_to_read;
5493 	}
5494 
5495 	/*
5496 	 * Label the end tag
5497 	 */
5498 	n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5499 	buf += n; size_left -= n;
5500 
5501 	/*
5502 	 * Queue dumping
5503 	 */
5504 	n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5505 	buf += n; size_left -= n;
5506 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5507 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
5508 	buf += n; size_left -= n;
5509 
5510 	n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5511 	buf += n; size_left -= n;
5512 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5513 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5514 	buf += n; size_left -= n;
5515 
5516 	n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5517 	buf += n; size_left -= n;
5518 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5519 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5520 	buf += n; size_left -= n;
5521 
5522 	n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5523 	buf += n; size_left -= n;
5524 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5525 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5526 	buf += n; size_left -= n;
5527 
5528 	/*
5529 	 * Label dump reason
5530 	 */
5531 	n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5532 	    qlt->qlt_port_alias, ssci->st_additional_info);
5533 	buf += n; size_left -= n;
5534 
5535 dump_ok:
5536 	EL(qlt, "left-%d\n", size_left);
5537 
5538 	mutex_enter(&qlt->qlt_ioctl_lock);
5539 	qlt->qlt_ioctl_flags &=
5540 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5541 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5542 	mutex_exit(&qlt->qlt_ioctl_lock);
5543 	return (FCT_SUCCESS);
5544 
5545 dump_fail:
5546 	EL(qlt, "dump not done\n");
5547 	mutex_enter(&qlt->qlt_ioctl_lock);
5548 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5549 	mutex_exit(&qlt->qlt_ioctl_lock);
5550 	return (FCT_FAILURE);
5551 }
5552 
5553 static int
5554 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5555     uint_t size_left)
5556 {
5557 	int		i;
5558 	int		n;
5559 	char		c = ' ';
5560 
5561 	for (i = 0, n = 0; i < count; i++) {
5562 		if ((i + 1) & 7) {
5563 			c = ' ';
5564 		} else {
5565 			c = '\n';
5566 		}
5567 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5568 		    "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5569 	}
5570 	return (n);
5571 }
5572 
5573 static int
5574 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5575     caddr_t buf, uint_t size_left)
5576 {
5577 	int		i;
5578 	int		n;
5579 	char		c = ' ';
5580 	uint32_t	*ptr;
5581 
5582 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5583 	for (i = 0, n = 0; i < words; i++) {
5584 		if ((i & 7) == 0) {
5585 			n = (int)(n + (int)snprintf(&buf[n],
5586 			    (uint_t)(size_left - n), "%08x: ", addr + i));
5587 		}
5588 		if ((i + 1) & 7) {
5589 			c = ' ';
5590 		} else {
5591 			c = '\n';
5592 		}
5593 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5594 		    "%08x%c", ptr[i], c));
5595 	}
5596 	return (n);
5597 }
5598 
5599 static int
5600 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5601     uint_t size_left)
5602 {
5603 	int		i;
5604 	int		n;
5605 	char		c = ' ';
5606 	int		words;
5607 	uint16_t	*ptr;
5608 	uint16_t	w;
5609 
5610 	words = entries * 32;
5611 	ptr = (uint16_t *)qadr;
5612 	for (i = 0, n = 0; i < words; i++) {
5613 		if ((i & 7) == 0) {
5614 			n = (int)(n + (int)snprintf(&buf[n],
5615 			    (uint_t)(size_left - n), "%05x: ", i));
5616 		}
5617 		if ((i + 1) & 7) {
5618 			c = ' ';
5619 		} else {
5620 			c = '\n';
5621 		}
5622 		w = QMEM_RD16(qlt, &ptr[i]);
5623 		n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5624 		    w, c));
5625 	}
5626 	return (n);
5627 }
5628 
5629 /*
5630  * Only called by debug dump. Interrupts are disabled and mailboxes alongwith
5631  * mailbox ram is available.
5632  * Copy data from RISC RAM to system memory
5633  */
static fct_status_t
qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
{
	uint64_t	da;
	fct_status_t	ret;

	/*
	 * Issue mailbox command 0xc (dump RISC RAM extended — TODO confirm
	 * against the ISP24xx firmware interface spec) via raw register
	 * writes; the normal mailbox path is unavailable because this runs
	 * from the debug-dump context with interrupts disabled.
	 */
	REG_WR16(qlt, REG_MBOX(0), 0xc);
	/* DMA target: the mailbox staging area inside queue memory */
	da = qlt->queue_mem_cookie.dmac_laddress;
	da += MBOX_DMA_MEM_OFFSET;

	/*
	 * System destination address
	 * (64-bit PCI address split across mbox 3/2/7/6, low word first)
	 */
	REG_WR16(qlt, REG_MBOX(3), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(2), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(7), da & 0xffff);
	da >>= 16;
	REG_WR16(qlt, REG_MBOX(6), da & 0xffff);

	/*
	 * Length
	 * (transfer size in 32-bit words, split across mbox 5/4)
	 */
	REG_WR16(qlt, REG_MBOX(5), words & 0xffff);
	REG_WR16(qlt, REG_MBOX(4), ((words >> 16) & 0xffff));

	/*
	 * RISC source address
	 * (split across mbox 1/8)
	 */
	REG_WR16(qlt, REG_MBOX(1), addr & 0xffff);
	REG_WR16(qlt, REG_MBOX(8), ((addr >> 16) & 0xffff));

	ret = qlt_raw_mailbox_command(qlt);
	/*
	 * NOTE(review): 0xA0000000 written to HCCR presumably clears the
	 * RISC-to-PCI interrupt raised by the mailbox completion — confirm
	 * against the HCCR command encoding in qlt.h.
	 */
	REG_WR32(qlt, REG_HCCR, 0xA0000000);
	if (ret == QLT_SUCCESS) {
		/* make the DMA'd words visible to the CPU before parsing */
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
	} else {
		EL(qlt, "qlt_raw_mailbox_command=ch status=%llxh\n", ret);
	}
	return (ret);
}
5677 
/*
 * Queue a "verify firmware" style IOCB (entry type 0x1b) on the request
 * ring.  Fire-and-forget: the completion is handled asynchronously by
 * qlt_handle_verify_fw_completion().
 */
static void
qlt_verify_fw(qlt_state_t *qlt)
{
	caddr_t req;
	/* Just put it on the request queue */
	mutex_enter(&qlt->req_lock);
	req = qlt_get_req_entries(qlt, 1);
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		/* XXX handle this */
		return;
	}

	bzero(req, IOCB_SIZE);

	/* byte 0: IOCB entry type 0x1b; byte 1: entry count = 1 */
	req[0] = 0x1b;
	req[1] = 1;

	/*
	 * Remaining fields are written at fixed byte offsets defined by the
	 * IOCB layout; the exact meanings of the 0xffffffff handle and the
	 * 0x80010300 value at offset 0x14 come from the firmware interface
	 * spec — TODO confirm against the ISP24xx IOCB definitions.
	 */
	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
	QMEM_WR16(qlt, (&req[0x8]), 1);    /*  options - don't update */
	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);

	qlt_submit_req_entries(qlt, 1);
	mutex_exit(&qlt->req_lock);
}
5703 
5704 static void
5705 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5706 {
5707 	uint16_t	status;
5708 	char		info[80];
5709 
5710 	status = QMEM_RD16(qlt, rsp+8);
5711 	if (status != 0) {
5712 		(void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
5713 		    "status:%x, rsp:%p", status, (void *)rsp);
5714 		if (status == 3) {
5715 			uint16_t error_code;
5716 
5717 			error_code = QMEM_RD16(qlt, rsp+0xA);
5718 			(void) snprintf(info, 80, "qlt_handle_verify_fw_"
5719 			    "completion: error code:%x", error_code);
5720 		}
5721 	}
5722 }
5723 
5724 /*
5725  * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5726  *
5727  * Input:	Pointer to the adapter state structure.
5728  * Returns:	Success or Failure.
5729  * Context:	Kernel context.
5730  */
5731 static int
5732 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5733 {
5734 	int	rval = DDI_SUCCESS;
5735 
5736 	qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5737 	    kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5738 
5739 	if (qlt->el_trace_desc == NULL) {
5740 		cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5741 		    qlt->instance);
5742 		rval = DDI_FAILURE;
5743 	} else {
5744 		qlt->el_trace_desc->next = 0;
5745 		qlt->el_trace_desc->trace_buffer =
5746 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5747 
5748 		if (qlt->el_trace_desc->trace_buffer == NULL) {
5749 			cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5750 			    qlt->instance);
5751 			kmem_free(qlt->el_trace_desc,
5752 			    sizeof (qlt_el_trace_desc_t));
5753 			qlt->el_trace_desc = NULL;
5754 			rval = DDI_FAILURE;
5755 		} else {
5756 			qlt->el_trace_desc->trace_buffer_size =
5757 			    EL_TRACE_BUF_SIZE;
5758 			mutex_init(&qlt->el_trace_desc->mutex, NULL,
5759 			    MUTEX_DRIVER, NULL);
5760 		}
5761 	}
5762 
5763 	return (rval);
5764 }
5765 
5766 /*
5767  * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5768  *
5769  * Input:	Pointer to the adapter state structure.
5770  * Returns:	Success or Failure.
5771  * Context:	Kernel context.
5772  */
5773 static int
5774 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5775 {
5776 	int	rval = DDI_SUCCESS;
5777 
5778 	if (qlt->el_trace_desc == NULL) {
5779 		cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5780 		    qlt->instance);
5781 		rval = DDI_FAILURE;
5782 	} else {
5783 		if (qlt->el_trace_desc->trace_buffer != NULL) {
5784 			kmem_free(qlt->el_trace_desc->trace_buffer,
5785 			    qlt->el_trace_desc->trace_buffer_size);
5786 		}
5787 		mutex_destroy(&qlt->el_trace_desc->mutex);
5788 		kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5789 		qlt->el_trace_desc = NULL;
5790 	}
5791 
5792 	return (rval);
5793 }
5794 
5795 /*
5796  * qlt_el_msg
5797  *	Extended logging message
5798  *
5799  * Input:
5800  *	qlt:	adapter state pointer.
5801  *	fn:	function name.
5802  *	ce:	level
5803  *	...:	Variable argument list.
5804  *
5805  * Context:
5806  *	Kernel/Interrupt context.
5807  */
5808 void
5809 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5810 {
5811 	char		*s, *fmt = 0, *fmt1 = 0;
5812 	char		fmt2[EL_BUFFER_RESERVE];
5813 	int		rval, tmp;
5814 	int		tracing = 0;
5815 	va_list		vl;
5816 
5817 	/* Tracing is the default but it can be disabled. */
5818 	if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5819 		tracing = 1;
5820 
5821 		mutex_enter(&qlt->el_trace_desc->mutex);
5822 
5823 		/*
5824 		 * Ensure enough space for the string. Wrap to
5825 		 * start when default message allocation size
5826 		 * would overrun the end.
5827 		 */
5828 		if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5829 		    qlt->el_trace_desc->trace_buffer_size) {
5830 			fmt = qlt->el_trace_desc->trace_buffer;
5831 			qlt->el_trace_desc->next = 0;
5832 		} else {
5833 			fmt = qlt->el_trace_desc->trace_buffer +
5834 			    qlt->el_trace_desc->next;
5835 		}
5836 	}
5837 
5838 	/* if no buffer use the stack */
5839 	if (fmt == NULL) {
5840 		fmt = fmt2;
5841 	}
5842 
5843 	va_start(vl, ce);
5844 
5845 	s = va_arg(vl, char *);
5846 
5847 	rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
5848 	    "QEL qlt(%d): %s, ", qlt->instance, fn);
5849 	fmt1 = fmt + rval;
5850 	tmp = (int)vsnprintf(fmt1,
5851 	    (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
5852 	rval += tmp;
5853 
5854 	/*
5855 	 * Calculate the offset where the next message will go,
5856 	 * skipping the NULL.
5857 	 */
5858 	if (tracing) {
5859 		uint16_t next = (uint16_t)(rval += 1);
5860 		qlt->el_trace_desc->next += next;
5861 		mutex_exit(&qlt->el_trace_desc->mutex);
5862 	}
5863 
5864 	va_end(vl);
5865 }
5866 
5867 /*
5868  * qlt_dump_el_trace_buffer
5869  *	 Outputs extended logging trace buffer.
5870  *
5871  * Input:
5872  *	qlt:	adapter state pointer.
5873  */
5874 void
5875 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
5876 {
5877 	char		*dump_start = NULL;
5878 	char		*dump_current = NULL;
5879 	char		*trace_start;
5880 	char		*trace_end;
5881 	int		wrapped = 0;
5882 	int		rval;
5883 
5884 	mutex_enter(&qlt->el_trace_desc->mutex);
5885 
5886 	rval = qlt_validate_trace_desc(qlt);
5887 	if (rval != NULL) {
5888 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
5889 		    qlt->instance);
5890 	} else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
5891 		dump_current = dump_start;
5892 		trace_start = qlt->el_trace_desc->trace_buffer;
5893 		trace_end = trace_start +
5894 		    qlt->el_trace_desc->trace_buffer_size;
5895 
5896 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
5897 		    qlt->instance,
5898 		    (void *)dump_start, (void *)trace_start);
5899 
5900 		while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
5901 		    (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
5902 			/* Show it... */
5903 			cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
5904 			    dump_current);
5905 			/* Make the next the current */
5906 			dump_current += (strlen(dump_current) + 1);
5907 			/* check for wrap */
5908 			if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
5909 				dump_current = trace_start;
5910 				wrapped = 1;
5911 			} else if (wrapped) {
5912 				/* Don't go past next. */
5913 				if ((trace_start + qlt->el_trace_desc->next) <=
5914 				    dump_current) {
5915 					break;
5916 				}
5917 			} else if (*dump_current == NULL) {
5918 				break;
5919 			}
5920 		}
5921 	}
5922 	mutex_exit(&qlt->el_trace_desc->mutex);
5923 }
5924 
5925 /*
5926  * qlt_validate_trace_desc
5927  *	 Ensures the extended logging trace descriptor is good
5928  *
5929  * Input:
5930  *	qlt:	adapter state pointer.
5931  *
5932  * Returns:
5933  *	ql local function return status code.
5934  */
5935 static int
5936 qlt_validate_trace_desc(qlt_state_t *qlt)
5937 {
5938 	int	rval = DDI_SUCCESS;
5939 
5940 	if (qlt->el_trace_desc == NULL) {
5941 		rval = DDI_FAILURE;
5942 	} else if (qlt->el_trace_desc->trace_buffer == NULL) {
5943 		rval = DDI_FAILURE;
5944 	}
5945 	return (rval);
5946 }
5947 
5948 /*
5949  * qlt_find_trace_start
5950  *	 Locate the oldest extended logging trace entry.
5951  *
5952  * Input:
5953  *	qlt:	adapter state pointer.
5954  *
5955  * Returns:
5956  *	Pointer to a string.
5957  *
5958  * Context:
5959  *	Kernel/Interrupt context.
5960  */
5961 static char *
5962 qlt_find_trace_start(qlt_state_t *qlt)
5963 {
5964 	char	*trace_start = 0;
5965 	char	*trace_next  = 0;
5966 
5967 	trace_next = qlt->el_trace_desc->trace_buffer +
5968 	    qlt->el_trace_desc->next;
5969 
5970 	/*
5971 	 * If the buffer has not wrapped next will point at a null so
5972 	 * start is the beginning of the buffer.  If next points at a char
5973 	 * then we must traverse the buffer until a null is detected and
5974 	 * that will be the beginning of the oldest whole object in the buffer
5975 	 * which is the start.
5976 	 */
5977 
5978 	if ((trace_next + EL_BUFFER_RESERVE) >=
5979 	    (qlt->el_trace_desc->trace_buffer +
5980 	    qlt->el_trace_desc->trace_buffer_size)) {
5981 		trace_start = qlt->el_trace_desc->trace_buffer;
5982 	} else if (*trace_next != NULL) {
5983 		trace_start = trace_next + (strlen(trace_next) + 1);
5984 	} else {
5985 		trace_start = qlt->el_trace_desc->trace_buffer;
5986 	}
5987 	return (trace_start);
5988 }
5989 
5990 
5991 static int
5992 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
5993 {
5994 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
5995 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
5996 }
5997 
5998 static int
5999 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6000 {
6001 	return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
6002 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, prop_val));
6003 }
6004 
6005 static int
6006 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6007 {
6008 	char		instance_prop[256];
6009 
6010 	/* Get adapter instance specific parameter. */
6011 	(void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
6012 	return (qlt_read_string_prop(qlt, instance_prop, prop_val));
6013 }
6014 
6015 static int
6016 qlt_convert_string_to_ull(char *prop, int radix,
6017     u_longlong_t *result)
6018 {
6019 	return (ddi_strtoull((const char *)prop, 0, radix, result));
6020 }
6021 
6022 static boolean_t
6023 qlt_wwn_overload_prop(qlt_state_t *qlt)
6024 {
6025 	char		*prop_val = 0;
6026 	int		rval;
6027 	int		radix;
6028 	u_longlong_t	wwnn = 0, wwpn = 0;
6029 	boolean_t	overloaded = FALSE;
6030 
6031 	radix = 16;
6032 
6033 	rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
6034 	if (rval == DDI_PROP_SUCCESS) {
6035 		rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
6036 	}
6037 	if (rval == DDI_PROP_SUCCESS) {
6038 		rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
6039 		    &prop_val);
6040 		if (rval == DDI_PROP_SUCCESS) {
6041 			rval = qlt_convert_string_to_ull(prop_val, radix,
6042 			    &wwpn);
6043 		}
6044 	}
6045 	if (rval == DDI_PROP_SUCCESS) {
6046 		overloaded = TRUE;
6047 		/* Overload the current node/port name nvram copy */
6048 		bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
6049 		BIG_ENDIAN_64(qlt->nvram->node_name);
6050 		bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
6051 		BIG_ENDIAN_64(qlt->nvram->port_name);
6052 	}
6053 	return (overloaded);
6054 }
6055 
6056 /*
6057  * prop_text - Return a pointer to a string describing the status
6058  *
6059  * Input:	prop_status = the return status from a property function.
6060  * Returns:	pointer to a string.
6061  * Context:	Kernel context.
6062  */
6063 char *
6064 prop_text(int prop_status)
6065 {
6066 	string_table_t *entry = &prop_status_tbl[0];
6067 
6068 	return (value2string(entry, prop_status, 0xFFFF));
6069 }
6070 
6071 /*
6072  * value2string	Return a pointer to a string associated with the value
6073  *
6074  * Input:	entry = the value to string table
6075  *		value = the value
6076  * Returns:	pointer to a string.
6077  * Context:	Kernel context.
6078  */
6079 char *
6080 value2string(string_table_t *entry, int value, int delimiter)
6081 {
6082 	for (; entry->value != delimiter; entry++) {
6083 		if (entry->value == value) {
6084 			break;
6085 		}
6086 	}
6087 	return (entry->string);
6088 }
6089 
6090 /*
6091  * qlt_chg_endian Change endianess of byte array.
6092  *
6093  * Input:	buf = array pointer.
6094  *		size = size of array in bytes.
6095  *
6096  * Context:	Interrupt or Kernel context.
6097  */
6098 void
6099 qlt_chg_endian(uint8_t buf[], size_t size)
6100 {
6101 	uint8_t byte;
6102 	size_t  cnt1;
6103 	size_t  cnt;
6104 
6105 	cnt1 = size - 1;
6106 	for (cnt = 0; cnt < size / 2; cnt++) {
6107 		byte = buf[cnt1];
6108 		buf[cnt1] = buf[cnt];
6109 		buf[cnt] = byte;
6110 		cnt1--;
6111 	}
6112 }
6113