1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009-2015 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
29  */
30 
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/stat.h>
34 #include <sys/pci.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/file.h>
38 #include <sys/cred.h>
39 #include <sys/byteorder.h>
40 #include <sys/atomic.h>
41 #include <sys/scsi/scsi.h>
42 
43 #include <sys/stmf_defines.h>
44 #include <sys/fct_defines.h>
45 #include <sys/stmf.h>
46 #include <sys/portif.h>
47 #include <sys/fct.h>
48 
49 #include "qlt.h"
50 #include "qlt_dma.h"
51 #include "qlt_ioctl.h"
52 #include "qlt_open.h"
53 #include <sys/stmf_ioctl.h>
54 
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static uint8_t *qlt_vpd_findtag(qlt_state_t *qlt, uint8_t *vpdbuf,
58     int8_t *opcode);
59 static int qlt_vpd_lookup(qlt_state_t *qlt, uint8_t *opcode, uint8_t *bp,
60     int32_t bplen);
61 static void qlt_enable_intr(qlt_state_t *);
62 static void qlt_disable_intr(qlt_state_t *);
63 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
64 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
65 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
66     uint32_t word_count, uint32_t risc_addr);
67 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
68 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
69     uint32_t dma_size);
70 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
71 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
72 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
73 static uint_t qlt_msix_resp_handler(caddr_t arg, caddr_t arg2);
74 static uint_t qlt_msix_default_handler(caddr_t arg, caddr_t arg2);
75 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
76     stmf_state_change_info_t *ssci);
77 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
78 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
79 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
80 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp,
81     uint16_t qi);
82 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
83 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
84 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
85 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
86     uint8_t *rsp);
87 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
88 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp, uint16_t qi);
89 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp,
90     uint16_t qi);
91 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
92 static fct_status_t qlt_read_vpd(qlt_state_t *qlt);
93 static fct_status_t qlt_read_rom_image(qlt_state_t *qlt);
94 static void qlt_verify_fw(qlt_state_t *qlt);
95 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
96 fct_status_t qlt_port_start(caddr_t arg);
97 fct_status_t qlt_port_stop(caddr_t arg);
98 fct_status_t qlt_port_online(qlt_state_t *qlt);
99 fct_status_t qlt_port_offline(qlt_state_t *qlt);
100 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
101     fct_link_info_t *li);
102 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
103 static fct_status_t qlt_force_lip(qlt_state_t *);
104 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
105     fct_flogi_xchg_t *fx);
106 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
107 void qlt_handle_resp_queue_update(qlt_state_t *qlt, uint16_t qi);
108 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
109     fct_remote_port_t *rp, fct_cmd_t *login);
110 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
111     fct_remote_port_t *rp);
112 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
113 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
114 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
115     fct_cmd_t *cmd, int terminate);
116 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
117 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
118 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
119     fct_cmd_t *cmd, uint32_t flags);
120 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
121 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
122 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
123 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
124 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
125 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
126 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
127     stmf_data_buf_t *dbuf, uint32_t ioflags);
128 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
129 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
130 static void qlt_release_intr(qlt_state_t *qlt);
131 static int qlt_setup_interrupts(qlt_state_t *qlt);
132 static void qlt_destroy_mutex(qlt_state_t *qlt);
133 
134 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
135     uint32_t words);
136 static fct_status_t qlt_mbx_mpi_ram(qlt_state_t *qlt, uint32_t addr,
137     uint32_t words, uint16_t direction);
138 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
139     caddr_t buf, uint_t size_left);
140 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
141     caddr_t buf, uint_t size_left);
142 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
143     int count, uint_t size_left);
144 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
145     cred_t *credp, int *rval);
146 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
147 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
148 
149 static int qlt_setup_msi(qlt_state_t *qlt);
150 static int qlt_setup_msix(qlt_state_t *qlt);
151 
152 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
153 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
154 
155 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
156 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
157 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
158     char **prop_val);
159 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
160 static int qlt_convert_string_to_ull(char *prop, int radix,
161     u_longlong_t *result);
162 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
163 static int qlt_quiesce(dev_info_t *dip);
164 static void qlt_disable_intr(qlt_state_t *qlt);
165 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
166     uint32_t);
167 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
168     uint32_t *);
169 static void qlt_mps_reset(qlt_state_t *qlt);
170 static void qlt_properties(qlt_state_t *qlt);
171 
172 static fct_status_t qlt_mq_create(qlt_state_t *qlt, int idx);
173 static fct_status_t qlt_mq_destroy(qlt_state_t *qlt);
174 
175 static fct_status_t qlt_27xx_get_dmp_template(qlt_state_t *);
176 static uint32_t qlt_27xx_dmp_parse_template(qlt_state_t *, qlt_dt_hdr_t *,
177 	uint8_t *, uint32_t);
178 static int qlt_27xx_dump_ram(qlt_state_t *, uint16_t, uint32_t,
179 	uint32_t, uint8_t *);
180 
/*
 * SETELSBIT: set the bit corresponding to ELS opcode `els' in the byte
 * bitmap `bmp'.  The byte index is (els >> 3), masked to keep it within a
 * 32-byte (0x1F) map; the bit within the byte is (els & 7).
 */
#define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))

/*
 * Interrupt-type tunables (settable via /etc/system).  Presumably consulted
 * when choosing between MSI-X, MSI and fixed interrupts during
 * qlt_setup_interrupts() -- confirm against that function.
 */
int qlt_enable_msix = 1;
int qlt_enable_msi = 1;


/* Table translating DDI property status codes to strings (for logging). */
string_table_t prop_status_tbl[] = DDI_PROP_STATUS();

/* Array to quickly calculate next free buf index to use */
#if 0
static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
#endif
194 
/* Character device entry points for the qlt control node. */
static struct cb_ops qlt_cb_ops = {
	qlt_open,		/* cb_open */
	qlt_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	qlt_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* cb_str (not a STREAMS driver) */
	D_MP | D_NEW		/* cb_flag */
};
212 
/* Device operations vector registered with the DDI framework. */
static struct dev_ops qlt_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	qlt_attach,		/* devo_attach */
	qlt_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&qlt_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops (leaf driver) */
	ddi_power,		/* devo_power */
	qlt_quiesce		/* devo_quiesce (fast reboot support) */
};
227 
/*
 * Fallback definitions for newer link-speed codes, in case the system
 * headers being compiled against do not yet provide them.
 */
#ifndef	PORT_SPEED_16G
#define	PORT_SPEED_16G		32
#endif

#ifndef	PORT_SPEED_32G
#define	PORT_SPEED_32G		64
#endif

#ifndef QL_NAME
#define	QL_NAME "qlt"
#endif

/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	QLT_NAME" "QLT_VERSION,	/* drv_linkinfo: name and version string */
	&qlt_ops,		/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
249 
/* Soft-state anchor for all qlt instances (see ddi_soft_state_init). */
void *qlt_state = NULL;
/* Protects driver-global state shared across instances. */
kmutex_t qlt_global_lock;
/* Count of attached instances; _fini() refuses to unload while nonzero. */
static	uint32_t qlt_loaded_counter = 0;
/* Tunables controlling re-processing of deferred work. */
uint8_t qlt_reprocess_attempt_cnt = 5;
uint32_t qlt_reprocess_delay = 75;	/* default 75 microseconds */

/*
 * PCI/PCI-X bus speed strings, indexed by the 4-bit speed field extracted
 * from REG_CTRL_STATUS in qlt_attach() (24xx only).
 */
static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
			"-X Mode 1 133", "--Invalid--",
			"-X Mode 2 66", "-X Mode 2 100",
			"-X Mode 2 133", " 66" };

/* Always use 64 bit DMA. */
static ddi_dma_attr_t qlt_queue_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};


/*
 * Always use 64 bit DMA.
 * NOTE(review): the two MQ attribute tables below are currently identical
 * to qlt_queue_dma_attr; presumably kept separate so the request/response
 * queue constraints can be tuned independently -- confirm before merging.
 */
static ddi_dma_attr_t qlt_queue_dma_attr_mq_req1 = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};

/* Always use 64 bit DMA. */
static ddi_dma_attr_t qlt_queue_dma_attr_mq_rsp1 = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};


/* qlogic logging */
int enable_extended_logging = 0;
/* Name and handle for the STMF port provider registered in _init(). */
static char qlt_provider_name[] = "qlt";
static struct stmf_port_provider *qlt_pp;
315 
316 int
_init(void)317 _init(void)
318 {
319 	int ret;
320 
321 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
322 	if (ret == 0) {
323 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
324 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
325 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
326 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
327 		qlt_pp->pp_name = qlt_provider_name;
328 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
329 			stmf_free(qlt_pp);
330 			mutex_destroy(&qlt_global_lock);
331 			ddi_soft_state_fini(&qlt_state);
332 			return (EIO);
333 		}
334 		ret = mod_install(&modlinkage);
335 		if (ret != 0) {
336 			(void) stmf_deregister_port_provider(qlt_pp);
337 			stmf_free(qlt_pp);
338 			mutex_destroy(&qlt_global_lock);
339 			ddi_soft_state_fini(&qlt_state);
340 		}
341 	}
342 	return (ret);
343 }
344 
345 int
_fini(void)346 _fini(void)
347 {
348 	int ret;
349 
350 	if (qlt_loaded_counter)
351 		return (EBUSY);
352 	ret = mod_remove(&modlinkage);
353 	if (ret == 0) {
354 		(void) stmf_deregister_port_provider(qlt_pp);
355 		stmf_free(qlt_pp);
356 		mutex_destroy(&qlt_global_lock);
357 		ddi_soft_state_fini(&qlt_state);
358 	}
359 	return (ret);
360 }
361 
362 int
_info(struct modinfo * modinfop)363 _info(struct modinfo *modinfop)
364 {
365 	return (mod_info(&modlinkage, modinfop));
366 }
367 
/*
 * qlt_attach - DDI attach entry point for a qlt HBA instance.
 *
 * Bring-up sequence: allocate soft state and NVRAM/VPD buffers, set up
 * extended-logging trace, map PCI config space, identify the chip family
 * from the device id, map the register BARs (which BARs exist depends on
 * the chip generation), allocate the multi-queue pointer blocks, read
 * NVRAM/VPD/ROM, allocate and bind the DMA queue memory, set up
 * interrupts and per-queue state, create the minor node, apply optional
 * PCI max-read-request / max-payload tuning from qlt.conf, enable
 * interrupts and start the port.
 *
 * Failure handling uses the attach_fail_1 .. attach_fail_10 labels to
 * unwind, in reverse order, exactly what was set up before the failure.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	qlt_state_t	*qlt;
	ddi_device_acc_attr_t	dev_acc_attr;
	uint16_t	did;		/* PCI device id */
	uint16_t	val;		/* encoded read/payload size */
	uint16_t	mr;		/* scratch for PCI cfg read/modify */
	size_t		discard;
	uint_t		ncookies;
	int		max_read_size;
	int		max_payload_size;
	fct_status_t	ret;

	/* No support for suspend resume yet */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	instance = ddi_get_instance(dip);

	cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
	    QLT_NAME, instance, QLT_VERSION);

	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): soft state alloc failed", instance);
		return (DDI_FAILURE);
	}

	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
	    NULL) {
		cmn_err(CE_WARN, "qlt(%d): can't get soft state", instance);
		goto attach_fail_1;
	}

	qlt->instance = instance;

	/* Buffers for the NVRAM and VPD images read later in attach. */
	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
	qlt->vpd = (uint32_t *)kmem_zalloc(QL_24XX_VPD_SIZE, KM_SLEEP);
	qlt->dip = dip;

	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
		goto attach_fail_2;
	}

	EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);

	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): pci_config_setup failed", instance);
		goto attach_fail_3;
	}

	/* Only attach to the device ids this driver knows how to program. */
	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
	if ((did != 0x2422) && (did != 0x2432) &&
	    (did != 0x8432) && (did != 0x2532) &&
	    (did != 0x8001) && (did != 0x2031) &&
	    (did != 0x2071) && (did != 0x2261)) {
		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
		    instance, did);
		goto attach_fail_4;
	}

	/* Classify the chip generation; drives BAR layout and features. */
	if ((did & 0xFFFF) == 0x2071) {
		qlt->qlt_27xx_chip = 1;
		qlt->qlt_fcoe_enabled = 0;
	} else if ((did & 0xFFFF) == 0x2261) {
		qlt->qlt_27xx_chip = 1;
		qlt->qlt_fcoe_enabled = 0;
	} else if ((did & 0xFFFF) == 0x2031) {
		qlt->qlt_83xx_chip = 1;
		qlt->qlt_fcoe_enabled = 0;
	} else if ((did & 0xFFF0) == 0x8000) {
		qlt->qlt_81xx_chip = 1;
		qlt->qlt_fcoe_enabled = 1;
	} else if ((did & 0xFF00) == 0x2500)
		qlt->qlt_25xx_chip = 1;

	/* Little-endian, strictly ordered access for all register maps. */
	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Map the primary register BAR; rnumber differs by chip family. */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		int stat_1;
		off_t regsize_1;

		stat_1 = ddi_dev_regsize(dip, 1, &regsize_1);
		if (stat_1 != DDI_SUCCESS) {
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, reg 1 regsize failed,"
			    " stat %x", instance, stat_1);
			goto attach_fail_4;
		}

		if (ddi_regs_map_setup(dip, 1, &qlt->regs, 0, regsize_1,
		    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
			cmn_err(CE_NOTE, "qlt(%d) ddi_regs_map_setup failed\n",
			    instance);
			goto attach_fail_4;
		}
	} else {
		/*
		 * 24xx and 25xx: rnumber 0 is config space
		 * rnumber 1 is for IO space
		 * rnumber 2 is for MBAR0: ISP, MSIX, PBA
		 */
		if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
		    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
			goto attach_fail_4;
		}
	}

	/*
	 * Map the additional memory BARs used for MSI-X and the per-queue
	 * (QP) register blocks.  83xx/27xx require both; failure is fatal.
	 */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {

		uint32_t w32h;
		uint32_t w32l;
		int stat;
		off_t regsize;

		/* BAR2/BAR3 pair holds the 64-bit MSI-X MBar address. */
		w32l = PCICFG_RD32(qlt, PCI_CONF_BASE2);
		w32h = PCICFG_RD32(qlt, PCI_CONF_BASE3);

		if ((w32h > 0) || w32l > 0) {
			stat = ddi_dev_regsize(dip, 2, &regsize);
			if (stat != DDI_SUCCESS) {
				stmf_trace(qlt->qlt_port_alias,
				    "instance=%d, MSI-X regsize failed,"
				    " stat %x", instance, stat);
			}
			/*
			 * NOTE(review): regsize is an off_t printed with
			 * %x here -- format/argument mismatch; confirm and
			 * fix trace format separately.
			 */
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, MSI-X MEM Bar size %x",
			    instance, regsize);

			stat = ddi_regs_map_setup(dip, 2, &qlt->msix_base, 0,
			    /* ((MQ_MAX_QUEUES * 2) +1) << 2, */
			    regsize,
			    &dev_acc_attr, &qlt->msix_acc_handle);

			if (stat != DDI_SUCCESS || qlt->msix_base == NULL ||
			    qlt->msix_acc_handle == NULL) {

				cmn_err(CE_WARN,
				    "qlt(%d): can't map MBar for MSI-X",
				    instance);
				stmf_trace(qlt->qlt_port_alias,
				    "instance=%d, MSI-X MEM Bar map fail",
				    instance);

				if (qlt->msix_acc_handle != NULL) {
					ddi_regs_map_free(
					    &qlt->msix_acc_handle);
				}
				goto attach_fail_5;
			}
		} else {
			cmn_err(CE_WARN, "qlt(%d): can't setup MBar for MSI-X",
			    instance);
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, No MSI-X MEM Bar", instance);
			goto attach_fail_5;
		}

		/* BAR4/BAR5 pair holds the 64-bit queue-pointer MBar. */
		w32l = PCICFG_RD32(qlt, PCI_CONF_BASE4);
		w32h = PCICFG_RD32(qlt, PCI_CONF_BASE5);

		if ((w32h > 0) || w32l > 0) {
			stat = ddi_dev_regsize(dip, 3, &regsize);
			if (stat != DDI_SUCCESS) {
				stmf_trace(qlt->qlt_port_alias,
				    "instance=%d, MQ regsize failed, stat %x",
				    instance, stat);
			}
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, MQ MEM Bar size %x",
			    instance, regsize);

			/* for 83xx the QP pointers are in the 3rd MBar */
			stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
			    (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
			    &dev_acc_attr, &qlt->mq_reg_acc_handle);

			if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
			    qlt->mq_reg_acc_handle == NULL) {

				cmn_err(CE_WARN, "qlt(%d): can't map QP MBar",
				    instance);
				stmf_trace(qlt->qlt_port_alias,
				    "instance=%d, QP MEM Bar map fail st:%x",
				    instance, stat);

				if (qlt->msix_acc_handle != NULL) {
					ddi_regs_map_free(
					    &qlt->msix_acc_handle);
				}
				if (qlt->mq_reg_acc_handle != NULL) {
					ddi_regs_map_free(
					    &qlt->mq_reg_acc_handle);
				}
				goto attach_fail_5;
			} else {
				qlt->qlt_mq_enabled = 1;
			}
		} else {
			cmn_err(CE_WARN, "qlt(%d): can't setup MBar for QPs",
			    instance);
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, No QPs MEM Bar", instance);

			if (qlt->msix_acc_handle != NULL) {
				ddi_regs_map_free(
				    &qlt->msix_acc_handle);
			}
			goto attach_fail_5;
		}
	} else if (qlt->qlt_81xx_chip) {

		uint32_t w32;
		int stat;

		/* 81xx: queue pointers live in MBar2 (rnumber 3). */
		w32 = PCICFG_RD32(qlt, PCI_CONF_BASE3);
		if (w32 == 0) {

			cmn_err(CE_WARN, "qlt(%d): can't setup MBar2",
			    instance);
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, No MEM Bar2", instance);
			goto attach_fail_5;
		}

		stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
		    (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
		    &dev_acc_attr, &qlt->mq_reg_acc_handle);

		if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
		    qlt->mq_reg_acc_handle == NULL) {

			cmn_err(CE_WARN, "qlt(%d): can't map MBar2",
			    instance);
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, MEM Bar2 map fail", instance);

			if (qlt->mq_reg_acc_handle != NULL) {
				ddi_regs_map_free(&qlt->mq_reg_acc_handle);
			}
			goto attach_fail_5;
		} else {
			qlt->qlt_mq_enabled = 1;
		}
	} else if (qlt->qlt_25xx_chip) {
		uint32_t w32h;
		uint32_t w32l;
		int stat;
		off_t regsize;

		/* MBAR2 rnumber 3 */
		w32l = PCICFG_RD32(qlt, PCI_CONF_BASE3);
		w32h = PCICFG_RD32(qlt, PCI_CONF_BASE4);

		/*
		 * On 25xx the QP MBar is optional: if it is absent or fails
		 * to map, fall back to the single base queue (no goto).
		 */
		if ((w32h > 0) || (w32l > 0)) {
			stat = ddi_dev_regsize(dip, 3, &regsize);
			if (stat != DDI_SUCCESS) {
				stmf_trace(qlt->qlt_port_alias,
				"ISP25xx inst=%d, MQ regsize failed, stat %x",
				    instance, stat);
				EL(qlt, "ISP25xx MQ regsize failed, stat %x\n",
				    stat);

			}
			stmf_trace(qlt->qlt_port_alias,
			    "ISP25xx instance=%d, MQ MEM Bar size %lx",
			    instance, regsize);
			EL(qlt, "ISP25xx MQ MEM Bar (MBAR2) size: %x\n",
			    regsize);

			stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
			    (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
			    &dev_acc_attr, &qlt->mq_reg_acc_handle);
			if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
			    qlt->mq_reg_acc_handle == NULL) {
				cmn_err(CE_WARN,
				    "qlt(%d): ISP25xx can't map QP MBar",
				    instance);
				stmf_trace(qlt->qlt_port_alias,
				    "instance=%d, QP MEM Bar map fail st:%x",
				    instance, stat);
				if (qlt->mq_reg_acc_handle != NULL) {
					ddi_regs_map_free(
					    &qlt->mq_reg_acc_handle);
				}
			} else {
				qlt->qlt_mq_enabled = 1;
			}
		} else {
			stmf_trace(qlt->qlt_port_alias,
			    "instance=%d, No QPs MEM Bar", instance);
			EL(qlt,
			    "ISP25xx can't setup MBar QPs, use baseq\n");
		}
	}

	/*
	 * Allocate the request/response queue pointer blocks: one per
	 * possible queue when multi-queue is on, otherwise just the base
	 * queue's pair.
	 */
	if (qlt->qlt_mq_enabled) {
		qlt->mq_req = kmem_zalloc(
		    ((sizeof (qlt_mq_req_ptr_blk_t)) * MQ_MAX_QUEUES),
		    KM_SLEEP);
		qlt->mq_resp = kmem_zalloc(
		    ((sizeof (qlt_mq_rsp_ptr_blk_t)) * MQ_MAX_QUEUES),
		    KM_SLEEP);
	} else {
		qlt->mq_req = kmem_zalloc(
		    (sizeof (qlt_mq_req_ptr_blk_t)), KM_SLEEP);
		qlt->mq_resp = kmem_zalloc(
		    (sizeof (qlt_mq_rsp_ptr_blk_t)), KM_SLEEP);
	}

	/* 24xx only: report the negotiated PCI/PCI-X bus mode and speed. */
	if (did == 0x2422) {
		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
		pci_bits >>= 8;
		pci_bits &= 0xf;
		if ((pci_bits == 3) || (pci_bits == 7)) {
			cmn_err(CE_NOTE,
			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
			    instance, pci_speeds[pci_bits], pci_bits);
		} else {
			cmn_err(CE_WARN,
			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
			    "(Invalid)", ((pci_bits == 0) ||
			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
			    "32 bit slot ") : "", pci_bits);
		}
	}
	/* Pull the adapter's NVRAM, VPD and ROM images into memory. */
	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
		    (unsigned long long)ret);
		goto attach_fail_5;
	}
	if ((ret = qlt_read_vpd(qlt)) != QLT_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): read vpd failure %llx", instance,
		    (unsigned long long)ret);
		goto attach_fail_5;
	}
	if ((ret = qlt_read_rom_image(qlt)) != QLT_SUCCESS) {
		cmn_err(CE_WARN, "qlt(%d): read rom image failure %llx",
		    instance, (unsigned long long)ret);
		goto attach_fail_5;
	}

	qlt_properties(qlt);

	/* Allocate and bind the contiguous queue DMA memory. */
	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
		goto attach_fail_5;
	}
	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
	    DDI_SUCCESS) {
		goto attach_fail_6;
	}
	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
		goto attach_fail_7;
	}
	/* The queue memory must be physically contiguous (one cookie). */
	if (ncookies != 1)
		goto attach_fail_8;

	/*
	 * Base queue (0), always available
	 */
	qlt->mq_req[0].queue_mem_mq_base_addr =
	    qlt->mq_req[0].mq_ptr =
	    qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
	qlt->mq_resp[0].queue_mem_mq_base_addr =
	    qlt->mq_resp[0].mq_ptr =
	    qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;

	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;

	/* mutex are inited in this function */
	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
		goto attach_fail_8;

	/*
	 * Create one extra queue per additional interrupt vector, up to
	 * MQ_MAX_QUEUES.  A creation failure just caps the queue count.
	 */
	qlt->qlt_queue_cnt = 1;
	if ((qlt->qlt_mq_enabled) && (qlt->intr_cnt > 1)) {
		int i;

		for (i = 1; i < qlt->intr_cnt; i++) {
			if (qlt_mq_create(qlt, i) != QLT_SUCCESS) {
				cmn_err(CE_WARN, "qlt(%d) mq create (%d) "
				    "failed\n", qlt->instance, i);
				break;
			}
			qlt->qlt_queue_cnt++;
			if (qlt->qlt_queue_cnt >= MQ_MAX_QUEUES)
				break;
		}
	}
	EL(qlt, "Queue count = %d\n", qlt->qlt_queue_cnt);

	/* Build the minor-node name and the port alias used in traces. */
	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
	    "qlt%d", instance);
	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
	    "%s,0", qlt->qlt_minor_name);

	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
		goto attach_fail_9;
	}

	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);

	/* Setup PCI cfg space registers */
	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
	if (max_read_size == 11)
		/* property absent (11 is the sentinel default): no tuning */
		goto over_max_read_xfer_setting;
	if (did == 0x2422) {
		/* 24xx encodes the size in bits 2-3 of cfg offset 0x4E. */
		if (max_read_size == 512)
			val = 0;
		else if (max_read_size == 1024)
			val = 1;
		else if (max_read_size == 2048)
			val = 2;
		else if (max_read_size == 4096)
			val = 3;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pci-max-read-request in qlt.conf. Valid values "
			    "for this HBA are 512/1024/2048/4096", instance);
			goto over_max_read_xfer_setting;
		}
		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
		mr = (uint16_t)(mr & 0xfff3);
		mr = (uint16_t)(mr | (val << 2));
		PCICFG_WR16(qlt, 0x4E, mr);
	} else if ((did == 0x2432) || (did == 0x8432) ||
	    (did == 0x2532) || (did == 0x8001) ||
	    (did == 0x2031) || (did == 0x2071) ||
	    (did == 0x2261)) {
		/* PCIe parts encode the size in bits 12-14 of offset 0x54. */
		if (max_read_size == 128)
			val = 0;
		else if (max_read_size == 256)
			val = 1;
		else if (max_read_size == 512)
			val = 2;
		else if (max_read_size == 1024)
			val = 3;
		else if (max_read_size == 2048)
			val = 4;
		else if (max_read_size == 4096)
			val = 5;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pci-max-read-request in qlt.conf. Valid values "
			    "for this HBA are 128/256/512/1024/2048/4096",
			    instance);
			goto over_max_read_xfer_setting;
		}
		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
		mr = (uint16_t)(mr & 0x8fff);
		mr = (uint16_t)(mr | (val << 12));
		PCICFG_WR16(qlt, 0x54, mr);
	} else {
		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
		    "pci-max-read-request for this device (%x)",
		    instance, did);
	}
over_max_read_xfer_setting:;

	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
	if (max_payload_size == 11)
		/* property absent: leave the hardware default alone */
		goto over_max_payload_setting;
	if ((did == 0x2432) || (did == 0x8432) ||
	    (did == 0x2532) || (did == 0x8001) ||
	    (did == 0x2031) || (did == 0x2071) ||
	    (did == 0x2261)) {
		/* Payload size lives in bits 5-7 of PCIe cfg offset 0x54. */
		if (max_payload_size == 128)
			val = 0;
		else if (max_payload_size == 256)
			val = 1;
		else if (max_payload_size == 512)
			val = 2;
		else if (max_payload_size == 1024)
			val = 3;
		else {
			cmn_err(CE_WARN, "qlt(%d) malformed "
			    "pcie-max-payload-size in qlt.conf. Valid values "
			    "for this HBA are 128/256/512/1024",
			    instance);
			goto over_max_payload_setting;
		}
		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
		mr = (uint16_t)(mr & 0xff1f);
		mr = (uint16_t)(mr | (val << 5));
		PCICFG_WR16(qlt, 0x54, mr);
	} else {
		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
		    "pcie-max-payload-size for this device (%x)",
		    instance, did);
	}

over_max_payload_setting:;

	qlt_enable_intr(qlt);

	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS) {
		EL(qlt, "qlt_port_start failed, tear down\n");
		qlt_disable_intr(qlt);
		goto attach_fail_10;
	}

	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/*
	 * Unwind path: each label releases what was acquired after the
	 * previous label's resources, in reverse order of setup.
	 */
attach_fail_10:;
	mutex_destroy(&qlt->qlt_ioctl_lock);
	cv_destroy(&qlt->mbox_cv);
	cv_destroy(&qlt->rp_dereg_cv);
	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
attach_fail_9:;
	qlt_destroy_mutex(qlt);
	qlt_release_intr(qlt);
	(void) qlt_mq_destroy(qlt);

attach_fail_8:;
	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
attach_fail_7:;
	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
attach_fail_6:;
	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
attach_fail_5:;
	if (qlt->mq_resp) {
		kmem_free(qlt->mq_resp,
		    (qlt->qlt_mq_enabled ?
		    (sizeof (qlt_mq_rsp_ptr_blk_t) * MQ_MAX_QUEUES) :
		    (sizeof (qlt_mq_rsp_ptr_blk_t))));
	}
	qlt->mq_resp = NULL;
	if (qlt->mq_req) {
		kmem_free(qlt->mq_req,
		    (qlt->qlt_mq_enabled ?
		    (sizeof (qlt_mq_req_ptr_blk_t) * MQ_MAX_QUEUES) :
		    (sizeof (qlt_mq_req_ptr_blk_t))));
	}
	qlt->mq_req = NULL;

	ddi_regs_map_free(&qlt->regs_acc_handle);
attach_fail_4:;
	pci_config_teardown(&qlt->pcicfg_acc_handle);
attach_fail_3:;
	(void) qlt_el_trace_desc_dtor(qlt);
attach_fail_2:;
	kmem_free(qlt->vpd, QL_24XX_VPD_SIZE);
	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
attach_fail_1:;
	ddi_soft_state_free(qlt_state, instance);
	return (DDI_FAILURE);
}
930 
/*
 * NOTE(review): internal event code; the name suggests it asks the FCT
 * framework to take the port offline -- confirm against fct usage before
 * relying on this description.
 */
#define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
932 
/* ARGSUSED */
/*
 * qlt_detach
 *	DDI detach(9E) entry point.  Verifies the instance is quiescent,
 *	stops the FCT port, then releases every resource acquired by
 *	qlt_attach() in roughly reverse order.
 *
 *	Returns DDI_SUCCESS on full teardown, DDI_FAILURE if the instance
 *	cannot be detached right now (firmware image still held, port not
 *	offline/acked, or port stop failed).
 */
static int
qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	qlt_state_t *qlt;

	int instance;

	instance = ddi_get_instance(dip);
	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
	    NULL) {
		return (DDI_FAILURE);
	}

	/* Refuse to detach while a user-loaded firmware image is held. */
	if (qlt->fw_code01) {
		return (DDI_FAILURE);
	}

	/*
	 * The port must already be offline and the offline transition must
	 * have been acknowledged before we may tear the instance down.
	 */
	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
	    qlt->qlt_state_not_acked) {
		return (DDI_FAILURE);
	}
	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
		return (DDI_FAILURE);
	}

	qlt_disable_intr(qlt);

	/* Free the firmware dump template DMA resources, if allocated. */
	if (qlt->dmp_template_addr != NULL) {
		(void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
		ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
		ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
	}

	/* Free the binary firmware dump buffer and reset its sizes. */
	if (qlt->fw_bin_dump_buf != NULL) {
		kmem_free(qlt->fw_bin_dump_buf, qlt->fw_bin_dump_size);
		qlt->fw_bin_dump_buf = NULL;
		qlt->fw_bin_dump_size = 0;
		qlt->fw_ascii_dump_size = 0;
	}

	if (qlt->qlt_fwdump_buf) {
		kmem_free(qlt->qlt_fwdump_buf, qlt->fw_dump_size);
		qlt->qlt_fwdump_buf = NULL;
	}

	/* Undo minor node, mutexes, interrupts and multiqueue state. */
	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
	qlt_destroy_mutex(qlt);
	qlt_release_intr(qlt);
	if (qlt->qlt_mq_enabled == 1) {
		(void) qlt_mq_destroy(qlt);
	}

	/* Release queue memory DMA resources and the register mapping. */
	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
	ddi_regs_map_free(&qlt->regs_acc_handle);

	/*
	 * The pointer-block arrays were sized differently depending on
	 * whether multiqueue was enabled; free with the matching size.
	 */
	if (qlt->mq_resp) {
		kmem_free(qlt->mq_resp,
		    (qlt->qlt_mq_enabled ?
		    (sizeof (qlt_mq_rsp_ptr_blk_t) * MQ_MAX_QUEUES) :
		    (sizeof (qlt_mq_rsp_ptr_blk_t))));
	}
	qlt->mq_resp = NULL;
	if (qlt->mq_req) {
		kmem_free(qlt->mq_req,
		    (qlt->qlt_mq_enabled ?
		    (sizeof (qlt_mq_req_ptr_blk_t) * MQ_MAX_QUEUES) :
		    (sizeof (qlt_mq_req_ptr_blk_t))));
	}
	qlt->mq_req = NULL;

	/* MSI-X table / MQ register windows exist only in multiqueue mode. */
	if (qlt->qlt_mq_enabled == 1) {
		if ((qlt->msix_acc_handle != NULL) &&
		    ((qlt->qlt_83xx_chip == 1) ||
		    (qlt->qlt_27xx_chip == 1))) {
			ddi_regs_map_free(&qlt->msix_acc_handle);
		}
		ddi_regs_map_free(&qlt->mq_reg_acc_handle);
	}
	pci_config_teardown(&qlt->pcicfg_acc_handle);
	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
	cv_destroy(&qlt->mbox_cv);
	cv_destroy(&qlt->rp_dereg_cv);
	(void) qlt_el_trace_desc_dtor(qlt);
	ddi_soft_state_free(qlt_state, instance);

	return (DDI_SUCCESS);
}
1023 
1024 /*
1025  * qlt_quiesce	quiesce a device attached to the system.
1026  */
1027 static int
qlt_quiesce(dev_info_t * dip)1028 qlt_quiesce(dev_info_t *dip)
1029 {
1030 	qlt_state_t	*qlt;
1031 	uint32_t	timer;
1032 	uint32_t	stat;
1033 
1034 	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
1035 	if (qlt == NULL) {
1036 		/* Oh well.... */
1037 		return (DDI_SUCCESS);
1038 	}
1039 
1040 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
1041 	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
1042 	REG_WR16(qlt, REG_MBOX(1), 0);
1043 	REG_WR16(qlt, REG_MBOX(2), 0);
1044 	REG_WR16(qlt, REG_MBOX(3), 0);
1045 	REG_WR16(qlt, REG_MBOX(4), 0);
1046 	REG_WR16(qlt, REG_MBOX(5), 0);
1047 	REG_WR16(qlt, REG_MBOX(6), 0);
1048 	REG_WR16(qlt, REG_MBOX(7), 0);
1049 	REG_WR16(qlt, REG_MBOX(8), 0);
1050 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
1051 	for (timer = 0; timer < 30000; timer++) {
1052 		stat = REG_RD32(qlt, REG_RISC_STATUS);
1053 		if (stat & RISC_HOST_INTR_REQUEST) {
1054 			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
1055 				REG_WR32(qlt, REG_HCCR,
1056 				    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1057 				break;
1058 			}
1059 			REG_WR32(qlt, REG_HCCR,
1060 			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1061 		}
1062 		drv_usecwait(100);
1063 	}
1064 
1065 
1066 	/* need to ensure no one accesses the hw during the reset 100us */
1067 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1068 		REG_WR32(qlt, REG_INTR_CTRL, 0);
1069 		mutex_enter(&qlt->mbox_lock);
1070 		if (qlt->qlt_mq_enabled == 1) {
1071 			int i;
1072 			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
1073 				mutex_enter(&qlt->mq_req[i].mq_lock);
1074 			}
1075 		}
1076 		mutex_enter(&qlt->mq_req[0].mq_lock);
1077 		drv_usecwait(40);
1078 	}
1079 
1080 	/* Reset the chip. */
1081 	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
1082 	    PCI_X_XFER_CTRL);
1083 	drv_usecwait(100);
1084 
1085 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1086 		mutex_exit(&qlt->mq_req[0].mq_lock);
1087 		if (qlt->qlt_mq_enabled == 1) {
1088 			int i;
1089 			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
1090 				mutex_exit(&qlt->mq_req[i].mq_lock);
1091 			}
1092 		}
1093 		mutex_exit(&qlt->mbox_lock);
1094 	}
1095 
1096 	qlt_disable_intr(qlt);
1097 
1098 	return (DDI_SUCCESS);
1099 }
1100 
/*
 * qlt_enable_intr
 *	Enables the interrupts previously allocated and added by
 *	qlt_setup_msix()/qlt_setup_msi()/qlt_setup_fixed().  Uses the
 *	block-enable interface when the framework advertises it, otherwise
 *	enables each vector individually.  Failures are logged but not
 *	returned to the caller.
 */
static void
qlt_enable_intr(qlt_state_t *qlt)
{
	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
		int stat;

		stat = ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
		if (stat != DDI_SUCCESS) {
			stmf_trace(qlt->qlt_port_alias,
			    "qlt_enable_intr: ddi_intr_block_enable failed:%x",
			    stat);

			cmn_err(CE_WARN, "!qlt(%d): qlt_enable_intr: "
			    "ddi_intr_block_enable failed:%x",
			    qlt->instance, stat);
		}

#ifndef __sparc
		else {
			/* Please see CR6840537, MSI isn't re-enabled x86 */
			off_t offset;
			uint8_t val8;
			ddi_intr_handle_impl_t	*hdlp;

			/*
			 * Location of the MSI control register in PCI
			 * config space differs between chip families.
			 */
			if (qlt->qlt_81xx_chip || qlt->qlt_25xx_chip) {
				offset = (off_t)0x8a;
			} else {
				offset = (off_t)0x66;
			}

			/*
			 * NOTE(review): peeks inside the private
			 * ddi_intr_handle implementation to learn the
			 * vector's state and type.
			 */
			hdlp = (ddi_intr_handle_impl_t *)qlt->htable[0];
			if ((hdlp->ih_state == DDI_IHDL_STATE_ENABLE) &&
			    (hdlp->ih_type == DDI_INTR_TYPE_MSI)) {

				/* get MSI control */
				val8 = pci_config_get8(qlt->pcicfg_acc_handle,
				    offset);

				if ((val8 & 1) == 0) {
					stmf_trace(qlt->qlt_port_alias,
					    "qlt(%d): qlt_enable_intr: "
					    "MSI enable failed (%x)",
					    qlt->instance, val8);

					/* write enable to MSI control */
					val8 = (uint8_t)(val8 | 1);
					pci_config_put8(qlt->pcicfg_acc_handle,
					    offset, val8);

					/* read back to verify */
					val8 = pci_config_get8
					    (qlt->pcicfg_acc_handle, offset);

					if (val8 & 1) {
						stmf_trace(qlt->qlt_port_alias,
						    "qlt(%d): qlt_enable_intr: "
						    "MSI enabled kludge!(%x)",
						    qlt->instance, val8);
					}
				}
			}
		}
#endif /* x86 specific hack */
	} else {
		int i;
		int stat = DDI_SUCCESS;

		/* Enable vectors one at a time, stopping at first failure. */
		for (i = 0;
		    ((i < qlt->intr_cnt) && (stat == DDI_SUCCESS)); i++) {
			stat = ddi_intr_enable(qlt->htable[i]);
		}
		if (stat != DDI_SUCCESS) {
			stmf_trace(qlt->qlt_port_alias,
			    "qlt_enable_intr: ddi_intr_enable failed:%x",
			    stat);

			cmn_err(CE_WARN, "!qlt(%d): qlt_enable_intr: "
			    "ddi_intr_enable failed:%x", qlt->instance, stat);
		}
	}
}
1182 
1183 static void
qlt_disable_intr(qlt_state_t * qlt)1184 qlt_disable_intr(qlt_state_t *qlt)
1185 {
1186 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
1187 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
1188 	} else {
1189 		int i;
1190 		for (i = 0; i < qlt->intr_cnt; i++)
1191 			(void) ddi_intr_disable(qlt->htable[i]);
1192 	}
1193 	qlt->qlt_intr_enabled = 0;
1194 }
1195 
1196 static void
qlt_release_intr(qlt_state_t * qlt)1197 qlt_release_intr(qlt_state_t *qlt)
1198 {
1199 	if (qlt->htable) {
1200 		int i;
1201 		for (i = 0; i < qlt->intr_cnt; i++) {
1202 			(void) ddi_intr_remove_handler(qlt->htable[i]);
1203 			(void) ddi_intr_free(qlt->htable[i]);
1204 		}
1205 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
1206 	}
1207 	qlt->htable = NULL;
1208 	qlt->intr_pri = 0;
1209 	qlt->intr_cnt = 0;
1210 	qlt->intr_size = 0;
1211 	qlt->intr_cap = 0;
1212 }
1213 
1214 static void
qlt_init_mutex(qlt_state_t * qlt)1215 qlt_init_mutex(qlt_state_t *qlt)
1216 {
1217 	if (qlt->qlt_mq_enabled == 1) {
1218 		int i;
1219 
1220 		for (i = 1; i < MQ_MAX_QUEUES; i++) {
1221 			mutex_init(&qlt->mq_req[i].mq_lock, 0, MUTEX_DRIVER,
1222 			    INT2PTR(qlt->intr_pri, void *));
1223 			mutex_init(&qlt->mq_resp[i].mq_lock, 0, MUTEX_DRIVER,
1224 			    INT2PTR(qlt->intr_pri, void *));
1225 		}
1226 	}
1227 	mutex_init(&qlt->mq_req[0].mq_lock, 0, MUTEX_DRIVER,
1228 	    INT2PTR(qlt->intr_pri, void *));
1229 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
1230 	    INT2PTR(qlt->intr_pri, void *));
1231 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
1232 	    INT2PTR(qlt->intr_pri, void *));
1233 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
1234 	    INT2PTR(qlt->intr_pri, void *));
1235 }
1236 
1237 static void
qlt_destroy_mutex(qlt_state_t * qlt)1238 qlt_destroy_mutex(qlt_state_t *qlt)
1239 {
1240 	if (qlt->qlt_mq_enabled == 1) {
1241 		int i;
1242 
1243 		for (i = 1; i < MQ_MAX_QUEUES; i++) {
1244 			mutex_destroy(&qlt->mq_req[i].mq_lock);
1245 			mutex_destroy(&qlt->mq_resp[i].mq_lock);
1246 		}
1247 	}
1248 	mutex_destroy(&qlt->mq_req[0].mq_lock);
1249 	mutex_destroy(&qlt->preq_lock);
1250 	mutex_destroy(&qlt->mbox_lock);
1251 	mutex_destroy(&qlt->intr_lock);
1252 }
1253 
/*
 * qlt_setup_msix
 *	Attempts to allocate and wire up MSI-X interrupt vectors for the
 *	instance.  Requires at least 2 vectors (3 on 81xx); vector 0 runs
 *	the default handler, all others run the response-queue handler.
 *	Returns DDI_SUCCESS, or DDI_FAILURE after releasing anything it
 *	allocated.
 */
static int
qlt_setup_msix(qlt_state_t *qlt)
{
	int count, avail, actual;
	int ret;
	int itype = DDI_INTR_TYPE_MSIX;
	int i;

	/* check 24xx revision */
	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
	    (!qlt->qlt_83xx_chip) && (!qlt->qlt_27xx_chip)) {
		uint8_t rev_id;
		rev_id = (uint8_t)
		    pci_config_get8(qlt->pcicfg_acc_handle, PCI_CONF_REVID);
		/* 24xx parts before revision 3 cannot do MSI-X. */
		if (rev_id < 3) {
			return (DDI_FAILURE);
		}
	}

	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	if (ret != DDI_SUCCESS || count == 0) {
		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
		    count);
		return (DDI_FAILURE);
	}
	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
		    avail);
		return (DDI_FAILURE);
	}
	if (avail < count) {
		stmf_trace(qlt->qlt_port_alias,
		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
	}

	/* 25xx without multiqueue uses just a default + response vector. */
	if ((qlt->qlt_25xx_chip) && (qlt->qlt_mq_enabled == 0)) {
		count = 2;
	}

	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
	/*
	 * NOTE(review): DDI_INTR_ALLOC_NORMAL is passed in the inum slot
	 * and 0 in the behavior slot (qlt_setup_msi() uses the reverse
	 * order).  Harmless only because DDI_INTR_ALLOC_NORMAL == 0;
	 * confirm against ddi_intr_alloc(9F).
	 */
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);

	EL(qlt, "qlt_setup_msix: count=%d,avail=%d,actual=%d\n", count,
	    avail, actual);

	/* we need at least 2 interrupt vectors */
	if (((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) &&
	    (ret != DDI_SUCCESS || actual < 2)) {
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto release_intr;
	} else if ((qlt->qlt_81xx_chip) && (ret != DDI_SUCCESS || actual < 3)) {
		/* 81xx needs a third vector (atio queue). */
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto release_intr;
	} else if (ret != DDI_SUCCESS || actual < 2) {
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	if (actual < count) {
		EL(qlt, "requested: %d, received: %d\n", count, actual);
	}

	qlt->intr_cnt = actual;
	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);
	/* Vector 0 is the default handler; the rest service responses. */
	for (i = 0; i < qlt->intr_cnt; i++) {
		ret = ddi_intr_add_handler(qlt->htable[i],
		    (i != 0) ? qlt_msix_resp_handler :
		    qlt_msix_default_handler,
		    qlt, INT2PTR((uint_t)i, void *));
		if (ret != DDI_SUCCESS) {
			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
			goto release_mutex;
		}
	}

	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
	qlt->intr_flags |= QLT_INTR_MSIX;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(qlt->htable[i]);

	/*
	 * htable is freed and NULLed here, so the qlt_release_intr() call
	 * below only resets the bookkeeping fields (no double free).
	 */
	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}
1358 
/*
 * qlt_setup_msi
 *	Attempts to allocate a single MSI vector and register qlt_isr on
 *	it.  Not supported on 83xx/27xx chips.  Returns DDI_SUCCESS, or
 *	DDI_FAILURE after releasing anything it allocated.
 */
static int
qlt_setup_msi(qlt_state_t *qlt)
{
	int count, avail, actual;
	int itype = DDI_INTR_TYPE_MSI;
	int ret;
	int i;

	/* 83xx and 27xx doesn't do MSI - don't even bother? */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		return (DDI_FAILURE);
	}

	/* get the # of interrupts */
	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	if (ret != DDI_SUCCESS || count == 0) {
		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
		    count);
		return (DDI_FAILURE);
	}
	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
	if (ret != DDI_SUCCESS || avail == 0) {
		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
		    avail);
		return (DDI_FAILURE);
	}
	if (avail < count) {
		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
	}
	/* MSI requires only 1 interrupt. */
	count = 1;

	/* allocate interrupt */
	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS || actual == 0) {
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto free_mem;
	}
	if (actual < count) {
		EL(qlt, "requested: %d, received: %d\n", count, actual);
	}
	qlt->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same.
	 */
	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);

	/* add handler */
	for (i = 0; i < actual; i++) {
		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
		    qlt, INT2PTR((uint_t)i, void *));
		if (ret != DDI_SUCCESS) {
			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
			goto release_mutex;
		}
	}

	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
	qlt->intr_flags |= QLT_INTR_MSI;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(qlt->htable[i]);
free_mem:
	/* htable is NULLed before qlt_release_intr() -> no double free. */
	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}
1443 
/*
 * qlt_setup_fixed
 *	Last-resort interrupt setup: allocates the single legacy (fixed)
 *	interrupt and registers qlt_isr on it.  Returns DDI_SUCCESS, or
 *	DDI_FAILURE after releasing anything it allocated.
 */
static int
qlt_setup_fixed(qlt_state_t *qlt)
{
	int count;
	int actual;
	int ret;
	int itype = DDI_INTR_TYPE_FIXED;

	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
	/* Fixed interrupts can only have one interrupt. */
	if (ret != DDI_SUCCESS || count != 1) {
		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
		    count);
		return (DDI_FAILURE);
	}

	qlt->intr_size = sizeof (ddi_intr_handle_t);
	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
	/*
	 * NOTE(review): as in qlt_setup_msix(), DDI_INTR_ALLOC_NORMAL sits
	 * in the inum slot; works only because its value is 0.
	 */
	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
	if (ret != DDI_SUCCESS || actual != 1) {
		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
		    actual);
		ret = DDI_FAILURE;
		goto free_mem;
	}

	qlt->intr_cnt = actual;
	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
	if (ret != DDI_SUCCESS) {
		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
		ret = DDI_FAILURE;
		goto release_intr;
	}
	qlt_init_mutex(qlt);
	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
	if (ret != DDI_SUCCESS) {
		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
		goto release_mutex;
	}

	qlt->intr_flags |= QLT_INTR_FIXED;
	return (DDI_SUCCESS);

release_mutex:
	qlt_destroy_mutex(qlt);
release_intr:
	(void) ddi_intr_free(qlt->htable[0]);
free_mem:
	/* htable is NULLed before qlt_release_intr() -> no double free. */
	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
	qlt->htable = NULL;
	qlt_release_intr(qlt);
	return (ret);
}
1498 
/*
 * qlt_setup_interrupts
 *	Chooses the best available interrupt type for the instance:
 *	MSI-X, then MSI (each gated by its qlt_enable_* tunable and the
 *	types the framework reports), finally the legacy fixed interrupt.
 *
 *	Note the unusual brace layout: on non-SPARC the MSI/MSI-X attempts
 *	are wrapped in an "if" whose opening and closing braces live in
 *	separate #ifndef __sparc sections.
 */
static int
qlt_setup_interrupts(qlt_state_t *qlt)
{
	int itypes = 0;

/*
 * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
 */
#ifndef __sparc
	if ((qlt_enable_msi != 0) || (qlt_enable_msix != 0)) {
#endif
	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
		itypes = DDI_INTR_TYPE_FIXED;
	}
	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
	if (qlt_enable_msi && (itypes & DDI_INTR_TYPE_MSI)) {
		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
			return (DDI_SUCCESS);
	}
#ifndef __sparc
	}
#endif
	/* Fall back to the legacy fixed interrupt. */
	return (qlt_setup_fixed(qlt));
}
1526 
/*
 * qlt_vpd_findtag
 *	Walks the 24xx VPD buffer looking for the tag named by 'opcode'
 *	(a NUL-terminated tag string, or a one-byte binary tag such as
 *	VPD_TAG_END).  Returns a pointer to the start of the matching tag
 *	within the buffer, or NULL if not found / bad arguments.
 */
static uint8_t *
qlt_vpd_findtag(qlt_state_t *qlt, uint8_t *vpdbuf, int8_t *opcode)
{
	uint8_t	*vpd = vpdbuf;
	uint8_t	*end = vpdbuf + QL_24XX_VPD_SIZE;
	uint32_t found = 0;

	if (vpdbuf == NULL || opcode == NULL) {
		EL(qlt, "null parameter passed!\n");
		return (NULL);
	}

	while (vpd < end) {
		/* The end tag terminates the list; match only if asked for. */
		if (vpd[0] == VPD_TAG_END) {
			if (opcode[0] == VPD_TAG_END) {
				found = 1;
			} else {
				found = 0;
			}
			break;
		}

		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
			found = 1;
			break;
		}

		/*
		 * Advance past this tag.  Product-id tags carry a 16-bit
		 * length at bytes 1-2 plus a 3-byte header; LRT/LRTC are
		 * bare 3-byte tags; all others carry an 8-bit length in
		 * byte 2.  NOTE(review): the strncmp compares only the
		 * first byte of VPD_TAG_PRODID cast to a pointer --
		 * presumably the tag macros are string constants; verify.
		 */
		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
			vpd += (vpd[2] << 8) + vpd[1] + 3;
		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
			vpd += 3;
		} else {
			vpd += vpd[2] +3;
		}
	}
	return (found == 1 ? vpd : NULL);
}
1564 
1565 /*
1566  * qlt_vpd_lookup
1567  *      Return the VPD data for the request VPD tag
1568  *
1569  * Input:
1570  *      qlt      = adapter state pointer.
1571  *      opcode  = VPD opcode to find (must be NULL terminated).
1572  *      bp      = Pointer to returned data buffer.
1573  *      bplen   = Length of returned data buffer.
1574  *
1575  * Returns:
1576  *      Length of data copied into returned data buffer.
1577  *              >0 = VPD data field (NULL terminated)
1578  *               0 = no data.
1579  *              -1 = Could not find opcode in vpd buffer / error.
1580  *
1581  * Context:
1582  *      Kernel context.
1583  *
1584  * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1585  *
1586  */
1587 static int
qlt_vpd_lookup(qlt_state_t * qlt,uint8_t * opcode,uint8_t * bp,int32_t bplen)1588 qlt_vpd_lookup(qlt_state_t *qlt, uint8_t *opcode, uint8_t *bp,
1589     int32_t bplen)
1590 {
1591 	uint8_t	*vpd = NULL;
1592 	uint8_t	*vpdbuf = NULL;
1593 	int32_t	len = -1;
1594 
1595 	if (opcode == NULL || bp == NULL || bplen < 1) {
1596 		EL(qlt, "invalid parameter passed: opcode=%ph, "
1597 		    "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1598 		return (len);
1599 	}
1600 
1601 	vpdbuf = (uint8_t *)qlt->vpd;
1602 	if ((vpd = qlt_vpd_findtag(qlt, vpdbuf, (int8_t *)opcode)) != NULL) {
1603 		/*
1604 		 * Found the tag
1605 		 */
1606 		if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1607 		    *opcode == VPD_TAG_LRTC) {
1608 			/*
1609 			 * We found it, but the tag doesn't have a data
1610 			 * field.
1611 			 */
1612 			len = 0;
1613 		} else if (!(strncmp((char *)vpd, (char *)
1614 		    VPD_TAG_PRODID, 1))) {
1615 			len = vpd[2] << 8;
1616 			len += vpd[1];
1617 		} else {
1618 			len = vpd[2];
1619 		}
1620 
1621 		/*
1622 		 * Make sure that the vpd len does not exceed the
1623 		 * vpd end
1624 		 */
1625 		if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) {
1626 			EL(qlt, "vpd tag len (%xh) exceeds vpd buffer "
1627 			    "length\n", len);
1628 			len = -1;
1629 		}
1630 	} else {
1631 		EL(qlt, "Cna't find vpd tag \n");
1632 		return (-1);
1633 	}
1634 
1635 	if (len >= 0) {
1636 		/*
1637 		 * make sure we don't exceed callers buffer space len
1638 		 */
1639 		if (len > bplen) {
1640 			len = bplen - 1;
1641 		}
1642 		/* copy the data back */
1643 		(void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len);
1644 		bp[len] = '\0';
1645 	} else {
1646 		/* error -- couldn't find tag */
1647 		bp[0] = '\0';
1648 		if (opcode[1] != '\0') {
1649 			EL(qlt, "unable to find tag '%s'\n", opcode);
1650 		} else {
1651 			EL(qlt, "unable to find tag '%xh'\n", opcode[0]);
1652 		}
1653 	}
1654 	return (len);
1655 }
1656 
1657 void
qlt_get_rom_version(qlt_state_t * qlt,caddr_t orv)1658 qlt_get_rom_version(qlt_state_t *qlt, caddr_t orv)
1659 {
1660 	int i;
1661 	char bios0_str[32];
1662 	char fcode_str[32];
1663 	char efi_str[32];
1664 	char hppa_str[32];
1665 	char tmp[80];
1666 	uint32_t bios_cnt = 0;
1667 	uint32_t fcode_cnt = 0;
1668 	boolean_t last_image = FALSE;
1669 
1670 	/* collect right rom_version from image[] */
1671 	i = 0;
1672 	do {
1673 		if (qlt->rimage[0].header.signature[0] != PCI_HEADER0) {
1674 			break;
1675 		}
1676 
1677 		if (qlt->rimage[i].data.codetype == PCI_CODE_X86PC) {
1678 			/* BIOS */
1679 			if (bios_cnt == 0) {
1680 				(void) snprintf(bios0_str,
1681 				    32,
1682 				    "%d.%02d",
1683 				    qlt->rimage[i].data.
1684 				    revisionlevel[1],
1685 				    qlt->rimage[i].data.
1686 				    revisionlevel[0]);
1687 				(void) snprintf(tmp, 80,
1688 				    " BIOS: %s;", bios0_str);
1689 				(void) strcat(orv, tmp);
1690 			}
1691 			bios_cnt++;
1692 		} else if (qlt->rimage[i].data.codetype == PCI_CODE_FCODE) {
1693 			/* FCode */
1694 			if (fcode_cnt == 0) {
1695 				(void) snprintf(fcode_str,
1696 				    32,
1697 				    "%d.%02d",
1698 				    qlt->rimage[i].data.revisionlevel[1],
1699 				    qlt->rimage[i].data.revisionlevel[0]);
1700 				(void) snprintf(tmp, 80,
1701 				    " FCode: %s;", fcode_str);
1702 				(void) strcat(orv, tmp);
1703 			}
1704 			fcode_cnt++;
1705 		} else if (qlt->rimage[i].data.codetype == PCI_CODE_EFI) {
1706 			/* EFI */
1707 			(void) snprintf(efi_str,
1708 			    32,
1709 			    "%d.%02d",
1710 			    qlt->rimage[i].data.revisionlevel[1],
1711 			    qlt->rimage[i].data.revisionlevel[0]);
1712 			(void) snprintf(tmp, 80, " EFI: %s;", efi_str);
1713 			(void) strcat(orv, tmp);
1714 		} else if (qlt->rimage[i].data.codetype == PCI_CODE_HPPA) {
1715 			/* HPPA */
1716 			(void) snprintf(hppa_str,
1717 			    32,
1718 			    "%d.%02d",
1719 			    qlt->rimage[i].data.revisionlevel[1],
1720 			    qlt->rimage[i].data.revisionlevel[0]);
1721 			(void) snprintf(orv, 80, " HPPA: %s;", hppa_str);
1722 			(void) strcat(orv, tmp);
1723 		} else if (qlt->rimage[i].data.codetype == PCI_CODE_FW) {
1724 			EL(qlt, "fw infor skip\n");
1725 		} else {
1726 			/* Unknown */
1727 			EL(qlt, "unknown image\n");
1728 			break;
1729 		}
1730 
1731 		if (qlt->rimage[i].data.indicator == PCI_IND_LAST_IMAGE) {
1732 			last_image = TRUE;
1733 			break;
1734 		}
1735 
1736 		i ++;
1737 	} while ((last_image != TRUE) && (i < 6));
1738 
1739 	if (last_image != TRUE) {
1740 		/* No boot image detected */
1741 		(void) snprintf(orv, FCHBA_OPTION_ROM_VERSION_LEN, "%s",
1742 		    "No boot image detected");
1743 	}
1744 }
1745 
1746 /*
1747  * Filling the hba attributes
1748  */
1749 void
qlt_populate_hba_fru_details(struct fct_local_port * port,struct fct_port_attrs * port_attrs)1750 qlt_populate_hba_fru_details(struct fct_local_port *port,
1751     struct fct_port_attrs *port_attrs)
1752 {
1753 	int len;
1754 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1755 
1756 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
1757 	    "QLogic Corp.");
1758 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
1759 	    "%s", QLT_NAME);
1760 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
1761 	    "%s", QLT_VERSION);
1762 	/* get serial_number from vpd data */
1763 	if (qlt_vpd_lookup(qlt, (uint8_t *)VPD_TAG_SN, (uint8_t *)
1764 	    port_attrs->serial_number, FCHBA_SERIAL_NUMBER_LEN) == -1) {
1765 			port_attrs->serial_number[0] = '\0';
1766 	}
1767 	port_attrs->hardware_version[0] = '\0';
1768 
1769 	(void) snprintf(port_attrs->firmware_version,
1770 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
1771 	    qlt->fw_minor, qlt->fw_subminor);
1772 
1773 	/* Get FCode version */
1774 	qlt_get_rom_version(qlt, (caddr_t)&port_attrs->option_rom_version[0]);
1775 
1776 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
1777 	    qlt->nvram->subsystem_vendor_id[1] << 8;
1778 
1779 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1780 	    qlt->nvram->max_frame_length[0];
1781 
1782 	port_attrs->supported_cos = 0x10000000;
1783 
1784 	if (qlt->qlt_fcoe_enabled) {
1785 		port_attrs->supported_speed = PORT_SPEED_10G;
1786 	} else if (qlt->qlt_27xx_chip) {
1787 		if ((qlt->qlt_27xx_speed & MAX_SPEED_MASK) == MAX_SPEED_32G) {
1788 			port_attrs->supported_speed = PORT_SPEED_8G |
1789 			    PORT_SPEED_16G | PORT_SPEED_32G;
1790 		} else {
1791 			port_attrs->supported_speed = PORT_SPEED_4G |
1792 			    PORT_SPEED_8G | PORT_SPEED_16G;
1793 		}
1794 	} else if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1795 		port_attrs->supported_speed = PORT_SPEED_4G |
1796 		    PORT_SPEED_8G | PORT_SPEED_16G;
1797 	} else if (qlt->qlt_25xx_chip) {
1798 		port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
1799 		    PORT_SPEED_8G;
1800 	} else {
1801 		port_attrs->supported_speed = PORT_SPEED_1G |
1802 		    PORT_SPEED_2G | PORT_SPEED_4G;
1803 	}
1804 
1805 	/* limit string length to nvr model_name length */
1806 	len = ((qlt->qlt_81xx_chip) || (qlt->qlt_83xx_chip) ||
1807 	    (qlt->qlt_27xx_chip)) ? 16 : 8;
1808 	(void) snprintf(port_attrs->model,
1809 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1810 	    "%s", qlt->nvram->model_name);
1811 
1812 	(void) snprintf(port_attrs->model_description,
1813 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1814 	    FCHBA_MODEL_DESCRIPTION_LEN),
1815 	    "%s", qlt->nvram->model_name);
1816 }
1817 
/* ARGSUSED */
/*
 * qlt_info
 *	FCT port-information callback.  Currently handles only
 *	FC_TGT_PORT_RLS: issues a MBC_GET_STATUS_COUNTS mailbox command to
 *	fetch the 156-byte link-status block from firmware and unpacks the
 *	little-endian counters into the caller's fct_port_link_status_t.
 *	Returns FCT_SUCCESS, FCT_FAILURE (bad cmd / small buffer), or
 *	FCT_ALLOC_FAILURE.
 */
fct_status_t
qlt_info(uint32_t cmd, fct_local_port_t *port,
    void *arg, uint8_t *buf, uint32_t *bufsizep)
{
	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
	mbox_cmd_t	*mcp;
	fct_status_t	ret = FCT_SUCCESS;
	uint8_t		*p;
	fct_port_link_status_t	*link_status;

	switch (cmd) {
	case FC_TGT_PORT_RLS:
		/* Link statistics only make sense while online. */
		if (qlt->qlt_state != FCT_STATE_ONLINE) {
			break;
		}
		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
			    "fct_port_link_status_t=%xh\n", *bufsizep,
			    sizeof (fct_port_link_status_t));
			ret = FCT_FAILURE;
			break;
		}
		/* send mailbox command to get link status */
		mcp = qlt_alloc_mailbox_command(qlt, 156);
		if (mcp == NULL) {
			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
			ret = FCT_ALLOC_FAILURE;
			break;
		}

		/* GET LINK STATUS count */
		mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
		mcp->to_fw[8] = 156/4;	/* buffer size in 32-bit words */
		mcp->to_fw_mask |= BIT_1 | BIT_8;
		mcp->from_fw_mask |= BIT_1 | BIT_2;

		ret = qlt_mailbox_command(qlt, mcp);
		if (ret != QLT_SUCCESS) {
			EL(qlt, "qlt_mbox_command=6dh status=%llxh\n", ret);
			qlt_free_mailbox_command(qlt, mcp);
			break;
		}
		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);

		/* Unpack the little-endian counters returned by firmware. */
		p = mcp->dbuf->db_sglist[0].seg_addr;
		link_status = (fct_port_link_status_t *)buf;
		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
		link_status->PrimitiveSeqProtocolErrorCount =
		    LE_32(*((uint32_t *)(p + 12)));
		link_status->InvalidTransmissionWordCount =
		    LE_32(*((uint32_t *)(p + 16)));
		link_status->InvalidCRCCount =
		    LE_32(*((uint32_t *)(p + 20)));

		qlt_free_mailbox_command(qlt, mcp);
		break;
	default:
		EL(qlt, "Unknown cmd=%xh\n", cmd);
		ret = FCT_FAILURE;
		break;
	}
	return (ret);
}
1884 
/*
 * qlt_port_start
 *	Initializes DMA memory and the DMA handle pool, allocates the FCT
 *	local-port and dbuf-store structures, wires up all FCT callbacks,
 *	and registers the port with the FCT framework.  'arg' is really
 *	the qlt_state_t for the instance.  Returns QLT_SUCCESS or
 *	QLT_FAILURE (after unwinding whatever was allocated).
 */
fct_status_t
qlt_port_start(caddr_t arg)
{
	qlt_state_t *qlt = (qlt_state_t *)arg;
	fct_local_port_t *port;
	fct_dbuf_store_t *fds;
	fct_status_t ret;

	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
		return (FCT_FAILURE);
	}

	/* Initialize the ddi_dma_handle free pool */
	qlt_dma_handle_pool_init(qlt);

	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
	if (port == NULL) {
		goto qlt_pstart_fail_1;
	}
	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
	if (fds == NULL) {
		goto qlt_pstart_fail_2;
	}
	qlt->qlt_port = port;
	/* Data-buffer store callbacks used by FCT for DMA buffers. */
	fds->fds_alloc_data_buf = qlt_dmem_alloc;
	fds->fds_free_data_buf = qlt_dmem_free;
	fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
	fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
	fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
	fds->fds_copy_threshold = (uint32_t)MMU_PAGESIZE;
	fds->fds_fca_private = (void *)qlt;
	/*
	 * Since we keep everything in the state struct and dont allocate any
	 * port private area, just use that pointer to point to the
	 * state struct.
	 */
	port->port_fca_private = qlt;
	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
	/* WWNs come from NVRAM; render printable forms for logging. */
	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
	port->port_default_alias = qlt->qlt_port_alias;
	port->port_pp = qlt_pp;
	port->port_fds = fds;
	port->port_max_logins = QLT_MAX_LOGINS;
	port->port_max_xchges = QLT_MAX_XCHGES;
	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
	/* FCT entry points implemented by this driver. */
	port->port_get_link_info = qlt_get_link_info;
	port->port_register_remote_port = qlt_register_remote_port;
	port->port_deregister_remote_port = qlt_deregister_remote_port;
	port->port_send_cmd = qlt_send_cmd;
	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
	port->port_send_cmd_response = qlt_send_cmd_response;
	port->port_abort_cmd = qlt_abort_cmd;
	port->port_ctl = qlt_ctl;
	port->port_flogi_xchg = qlt_do_flogi;
	port->port_populate_hba_details = qlt_populate_hba_fru_details;
	port->port_info = qlt_info;
	port->port_fca_version = FCT_FCA_MODREV_1;

	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
		goto qlt_pstart_fail_2_5;
	}

	EL(qlt, "Qlogic qlt(%d) "
	    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x:"
	    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
	    qlt->instance,
	    qlt->nvram->port_name[0],
	    qlt->nvram->port_name[1],
	    qlt->nvram->port_name[2],
	    qlt->nvram->port_name[3],
	    qlt->nvram->port_name[4],
	    qlt->nvram->port_name[5],
	    qlt->nvram->port_name[6],
	    qlt->nvram->port_name[7],
	    qlt->nvram->node_name[0],
	    qlt->nvram->node_name[1],
	    qlt->nvram->node_name[2],
	    qlt->nvram->node_name[3],
	    qlt->nvram->node_name[4],
	    qlt->nvram->node_name[5],
	    qlt->nvram->node_name[6],
	    qlt->nvram->node_name[7]);

	return (QLT_SUCCESS);
#if 0
qlt_pstart_fail_3:
	(void) fct_deregister_local_port(port);
#endif
qlt_pstart_fail_2_5:
	fct_free(fds);
qlt_pstart_fail_2:
	fct_free(port);
	qlt->qlt_port = NULL;
qlt_pstart_fail_1:
	qlt_dma_handle_pool_fini(qlt);
	qlt_dmem_fini(qlt);
	return (QLT_FAILURE);
}
1990 
1991 fct_status_t
qlt_port_stop(caddr_t arg)1992 qlt_port_stop(caddr_t arg)
1993 {
1994 	qlt_state_t *qlt = (qlt_state_t *)arg;
1995 	fct_status_t ret;
1996 
1997 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1998 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1999 		return (QLT_FAILURE);
2000 	}
2001 	fct_free(qlt->qlt_port->port_fds);
2002 	fct_free(qlt->qlt_port);
2003 	qlt_dma_handle_pool_fini(qlt);
2004 	qlt->qlt_port = NULL;
2005 	qlt_dmem_fini(qlt);
2006 	return (QLT_SUCCESS);
2007 }
2008 
2009 /*
2010  * Called by framework to init the HBA.
2011  * Can be called in the middle of I/O. (Why ??)
2012  * Should make sure sane state both before and after the initialization
2013  */
2014 fct_status_t
qlt_port_online(qlt_state_t * qlt)2015 qlt_port_online(qlt_state_t *qlt)
2016 {
2017 	uint64_t	da;
2018 	int		instance, i, j;
2019 	fct_status_t	ret;
2020 	uint16_t	rcount;
2021 	caddr_t		icb;
2022 	mbox_cmd_t	*mcp;
2023 	uint8_t		*elsbmp;
2024 
2025 	instance = ddi_get_instance(qlt->dip);
2026 
2027 	/* XXX Make sure a sane state */
2028 
2029 	if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
2030 		cmn_err(CE_NOTE, "qlt(%d): reset chip failed %llx",
2031 		    qlt->instance, (long long)ret);
2032 		return (ret);
2033 	}
2034 
2035 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
2036 
2037 	/* Get resource count */
2038 	REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
2039 	ret = qlt_raw_mailbox_command(qlt);
2040 	rcount = REG_RD16(qlt, REG_MBOX(3));
2041 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2042 	if (ret != QLT_SUCCESS) {
2043 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
2044 		return (ret);
2045 	}
2046 
2047 	/* Enable PUREX */
2048 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
2049 	REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
2050 	REG_WR16(qlt, REG_MBOX(2), 0x0);
2051 	REG_WR16(qlt, REG_MBOX(3), 0x0);
2052 	ret = qlt_raw_mailbox_command(qlt);
2053 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2054 	if (ret != QLT_SUCCESS) {
2055 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
2056 		cmn_err(CE_NOTE, "Enable PUREX failed");
2057 		return (ret);
2058 	}
2059 
2060 	/* Pass ELS bitmap to fw */
2061 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
2062 	REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
2063 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
2064 	bzero(elsbmp, 32);
2065 	da = qlt->queue_mem_cookie.dmac_laddress;
2066 	da += MBOX_DMA_MEM_OFFSET;
2067 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2068 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2069 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2070 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2071 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
2072 	SETELSBIT(elsbmp, ELS_OP_LOGO);
2073 	SETELSBIT(elsbmp, ELS_OP_ABTX);
2074 /*	SETELSBIT(elsbmp, ELS_OP_ECHO); till fct handles it */
2075 	SETELSBIT(elsbmp, ELS_OP_PRLI);
2076 	SETELSBIT(elsbmp, ELS_OP_PRLO);
2077 	SETELSBIT(elsbmp, ELS_OP_SCN);
2078 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
2079 	SETELSBIT(elsbmp, ELS_OP_PDISC);
2080 	SETELSBIT(elsbmp, ELS_OP_ADISC);
2081 	SETELSBIT(elsbmp, ELS_OP_RSCN);
2082 	SETELSBIT(elsbmp, ELS_OP_RNID);
2083 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
2084 	    DDI_DMA_SYNC_FORDEV);
2085 	ret = qlt_raw_mailbox_command(qlt);
2086 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2087 	if (ret != QLT_SUCCESS) {
2088 		EL(qlt, "qlt_raw_mailbox_command=59h status=llxh\n", ret);
2089 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
2090 		    "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
2091 		    elsbmp[1]);
2092 		return (ret);
2093 	}
2094 
2095 	/* Init queue pointers */
2096 	if (qlt->qlt_mq_enabled == 1) {
2097 		uint16_t qi;
2098 
2099 		for (qi = 0; qi < MQ_MAX_QUEUES; qi++) {
2100 			MQBAR_WR32(qlt,
2101 			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN, 0);
2102 			MQBAR_WR32(qlt,
2103 			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT, 0);
2104 			MQBAR_WR32(qlt,
2105 			    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN, 0);
2106 			MQBAR_WR32(qlt,
2107 			    (qi * MQBAR_REG_OFFSET) +
2108 			    MQBAR_RESP_OUT, 0);
2109 		}
2110 	} else {
2111 		REG_WR32(qlt, REG_REQ_IN_PTR, 0);
2112 		REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
2113 		REG_WR32(qlt, REG_RESP_IN_PTR, 0);
2114 		REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
2115 	}
2116 
2117 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
2118 		REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
2119 		REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
2120 		REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
2121 		REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
2122 	}
2123 	qlt->mq_req[0].mq_ndx_to_fw = qlt->mq_req[0].mq_ndx_from_fw = 0;
2124 	qlt->mq_req[0].mq_available = REQUEST_QUEUE_ENTRIES - 1;
2125 
2126 	if (qlt->qlt_mq_enabled == 1) {
2127 		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2128 			qlt->mq_req[i].mq_ndx_to_fw = 0;
2129 			qlt->mq_req[i].mq_ndx_from_fw = 0;
2130 			qlt->mq_req[i].mq_available =
2131 			    REQUEST_QUEUE_MQ_ENTRIES - 1;
2132 		}
2133 	}
2134 	qlt->mq_resp[0].mq_ndx_to_fw = qlt->mq_resp[0].mq_ndx_from_fw = 0;
2135 
2136 	if (qlt->qlt_mq_enabled == 1) {
2137 		caddr_t resp;
2138 
2139 		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2140 			qlt->mq_resp[i].mq_ndx_to_fw = 0;
2141 			qlt->mq_resp[i].mq_ndx_from_fw = 0;
2142 			for (j = 0; j < RESPONSE_QUEUE_MQ_ENTRIES; j++) {
2143 				resp = &qlt->mq_resp[i].mq_ptr[j << 6];
2144 				QMEM_WR32_RSPQ(qlt, i, resp+0x3c, 0xdeadbeef);
2145 			}
2146 		}
2147 	}
2148 
2149 	for (i = 0; i < ATIO_QUEUE_ENTRIES; i++) {
2150 		caddr_t atio;
2151 
2152 		atio = &qlt->atio_ptr[i << 6];
2153 		QMEM_WR32(qlt, atio+0x3c, 0xdeadbeef);
2154 	}
2155 
2156 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
2157 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
2158 
2159 	/*
2160 	 * XXX support for tunables. Also should we cache icb ?
2161 	 */
2162 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2163 	    (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2164 	    (qlt->qlt_mq_enabled))) {
2165 		/*
2166 		 * allocate extra 64 bytes for Extended init control block,
2167 		 * with separation to allow for a minimal MID section.
2168 		 */
2169 		mcp = qlt_alloc_mailbox_command(qlt, 0xE0);
2170 	} else {
2171 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
2172 	}
2173 	if (mcp == NULL) {
2174 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
2175 		return (STMF_ALLOC_FAILURE);
2176 	}
2177 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
2178 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2179 	    (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2180 	    (qlt->qlt_mq_enabled))) {
2181 		bzero(icb, 0xE0);
2182 	} else {
2183 		bzero(icb, 0x80);
2184 	}
2185 	da = qlt->queue_mem_cookie.dmac_laddress;
2186 	DMEM_WR16(qlt, icb, 1);		/* Version */
2187 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
2188 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
2189 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
2190 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
2191 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
2192 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
2193 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
2194 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
2195 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
2196 	if ((!qlt->qlt_83xx_chip) && (!qlt->qlt_81xx_chip) &&
2197 	    (!qlt->qlt_27xx_chip)) {
2198 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
2199 	}
2200 	if ((!qlt->qlt_83xx_chip) || (!qlt->qlt_27xx_chip)) {
2201 		DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
2202 	}
2203 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
2204 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
2205 	if ((!qlt->qlt_83xx_chip) || (!qlt->qlt_27xx_chip)) {
2206 		DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
2207 	}
2208 	/* XXX: all hba model atio/resp 0  use vector 0 */
2209 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
2210 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
2211 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
2212 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
2213 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2214 	    (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2215 	    (qlt->qlt_mq_enabled))) {
2216 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
2217 
2218 		/* fw options 1 */
2219 		if (qlt->qlt_fcoe_enabled) {
2220 			DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4);
2221 		} else {
2222 			DMEM_WR32(qlt, icb+0x5c,
2223 			    BIT_11 | BIT_5 | BIT_4 | BIT_2 | BIT_1 | BIT_0);
2224 		}
2225 		/* fw options 2 */
2226 		if (qlt->qlt_mq_enabled) {
2227 			if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
2228 				if (qlt->qlt_fcoe_enabled) {
2229 					DMEM_WR32(qlt, icb+0x60,
2230 					    BIT_26 | BIT_23 | BIT_22);
2231 				} else {
2232 					DMEM_WR32(qlt, icb+0x60,
2233 					    BIT_26 | BIT_23 | BIT_22 | BIT_5);
2234 				}
2235 			} else {
2236 				DMEM_WR32(qlt,
2237 				    icb+0x60, BIT_26 | BIT_23 | BIT_22 | BIT_5);
2238 			}
2239 		}
2240 
2241 		/* fw options 3 */
2242 		if (qlt->qlt_fcoe_enabled) {
2243 			DMEM_WR32(qlt, icb+0x64, BIT_4);
2244 		} else {
2245 			DMEM_WR32(qlt, icb+0x64,
2246 			    BIT_14 | BIT_8 | BIT_7 | BIT_4);
2247 		}
2248 
2249 		if (qlt->qlt_mq_enabled) {
2250 			DMEM_WR16(qlt, icb+0x68, 5); /* QoS priority = 5 */
2251 		}
2252 
2253 		DMEM_WR32(qlt, icb+0x70,
2254 		    qlt81nvr->enode_mac[0] |
2255 		    (qlt81nvr->enode_mac[1] << 8) |
2256 		    (qlt81nvr->enode_mac[2] << 16) |
2257 		    (qlt81nvr->enode_mac[3] << 24));
2258 		DMEM_WR16(qlt, icb+0x74,
2259 		    qlt81nvr->enode_mac[4] |
2260 		    (qlt81nvr->enode_mac[5] << 8));
2261 	} else {
2262 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
2263 		    BIT_2 | BIT_1 | BIT_0);
2264 		DMEM_WR32(qlt, icb+0x60, BIT_5);
2265 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
2266 		    BIT_4);
2267 
2268 /* null MID setup */
2269 		DMEM_WR16(qlt, icb+0x80, 1); /* VP count 1 */
2270 	}
2271 
2272 	if (qlt->qlt_fcoe_enabled) {
2273 		qlt_dmem_bctl_t		*bctl;
2274 		uint32_t		index;
2275 		caddr_t			src;
2276 		caddr_t			dst;
2277 		qlt_nvram_81xx_t	*qlt81nvr;
2278 
2279 		dst = icb+0xA0;
2280 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
2281 		src = (caddr_t)&qlt81nvr->ext_blk;
2282 		index = sizeof (qlt_ext_icb_81xx_t);
2283 
2284 		/* Use defaults for cases where we find nothing in NVR */
2285 		if ((qlt->qlt_83xx_chip) || (*src == 0)) {
2286 			if (*src == 0) {
2287 				EL(qlt, "nvram eicb=null\n");
2288 				cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
2289 				    instance);
2290 			}
2291 			qlt81nvr->ext_blk.version[0] = 1;
2292 /*
2293  * not yet, for !FIP firmware at least
2294  *
2295  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
2296  */
2297 #ifdef _LITTLE_ENDIAN
2298 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
2299 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
2300 #else
2301 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
2302 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
2303 #endif
2304 		}
2305 
2306 		while (index--) {
2307 			*dst++ = *src++;
2308 		}
2309 
2310 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2311 		da = bctl->bctl_dev_addr + 0xA0; /* base addr of eicb (phys) */
2312 
2313 		mcp->to_fw[11] = LSW(LSD(da));
2314 		mcp->to_fw[10] = MSW(LSD(da));
2315 		mcp->to_fw[13] = LSW(MSD(da));
2316 		mcp->to_fw[12] = MSW(MSD(da));
2317 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
2318 		    0xffff);
2319 
2320 		/* eicb enable */
2321 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
2322 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
2323 		    BIT_1;
2324 	}
2325 
2326 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
2327 	if (((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2328 	    (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2329 	    (qlt->qlt_mq_enabled))) && (qlt->fw_attr & BIT_6)) {
2330 		mcp->to_fw[0] = MBC_INITIALIZE_MULTI_ID_FW;
2331 	} else {
2332 		mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
2333 	}
2334 
2335 	/*
2336 	 * This is the 1st command after adapter initialize which will
2337 	 * use interrupts and regular mailbox interface.
2338 	 */
2339 	qlt->qlt_intr_enabled = 1;
2340 	qlt->mbox_io_state = MBOX_STATE_READY;
2341 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2342 	/* Issue mailbox to firmware */
2343 	ret = qlt_mailbox_command(qlt, mcp);
2344 	if (ret != QLT_SUCCESS) {
2345 		EL(qlt, "qlt_mbox_command=48h/60h status=%llxh\n", ret);
2346 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
2347 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
2348 		qlt_free_mailbox_command(qlt, mcp);
2349 		return (ret);
2350 	}
2351 
2352 	mcp->to_fw_mask = BIT_0;
2353 	mcp->from_fw_mask = BIT_0 | BIT_1;
2354 	mcp->to_fw[0] = 0x28;
2355 	ret = qlt_mailbox_command(qlt, mcp);
2356 	if (ret != QLT_SUCCESS) {
2357 		EL(qlt, "qlt_mbox_command=28h status=%llxh\n", ret);
2358 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
2359 		    (long long)ret);
2360 		qlt_free_mailbox_command(qlt, mcp);
2361 		return (ret);
2362 	}
2363 
2364 	if (qlt->qlt_mq_enabled == 1) {
2365 
2366 		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2367 			da = qlt->mq_resp[i].queue_mem_mq_cookie.dmac_laddress;
2368 
2369 			mcp->to_fw_mask = BIT_14 | BIT_13 | BIT_12 | BIT_11 |
2370 			    BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 |
2371 			    BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0;
2372 			mcp->from_fw_mask = BIT_0 | BIT_1;
2373 
2374 			/* msix vector setup */
2375 			mcp->to_fw[14] = (uint16_t)(i);
2376 
2377 			mcp->to_fw[13] = 0;
2378 			mcp->to_fw[12] = 0;
2379 			mcp->to_fw[11] = 0;
2380 			mcp->to_fw[10] = 0;
2381 			mcp->to_fw[9] = 0;
2382 			mcp->to_fw[8] = 0;
2383 			mcp->to_fw[7] = LSW(MSD(da));
2384 			mcp->to_fw[6] = MSW(MSD(da));
2385 			mcp->to_fw[5] = RESPONSE_QUEUE_MQ_ENTRIES;
2386 			mcp->to_fw[4] = (uint16_t)(i);
2387 			mcp->to_fw[3] = LSW(LSD(da));
2388 			mcp->to_fw[2] = MSW(LSD(da));
2389 			mcp->to_fw[1] = BIT_6 | BIT_1;
2390 			mcp->to_fw[0] = 0x1F;
2391 			ret = qlt_mailbox_command(qlt, mcp);
2392 
2393 			if (ret != QLT_SUCCESS) {
2394 				EL(qlt, "qlt_mbox_command=1fh status=%llxh\n",
2395 				    ret);
2396 				cmn_err(CE_NOTE, "qlt(%d) queue manage %llx",
2397 				    instance, (long long)ret);
2398 				qlt_free_mailbox_command(qlt, mcp);
2399 				return (ret);
2400 			}
2401 
2402 			da = qlt->mq_req[i].queue_mem_mq_cookie.dmac_laddress;
2403 
2404 			mcp->to_fw_mask = BIT_14 | BIT_13 | BIT_12 | BIT_11 |
2405 			    BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 |
2406 			    BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0;
2407 			mcp->from_fw_mask = BIT_0 | BIT_1;
2408 
2409 			/*
2410 			 * msix vector does not apply for request queue create
2411 			 */
2412 			mcp->to_fw[14] = 2;
2413 			mcp->to_fw[13] = 0;
2414 			mcp->to_fw[12] = 4;
2415 			mcp->to_fw[11] = 0;
2416 			mcp->to_fw[10] = (uint16_t)(i);
2417 			mcp->to_fw[9] = 0;
2418 			mcp->to_fw[8] = 0;
2419 			mcp->to_fw[7] = LSW(MSD(da));
2420 			mcp->to_fw[6] = MSW(MSD(da));
2421 			mcp->to_fw[5] = REQUEST_QUEUE_MQ_ENTRIES;
2422 			mcp->to_fw[4] = (uint16_t)(i);
2423 			mcp->to_fw[3] = LSW(LSD(da));
2424 			mcp->to_fw[2] = MSW(LSD(da));
2425 			mcp->to_fw[1] = BIT_6;
2426 			mcp->to_fw[0] = 0x1F;
2427 			ret = qlt_mailbox_command(qlt, mcp);
2428 
2429 			if (ret != QLT_SUCCESS) {
2430 				EL(qlt, "qlt_mbox_command=1fh status=%llxh\n",
2431 				    ret);
2432 				cmn_err(CE_NOTE, "qlt(%d) queue manage %llx",
2433 				    instance, (long long)ret);
2434 				qlt_free_mailbox_command(qlt, mcp);
2435 				return (ret);
2436 			}
2437 		}
2438 	}
2439 
2440 	/*
2441 	 * Report FW versions for 81xx - MPI rev is useful
2442 	 */
2443 	/* if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) { */
2444 	if (qlt->qlt_fcoe_enabled) {
2445 		mcp->to_fw_mask = BIT_0;
2446 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_6 | BIT_3 | BIT_2 |
2447 		    BIT_1 | BIT_0;
2448 
2449 		mcp->to_fw[0] = MBC_ABOUT_FIRMWARE;
2450 		ret = qlt_mailbox_command(qlt, mcp);
2451 		if (ret != QLT_SUCCESS) {
2452 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
2453 		} else {
2454 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
2455 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
2456 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
2457 			    mcp->from_fw[11] & 0xff);
2458 			EL(qlt, "Firmware Attributes %x[h]\n",
2459 			    mcp->from_fw[6]);
2460 		}
2461 	}
2462 
2463 	qlt_free_mailbox_command(qlt, mcp);
2464 
2465 	for (i = 0; i < 5; i++) {
2466 		qlt->qlt_bufref[i] = 0;
2467 	}
2468 	qlt->qlt_bumpbucket = 0;
2469 	qlt->qlt_pmintry = 0;
2470 	qlt->qlt_pmin_ok = 0;
2471 
2472 	if (ret != QLT_SUCCESS)
2473 		return (ret);
2474 
2475 	return (FCT_SUCCESS);
2476 }
2477 
/*
 * Take the port offline: quiesce the mailbox interface, then reset the
 * chip while holding the interrupt (and per-queue response) locks so the
 * ISR cannot race with the reset.
 */
fct_status_t
qlt_port_offline(qlt_state_t *qlt)
{
	int retries;
	int i;

	mutex_enter(&qlt->mbox_lock);

	/* Mailbox interface already torn down; skip straight to reset. */
	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
		mutex_exit(&qlt->mbox_lock);
		goto poff_mbox_done;
	}

	/* Wait to grab the mailboxes */
	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
	    retries++) {
		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
		/* Give up after ~5 wakeups and force the UNKNOWN state. */
		if ((retries > 5) ||
		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
			mutex_exit(&qlt->mbox_lock);
			goto poff_mbox_done;
		}
	}
	/* Mark mailboxes unusable so no new mailbox commands are issued. */
	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
	mutex_exit(&qlt->mbox_lock);
poff_mbox_done:;
	/*
	 * NOTE(review): intr_sneak_counter presumably lets the ISR absorb
	 * stale interrupts that fire during/after the reset — confirm
	 * against the interrupt handler.
	 */
	qlt->intr_sneak_counter = 10;
	mutex_enter(&qlt->intr_lock);
	if (qlt->qlt_mq_enabled == 1) {
		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
			mutex_enter(&qlt->mq_resp[i].mq_lock);
		}
	}
	(void) qlt_reset_chip(qlt);
	drv_usecwait(20);
	qlt->intr_sneak_counter = 0;
	if (qlt->qlt_mq_enabled == 1) {
		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
			mutex_exit(&qlt->mq_resp[i].mq_lock);
		}
	}
	mutex_exit(&qlt->intr_lock);

	return (FCT_SUCCESS);
}
2524 
2525 static fct_status_t
qlt_get_link_info(fct_local_port_t * port,fct_link_info_t * li)2526 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
2527 {
2528 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
2529 	mbox_cmd_t *mcp;
2530 	fct_status_t fc_ret;
2531 	fct_status_t ret;
2532 	clock_t et;
2533 
2534 	et = ddi_get_lbolt() + drv_usectohz(5000000);
2535 	mcp = qlt_alloc_mailbox_command(qlt, 0);
2536 link_info_retry:
2537 	mcp->to_fw[0] = MBC_GET_ID;
2538 	mcp->to_fw[9] = 0;
2539 	mcp->to_fw_mask |= BIT_0 | BIT_9;
2540 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
2541 	/* Issue mailbox to firmware */
2542 	ret = qlt_mailbox_command(qlt, mcp);
2543 	if (ret != QLT_SUCCESS) {
2544 		EL(qlt, "qlt_mbox_command=20h status=%llxh\n", ret);
2545 		if ((mcp->from_fw[0] == 0x4005) &&
2546 		    ((mcp->from_fw[1] == 7) || (mcp->from_fw[1] == 0x1b))) {
2547 			/* Firmware is not ready */
2548 			if (ddi_get_lbolt() < et) {
2549 				delay(drv_usectohz(50000));
2550 				goto link_info_retry;
2551 			}
2552 		}
2553 		EL(qlt, "GET ID mbox failed, ret=%llx mb0=%x mb1=%x",
2554 		    ret, mcp->from_fw[0], mcp->from_fw[1]);
2555 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
2556 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
2557 		fc_ret = FCT_FAILURE;
2558 	} else {
2559 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
2560 		    (((uint32_t)(mcp->from_fw[3])) << 16);
2561 
2562 		li->port_speed = qlt->link_speed;
2563 		switch (mcp->from_fw[6]) {
2564 		case 1:
2565 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
2566 			li->port_fca_flogi_done = 1;
2567 			break;
2568 		case 0:
2569 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
2570 			li->port_no_fct_flogi = 1;
2571 			break;
2572 		case 3:
2573 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
2574 			li->port_fca_flogi_done = 1;
2575 			break;
2576 		case 2: /*FALLTHROUGH*/
2577 		case 4:
2578 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
2579 			li->port_fca_flogi_done = 1;
2580 			break;
2581 		default:
2582 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
2583 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
2584 		}
2585 		qlt->cur_topology = li->port_topology;
2586 		fc_ret = FCT_SUCCESS;
2587 
2588 		EL(qlt, "MBC_GET_ID done, Topology=%x, portid=%xh, "
2589 		    "port speed=%xh\n", li->port_topology, li->portid,
2590 		    li->port_speed);
2591 	}
2592 	qlt_free_mailbox_command(qlt, mcp);
2593 
2594 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
2595 		mcp = qlt_alloc_mailbox_command(qlt, 64);
2596 		mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
2597 		mcp->to_fw[1] = 0x7FE;
2598 		mcp->to_fw[9] = 0;
2599 		mcp->to_fw[10] = 0;
2600 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
2601 		fc_ret = qlt_mailbox_command(qlt, mcp);
2602 		if (fc_ret != QLT_SUCCESS) {
2603 			EL(qlt, "qlt_mbox_command=64h status=%llxh\n",
2604 			    fc_ret);
2605 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
2606 			    "database for F_port failed, ret = %llx", fc_ret);
2607 		} else {
2608 			uint8_t *p;
2609 
2610 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
2611 			p = mcp->dbuf->db_sglist[0].seg_addr;
2612 			bcopy(p + 0x18, li->port_rpwwn, 8);
2613 			bcopy(p + 0x20, li->port_rnwwn, 8);
2614 			EL(qlt, "qlt_mbox_command=64h, GET_PORT_DATABASE "
2615 			    "complete\n");
2616 		}
2617 		qlt_free_mailbox_command(qlt, mcp);
2618 	}
2619 	return (fc_ret);
2620 }
2621 
2622 static int
qlt_open(dev_t * devp,int flag,int otype,cred_t * credp)2623 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
2624 {
2625 	int		instance;
2626 	qlt_state_t	*qlt;
2627 
2628 	if (otype != OTYP_CHR) {
2629 		return (EINVAL);
2630 	}
2631 
2632 	/*
2633 	 * Since this is for debugging only, only allow root to issue ioctl now
2634 	 */
2635 	if (drv_priv(credp)) {
2636 		return (EPERM);
2637 	}
2638 
2639 	instance = (int)getminor(*devp);
2640 	qlt = ddi_get_soft_state(qlt_state, instance);
2641 	if (qlt == NULL) {
2642 		return (ENXIO);
2643 	}
2644 
2645 	mutex_enter(&qlt->qlt_ioctl_lock);
2646 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
2647 		/*
2648 		 * It is already open for exclusive access.
2649 		 * So shut the door on this caller.
2650 		 */
2651 		mutex_exit(&qlt->qlt_ioctl_lock);
2652 		return (EBUSY);
2653 	}
2654 
2655 	if (flag & FEXCL) {
2656 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
2657 			/*
2658 			 * Exclusive operation not possible
2659 			 * as it is already opened
2660 			 */
2661 			mutex_exit(&qlt->qlt_ioctl_lock);
2662 			return (EBUSY);
2663 		}
2664 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
2665 	}
2666 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
2667 	mutex_exit(&qlt->qlt_ioctl_lock);
2668 
2669 	return (0);
2670 }
2671 
2672 /* ARGSUSED */
2673 static int
qlt_close(dev_t dev,int flag,int otype,cred_t * credp)2674 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
2675 {
2676 	int		instance;
2677 	qlt_state_t	*qlt;
2678 
2679 	if (otype != OTYP_CHR) {
2680 		return (EINVAL);
2681 	}
2682 
2683 	instance = (int)getminor(dev);
2684 	qlt = ddi_get_soft_state(qlt_state, instance);
2685 	if (qlt == NULL) {
2686 		return (ENXIO);
2687 	}
2688 
2689 	mutex_enter(&qlt->qlt_ioctl_lock);
2690 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
2691 		mutex_exit(&qlt->qlt_ioctl_lock);
2692 		return (ENODEV);
2693 	}
2694 
2695 	/*
2696 	 * It looks there's one hole here, maybe there could several concurrent
2697 	 * shareed open session, but we never check this case.
2698 	 * But it will not hurt too much, disregard it now.
2699 	 */
2700 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
2701 	mutex_exit(&qlt->qlt_ioctl_lock);
2702 
2703 	return (0);
2704 }
2705 
2706 /*
2707  * All of these ioctls are unstable interfaces which are meant to be used
2708  * in a controlled lab env. No formal testing will be (or needs to be) done
2709  * for these ioctls. Specially note that running with an additional
2710  * uploaded firmware is not supported and is provided here for test
2711  * purposes only.
2712  */
2713 /* ARGSUSED */
2714 static int
qlt_ioctl(dev_t dev,int cmd,intptr_t data,int mode,cred_t * credp,int * rval)2715 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
2716     cred_t *credp, int *rval)
2717 {
2718 	qlt_state_t	*qlt;
2719 	int		ret = 0;
2720 #ifdef _LITTLE_ENDIAN
2721 	int		i;
2722 #endif
2723 	stmf_iocdata_t	*iocd;
2724 	void		*ibuf = NULL;
2725 	void		*obuf = NULL;
2726 	uint32_t	*intp;
2727 	qlt_fw_info_t	*fwi;
2728 	mbox_cmd_t	*mcp;
2729 	fct_status_t	st;
2730 	char		info[80];
2731 	fct_status_t	ret2;
2732 
2733 	if (drv_priv(credp) != 0)
2734 		return (EPERM);
2735 
2736 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
2737 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
2738 	if (ret)
2739 		return (ret);
2740 	iocd->stmf_error = 0;
2741 
2742 	switch (cmd) {
2743 	case QLT_IOCTL_FETCH_FWDUMP:
2744 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
2745 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
2746 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
2747 			ret = EINVAL;
2748 			break;
2749 		}
2750 		mutex_enter(&qlt->qlt_ioctl_lock);
2751 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
2752 			mutex_exit(&qlt->qlt_ioctl_lock);
2753 			ret = ENODATA;
2754 			EL(qlt, "no fwdump\n");
2755 			iocd->stmf_error = QLTIO_NO_DUMP;
2756 			break;
2757 		}
2758 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
2759 			mutex_exit(&qlt->qlt_ioctl_lock);
2760 			ret = EBUSY;
2761 			EL(qlt, "fwdump inprogress\n");
2762 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
2763 			break;
2764 		}
2765 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
2766 			mutex_exit(&qlt->qlt_ioctl_lock);
2767 			ret = EEXIST;
2768 			EL(qlt, "fwdump already fetched\n");
2769 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
2770 			break;
2771 		}
2772 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
2773 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
2774 		mutex_exit(&qlt->qlt_ioctl_lock);
2775 
2776 		break;
2777 
2778 	case QLT_IOCTL_TRIGGER_FWDUMP:
2779 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
2780 			ret = EACCES;
2781 			iocd->stmf_error = QLTIO_NOT_ONLINE;
2782 			break;
2783 		}
2784 		(void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
2785 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
2786 		info[79] = 0;
2787 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
2788 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
2789 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
2790 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
2791 			    "%llxh\n", ret2);
2792 			ret = EIO;
2793 		}
2794 		break;
2795 	case QLT_IOCTL_UPLOAD_FW:
2796 		if ((iocd->stmf_ibuf_size < 1024) ||
2797 		    (iocd->stmf_ibuf_size & 3)) {
2798 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
2799 			    iocd->stmf_ibuf_size);
2800 			ret = EINVAL;
2801 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
2802 			break;
2803 		}
2804 		intp = (uint32_t *)ibuf;
2805 #ifdef _LITTLE_ENDIAN
2806 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
2807 			intp[i] = BSWAP_32(intp[i]);
2808 		}
2809 #endif
2810 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
2811 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
2812 		    iocd->stmf_ibuf_size)) {
2813 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
2814 			    iocd->stmf_ibuf_size);
2815 			ret = EINVAL;
2816 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
2817 			break;
2818 		}
2819 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
2820 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
2821 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
2822 		    !qlt->qlt_83xx_chip && !qlt->qlt_27xx_chip &&
2823 		    ((intp[8] & 3) == 0))) {
2824 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
2825 			ret = EACCES;
2826 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
2827 			break;
2828 		}
2829 
2830 		/* Everything looks ok, lets copy this firmware */
2831 		if (qlt->fw_code01) {
2832 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
2833 			    qlt->fw_length02) << 2);
2834 			qlt->fw_code01 = NULL;
2835 		} else {
2836 			atomic_inc_32(&qlt_loaded_counter);
2837 		}
2838 		qlt->fw_length01 = intp[3];
2839 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
2840 		    KM_SLEEP);
2841 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
2842 		qlt->fw_addr01 = intp[2];
2843 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
2844 		qlt->fw_addr02 = qlt->fw_code02[2];
2845 		qlt->fw_length02 = qlt->fw_code02[3];
2846 		break;
2847 
2848 	case QLT_IOCTL_CLEAR_FW:
2849 		if (qlt->fw_code01) {
2850 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
2851 			    qlt->fw_length02) << 2);
2852 			qlt->fw_code01 = NULL;
2853 			atomic_dec_32(&qlt_loaded_counter);
2854 		}
2855 		break;
2856 
2857 	case QLT_IOCTL_GET_FW_INFO:
2858 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
2859 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
2860 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
2861 			ret = EINVAL;
2862 			break;
2863 		}
2864 		fwi = (qlt_fw_info_t *)obuf;
2865 		if (qlt->qlt_stay_offline) {
2866 			fwi->fwi_stay_offline = 1;
2867 		}
2868 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
2869 			fwi->fwi_port_active = 1;
2870 		}
2871 		fwi->fwi_active_major = qlt->fw_major;
2872 		fwi->fwi_active_minor = qlt->fw_minor;
2873 		fwi->fwi_active_subminor = qlt->fw_subminor;
2874 		fwi->fwi_active_attr = qlt->fw_attr;
2875 		if (qlt->fw_code01) {
2876 			fwi->fwi_fw_uploaded = 1;
2877 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
2878 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
2879 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
2880 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
2881 		}
2882 		if (qlt->qlt_27xx_chip) {
2883 			fwi->fwi_default_major = (uint16_t)fw2700_code01[4];
2884 			fwi->fwi_default_minor = (uint16_t)fw2700_code01[5];
2885 			fwi->fwi_default_subminor = (uint16_t)fw2700_code01[6];
2886 			fwi->fwi_default_attr = (uint16_t)fw2700_code01[7];
2887 		} else if (qlt->qlt_83xx_chip) {
2888 			fwi->fwi_default_major = (uint16_t)fw8300fc_code01[4];
2889 			fwi->fwi_default_minor = (uint16_t)fw8300fc_code01[5];
2890 			fwi->fwi_default_subminor =
2891 			    (uint16_t)fw8300fc_code01[6];
2892 			fwi->fwi_default_attr = (uint16_t)fw8300fc_code01[7];
2893 		} else if (qlt->qlt_81xx_chip) {
2894 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
2895 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
2896 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
2897 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
2898 		} else if (qlt->qlt_25xx_chip) {
2899 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
2900 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
2901 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
2902 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
2903 		} else {
2904 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
2905 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
2906 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
2907 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
2908 		}
2909 		break;
2910 
2911 	case QLT_IOCTL_STAY_OFFLINE:
2912 		if (!iocd->stmf_ibuf_size) {
2913 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
2914 			    iocd->stmf_ibuf_size);
2915 			ret = EINVAL;
2916 			break;
2917 		}
2918 		if (*((char *)ibuf)) {
2919 			qlt->qlt_stay_offline = 1;
2920 		} else {
2921 			qlt->qlt_stay_offline = 0;
2922 		}
2923 		break;
2924 
2925 	case QLT_IOCTL_MBOX:
2926 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
2927 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
2928 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
2929 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
2930 			ret = EINVAL;
2931 			break;
2932 		}
2933 		mcp = qlt_alloc_mailbox_command(qlt, 0);
2934 		if (mcp == NULL) {
2935 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
2936 			ret = ENOMEM;
2937 			break;
2938 		}
2939 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
2940 		st = qlt_mailbox_command(qlt, mcp);
2941 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
2942 		qlt_free_mailbox_command(qlt, mcp);
2943 		if (st != QLT_SUCCESS) {
2944 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
2945 				st = QLT_SUCCESS;
2946 		}
2947 		if (st != QLT_SUCCESS) {
2948 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
2949 			ret = EIO;
2950 			switch (st) {
2951 			case QLT_MBOX_NOT_INITIALIZED:
2952 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
2953 				break;
2954 			case QLT_MBOX_BUSY:
2955 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
2956 				break;
2957 			case QLT_MBOX_TIMEOUT:
2958 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
2959 				break;
2960 			case QLT_MBOX_ABORTED:
2961 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
2962 				break;
2963 			}
2964 		}
2965 		break;
2966 
2967 	case QLT_IOCTL_ELOG:
2968 		EL(qlt, "Not support yet, ioctl-%xh\n", cmd);
2969 		break;
2970 
2971 	default:
2972 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
2973 		ret = ENOTTY;
2974 	}
2975 
2976 	if (ret == 0) {
2977 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
2978 	} else if (iocd->stmf_error) {
2979 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
2980 	}
2981 	if (obuf) {
2982 		kmem_free(obuf, iocd->stmf_obuf_size);
2983 		obuf = NULL;
2984 	}
2985 	if (ibuf) {
2986 		kmem_free(ibuf, iocd->stmf_ibuf_size);
2987 		ibuf = NULL;
2988 	}
2989 	kmem_free(iocd, sizeof (stmf_iocdata_t));
2990 	return (ret);
2991 }
2992 
/*
 * Force a link reinitialization: issue a full-login LIP for native FC,
 * or a port reset for FCoE (which has no loop to LIP).  Blocks until
 * the mailbox command completes, so this must not be called from
 * interrupt context (qlt_mailbox_command asserts that).
 * Returns FCT_SUCCESS or a failure status.
 */
static fct_status_t
qlt_force_lip(qlt_state_t *qlt)
{
	mbox_cmd_t	*mcp;
	fct_status_t	 rval;

	/* dma_size 0: this command needs no DMA buffer */
	mcp = qlt_alloc_mailbox_command(qlt, 0);
	if (qlt->qlt_fcoe_enabled) {
		mcp->to_fw[0] = MBC_PORT_RESET;
	} else {
		mcp->to_fw[0] = MBC_LIP_FULL_LOGIN;
		mcp->to_fw[1] = BIT_4;
		mcp->to_fw[3] = 1;
		mcp->to_fw_mask |= BIT_1 | BIT_3;
	}
	rval = qlt_mailbox_command(qlt, mcp);
	if (rval != FCT_SUCCESS) {
		EL(qlt, "qlt force lip MB failed: rval=%x\n", rval);
	} else {
		/* transport succeeded; check the firmware completion code */
		if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS) {
			QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
			    mcp->from_fw[0]);
			rval = FCT_FAILURE;
		}
	}
	qlt_free_mailbox_command(qlt, mcp);
	return (rval);
}
3021 
/*
 * FCT control entry point for this local port.  Handles port
 * online/offline requests (driving qlt_port_online()/qlt_port_offline()
 * and reporting completion back to FCT via fct_ctl()), the matching
 * state-change acknowledgements, and forced LIP.
 */
static void
qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
{
	stmf_change_status_t		st;
	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
	qlt_state_t			*qlt;
	fct_status_t			ret;

	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
	    (cmd == FCT_CMD_PORT_OFFLINE) ||
	    (cmd == FCT_CMD_FORCE_LIP) ||
	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));

	qlt = (qlt_state_t *)port->port_fca_private;
	st.st_completion_status = FCT_SUCCESS;
	st.st_additional_info = NULL;

	EL(qlt, "port (%p) qlt_state (%xh) cmd (%xh) arg (%p)\n",
	    port, qlt->qlt_state, cmd, arg);

	switch (cmd) {
	case FCT_CMD_PORT_ONLINE:
		/* only a clean OFFLINE -> ONLINE transition is allowed */
		if (qlt->qlt_state == FCT_STATE_ONLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
			st.st_completion_status = FCT_FAILURE;
		if (st.st_completion_status == FCT_SUCCESS) {
			qlt->qlt_state = FCT_STATE_ONLINING;
			qlt->qlt_state_not_acked = 1;
			st.st_completion_status = qlt_port_online(qlt);
			if (st.st_completion_status != STMF_SUCCESS) {
				EL(qlt, "PORT_ONLINE status=%xh\n",
				    st.st_completion_status);
				/* bring-up failed: fall back to OFFLINE */
				qlt->qlt_state = FCT_STATE_OFFLINE;
				qlt->qlt_state_not_acked = 0;
			} else {
				qlt->qlt_state = FCT_STATE_ONLINE;
			}
		}
		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
		qlt->qlt_change_state_flags = 0;
		break;

	case FCT_CMD_PORT_OFFLINE:
		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
			st.st_completion_status = STMF_ALREADY;
		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
			st.st_completion_status = FCT_FAILURE;
		}
		if (st.st_completion_status == FCT_SUCCESS) {
			qlt->qlt_state = FCT_STATE_OFFLINING;
			qlt->qlt_state_not_acked = 1;

			/* capture firmware state before taking the port down */
			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
				(void) qlt_firmware_dump(port, ssci);
			}
			/* remembered for the OFFLINE_COMPLETE ack below */
			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
			st.st_completion_status = qlt_port_offline(qlt);
			if (st.st_completion_status != STMF_SUCCESS) {
				EL(qlt, "PORT_OFFLINE status=%xh\n",
				    st.st_completion_status);
				qlt->qlt_state = FCT_STATE_ONLINE;
				qlt->qlt_state_not_acked = 0;
			} else {
				qlt->qlt_state = FCT_STATE_OFFLINE;
			}
		}
		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
		break;

	case FCT_ACK_PORT_ONLINE_COMPLETE:
		qlt->qlt_state_not_acked = 0;
		break;

	case FCT_ACK_PORT_OFFLINE_COMPLETE:
		qlt->qlt_state_not_acked = 0;
		/*
		 * If the offline was part of a reset request and the port is
		 * allowed back up, kick off re-initialization now.
		 */
		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
		    (qlt->qlt_stay_offline == 0)) {
			if ((ret = fct_port_initialize(port,
			    qlt->qlt_change_state_flags,
			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
				EL(qlt, "fct_port_initialize status=%llxh\n",
				    ret);
				cmn_err(CE_WARN, "qlt_ctl: "
				    "fct_port_initialize failed, please use "
				    "stmfstate to start the port-%s manualy",
				    qlt->qlt_port_alias);
			}
		}
		break;

	case FCT_CMD_FORCE_LIP:
		if (qlt->qlt_fcoe_enabled) {
			EL(qlt, "force lip is an unsupported command "
			    "for this adapter type\n");
		} else {
			if (qlt->qlt_state == FCT_STATE_ONLINE) {
				*((fct_status_t *)arg) = qlt_force_lip(qlt);
				EL(qlt, "forcelip done\n");
			}
		}
		break;

	default:
		EL(qlt, "unsupport cmd - 0x%02X\n", cmd);
		break;
	}
}
3132 
3133 /* ARGSUSED */
3134 static fct_status_t
qlt_do_flogi(fct_local_port_t * port,fct_flogi_xchg_t * fx)3135 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
3136 {
3137 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
3138 
3139 	EL(qlt, "FLOGI requested not supported\n");
3140 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
3141 	return (FCT_FAILURE);
3142 }
3143 
3144 /*
3145  * Return a pointer to n entries in the request queue. Assumes that
3146  * request queue lock is held. Does a very short busy wait if
3147  * less/zero entries are available. Retuns NULL if it still cannot
3148  * fullfill the request.
3149  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
3150  */
caddr_t
qlt_get_req_entries(qlt_state_t *qlt, uint32_t n, uint16_t qi)
{
	int try = 0;

	while (qlt->mq_req[qi].mq_available < n) {
		uint32_t val1, val2, val3;

		/*
		 * Re-read the firmware's out-pointer; only trust a value
		 * that is stable across three consecutive reads.
		 */
		if (qlt->qlt_mq_enabled) {
			/* debounce */
			val1 = MQBAR_RD32(qlt,
			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
			val2 = MQBAR_RD32(qlt,
			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
			val3 = MQBAR_RD32(qlt,
			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
		} else {
			val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
			val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
			val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
		}
		if ((val1 != val2) || (val2 != val3))
			continue;

		qlt->mq_req[qi].mq_ndx_from_fw = val1;
		/*
		 * Ring arithmetic: free = size - 1 - outstanding, where
		 * outstanding is (to_fw - from_fw) modulo the queue size.
		 * Queue 0 uses the legacy queue size, the MQ queues their own.
		 */
		if (qi != 0) {
			qlt->mq_req[qi].mq_available =
			    REQUEST_QUEUE_MQ_ENTRIES - 1 -
			    ((qlt->mq_req[qi].mq_ndx_to_fw -
			    qlt->mq_req[qi].mq_ndx_from_fw) &
			    (REQUEST_QUEUE_MQ_ENTRIES - 1));
		} else {
			qlt->mq_req[qi].mq_available =
			    REQUEST_QUEUE_ENTRIES - 1 -
			    ((qlt->mq_req[qi].mq_ndx_to_fw -
			    qlt->mq_req[qi].mq_ndx_from_fw) &
			    (REQUEST_QUEUE_ENTRIES - 1));
		}
		if (qlt->mq_req[qi].mq_available < n) {
			/* still short: busy-wait 100us, up to two retries */
			if (try < 2) {
				drv_usecwait(100);
				try++;
				continue;
			} else {
				stmf_trace(qlt->qlt_port_alias,
				    "Req Q# %xh is full", qi);
				EL(qlt, "Req %xh is full (%d,%d) (%d,%d)\n",
				    qi, qlt->mq_req[qi].mq_ndx_to_fw,
				    qlt->mq_req[qi].mq_ndx_from_fw,
				    n, qlt->mq_req[qi].mq_available);
				return (NULL);
			}
		}
		break;
	}
	/* We don't change anything until the entries are submitted */
	return (&qlt->mq_req[qi].mq_ptr[qlt->mq_req[qi].mq_ndx_to_fw << 6]);
}
3209 
3210 /*
3211  * updates the req in ptr to fw. Assumes that req lock is held.
3212  */
/*
 * Advance the request-queue in-pointer by n entries and notify the
 * firmware.  Assumes the request queue lock is held and that the caller
 * obtained the entries via qlt_get_req_entries().
 */
void
qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n, uint16_t qi)
{

	ASSERT(n >= 1);

	qlt->mq_req[qi].mq_ndx_to_fw += n;
	/* wrap at the queue size; queue 0 uses the legacy queue size */
	if (qi != 0) {
		qlt->mq_req[qi].mq_ndx_to_fw &= REQUEST_QUEUE_MQ_ENTRIES - 1;
	} else {
		qlt->mq_req[qi].mq_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
	}
	qlt->mq_req[qi].mq_available -= n;

	if (qlt->qlt_mq_enabled) {
		MQBAR_WR32(qlt, (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN,
		    qlt->mq_req[qi].mq_ndx_to_fw);
	} else {
		/*
		 * NOTE(review): mq_req[0] here is only equivalent to
		 * mq_req[qi] if qi == 0 on the non-MQ path — presumably
		 * true; verify against callers.
		 */
		REG_WR32(qlt, REG_REQ_IN_PTR, qlt->mq_req[0].mq_ndx_to_fw);
	}
}
3234 
3235 /*
3236  * Return a pointer to n entries in the priority request queue. Assumes that
3237  * priority request queue lock is held. Does a very short busy wait if
3238  * less/zero entries are available. Retuns NULL if it still cannot
3239  * fullfill the request.
3240  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
3241  */
caddr_t
qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
{
	int try = 0;
	/* free = size - 1 - outstanding (ring arithmetic) */
	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
	    (PRIORITY_QUEUE_ENTRIES - 1));

	while (req_available < n) {
		uint32_t val1, val2, val3;
		/* debounce: trust only a value stable across three reads */
		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
		if ((val1 != val2) || (val2 != val3))
			continue;

		qlt->preq_ndx_from_fw = val1;
		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
		    (PRIORITY_QUEUE_ENTRIES - 1));
		if (req_available < n) {
			/* still short: busy-wait 100us, up to two retries */
			if (try < 2) {
				drv_usecwait(100);
				try++;
				continue;
			} else {
				return (NULL);
			}
		}
		break;
	}
	/* We don't change anything until the entries are submitted */
	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
}
3276 
3277 /*
3278  * updates the req in ptr to fw. Assumes that req lock is held.
3279  */
3280 void
qlt_submit_preq_entries(qlt_state_t * qlt,uint32_t n)3281 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
3282 {
3283 	ASSERT(n >= 1);
3284 	qlt->preq_ndx_to_fw += n;
3285 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
3286 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
3287 }
3288 
3289 /*
3290  * - Should not be called from Interrupt.
3291  * - A very hardware specific function. Does not touch driver state.
3292  * - Assumes that interrupts are disabled or not there.
3293  * - Expects that the caller makes sure that all activity has stopped
3294  *   and its ok now to go ahead and reset the chip. Also the caller
3295  *   takes care of post reset damage control.
3296  * - called by initialize adapter() and dump_fw(for reset only).
3297  * - During attach() nothing much is happening and during initialize_adapter()
3298  *   the function (caller) does all the housekeeping so that this function
3299  *   can execute in peace.
3300  * - Returns 0 on success.
3301  */
/*
 * Reset the ISP chip: disable interrupts, shut down DMA, issue a soft
 * reset, and wait for the ROM firmware to come back up (mailbox 0
 * reads 0).  See the block comment above for calling restrictions.
 */
static fct_status_t
qlt_reset_chip(qlt_state_t *qlt)
{
	int cntr;

	EL(qlt, "initiated\n");

	/* XXX: Switch off LEDs */

	qlt->qlt_intr_enabled = 0;
	/* Disable Interrupts */
	REG_WR32(qlt, REG_INTR_CTRL, 0);
	/* read back to flush the posted write before stopping DMA */
	(void) REG_RD32(qlt, REG_INTR_CTRL);
	/* Stop DMA */
	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);

	/* Wait for DMA to be stopped */
	cntr = 0;
	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
		cntr++;
		/* 3 sec should be more than enough */
		if (cntr == 300)
			return (QLT_DMA_STUCK);
	}

	/* need to ensure no one accesses the hw during the reset 100us */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		/* take mbox lock first, then all request-queue locks */
		mutex_enter(&qlt->mbox_lock);
		if (qlt->qlt_mq_enabled == 1) {
			int i;

			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
				mutex_enter(&qlt->mq_req[i].mq_lock);
			}
		}
		mutex_enter(&qlt->mq_req[0].mq_lock);
		/*
		 * We need to give time for other threads to finish their
		 * interrupts (or we need another lock)
		 */
		drv_usecwait(40);
	}

	/* Reset the Chip */
	REG_WR32(qlt, REG_CTRL_STATUS,
	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);

	qlt->qlt_link_up = 0;

	drv_usecwait(100);

	/* drop the locks taken above, in reverse order */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_exit(&qlt->mq_req[0].mq_lock);
		if (qlt->qlt_mq_enabled == 1) {
			int i;

			for (i = 1; i < qlt->qlt_queue_cnt; i++) {
				mutex_exit(&qlt->mq_req[i].mq_lock);
			}
		}
		mutex_exit(&qlt->mbox_lock);
	}

	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
	cntr = 0;
	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
		delay(drv_usectohz(10000));
		cntr++;
		/* 3 sec should be more than enough */
		if (cntr == 300)
			return (QLT_ROM_STUCK);
	}
	/* Disable Interrupts (Probably not needed) */
	REG_WR32(qlt, REG_INTR_CTRL, 0);

	return (QLT_SUCCESS);
}
3380 
3381 /*
3382  * - Should not be called from Interrupt.
3383  * - A very hardware specific function. Does not touch driver state.
3384  * - Assumes that interrupts are disabled or not there.
3385  * - Expects that the caller makes sure that all activity has stopped
3386  *   and its ok now to go ahead and reset the chip. Also the caller
3387  *   takes care of post reset damage control.
3388  * - called by initialize adapter() and dump_fw(for reset only).
3389  * - During attach() nothing much is happening and during initialize_adapter()
3390  *   the function (caller) does all the housekeeping so that this function
3391  *   can execute in peace.
3392  * - Returns 0 on success.
3393  */
/*
 * Reset the chip and download the firmware.  A user-uploaded image
 * (qlt->fw_code01, set via the LOAD_FW ioctl) takes precedence over
 * the per-chip images compiled into the driver.  After loading both
 * segments the firmware checksum is verified, the firmware started,
 * and the version/attribute info is read back into the softstate.
 * Returns QLT_SUCCESS or the failing step's status.
 */
static fct_status_t
qlt_download_fw(qlt_state_t *qlt)
{
	uint32_t start_addr;
	fct_status_t ret;

	EL(qlt, "initiated\n");

	(void) qlt_reset_chip(qlt);

	if (qlt->qlt_81xx_chip) {
		qlt_mps_reset(qlt);
	}

	/* Load the two segments */
	if (qlt->fw_code01 != NULL) {
		/* user-uploaded firmware image takes precedence */
		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
		    qlt->fw_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
			    qlt->fw_length02, qlt->fw_addr02);
		}
		start_addr = qlt->fw_addr01;
	} else if (qlt->qlt_27xx_chip) {
		/* 27xx also needs its firmware-dump template fetched */
		(void) qlt_27xx_get_dmp_template(qlt);
		ret = qlt_load_risc_ram(qlt, fw2700_code01,
		    fw2700_length01, fw2700_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2700_code02,
			    fw2700_length02, fw2700_addr02);
		}
		start_addr = fw2700_addr01;
	} else if (qlt->qlt_83xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw8300fc_code01,
		    fw8300fc_length01, fw8300fc_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw8300fc_code02,
			    fw8300fc_length02, fw8300fc_addr02);
		}
		start_addr = fw8300fc_addr01;
	} else if (qlt->qlt_81xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
		    fw8100_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw8100_code02,
			    fw8100_length02, fw8100_addr02);
		}
		start_addr = fw8100_addr01;
	} else if (qlt->qlt_25xx_chip) {
		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
		    fw2500_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2500_code02,
			    fw2500_length02, fw2500_addr02);
		}
		start_addr = fw2500_addr01;
	} else {
		/* default: 24xx firmware */
		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
		    fw2400_addr01);
		if (ret == QLT_SUCCESS) {
			ret = qlt_load_risc_ram(qlt, fw2400_code02,
			    fw2400_length02, fw2400_addr02);
		}
		start_addr = fw2400_addr01;
	}
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
		return (ret);
	}

	/* Verify Checksum */
	REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
		return (ret);
	}

	/* Execute firmware */
	REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
	REG_WR16(qlt, REG_MBOX(3), 0);
#ifdef EXTRA_CREDIT
	/* enable extra credits (reduces available buffers) */
	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
	    (qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		REG_WR16(qlt, REG_MBOX(4), 1);
	} else {
		REG_WR16(qlt, REG_MBOX(4), 0);
	}
#else
	REG_WR16(qlt, REG_MBOX(4), 0);
#endif
	ret = qlt_raw_mailbox_command(qlt);
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
		return (ret);
	} else {
		if (qlt->qlt_27xx_chip) {
			/* 27xx reports link speed in mailboxes 2/3 */
			qlt->qlt_27xx_speed = (uint32_t)
			    (REG_RD16(qlt, REG_MBOX(3)) << 16 |
			    REG_RD16(qlt, REG_MBOX(2)));

		}
	}

	/* Get revisions (About Firmware) */
	REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
	ret = qlt_raw_mailbox_command(qlt);
	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
		return (ret);
	}

	if (qlt->qlt_27xx_chip) {
		/* extended memory layout reported by 27xx firmware */
		qlt->fw_ext_memory_end = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(4)),
		    REG_RD16(qlt, REG_MBOX(5)));
		qlt->fw_shared_ram_start = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(18)),
		    REG_RD16(qlt, REG_MBOX(19)));
		qlt->fw_shared_ram_end = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(20)),
		    REG_RD16(qlt, REG_MBOX(21)));
		qlt->fw_ddr_ram_start = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(22)),
		    REG_RD16(qlt, REG_MBOX(23)));
		qlt->fw_ddr_ram_end = SHORT_TO_LONG(
		    REG_RD16(qlt, REG_MBOX(24)),
		    REG_RD16(qlt, REG_MBOX(25)));
	}


	return (QLT_SUCCESS);
}
3541 
3542 /*
3543  * Used only from qlt_download_fw().
3544  */
3545 static fct_status_t
qlt_load_risc_ram(qlt_state_t * qlt,uint32_t * host_addr,uint32_t word_count,uint32_t risc_addr)3546 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
3547     uint32_t word_count, uint32_t risc_addr)
3548 {
3549 	uint32_t words_sent = 0;
3550 	uint32_t words_being_sent;
3551 	uint32_t *cur_host_addr;
3552 	uint32_t cur_risc_addr;
3553 	uint64_t da;
3554 	fct_status_t ret;
3555 
3556 	while (words_sent < word_count) {
3557 		cur_host_addr = &(host_addr[words_sent]);
3558 		cur_risc_addr = risc_addr + (words_sent << 2);
3559 		words_being_sent = min(word_count - words_sent,
3560 		    TOTAL_DMA_MEM_SIZE >> 2);
3561 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
3562 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
3563 		    DDI_DEV_AUTOINCR);
3564 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
3565 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
3566 		da = qlt->queue_mem_cookie.dmac_laddress;
3567 		REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
3568 		REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
3569 		REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
3570 		REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
3571 		REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
3572 		REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
3573 		REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
3574 		REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
3575 		REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
3576 		ret = qlt_raw_mailbox_command(qlt);
3577 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3578 		if (ret != QLT_SUCCESS) {
3579 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
3580 			    ret);
3581 			return (ret);
3582 		}
3583 		words_sent += words_being_sent;
3584 	}
3585 	EL(qlt, "qlt_raw_mailbox_command=0Bh, LOAD_RAM_EXTENDED complete\n");
3586 	return (QLT_SUCCESS);
3587 }
3588 
3589 /*
3590  * Not used during normal operation. Only during driver init.
3591  * Assumes that interrupts are disabled and mailboxes are loaded.
3592  * Just triggers the mailbox command an waits for the completion.
3593  * Also expects that There is nothing else going on and we will only
3594  * get back a mailbox completion from firmware.
3595  * ---DOES NOT CLEAR INTERRUPT---
3596  * Used only from the code path originating from
3597  * qlt_reset_chip()
3598  */
static fct_status_t
qlt_raw_mailbox_command(qlt_state_t *qlt)
{
	int cntr = 0;
	uint32_t status;
	fct_local_port_t *port = qlt->qlt_port;

	/* ring the doorbell: tell the RISC the mailboxes are loaded */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
retry_raw:;
	/* poll for a RISC-to-PCI interrupt; 3000 x 10ms = ~30s budget */
	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
		cntr++;
		if (cntr == 3000) {
			EL(qlt, "polling exhausted, dump fw now..\n");
			(void) qlt_firmware_dump(port,
			    (stmf_state_change_info_t *)NULL);
			return (QLT_MAILBOX_STUCK);
		}
		delay(drv_usectohz(10000));
	}
	status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);

	if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
	    (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
	    (status == MBX_CMD_SUCCESSFUL) ||
	    (status == MBX_CMD_NOT_SUCCESSFUL)) {
		/* mailbox completion: mailbox 0 holds the result code */
		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
		if (mbox0 == QLT_MBX_CMD_SUCCESS) {
			return (QLT_SUCCESS);
		} else {
			EL(qlt, "mbx cmd failed, dump fw now..\n");
			(void) qlt_firmware_dump(port,
			    (stmf_state_change_info_t *)NULL);
			return (QLT_MBOX_FAILED | mbox0);
		}
	} else if (status == ASYNC_EVENT) {
		/* unexpected async event arrived instead of a completion */
		uint16_t mbox0, mbox1, mbox2, mbox3;
		uint16_t mbox4, mbox5, mbox6, mbox7;

		mbox0 = REG_RD16(qlt, REG_MBOX(0));
		mbox1 = REG_RD16(qlt, REG_MBOX(1));
		mbox2 = REG_RD16(qlt, REG_MBOX(2));
		mbox3 = REG_RD16(qlt, REG_MBOX(3));
		mbox4 = REG_RD16(qlt, REG_MBOX(4));
		mbox5 = REG_RD16(qlt, REG_MBOX(5));
		mbox6 = REG_RD16(qlt, REG_MBOX(6));
		mbox7 = REG_RD16(qlt, REG_MBOX(7));

		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x"
		    "mb3=%x mb4=%x mb5=%x mb6=%x mb7=%x",
		    qlt->instance, mbox0, mbox1, mbox2, mbox3,
		    mbox4, mbox5, mbox6, mbox7);
		/* 0x8002 is fatal; anything else: ack and keep waiting */
		if (mbox0 == 0x8002) {
			(void) qlt_firmware_dump(port,
			    (stmf_state_change_info_t *)NULL);
			return (QLT_UNEXPECTED_RESPONSE);
		} else {
			REG_WR32(qlt,
			    REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
			cntr = 0;
			goto retry_raw;
		}
	}

	/* This is unexpected, dump a message */
	cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx",
	    ddi_get_instance(qlt->dip), (unsigned long long)status);
	return (QLT_UNEXPECTED_RESPONSE);
}
3667 
3668 static mbox_cmd_t *
qlt_alloc_mailbox_command(qlt_state_t * qlt,uint32_t dma_size)3669 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
3670 {
3671 	mbox_cmd_t *mcp;
3672 
3673 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
3674 	if (dma_size) {
3675 		qlt_dmem_bctl_t *bctl;
3676 		uint64_t da;
3677 
3678 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
3679 		if (mcp->dbuf == NULL) {
3680 			kmem_free(mcp, sizeof (*mcp));
3681 			return (NULL);
3682 		}
3683 		mcp->dbuf->db_data_size = dma_size;
3684 		ASSERT(mcp->dbuf->db_sglist_length == 1);
3685 
3686 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
3687 		da = bctl->bctl_dev_addr;
3688 		/* This is the most common initialization of dma ptrs */
3689 		mcp->to_fw[3] = LSW(LSD(da));
3690 		mcp->to_fw[2] = MSW(LSD(da));
3691 		mcp->to_fw[7] = LSW(MSD(da));
3692 		mcp->to_fw[6] = MSW(MSD(da));
3693 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
3694 	}
3695 	mcp->to_fw_mask |= BIT_0;
3696 	mcp->from_fw_mask |= BIT_0;
3697 	return (mcp);
3698 }
3699 
3700 void
qlt_free_mailbox_command(qlt_state_t * qlt,mbox_cmd_t * mcp)3701 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
3702 {
3703 	if (mcp->dbuf)
3704 		qlt_i_dmem_free(qlt, mcp->dbuf);
3705 	kmem_free(mcp, sizeof (*mcp));
3706 }
3707 
3708 /*
3709  * This can sleep. Should never be called from interrupt context.
3710  */
static fct_status_t
qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
{
	int	retries;
	int	i;
	char	info[80];

	/* this path sleeps; refuse to run from an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD) {
		ASSERT(0);
		return (QLT_MBOX_FAILED);
	}

	EL(qlt, "mailbox:[0]=%xh [1]=%xh\n",
	    mcp->to_fw[0], mcp->to_fw[1]);

	mutex_enter(&qlt->mbox_lock);
	/* See if mailboxes are still uninitialized */
	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
		mutex_exit(&qlt->mbox_lock);
		return (QLT_MBOX_NOT_INITIALIZED);
	}

	/* Wait to grab the mailboxes */
	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
	    retries++) {
		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
		/* give up after several wakeups, or if the state collapsed */
		if ((retries > 5) ||
		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
			mutex_exit(&qlt->mbox_lock);
			return (QLT_MBOX_BUSY);
		}
	}
	/* Make sure we always ask for mailbox 0 */
	mcp->from_fw_mask |= BIT_0;

	/* Load mailboxes, set state and generate RISC interrupt */
	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
	qlt->mcp = mcp;
	for (i = 0; i < MAX_MBOXES; i++) {
		/* only the mailboxes selected in to_fw_mask are written */
		if (mcp->to_fw_mask & ((uint32_t)1 << i))
			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
	}
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));

qlt_mbox_wait_loop:;
	/* Wait for mailbox command completion */
	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
		/* timed out: mark mailboxes unusable and shut the port down */
		(void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
		info[79] = 0;
		qlt->mcp = NULL;
		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
		mutex_exit(&qlt->mbox_lock);

		/*
		 * XXX Throw HBA fatal error event
		 */
		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return (QLT_MBOX_TIMEOUT);
	}
	/* spurious wakeup: command still running, keep waiting */
	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
		goto qlt_mbox_wait_loop;

	qlt->mcp = NULL;

	/* Make sure its a completion */
	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
		mutex_exit(&qlt->mbox_lock);
		return (QLT_MBOX_ABORTED);
	}

	/* MBox command completed. Clear state, return based on mbox 0 */
	/* Mailboxes are already loaded by interrupt routine */
	qlt->mbox_io_state = MBOX_STATE_READY;
	mutex_exit(&qlt->mbox_lock);
	if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS) {
		EL(qlt, "fw[0] = %xh\n", mcp->from_fw[0]);
		/* 0x4005/0x7 is not treated as dump-worthy */
		if ((mcp->from_fw[0] != 0x4005) &&
		    (mcp->from_fw[1] != 0x7)) {
			(void) qlt_firmware_dump(qlt->qlt_port,
			    (stmf_state_change_info_t *)NULL);
		}
		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
	}

	return (QLT_SUCCESS);
}
3801 
3802 /*
3803  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
3804  */
3805 /* ARGSUSED */
/*
 * MSI-X response-queue interrupt handler.  Reads the RISC status register,
 * picks the response queue to service and hands the new firmware
 * "response-in" index to qlt_handle_resp_queue_update().
 */
static uint_t
qlt_msix_resp_handler(caddr_t arg, caddr_t arg2)
{
	qlt_state_t	*qlt = (qlt_state_t *)arg;
	uint32_t	risc_status;
	uint16_t 	qi = 0;

	/*
	 * Snapshot RISC status first; the upper 16 bits carry either the
	 * queue index (MQ mode) or the new response-in index (single queue).
	 */
	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
	if (qlt->qlt_mq_enabled) {
		/* XXX: */
		/* qi = (uint16_t)((unsigned long)arg2); */
		/*
		 * NOTE(review): the queue index is taken from the RISC
		 * status register rather than from the interrupt vector
		 * argument (arg2) -- see the commented-out alternative
		 * above; confirm which is authoritative for this firmware.
		 */
		qi = (uint16_t)(risc_status >> 16);

		mutex_enter(&qlt->mq_resp[qi].mq_lock);
		if (!qlt->qlt_intr_enabled) {
			/*
			 * No further interrupt since intr disabled.
			 */
			REG_WR32(qlt, REG_HCCR,
			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
			mutex_exit(&qlt->mq_resp[qi].mq_lock);
			return (DDI_INTR_UNCLAIMED);
		}

		/* Fetch firmware's response-in index for this queue. */
		qlt->mq_resp[qi].mq_ndx_from_fw =
		    (uint16_t)MQBAR_RD32(qlt,
		    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);

		qlt_handle_resp_queue_update(qlt, qi);
		mutex_exit(&qlt->mq_resp[qi].mq_lock);
	} else {
		mutex_enter(&qlt->intr_lock);
		if (!qlt->qlt_intr_enabled) {
			/*
			 * No further interrupt since intr disabled.
			 */
			REG_WR32(qlt, REG_HCCR,
			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
			mutex_exit(&qlt->intr_lock);
			return (DDI_INTR_UNCLAIMED);
		}

		/* Single queue: drain the ATIO queue, then response queue 0. */
		qlt->atio_ndx_from_fw =
		    (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
		qlt_handle_atio_queue_update(qlt);

		qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
		qlt_handle_resp_queue_update(qlt, qi);
		mutex_exit(&qlt->intr_lock);
	}

	/* BIT_15 indicates a pending RISC-to-PCI interrupt; clear it. */
	if (risc_status & BIT_15) {
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	}
	return (DDI_INTR_CLAIMED);
}
3862 
3863 
3864 /*
3865  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
3866  */
3867 /* ARGSUSED */
/*
 * MSI-X default-vector interrupt handler.  Decodes the interrupt type from
 * the low byte of the RISC status register and dispatches: queue updates
 * (0x13/0x14/0x1C/0x1D/0x1E), async events (0x12), and mailbox command
 * completions (0x10/0x11).
 */
static uint_t
qlt_msix_default_handler(caddr_t arg, caddr_t arg2)
{
	qlt_state_t	*qlt = (qlt_state_t *)arg;
	uint32_t	risc_status, intr_type;
	int		i;
	char		info[80];

	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
	if (!mutex_tryenter(&qlt->intr_lock)) {
		/*
		 * Normally we will always get this lock. If tryenter is
		 * failing then it means that driver is trying to do
		 * some cleanup and is masking the intr but some intr
		 * has sneaked in between. See if our device has generated
		 * this intr. If so then wait a bit and return claimed.
		 * If not then return claimed if this is the 1st instance
		 * of a interrupt after driver has grabbed the lock.
		 */
		if ((risc_status & BIT_15) == 0) {
			return (DDI_INTR_UNCLAIMED);
		} else {
			/* try again */
			drv_usecwait(10);
			if (!mutex_tryenter(&qlt->intr_lock)) {
				/* really bad! */
				return (DDI_INTR_CLAIMED);
			}
		}
	}
	if (((risc_status & BIT_15) == 0) ||
	    (qlt->qlt_intr_enabled == 0)) {
		/*
		 * This might be a pure coincedence that we are operating
		 * in a interrupt disabled mode and another device
		 * sharing the interrupt line has generated an interrupt
		 * while an interrupt from our device might be pending. Just
		 * ignore it and let the code handling the interrupt
		 * disabled mode handle it.
		 */
		mutex_exit(&qlt->intr_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	/* REG_WR32(qlt, REG_INTR_CTRL, 0); */

	/* check for risc pause - unlikely */
	if (risc_status & BIT_8) {
		uint32_t hccsr;

		/* Fatal: log, then shut the port down with a debug dump. */
		hccsr = REG_RD32(qlt, REG_HCCR);
		EL(qlt, "Risc Pause status=%xh hccsr=%x\n",
		    risc_status, hccsr);
		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x hccsr:%x",
		    qlt->instance, risc_status, hccsr);
		(void) snprintf(info, 80, "Risc Pause %08x hccsr:%x",
		    risc_status, hccsr);
		info[79] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
	}

	/* check most likely types first */
	intr_type = risc_status & 0xff;
	if (intr_type == 0x1D) {
		/* ATIO and response queue 0 both need service. */
		qlt->atio_ndx_from_fw =
		    (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
		qlt_handle_atio_queue_update(qlt);
		qlt->mq_resp[0].mq_ndx_from_fw = risc_status >> 16;
		qlt_handle_resp_queue_update(qlt, 0);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	} else if (intr_type == 0x1C) {
		/* ATIO only; new index is delivered in the status high word. */
		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
		qlt_handle_atio_queue_update(qlt);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	} else if (intr_type == 0x1E) {
		/* 83xx */
		qlt->atio_ndx_from_fw =
		    (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
		qlt_handle_atio_queue_update(qlt);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	} else if (intr_type == 0x13) {
		uint16_t qi;

		qlt->atio_ndx_from_fw =
		    (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
		qlt_handle_atio_queue_update(qlt);

		if (qlt->qlt_mq_enabled) {
			/* MQ: queue index comes from the status high word. */
			qi = (uint16_t)(risc_status >> 16);
			qlt->mq_resp[qi].mq_ndx_from_fw =
			    (uint16_t)MQBAR_RD32(qlt,
			    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
			/* FIX THIS to be optional */
			REG_WR32(qlt, REG_HCCR,
			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		} else {
			/* Single queue: high word is the response-in index. */
			qi = 0;
			REG_WR32(qlt, REG_HCCR,
			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
			qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
		}
		qlt_handle_resp_queue_update(qlt, qi);

	} else if (intr_type == 0x14) {
		uint16_t qi = (uint16_t)(risc_status >> 16);

		/* 83xx/27xx expose the ATIO-in index through the MQ BAR. */
		if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
			qlt->atio_ndx_from_fw =
			    (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
		} else {
			qlt->atio_ndx_from_fw = (uint16_t)
			    REG_RD32(qlt, REG_ATIO_IN_PTR);
		}
		qlt_handle_atio_queue_update(qlt);

		qlt->mq_resp[qi].mq_ndx_from_fw =
		    (uint16_t)MQBAR_RD32(qlt,
		    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
		qlt_handle_resp_queue_update(qlt, qi);

		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));

	} else if (intr_type == 0x12) {
		/*
		 * Async event: event code is in the status high word,
		 * payload in mailboxes 1-6.  Interrupts are masked while
		 * the event is processed and re-enabled at the end of
		 * this branch.
		 */
		uint16_t code, mbox1, mbox2, mbox3, mbox4, mbox5, mbox6;

		REG_WR32(qlt, REG_INTR_CTRL, 0);

		code = (uint16_t)(risc_status >> 16);
		mbox1 = REG_RD16(qlt, REG_MBOX(1));
		mbox2 = REG_RD16(qlt, REG_MBOX(2));
		mbox3 = REG_RD16(qlt, REG_MBOX(3));
		mbox4 = REG_RD16(qlt, REG_MBOX(4));
		mbox5 = REG_RD16(qlt, REG_MBOX(5));
		mbox6 = REG_RD16(qlt, REG_MBOX(6));

		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		EL(qlt, "Async event: %x mb1=%x mb2=%x,"
		    " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
		    mbox3, mbox4, mbox5, mbox6);
		stmf_trace(qlt->qlt_port_alias, "Async event: %x mb1=%x mb2=%x,"
		    " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
		    mbox3, mbox4, mbox5, mbox6);
		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
		    " mb3=%x, mb4=%x,  mb5=%x, mb6=%x", qlt->instance, code,
		    mbox1, mbox2, mbox3, mbox4, mbox5, mbox6);

		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
			/* Treated as a link reset if the link was up. */
			if (qlt->qlt_link_up) {
				fct_handle_event(qlt->qlt_port,
				    FCT_EVENT_LINK_RESET, 0, 0);
			}
		} else if (code == 0x8012) {
			/* Link down. */
			qlt->qlt_link_up = 0;
			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
			    0, 0);
		} else if (code == 0x8014) {
			if (mbox1 == 0xffff) { /* global event */
				uint8_t reason_code;

				reason_code = (uint8_t)(mbox3 >> 8);

				switch (reason_code) {
				case 0x1d: /* FIP Clear Virtual Link received */
				case 0x1a: /* received FLOGO */
				case 0x1c: /* FCF configuration changed */
				case 0x1e: /* FKA timeout */
					if (mbox2 == 7) {
						qlt->qlt_link_up = 0;
						fct_handle_event(qlt->qlt_port,
						    FCT_EVENT_LINK_DOWN, 0, 0);
					}
					break;
				case 0x12:
					if (mbox2 == 4) {
						qlt->qlt_link_up = 1;
						fct_handle_event(qlt->qlt_port,
						    FCT_EVENT_LINK_UP, 0, 0);
						stmf_trace(qlt->qlt_port_alias,
						    "SNS login and SCR done");
					}
					break;
				case 0:
					if ((mbox2 == 6) &&
					    (!qlt->qlt_link_up)) {
						qlt->qlt_link_up = 1;
						fct_handle_event(qlt->qlt_port,
						    FCT_EVENT_LINK_UP, 0, 0);
						stmf_trace(qlt->qlt_port_alias,
						    "Link reinitialised");
					}
					break;
				default:
					stmf_trace(qlt->qlt_port_alias,
					    "AEN ignored");
					break;
				}
			}
		} else if (code == 0x8011) {
			/* Link up; mbox1 encodes the negotiated speed. */
			switch (mbox1) {
			case 0: qlt->link_speed = PORT_SPEED_1G;
				break;
			case 1: qlt->link_speed = PORT_SPEED_2G;
				break;
			case 3: qlt->link_speed = PORT_SPEED_4G;
				break;
			case 4: qlt->link_speed = PORT_SPEED_8G;
				break;
			case 5: qlt->link_speed = PORT_SPEED_16G;
				break;
			case 0x13: qlt->link_speed = PORT_SPEED_10G;
				break;
			default:
				qlt->link_speed = PORT_SPEED_UNKNOWN;
			}
			/*
			 * NOTE(review): unlike qlt_isr(), this table has no
			 * 32G entry (case 6) -- confirm whether it is needed
			 * for the hardware served by this handler.
			 */
			qlt->qlt_link_up = 1;
			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
			    0, 0);
		} else if ((code == 0x8002) || (code == 0x8003) ||
		    (code == 0x8004) || (code == 0x8005)) {
			/* Fatal firmware errors: shut the port down. */
			(void) snprintf(info, 80,
			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
			    code, mbox1, mbox2, mbox5, mbox6);
			info[79] = 0;
			(void) fct_port_shutdown(qlt->qlt_port,
			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		} else if (code == 0x800F) {
			/*
			 * NOTE(review): "info" is built here but never used
			 * (no shutdown/log call) and, unlike other branches,
			 * is not forcibly NUL-terminated -- verify intent.
			 */
			(void) snprintf(info, 80,
			    "Got 800F, mb1=%x mb2=%x mb3=%x",
			    mbox1, mbox2, mbox3);

			if (mbox1 != 1) {
				/* issue "verify fw" */
				qlt_verify_fw(qlt);
			}
		} else if (code == 0x8101) {
			(void) snprintf(info, 80,
			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
			    code, mbox1, mbox2, mbox3);
			info[79] = 0;

			/* check if "ACK" is required (timeout != 0) */
			if (mbox1 & 0x0f00) {
				caddr_t	req;

				/*
				 * Ack the request (queue work to do it?)
				 * using a mailbox iocb
				 * (Only Queue #0 allowed)
				 */
				mutex_enter(&qlt->mq_req[0].mq_lock);
				req = qlt_get_req_entries(qlt, 1, 0);
				if (req) {
					bzero(req, IOCB_SIZE);
					req[0] = 0x39; req[1] = 1;
					QMEM_WR16(qlt, req+8, 0x101);
					QMEM_WR16(qlt, req+10, mbox1);
					QMEM_WR16(qlt, req+12, mbox2);
					QMEM_WR16(qlt, req+14, mbox3);
					QMEM_WR16(qlt, req+16, mbox4);
					QMEM_WR16(qlt, req+18, mbox5);
					QMEM_WR16(qlt, req+20, mbox6);
					qlt_submit_req_entries(qlt, 1, 0);
				} else {
					(void) snprintf(info, 80,
					    "IDC ACK failed");
					info[79] = 0;
				}
				mutex_exit(&qlt->mq_req[0].mq_lock);
			}
		} else {
			stmf_trace(qlt->qlt_port_alias,
			    "Async event: 0x%x ignored",
			    code);
		}
		REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
		/* Handle mailbox completion */
		mutex_enter(&qlt->mbox_lock);
		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
			    " when driver wasn't waiting for it %d",
			    qlt->instance, qlt->mbox_io_state);
		} else {
			/* Copy out only the mailboxes the caller asked for. */
			for (i = 0; i < MAX_MBOXES; i++) {
				if (qlt->mcp->from_fw_mask &
				    (((uint32_t)1) << i)) {
					qlt->mcp->from_fw[i] =
					    REG_RD16(qlt, REG_MBOX(i));
				}
			}
			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
		}
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
		/* Wake the thread blocked in qlt_mailbox_command(). */
		cv_broadcast(&qlt->mbox_cv);
		mutex_exit(&qlt->mbox_lock);
	} else {
		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
		    qlt->instance, intr_type);
		stmf_trace(qlt->qlt_port_alias,
		    "%s: Unknown intr type 0x%x [%x]",
		    __func__, intr_type, risc_status);
		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	}

	/* REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR); */
	mutex_exit(&qlt->intr_lock);

	return (DDI_INTR_CLAIMED);
}
4180 
4181 /*
4182  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
4183  */
4184 /* ARGSUSED */
4185 static uint_t
qlt_isr(caddr_t arg,caddr_t arg2)4186 qlt_isr(caddr_t arg, caddr_t arg2)
4187 {
4188 	qlt_state_t	*qlt = (qlt_state_t *)arg;
4189 	uint32_t	risc_status, intr_type;
4190 	int		i;
4191 	int		intr_loop_count;
4192 	char		info[80];
4193 
4194 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
4195 	if (!mutex_tryenter(&qlt->intr_lock)) {
4196 		/*
4197 		 * Normally we will always get this lock. If tryenter is
4198 		 * failing then it means that driver is trying to do
4199 		 * some cleanup and is masking the intr but some intr
4200 		 * has sneaked in between. See if our device has generated
4201 		 * this intr. If so then wait a bit and return claimed.
4202 		 * If not then return claimed if this is the 1st instance
4203 		 * of a interrupt after driver has grabbed the lock.
4204 		 */
4205 		if (risc_status & BIT_15) {
4206 			drv_usecwait(10);
4207 			return (DDI_INTR_CLAIMED);
4208 		} else if (qlt->intr_sneak_counter) {
4209 			qlt->intr_sneak_counter--;
4210 			return (DDI_INTR_CLAIMED);
4211 		} else {
4212 			return (DDI_INTR_UNCLAIMED);
4213 		}
4214 	}
4215 	if (((risc_status & BIT_15) == 0) ||
4216 	    (qlt->qlt_intr_enabled == 0)) {
4217 		/*
4218 		 * This might be a pure coincedence that we are operating
4219 		 * in a interrupt disabled mode and another device
4220 		 * sharing the interrupt line has generated an interrupt
4221 		 * while an interrupt from our device might be pending. Just
4222 		 * ignore it and let the code handling the interrupt
4223 		 * disabled mode handle it.
4224 		 */
4225 		mutex_exit(&qlt->intr_lock);
4226 		return (DDI_INTR_UNCLAIMED);
4227 	}
4228 
4229 	/*
4230 	 * XXX take care for MSI-X case. disable intrs
4231 	 * Its gonna be complicated because of the max iterations.
4232 	 * as hba will have posted the intr which did not go on PCI
4233 	 * but we did not service it either because of max iterations.
4234 	 * Maybe offload the intr on a different thread.
4235 	 */
4236 	intr_loop_count = 0;
4237 
4238 	REG_WR32(qlt, REG_INTR_CTRL, 0);
4239 
4240 intr_again:;
4241 
4242 	/* check for risc pause */
4243 	if (risc_status & BIT_8) {
4244 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
4245 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
4246 		    qlt->instance, risc_status);
4247 		(void) snprintf(info, 80, "Risc Pause %08x", risc_status);
4248 		info[79] = 0;
4249 		(void) fct_port_shutdown(qlt->qlt_port,
4250 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4251 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4252 	}
4253 
4254 	/* First check for high performance path */
4255 	intr_type = risc_status & 0xff;
4256 	if (intr_type == 0x1D) {
4257 
4258 		/* process the atio queue first */
4259 		qlt->atio_ndx_from_fw =
4260 		    (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
4261 		qlt_handle_atio_queue_update(qlt);
4262 
4263 		/* process the response queue next */
4264 		qlt->mq_resp[0].mq_ndx_from_fw =
4265 		    (uint16_t)REG_RD32(qlt, REG_RESP_IN_PTR);
4266 		qlt_handle_resp_queue_update(qlt, 0);
4267 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4268 
4269 	} else if (intr_type == 0x1C) {
4270 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4271 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
4272 		qlt_handle_atio_queue_update(qlt);
4273 	} else if (intr_type == 0x1E) {
4274 		/* 83xx Atio Queue update */
4275 		qlt->atio_ndx_from_fw =
4276 		    (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4277 		qlt_handle_atio_queue_update(qlt);
4278 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4279 	} else if (intr_type == 0x13) {
4280 		uint16_t qi;
4281 
4282 		qlt->atio_ndx_from_fw =
4283 		    (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
4284 		qlt_handle_atio_queue_update(qlt);
4285 
4286 		if (qlt->qlt_mq_enabled) {
4287 			qi = (uint16_t)(risc_status >> 16);
4288 			qlt->mq_resp[0].mq_ndx_from_fw =
4289 			    (uint16_t)MQBAR_RD32(qlt,
4290 			    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4291 			/* FIX THIS to be optional */
4292 			REG_WR32(qlt, REG_HCCR,
4293 			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4294 		} else {
4295 			qi = 0;
4296 			REG_WR32(qlt, REG_HCCR,
4297 			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4298 			qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
4299 			REG_WR32(qlt, REG_HCCR,
4300 			    HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4301 		}
4302 		qlt_handle_resp_queue_update(qlt, qi);
4303 
4304 	} else if (intr_type == 0x14) {
4305 		/* MQ */
4306 		uint16_t qi = 0;
4307 
4308 		if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4309 			qlt->atio_ndx_from_fw =
4310 			    (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4311 		} else {
4312 			qi = (uint16_t)(risc_status >> 16);
4313 			qlt->atio_ndx_from_fw = (uint16_t)
4314 			    REG_RD32(qlt, REG_ATIO_IN_PTR);
4315 		}
4316 		qlt_handle_atio_queue_update(qlt);
4317 
4318 		qlt->mq_resp[qi].mq_ndx_from_fw =
4319 		    (uint16_t)MQBAR_RD32(qlt,
4320 		    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4321 		qlt_handle_resp_queue_update(qlt, qi);
4322 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4323 
4324 	} else if (intr_type == 0x12) {
4325 		uint16_t code = (uint16_t)(risc_status >> 16);
4326 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
4327 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
4328 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
4329 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
4330 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
4331 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
4332 
4333 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4334 		EL(qlt, "Async event %x mb1=%x, mb2=%x, mb3=%x, mb4=%x, "
4335 		    "mb5=%x, mb6=%x\n", code, mbox1, mbox2, mbox3, mbox4,
4336 		    mbox5, mbox6);
4337 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
4338 		    " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
4339 		    mbox3, mbox4, mbox5, mbox6);
4340 		cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
4341 		    " mb3=%x, mb4=%x, mb5=%x, mb6=%x", qlt->instance, code,
4342 		    mbox1, mbox2, mbox3, mbox4, mbox5, mbox6);
4343 
4344 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
4345 			if (qlt->qlt_link_up) {
4346 				fct_handle_event(qlt->qlt_port,
4347 				    FCT_EVENT_LINK_RESET, 0, 0);
4348 			}
4349 		} else if (code == 0x8012) {
4350 			qlt->qlt_link_up = 0;
4351 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
4352 			    0, 0);
4353 		} else if (code == 0x8014) {
4354 			if (mbox1 == 0xffff) { /* global event */
4355 				uint8_t reason_code;
4356 
4357 				reason_code = (uint8_t)(mbox3 >> 8);
4358 
4359 				switch (reason_code) {
4360 				case 0x1d: /* FIP Clear Virtual Link received */
4361 				case 0x1a: /* received FLOGO */
4362 				case 0x1c: /* FCF configuration changed */
4363 				case 0x1e: /* FKA timeout */
4364 					if (mbox2 == 7) {
4365 						qlt->qlt_link_up = 0;
4366 						fct_handle_event(qlt->qlt_port,
4367 						    FCT_EVENT_LINK_DOWN, 0, 0);
4368 					}
4369 					break;
4370 				case 0x12:
4371 					if (mbox2 == 4) {
4372 						qlt->qlt_link_up = 1;
4373 						fct_handle_event(qlt->qlt_port,
4374 						    FCT_EVENT_LINK_UP, 0, 0);
4375 						stmf_trace(qlt->qlt_port_alias,
4376 						    "SNS login and SCR done");
4377 					}
4378 					break;
4379 				case 0:
4380 					if ((mbox2 == 6) &&
4381 					    (!qlt->qlt_link_up)) {
4382 						qlt->qlt_link_up = 1;
4383 						fct_handle_event(qlt->qlt_port,
4384 						    FCT_EVENT_LINK_UP, 0, 0);
4385 						stmf_trace(qlt->qlt_port_alias,
4386 						    "Link reinitialised");
4387 					}
4388 					break;
4389 				default:
4390 					stmf_trace(qlt->qlt_port_alias,
4391 					    "AEN ignored");
4392 					break;
4393 				}
4394 			}
4395 		} else if (code == 0x8011) {
4396 			switch (mbox1) {
4397 			case 0: qlt->link_speed = PORT_SPEED_1G;
4398 				break;
4399 			case 1: qlt->link_speed = PORT_SPEED_2G;
4400 				break;
4401 			case 3: qlt->link_speed = PORT_SPEED_4G;
4402 				break;
4403 			case 4: qlt->link_speed = PORT_SPEED_8G;
4404 				break;
4405 			case 5: qlt->link_speed = PORT_SPEED_16G;
4406 				break;
4407 			case 6: qlt->link_speed = PORT_SPEED_32G;
4408 				break;
4409 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
4410 				break;
4411 			default:
4412 				qlt->link_speed = PORT_SPEED_UNKNOWN;
4413 			}
4414 			qlt->qlt_link_up = 1;
4415 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
4416 			    0, 0);
4417 		} else if ((code == 0x8002) || (code == 0x8003) ||
4418 		    (code == 0x8004) || (code == 0x8005)) {
4419 			(void) snprintf(info, 80,
4420 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
4421 			    code, mbox1, mbox2, mbox5, mbox6);
4422 			info[79] = 0;
4423 			(void) fct_port_shutdown(qlt->qlt_port,
4424 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4425 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4426 		} else if (code == 0x800F) {
4427 			(void) snprintf(info, 80,
4428 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
4429 			    mbox1, mbox2, mbox3);
4430 
4431 			if (mbox1 != 1) {
4432 				/* issue "verify fw" */
4433 				qlt_verify_fw(qlt);
4434 			}
4435 		} else if (code == 0x8101) {
4436 			(void) snprintf(info, 80,
4437 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
4438 			    code, mbox1, mbox2, mbox3);
4439 			info[79] = 0;
4440 
4441 			/* check if "ACK" is required (timeout != 0) */
4442 			if (mbox1 & 0x0f00) {
4443 				caddr_t	req;
4444 
4445 				/*
4446 				 * Ack the request (queue work to do it?)
4447 				 * using a mailbox iocb (only Queue 0 allowed)
4448 				 */
4449 				mutex_enter(&qlt->mq_req[0].mq_lock);
4450 				req = qlt_get_req_entries(qlt, 1, 0);
4451 				if (req) {
4452 					bzero(req, IOCB_SIZE);
4453 					req[0] = 0x39; req[1] = 1;
4454 					QMEM_WR16(qlt, req+8, 0x101);
4455 					QMEM_WR16(qlt, req+10, mbox1);
4456 					QMEM_WR16(qlt, req+12, mbox2);
4457 					QMEM_WR16(qlt, req+14, mbox3);
4458 					QMEM_WR16(qlt, req+16, mbox4);
4459 					QMEM_WR16(qlt, req+18, mbox5);
4460 					QMEM_WR16(qlt, req+20, mbox6);
4461 					qlt_submit_req_entries(qlt, 1, 0);
4462 				} else {
4463 					(void) snprintf(info, 80,
4464 					    "IDC ACK failed");
4465 					info[79] = 0;
4466 				}
4467 				mutex_exit(&qlt->mq_req[0].mq_lock);
4468 			}
4469 		}
4470 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
4471 		/* Handle mailbox completion */
4472 		mutex_enter(&qlt->mbox_lock);
4473 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
4474 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
4475 			    " when driver wasn't waiting for it %d",
4476 			    qlt->instance, qlt->mbox_io_state);
4477 		} else {
4478 			for (i = 0; i < MAX_MBOXES; i++) {
4479 				if (qlt->mcp->from_fw_mask &
4480 				    (((uint32_t)1) << i)) {
4481 					qlt->mcp->from_fw[i] =
4482 					    REG_RD16(qlt, REG_MBOX(i));
4483 				}
4484 			}
4485 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
4486 		}
4487 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4488 		cv_broadcast(&qlt->mbox_cv);
4489 		mutex_exit(&qlt->mbox_lock);
4490 	} else {
4491 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
4492 		    qlt->instance, intr_type);
4493 		stmf_trace(qlt->qlt_port_alias,
4494 		    "%s: Unknown intr type 0x%x [%x]",
4495 		    __func__, intr_type, risc_status);
4496 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4497 	}
4498 
4499 	(void) REG_RD32(qlt, REG_HCCR); /* PCI Posting */
4500 
4501 	if ((qlt->intr_flags & QLT_INTR_MSIX) == 0) {
4502 		risc_status = REG_RD32(qlt, REG_RISC_STATUS);
4503 		if ((risc_status & BIT_15) &&
4504 		    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
4505 			goto intr_again;
4506 		}
4507 		REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
4508 		mutex_exit(&qlt->intr_lock);
4509 	} else {
4510 		mutex_exit(&qlt->intr_lock);
4511 		REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
4512 	}
4513 
4514 	return (DDI_INTR_CLAIMED);
4515 }
4516 
4517 /* **************** NVRAM Functions ********************** */
4518 
4519 fct_status_t
qlt_read_flash_word(qlt_state_t * qlt,uint32_t faddr,uint32_t * bp)4520 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
4521 {
4522 	uint32_t	timer;
4523 
4524 	/* Clear access error flag */
4525 	REG_WR32(qlt, REG_CTRL_STATUS,
4526 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
4527 
4528 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
4529 
4530 	/* Wait for READ cycle to complete. */
4531 	for (timer = 3000; timer; timer--) {
4532 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
4533 			break;
4534 		}
4535 		drv_usecwait(10);
4536 	}
4537 	if (timer == 0) {
4538 		EL(qlt, "flash timeout\n");
4539 		return (QLT_FLASH_TIMEOUT);
4540 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
4541 		EL(qlt, "flash access error\n");
4542 		return (QLT_FLASH_ACCESS_ERROR);
4543 	}
4544 
4545 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
4546 
4547 	return (QLT_SUCCESS);
4548 }
4549 
4550 fct_status_t
qlt_read_nvram(qlt_state_t * qlt)4551 qlt_read_nvram(qlt_state_t *qlt)
4552 {
4553 	uint32_t index, addr, chksum;
4554 	uint32_t val, *ptr;
4555 	fct_status_t ret;
4556 	qlt_nvram_t *nv;
4557 	uint64_t empty_node_name = 0;
4558 
4559 	if (qlt->qlt_27xx_chip) {
4560 		int func;
4561 
4562 		func = ((REG_RD32(qlt, REG_CTRL_STATUS) & 0x0000f000) >> 12);
4563 		switch (func) {
4564 		case 0: addr = QLT27_NVRAM_FUNC0_ADDR; break;
4565 		case 1: addr = QLT27_NVRAM_FUNC1_ADDR; break;
4566 		case 2: addr = QLT27_NVRAM_FUNC2_ADDR; break;
4567 		case 3: addr = QLT27_NVRAM_FUNC3_ADDR; break;
4568 		}
4569 	} else if (qlt->qlt_83xx_chip) {
4570 		if (qlt->qlt_fcoe_enabled) {
4571 			addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4572 			    QLT83FCOE_NVRAM_FUNC1_ADDR :
4573 			    QLT83FCOE_NVRAM_FUNC0_ADDR;
4574 		} else {
4575 			addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4576 			    QLT83FC_NVRAM_FUNC1_ADDR :
4577 			    QLT83FC_NVRAM_FUNC0_ADDR;
4578 		}
4579 	} else if (qlt->qlt_81xx_chip) {
4580 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4581 		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
4582 	} else if (qlt->qlt_25xx_chip) {
4583 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4584 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
4585 	} else {
4586 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4587 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
4588 	}
4589 	mutex_enter(&qlt_global_lock);
4590 
4591 	/* Pause RISC. */
4592 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4593 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
4594 
4595 	/* Get NVRAM data and calculate checksum. */
4596 	ptr = (uint32_t *)qlt->nvram;
4597 	chksum = 0;
4598 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
4599 		ret = qlt_read_flash_word(qlt, addr++, &val);
4600 		if (ret != QLT_SUCCESS) {
4601 			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
4602 			mutex_exit(&qlt_global_lock);
4603 			return (ret);
4604 		}
4605 		chksum += val;
4606 		*ptr = LE_32(val);
4607 		ptr++;
4608 	}
4609 
4610 	/* Release RISC Pause */
4611 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
4612 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
4613 
4614 	mutex_exit(&qlt_global_lock);
4615 
4616 	/* Sanity check NVRAM Data */
4617 	nv = qlt->nvram;
4618 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
4619 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
4620 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
4621 		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
4622 		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
4623 		    nv->nvram_version[1], nv->nvram_version[0]);
4624 		return (QLT_BAD_NVRAM_DATA);
4625 	}
4626 
4627 	/* If node name is zero, hand craft it from port name */
4628 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
4629 		bcopy(nv->port_name, nv->node_name, 8);
4630 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
4631 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
4632 	}
4633 
4634 	return (QLT_SUCCESS);
4635 }
4636 
4637 fct_status_t
qlt_read_vpd(qlt_state_t * qlt)4638 qlt_read_vpd(qlt_state_t *qlt)
4639 {
4640 	uint32_t index, addr, chksum;
4641 	uint32_t val, *ptr;
4642 	fct_status_t ret;
4643 
4644 	if (qlt->qlt_27xx_chip) {
4645 		int func;
4646 
4647 		func = ((REG_RD32(qlt, REG_CTRL_STATUS) & 0x0000f000) >> 12);
4648 		switch (func) {
4649 		case 0: addr = QLT27_VPD_FUNC0_ADDR; break;
4650 		case 1: addr = QLT27_VPD_FUNC1_ADDR; break;
4651 		case 2: addr = QLT27_VPD_FUNC2_ADDR; break;
4652 		case 3: addr = QLT27_VPD_FUNC3_ADDR; break;
4653 		}
4654 	} else if (qlt->qlt_83xx_chip) {
4655 		if (qlt->qlt_fcoe_enabled) {
4656 			addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4657 			    QLT83FCOE_VPD_FUNC1_ADDR :
4658 			    QLT83FCOE_VPD_FUNC0_ADDR;
4659 		} else {
4660 			addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4661 			    QLT83FC_VPD_FUNC1_ADDR :
4662 			    QLT83FC_VPD_FUNC0_ADDR;
4663 		}
4664 	} else if (qlt->qlt_81xx_chip) {
4665 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4666 		    QLT81_VPD_FUNC1_ADDR : QLT81_VPD_FUNC0_ADDR;
4667 	} else if (qlt->qlt_25xx_chip) {
4668 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4669 		    QLT25_VPD_FUNC1_ADDR : QLT25_VPD_FUNC0_ADDR;
4670 	} else {
4671 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4672 		    QLT24_VPD_FUNC1_ADDR : QLT24_VPD_FUNC0_ADDR;
4673 	}
4674 	mutex_enter(&qlt_global_lock);
4675 
4676 	/* Pause RISC. */
4677 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4678 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
4679 
4680 	/* Get VPD data and calculate checksum. */
4681 	ptr = (uint32_t *)qlt->vpd;
4682 	chksum = 0;
4683 	for (index = 0; index < QL_24XX_VPD_SIZE / 4; index++) {
4684 		ret = qlt_read_flash_word(qlt, addr++, &val);
4685 		if (ret != QLT_SUCCESS) {
4686 			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
4687 			mutex_exit(&qlt_global_lock);
4688 			return (ret);
4689 		}
4690 		chksum += val;
4691 		*ptr = LE_32(val);
4692 		ptr++;
4693 	}
4694 
4695 	/* Release RISC Pause */
4696 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
4697 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
4698 
4699 	mutex_exit(&qlt_global_lock);
4700 
4701 	return (QLT_SUCCESS);
4702 }
4703 
4704 fct_status_t
qlt_read_bfe(qlt_state_t * qlt,uint32_t in_addr,uint32_t * out_addr,uint32_t i)4705 qlt_read_bfe(qlt_state_t *qlt, uint32_t in_addr, uint32_t *out_addr, uint32_t i)
4706 {
4707 	uint32_t index;
4708 	uint32_t chksum;
4709 	fct_status_t ret = QLT_SUCCESS;
4710 	uint32_t val;
4711 	uint16_t dataoffset;
4712 	uint32_t *ptr;
4713 	uint32_t addr, addr0;
4714 	uint16_t length;
4715 
4716 	val = chksum = 0;
4717 	ptr = (uint32_t *)&qlt->rimage[i].header;
4718 
4719 	addr = in_addr;
4720 	addr0 = addr;
4721 
4722 	/* read rom header first */
4723 	for (index = 0; index < sizeof (qlt_rom_header_t)/4;
4724 	    index ++) {
4725 		ret = qlt_read_flash_word(qlt, addr++, &val);
4726 		if (ret != QLT_SUCCESS) {
4727 			EL(qlt, "read flash, status=%llxh\n", ret);
4728 			return (ret);
4729 		}
4730 		chksum += val;
4731 		*ptr = LE_32(val);
4732 		ptr++;
4733 	}
4734 
4735 	/* check the signature */
4736 	if (qlt->rimage[i].header.signature[0] != PCI_HEADER0) {
4737 		EL(qlt, "hdr[%d] sig[1] [0] (%xh) (%xh) is wrong.\n",
4738 		    i, qlt->rimage[i].header.signature[1],
4739 		    qlt->rimage[i].header.signature[0]);
4740 		return (QLT_SUCCESS);
4741 	}
4742 
4743 	if ((qlt->rimage[i].header.signature[0] == PCI_HEADER0) &&
4744 	    (qlt->rimage[i].header.signature[1] == PCI_HEADER1)) {
4745 		/* get dataoffset */
4746 		dataoffset = (qlt->rimage[i].header.dataoffset[1] |
4747 		    qlt->rimage[i].header.dataoffset[0]);
4748 		EL(qlt, "dataoffset[0] = %xh\n", dataoffset);
4749 
4750 		ptr = (uint32_t *)&qlt->rimage[i].data;
4751 
4752 		/* adjust addr */
4753 		addr = addr0 + (dataoffset/4);
4754 		for (index = 0; index < sizeof (qlt_rom_data_t)/4;
4755 		    index ++) {
4756 			ret = qlt_read_flash_word(qlt, addr++, &val);
4757 			if (ret != QLT_SUCCESS) {
4758 				EL(qlt, "read flash, status=%llxh\n", ret);
4759 				return (ret);
4760 			}
4761 			chksum += val;
4762 			*ptr = LE_32(val);
4763 			ptr++;
4764 		}
4765 
4766 		/* check signature */
4767 		if ((qlt->rimage[i].data.signature[0] != 0x50) &&
4768 		    (qlt->rimage[i].data.signature[1] != 0x43) &&
4769 		    (qlt->rimage[i].data.signature[2] != 0x49) &&
4770 		    (qlt->rimage[i].data.signature[3] != 0x52)) {
4771 			EL(qlt,
4772 			    "data sig[3] [2] [1] [0] (%xh)(%xh)(%xh)(%xh)\n",
4773 			    qlt->rimage[i].data.signature[3],
4774 			    qlt->rimage[i].data.signature[2],
4775 			    qlt->rimage[i].data.signature[1],
4776 			    qlt->rimage[i].data.signature[0]);
4777 			return (QLT_SUCCESS);
4778 		}
4779 
4780 		EL(qlt, "codetype (%xh) revisionlevel[1][0] (%xh)(%xh)\n",
4781 		    qlt->rimage[i].data.codetype,
4782 		    qlt->rimage[i].data.revisionlevel[1],
4783 		    qlt->rimage[i].data.revisionlevel[0]);
4784 
4785 		/* check if this is the last image */
4786 		if (qlt->rimage[i].data.indicator == PCI_IND_LAST_IMAGE) {
4787 			EL(qlt, "last image (%xh)\n",
4788 			    qlt->rimage[i].data.indicator);
4789 			return (QLT_SUCCESS);
4790 
4791 		}
4792 
4793 		/* Get the image length and adjust the addr according */
4794 		length = (qlt->rimage[i].data.imagelength[1] |
4795 		    qlt->rimage[i].data.imagelength[0]);
4796 
4797 		EL(qlt, "image[%d] length[1][0] (%xh) (%xh) in sectors\n",
4798 		    i, length);
4799 
4800 		/* the starting addr of the next image */
4801 		addr = addr0 + ((length * 512)/4);
4802 		*out_addr = addr;
4803 	}
4804 
4805 	return (QLT_SUCCESS);
4806 }
4807 
4808 fct_status_t
qlt_read_rom_image(qlt_state_t * qlt)4809 qlt_read_rom_image(qlt_state_t *qlt)
4810 {
4811 	uint32_t addr;
4812 	uint32_t out_addr = 0;
4813 	uint32_t count = 0;
4814 	boolean_t last_image = FALSE;
4815 	fct_status_t ret;
4816 
4817 	if (qlt->qlt_27xx_chip) {
4818 		addr = FLASH_2700_DATA_ADDR + FLASH_2700_BOOT_CODE_ADDR;
4819 	} else if (qlt->qlt_83xx_chip) {
4820 		addr = FLASH_8300_DATA_ADDR + FLASH_8300_BOOT_CODE_ADDR;
4821 	} else if (qlt->qlt_81xx_chip) {
4822 		addr = FLASH_8100_DATA_ADDR + FLASH_8100_BOOT_CODE_ADDR;
4823 	} else if (qlt->qlt_25xx_chip) {
4824 		addr = FLASH_2500_DATA_ADDR + FLASH_2500_BOOT_CODE_ADDR;
4825 	} else {
4826 		addr = FLASH_2400_DATA_ADDR + FLASH_2400_BOOT_CODE_ADDR;
4827 	}
4828 	mutex_enter(&qlt_global_lock);
4829 
4830 	/* Pause RISC. */
4831 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4832 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
4833 
4834 	do {
4835 		ret = qlt_read_bfe(qlt, addr, &out_addr, count);
4836 		if (ret != QLT_SUCCESS) {
4837 			EL(qlt, "qlt_read_bfe, status=%llxh\n", ret);
4838 			break;
4839 		}
4840 		if (qlt->rimage[count].data.indicator ==
4841 		    PCI_IND_LAST_IMAGE) {
4842 			last_image = TRUE;
4843 		} else {
4844 			addr = out_addr;
4845 		}
4846 		count ++;
4847 	} while ((last_image != TRUE) && (count < 6));
4848 
4849 	/* Release RISC Pause */
4850 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
4851 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
4852 
4853 	mutex_exit(&qlt_global_lock);
4854 
4855 	return (QLT_SUCCESS);
4856 }
4857 
4858 uint32_t
qlt_sync_atio_queue(qlt_state_t * qlt)4859 qlt_sync_atio_queue(qlt_state_t *qlt)
4860 {
4861 	uint32_t total_ent;
4862 
4863 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
4864 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
4865 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
4866 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
4867 		    DDI_DMA_SYNC_FORCPU);
4868 	} else {
4869 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
4870 		    qlt->atio_ndx_from_fw;
4871 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
4872 		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
4873 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
4874 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
4875 		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
4876 		    DDI_DMA_SYNC_FORCPU);
4877 	}
4878 	return (total_ent);
4879 }
4880 
4881 void
qlt_handle_atio_queue_update(qlt_state_t * qlt)4882 qlt_handle_atio_queue_update(qlt_state_t *qlt)
4883 {
4884 	uint32_t total_ent;
4885 
4886 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
4887 		return;
4888 
4889 	total_ent = qlt_sync_atio_queue(qlt);
4890 
4891 	do {
4892 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
4893 		    qlt->atio_ndx_to_fw << 6];
4894 		uint32_t ent_cnt;
4895 
4896 		ent_cnt = (uint32_t)(atio[1]);
4897 		if (ent_cnt > total_ent) {
4898 			break;
4899 		}
4900 		switch ((uint8_t)(atio[0])) {
4901 		case 0x06:	/* ATIO, make performance case the 1st test */
4902 			qlt_handle_atio(qlt, atio);
4903 			break;
4904 		case 0x0d:	/* INOT */
4905 			qlt_handle_inot(qlt, atio);
4906 			break;
4907 		default:
4908 			EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
4909 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
4910 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
4911 			break;
4912 		}
4913 		qlt->atio_ndx_to_fw = (uint16_t)(
4914 		    (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
4915 		total_ent -= ent_cnt;
4916 	} while (total_ent > 0);
4917 
4918 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4919 		MQBAR_WR32(qlt, MQBAR_ATIO_OUT, qlt->atio_ndx_to_fw);
4920 	} else {
4921 		REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
4922 	}
4923 }
4924 
/*
 * DMA-sync the firmware-produced portion of response queue `qi' so the
 * CPU sees current data.  Queue 0 lives inside the common queue memory
 * at RESPONSE_QUEUE_OFFSET; each multi-queue response queue (qi != 0)
 * has its own DMA handle starting at offset 0 and a different depth.
 * Entries are 64 bytes (<< 6).  Returns the number of ready entries.
 */
uint32_t
qlt_sync_resp_queue(qlt_state_t *qlt, uint16_t qi)
{
	uint32_t total_ent;

	if (qlt->mq_resp[qi].mq_ndx_from_fw > qlt->mq_resp[qi].mq_ndx_to_fw) {
		/* No wrap: one contiguous range to sync. */
		total_ent = qlt->mq_resp[qi].mq_ndx_from_fw -
		    qlt->mq_resp[qi].mq_ndx_to_fw;
		if (qi) {
			(void) ddi_dma_sync(
			    qlt->mq_resp[qi].queue_mem_mq_dma_handle,
			    (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
			    total_ent << 6,
			    DDI_DMA_SYNC_FORCPU);
		} else {
			(void) ddi_dma_sync(
			    qlt->queue_mem_dma_handle,
			    RESPONSE_QUEUE_OFFSET +
			    (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
			    total_ent << 6,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		/*
		 * Producer index wrapped: sync from the consumer index
		 * to the end of the queue, then from the start of the
		 * queue up to the producer index.
		 */
		total_ent =
		    (qi ? RESPONSE_QUEUE_MQ_ENTRIES : RESPONSE_QUEUE_ENTRIES) -
		    qlt->mq_resp[qi].mq_ndx_to_fw +
		    qlt->mq_resp[qi].mq_ndx_from_fw;

		if (qi) {

			(void) ddi_dma_sync(
			    qlt->mq_resp[qi].queue_mem_mq_dma_handle,
			    qlt->mq_resp[qi].mq_ndx_to_fw << 6,
			    (RESPONSE_QUEUE_MQ_ENTRIES -
			    qlt->mq_resp[qi].mq_ndx_to_fw) << 6,
			    DDI_DMA_SYNC_FORCPU);
			(void) ddi_dma_sync(
			    qlt->mq_resp[qi].queue_mem_mq_dma_handle, 0,
			    qlt->mq_resp[qi].mq_ndx_from_fw << 6,
			    DDI_DMA_SYNC_FORCPU);
		} else {
			(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
			    RESPONSE_QUEUE_OFFSET +
			    (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
			    (RESPONSE_QUEUE_ENTRIES -
			    qlt->mq_resp[qi].mq_ndx_to_fw) << 6,
			    DDI_DMA_SYNC_FORCPU);
			(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
			    RESPONSE_QUEUE_OFFSET,
			    qlt->mq_resp[qi].mq_ndx_from_fw << 6,
			    DDI_DMA_SYNC_FORCPU);
		}
	}

	return (total_ent);
}
4981 
/*
 * Drain and dispatch newly arrived IOCBs on response queue `qi', then
 * publish the new consumer index to the firmware (via the MQ BAR when
 * multi-queue is enabled, else the legacy out-pointer register).
 */
void
qlt_handle_resp_queue_update(qlt_state_t *qlt, uint16_t qi)
{
	uint32_t total_ent;
	uint8_t c;

	/* Nothing new from the firmware. */
	if (qlt->mq_resp[qi].mq_ndx_to_fw == qlt->mq_resp[qi].mq_ndx_from_fw)
		return;

	total_ent = qlt_sync_resp_queue(qlt, qi);

	do {
		uint32_t qe = qlt->mq_resp[qi].mq_ndx_to_fw;
		caddr_t resp = &qlt->mq_resp[qi].mq_ptr[qe << 6];

		uint32_t ent_cnt;

		/*
		 * Only PUREX (0x51) IOCBs span multiple 64-byte entries;
		 * their count is in byte 1.  Everything else is one entry.
		 */
		ent_cnt = (uint32_t)(resp[0] == 0x51 ? resp[1] : 1);
		if (ent_cnt > total_ent) {
			/* IOCB not fully present yet; wait for the rest. */
			break;
		}
		switch ((uint8_t)(resp[0])) {
		case 0x12:	/* CTIO completion */
			qlt_handle_ctio_completion(qlt, (uint8_t *)resp, qi);
			break;
		case 0x0e:	/* NACK */
			/* Do Nothing */
			break;
		case 0x1b:	/* Verify FW */
			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
			break;
		case 0x29:	/* CT PassThrough */
			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
			break;
		case 0x32:	/* Report ID */
			EL(qlt, "report Id received [type %xh]\n", resp[0]);
			break;
		case 0x33:	/* Abort IO IOCB completion */
			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
			break;
		case 0x51:	/* PUREX */
			qlt_handle_purex(qlt, (uint8_t *)resp);
			break;
		case 0x52:
			/* dereg (implicit logout) completion */
			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
			break;
		case 0x53:	/* ELS passthrough */
			/*
			 * Bits 7:5 of byte 0x1f select the flavor:
			 * 0 = solicited completion, 3 = unsolicited
			 * abort, anything else = unsolicited completion.
			 */
			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
			if (c == 0) {
				qlt_handle_sol_els_completion(qlt,
				    (uint8_t *)resp);
			} else if (c == 3) {
				qlt_handle_unsol_els_abort_completion(qlt,
				    (uint8_t *)resp);
			} else {
				qlt_handle_unsol_els_completion(qlt,
				    (uint8_t *)resp);
			}
			break;
		case 0x54:	/* ABTS received */
			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp, qi);
			break;
		case 0x55:	/* ABTS completion */
			qlt_handle_abts_completion(qlt, (uint8_t *)resp, qi);
			break;
		default:
			EL(qlt, "response entry=%xh\n", resp[0]);
			break;
		}
		/* Advance the consumer index modulo this queue's depth. */
		if (qi != 0) {
			qlt->mq_resp[qi].mq_ndx_to_fw =
			    (qlt->mq_resp[qi].mq_ndx_to_fw + ent_cnt) &
			    (RESPONSE_QUEUE_MQ_ENTRIES - 1);
		} else {
			qlt->mq_resp[qi].mq_ndx_to_fw =
			    (qlt->mq_resp[qi].mq_ndx_to_fw + ent_cnt) &
			    (RESPONSE_QUEUE_ENTRIES - 1);
		}
		total_ent -= ent_cnt;
	} while (total_ent > 0);
	/* Tell the firmware how far we have consumed. */
	if (qlt->qlt_mq_enabled) {
		MQBAR_WR32(qlt, (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_OUT,
		    qlt->mq_resp[qi].mq_ndx_to_fw);
	} else {
		REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->mq_resp[qi].mq_ndx_to_fw);
	}
}
5069 
/*
 * Map a 24-bit FC port id to its firmware login handle using the
 * MBC_GET_ID_LIST (0x7C) mailbox command.
 *
 * id		- port id to look up
 * cmd_handle	- handle proposed by the incoming login, or
 *		  FCT_HANDLE_NONE
 * ret_handle	- on success: the handle already bound to `id', or
 *		  cmd_handle if `id' was not found in the firmware list
 *
 * Returns QLT_FAILURE if cmd_handle conflicts with an existing
 * id/handle binding, the mailbox status on command failure,
 * STMF_ALLOC_FAILURE if the mailbox buffer can't be allocated,
 * otherwise FCT_SUCCESS.
 */
fct_status_t
qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
    uint16_t *ret_handle)
{
	fct_status_t ret;
	mbox_cmd_t *mcp;
	uint16_t n;
	uint16_t h;
	uint32_t ent_id;
	uint8_t *p;
	int found = 0;

	/* Buffer for the returned id list: room for 2048 8-byte entries. */
	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
	if (mcp == NULL) {
		return (STMF_ALLOC_FAILURE);
	}
	mcp->to_fw[0] = MBC_GET_ID_LIST;
	mcp->to_fw[8] = 2048 * 8;	/* buffer size in bytes */
	mcp->to_fw[9] = 0;
	mcp->to_fw_mask |= BIT_9 | BIT_8;	/* outbound regs 8 and 9 */
	mcp->from_fw_mask |= BIT_1 | BIT_2;	/* entry count comes back */

	ret = qlt_mailbox_command(qlt, mcp);
	if (ret != QLT_SUCCESS) {
		EL(qlt, "qlt_mbox_command=7Ch status=%llxh\n", ret);
		cmn_err(CE_WARN, "qlt(%d) GET ID list failed, ret = %llx, "
		    "mb0=%x, mb1=%x, mb2=%x", qlt->instance, (long long)ret,
		    mcp->from_fw[0], mcp->from_fw[1], mcp->from_fw[2]);
		qlt_free_mailbox_command(qlt, mcp);
		return (ret);
	}

	EL(qlt, "mbx cmd=7Ch, GET_ID_LIST id=%x fw[1]=%x\n",
	    id, mcp->from_fw[1]);

	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
	p = mcp->dbuf->db_sglist[0].seg_addr;
	/*
	 * Each 8-byte entry: bytes 0-2 = port id (little-endian),
	 * bytes 4-5 = login handle.  from_fw[1] holds the entry count.
	 */
	for (n = 0; n < mcp->from_fw[1]; n++) {
		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
		h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
		if (ent_id == id) {
			found = 1;
			*ret_handle = h;
			/* Same port id must not switch handles mid-login. */
			if ((cmd_handle != FCT_HANDLE_NONE) &&
			    (cmd_handle != h)) {
				cmn_err(CE_WARN, "qlt(%d) login for portid %x "
				    "came in with handle %x, while the portid "
				    "was already using a different handle %x",
				    qlt->instance, id, cmd_handle, h);
				qlt_free_mailbox_command(qlt, mcp);
				return (QLT_FAILURE);
			}
			break;
		}
		/* The proposed handle must not belong to another port id. */
		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
			cmn_err(CE_WARN, "qlt(%d) login for portid %x came in "
			    "with handle %x, while the handle was already in "
			    "use for portid %x",
			    qlt->instance, id, cmd_handle, ent_id);
			qlt_free_mailbox_command(qlt, mcp);
			return (QLT_FAILURE);
		}
		p += 8;
	}
	if (!found) {
		*ret_handle = cmd_handle;
	}
	qlt_free_mailbox_command(qlt, mcp);
	return (FCT_SUCCESS);
}
5140 
5141 /* ARGSUSED */
5142 fct_status_t
qlt_fill_plogi_req(fct_local_port_t * port,fct_remote_port_t * rp,fct_cmd_t * login)5143 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
5144     fct_cmd_t *login)
5145 {
5146 	uint8_t *p;
5147 
5148 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
5149 	p[0] = ELS_OP_PLOGI;
5150 	*((uint16_t *)(&p[4])) = 0x2020;
5151 	p[7] = 3;
5152 	p[8] = 0x88;
5153 	p[10] = 8;
5154 	p[13] = 0xff; p[15] = 0x1f;
5155 	p[18] = 7; p[19] = 0xd0;
5156 
5157 	bcopy(port->port_pwwn, p + 20, 8);
5158 	bcopy(port->port_nwwn, p + 28, 8);
5159 
5160 	p[68] = 0x80;
5161 	p[74] = 8;
5162 	p[77] = 0xff;
5163 	p[81] = 1;
5164 
5165 	return (FCT_SUCCESS);
5166 }
5167 
/* ARGSUSED */
/*
 * Counterpart of qlt_fill_plogi_req() for unsolicited logins: there is
 * nothing for the driver to fill in here (presumably the firmware
 * supplies the PLOGI ACC payload itself -- confirm against the
 * firmware interface), so this just reports success.
 */
fct_status_t
qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
    fct_cmd_t *login)
{
	return (FCT_SUCCESS);
}
5175 
5176 fct_status_t
qlt_register_remote_port(fct_local_port_t * port,fct_remote_port_t * rp,fct_cmd_t * login)5177 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
5178     fct_cmd_t *login)
5179 {
5180 	uint16_t h;
5181 	fct_status_t ret;
5182 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
5183 
5184 	switch (rp->rp_id) {
5185 	case 0xFFFFFC:	h = 0x7FC; break;
5186 	case 0xFFFFFD:	h = 0x7FD; break;
5187 	case 0xFFFFFE:	h = 0x7FE; break;
5188 	case 0xFFFFFF:	h = 0x7FF; break;
5189 	default:
5190 		ret = qlt_portid_to_handle(qlt, rp->rp_id,
5191 		    login->cmd_rp_handle, &h);
5192 		if (ret != FCT_SUCCESS) {
5193 			EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
5194 			return (ret);
5195 		}
5196 	}
5197 
5198 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
5199 		ret = qlt_fill_plogi_req(port, rp, login);
5200 	} else {
5201 		ret = qlt_fill_plogi_resp(port, rp, login);
5202 	}
5203 
5204 	if (ret != FCT_SUCCESS) {
5205 		EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
5206 		return (ret);
5207 	}
5208 
5209 	EL(qlt, "rport id=%xh cmd_type=%xh handle=%xh(%xh)\n",
5210 	    rp->rp_id, login->cmd_type, h, rp->rp_handle);
5211 
5212 	if (h == FCT_HANDLE_NONE)
5213 		return (FCT_SUCCESS);
5214 
5215 	if (rp->rp_handle == FCT_HANDLE_NONE) {
5216 		rp->rp_handle = h;
5217 		return (FCT_SUCCESS);
5218 	}
5219 
5220 	if (rp->rp_handle == h)
5221 		return (FCT_SUCCESS);
5222 
5223 	EL(qlt, "failed, rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
5224 	return (FCT_FAILURE);
5225 }
5226 
/* invoked in single thread */
/*
 * Log out a remote port by submitting an implicit-LOGO (0x52) IOCB to
 * the firmware and waiting, up to DEREG_RP_TIMEOUT, for the completion
 * path (presumably qlt_handle_dereg_completion via rp_dereg_cv --
 * confirm) to deliver the status.  Returns FCT_SUCCESS immediately if
 * the port is already offline/offlining, FCT_BUSY on queue-full or
 * timeout, otherwise the completion status.
 */
fct_status_t
qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
{
	uint8_t *req;
	qlt_state_t *qlt;
	clock_t	dereg_req_timer;
	fct_status_t ret;

	qlt = (qlt_state_t *)port->port_fca_private;

	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
	    (qlt->qlt_state == FCT_STATE_OFFLINING))
		return (FCT_SUCCESS);
	/* only one dereg may be outstanding at a time */
	ASSERT(qlt->rp_id_in_dereg == 0);

	/*
	 * 83xx/27xx chips submit through request queue 0; older chips
	 * use the priority request queue.  The matching lock stays held
	 * across submission and the cv_timedwait below.
	 */
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_enter(&qlt->mq_req[0].mq_lock);
		req = (uint8_t *)qlt_get_req_entries(qlt, 1, 0);
		if (req == NULL) {
			EL(qlt, "req = NULL\n");
			mutex_exit(&qlt->mq_req[0].mq_lock);
			return (FCT_BUSY);
		}
	} else {
		mutex_enter(&qlt->preq_lock);
		req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
		if (req == NULL) {
			EL(qlt, "req = NULL\n");
			mutex_exit(&qlt->preq_lock);
			return (FCT_BUSY);
		}
	}
	/* Build the Logout (0x52) IOCB. */
	bzero(req, IOCB_SIZE);
	req[0] = 0x52; req[1] = 1;
	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
	/* mark which port id the pending completion belongs to */
	qlt->rp_id_in_dereg = rp->rp_id;
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		qlt_submit_req_entries(qlt, 1, 0);
	} else {
		qlt_submit_preq_entries(qlt, 1);
	}

	/* Wait for the completion handler to signal rp_dereg_cv. */
	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
	if (cv_timedwait(&qlt->rp_dereg_cv,
	    (((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) ?
	    &qlt->mq_req[0].mq_lock : &qlt->preq_lock),
	    dereg_req_timer) > 0) {
		ret = qlt->rp_dereg_status;
	} else {
		ret = FCT_BUSY;	/* timed out */
	}
	qlt->rp_dereg_status = 0;
	qlt->rp_id_in_dereg = 0;
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_exit(&qlt->mq_req[0].mq_lock);
	} else {
		mutex_exit(&qlt->preq_lock);
	}

	EL(qlt, "Dereg remote port(%Xh), ret=%llxh\n",
	    rp->rp_id, ret);

	return (ret);
}
5295 
/*
 * Pass received ELS up to framework.
 *
 * Parses a PUREX (0x51) IOCB, allocates an FCT_STRUCT_CMD_RCVD_ELS
 * command, copies the ELS payload (byte-swapped by the firmware) out
 * of the response queue -- handling queue wrap and continuation-IOCB
 * headers -- and posts it via fct_post_rcvd_cmd().  Allocation failure
 * or an oversized payload triggers a fatal port shutdown/reset.
 */
static void
qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
{
	fct_cmd_t		*cmd;
	fct_els_t		*els;
	qlt_cmd_t		*qcmd;
	uint32_t		payload_size;
	uint32_t		remote_portid;
	uint8_t			*pldptr, *bndrptr;
	int			i, off;
	uint16_t		iocb_flags;
	char			info[160];

	/* 24-bit S_ID of the sender, assembled from bytes 0x18-0x1A. */
	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
	/* Payload length field location depends on flag BIT_15. */
	if (iocb_flags & BIT_15) {
		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
	} else {
		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
	}

	/* resp[1] = IOCB entry count; the payload must fit within them. */
	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
		EL(qlt, "payload is too large = %xh\n", payload_size);
		cmn_err(CE_WARN, "handle_purex: payload is too large");
		goto cmd_null;
	}

	/* Payload storage is tacked onto the end of the qlt_cmd_t. */
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
	if (cmd == NULL) {
		EL(qlt, "fct_alloc cmd==NULL\n");
cmd_null:;
		(void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	cmd->cmd_port = qlt->qlt_port;
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
	if (cmd->cmd_rp_handle == 0xFFFF) {
		/* 0xFFFF from firmware means "no logged-in port" */
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	}

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	els->els_req_size = (uint16_t)payload_size;
	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
	    GET_STRUCT_SIZE(qlt_cmd_t));
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
	cmd->cmd_rportid = remote_portid;
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
	/*
	 * Copy the payload out of the queue.  bndrptr marks the end of
	 * response queue 0 so the copy can wrap to its start.
	 * NOTE(review): this assumes PUREX IOCBs arrive only on response
	 * queue 0 -- confirm against the multi-queue configuration.
	 */
	pldptr = &resp[0x2C];
	bndrptr = (uint8_t *)(qlt->mq_resp[0].mq_ptr
	    + (RESPONSE_QUEUE_ENTRIES << 6));
	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
		/* Take care of fw's swapping of payload */
		els->els_req_payload[i] = pldptr[3];
		els->els_req_payload[i+1] = pldptr[2];
		els->els_req_payload[i+2] = pldptr[1];
		els->els_req_payload[i+3] = pldptr[0];
		pldptr += 4;
		if (pldptr == bndrptr)
			pldptr = (uint8_t *)qlt->mq_resp[0].mq_ptr;
		off += 4;
		if (off >= IOCB_SIZE) {
			/*
			 * Crossed into a continuation IOCB: skip its
			 * 4-byte header.  NOTE(review): no wrap re-check
			 * after this skip -- presumably the size check
			 * above keeps us in range; TODO confirm.
			 */
			off = 4;
			pldptr += 4;
		}
	}

	EL(qlt, "remote portid = %xh logi/o(%xh) to us revd rex1=%xh\n",
	    remote_portid, els->els_req_payload[0], qcmd->fw_xchg_addr);

	fct_post_rcvd_cmd(cmd, 0);
}
5381 
5382 fct_status_t
qlt_send_cmd_response(fct_cmd_t * cmd,uint32_t ioflags)5383 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
5384 {
5385 	qlt_state_t	*qlt;
5386 	char		info[160];
5387 
5388 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
5389 
5390 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
5391 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
5392 			EL(qlt, "ioflags = %xh\n", ioflags);
5393 			goto fatal_panic;
5394 		} else {
5395 			return (qlt_send_status(qlt, cmd));
5396 		}
5397 	}
5398 
5399 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
5400 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
5401 			goto fatal_panic;
5402 		} else {
5403 			return (qlt_send_els_response(qlt, cmd));
5404 		}
5405 	}
5406 
5407 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
5408 		cmd->cmd_handle = 0;
5409 	}
5410 
5411 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
5412 		return (qlt_send_abts_response(qlt, cmd, 0));
5413 	} else {
5414 		EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
5415 		ASSERT(0);
5416 		return (FCT_FAILURE);
5417 	}
5418 
5419 fatal_panic:;
5420 	(void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
5421 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
5422 	    ioflags);
5423 	info[159] = 0;
5424 	(void) fct_port_shutdown(qlt->qlt_port,
5425 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
5426 	return (FCT_FAILURE);
5427 }
5428 
/* ARGSUSED */
/*
 * Move data for an FCP exchange by building a CTIO type 7 (0x12)
 * IOCB on request queue qcmd->qid.  LU-supplied buffers arrive as
 * scatter/gather lists and may need extra continuation (0x0a) IOCBs
 * for their DMA cookies; driver-allocated buffers are a single
 * contiguous segment.  Returns FCT_BUSY when the request queue is
 * full, otherwise STMF_SUCCESS.
 */
fct_status_t
qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
	qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	uint8_t *req, rcnt;
	uint16_t flags;
	uint16_t cookie_count;
	uint32_t ent_cnt;
	uint16_t qi;

	qi = qcmd->qid;

	if (dbuf->db_handle == 0)
		qcmd->dbuf = dbuf;
	/* attribute bits from the original ATIO, shifted into place */
	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		flags = (uint16_t)(flags | 2);	/* data-in (to initiator) */
		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
	} else {
		flags = (uint16_t)(flags | 1);	/* data-out (from initiator) */
	}

	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
		flags = (uint16_t)(flags | BIT_15);

	if (dbuf->db_flags & DB_LU_DATA_BUF) {
		/*
		 * Data bufs from LU are in scatter/gather list format.
		 */
		cookie_count = qlt_get_cookie_count(dbuf);
		rcnt = qlt_get_iocb_count(cookie_count);
	} else {
		cookie_count = 1;
		rcnt = 1;
	}
	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}
	/* Fill in the CTIO type 7 main record. */
	bzero(req, IOCB_SIZE);
	req[0] = 0x12;
	req[1] = rcnt;
	req[2] = dbuf->db_handle;
	QMEM_WR32_REQ(qlt, qi, req+4, cmd->cmd_handle);
	QMEM_WR16_REQ(qlt, qi, req+8, cmd->cmd_rp->rp_handle);
	QMEM_WR16_REQ(qlt, qi, req+10, 60);	/* 60 seconds timeout */
	QMEM_WR16_REQ(qlt, qi, req+12, cookie_count);
	QMEM_WR32_REQ(qlt, qi, req+0x10, cmd->cmd_rportid);
	QMEM_WR32_REQ(qlt, qi, req+0x14, qcmd->fw_xchg_addr);
	QMEM_WR16_REQ(qlt, qi, req+0x1A, flags);
	QMEM_WR16_REQ(qlt, qi, req+0x20, cmd->cmd_oxid);
	QMEM_WR32_REQ(qlt, qi, req+0x24, dbuf->db_relative_offset);
	QMEM_WR32_REQ(qlt, qi, req+0x2C, dbuf->db_data_size);
	if (dbuf->db_flags & DB_LU_DATA_BUF) {
		uint8_t			*qptr;	/* qlt continuation segs */
		uint16_t		cookie_resid;
		uint16_t		cont_segs;
		ddi_dma_cookie_t	cookie, *ckp;

		/*
		 * See if the dma cookies are in simple array format.
		 */
		ckp = qlt_get_cookie_array(dbuf);

		/*
		 * Program the first segment into main record.
		 */
		if (ckp) {
			ASSERT(ckp->dmac_size);
			QMEM_WR64_REQ(qlt, qi, req+0x34, ckp->dmac_laddress);
			QMEM_WR32_REQ(qlt, qi, req+0x3c, ckp->dmac_size);
		} else {
			qlt_ddi_dma_nextcookie(dbuf, &cookie);
			ASSERT(cookie.dmac_size);
			QMEM_WR64_REQ(qlt, qi, req+0x34, cookie.dmac_laddress);
			QMEM_WR32_REQ(qlt, qi, req+0x3c, cookie.dmac_size);
		}
		cookie_resid = cookie_count-1;

		ent_cnt = (qi == 0) ? REQUEST_QUEUE_ENTRIES :
		    REQUEST_QUEUE_MQ_ENTRIES;
		/*
		 * Program remaining segments into continuation records.
		 */
		while (cookie_resid) {
			/* advance to the next IOCB slot, wrapping at end */
			req += IOCB_SIZE;
			if (req >= (uint8_t *)(qlt->mq_req[qi].mq_ptr +
			    (ent_cnt * IOCB_SIZE))) {
				req = (uint8_t *)(qlt->mq_req[qi].mq_ptr);
			}

			req[0] = 0x0a;	/* 64-bit continuation IOCB */
			req[1] = 1;
			req[2] = req[3] = 0;	/* tidy */
			qptr = &req[4];
			/* up to CONT_A64_DATA_SEGMENTS (addr,len) pairs */
			for (cont_segs = CONT_A64_DATA_SEGMENTS;
			    cont_segs && cookie_resid; cont_segs--) {

				if (ckp) {
					++ckp;		/* next cookie */
					ASSERT(ckp->dmac_size != 0);
					QMEM_WR64_REQ(qlt, qi, qptr,
					    ckp->dmac_laddress);
					qptr += 8;	/* skip over laddress */
					QMEM_WR32_REQ(qlt, qi, qptr,
					    ckp->dmac_size);
					qptr += 4;	/* skip over size */
				} else {
					qlt_ddi_dma_nextcookie(dbuf, &cookie);
					ASSERT(cookie.dmac_size != 0);
					QMEM_WR64_REQ(qlt, qi, qptr,
					    cookie.dmac_laddress);
					qptr += 8;	/* skip over laddress */
					QMEM_WR32_REQ(qlt, qi, qptr,
					    cookie.dmac_size);
					qptr += 4;	/* skip over size */
				}
				cookie_resid--;
			}
			/*
			 * zero unused remainder of IOCB
			 */
			if (cont_segs) {
				size_t resid;
				resid = (size_t)((uintptr_t)(req+IOCB_SIZE) -
				    (uintptr_t)qptr);
				ASSERT(resid < IOCB_SIZE);
				bzero(qptr, resid);
			}
		}
	} else {
		/* Single, contiguous buffer */
		QMEM_WR64_REQ(qlt, qi, req+0x34, bctl->bctl_dev_addr);
		QMEM_WR32_REQ(qlt, qi, req+0x34+8, dbuf->db_data_size);
	}

	qlt_submit_req_entries(qlt, rcnt, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (STMF_SUCCESS);
}
5575 
5576 /*
5577  * We must construct proper FCP_RSP_IU now. Here we only focus on
5578  * the handling of FCP_SNS_INFO. If there's protocol failures (FCP_RSP_INFO),
5579  * we could have caught them before we enter here.
5580  */
5581 fct_status_t
qlt_send_status(qlt_state_t * qlt,fct_cmd_t * cmd)5582 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
5583 {
5584 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
5585 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
5586 	qlt_dmem_bctl_t *bctl;
5587 	uint32_t size;
5588 	uint8_t *req, *fcp_rsp_iu;
5589 	uint8_t *psd, sensbuf[24];		/* sense data */
5590 	uint16_t flags;
5591 	uint16_t scsi_status;
5592 	int use_mode2;
5593 	int ndx;
5594 	uint16_t qi;
5595 
5596 	qi = qcmd->qid;
5597 
5598 	/*
5599 	 * Enter fast channel for non check condition
5600 	 */
5601 	if (task->task_scsi_status != STATUS_CHECK) {
5602 		/*
5603 		 * We will use mode1
5604 		 */
5605 		flags = (uint16_t)(BIT_6 | BIT_15 |
5606 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
5607 		scsi_status = (uint16_t)task->task_scsi_status;
5608 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
5609 			scsi_status = (uint16_t)(scsi_status | FCP_RESID_OVER);
5610 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
5611 			scsi_status = (uint16_t)(scsi_status | FCP_RESID_UNDER);
5612 		}
5613 		qcmd->dbuf_rsp_iu = NULL;
5614 
5615 		/*
5616 		 * Fillout CTIO type 7 IOCB
5617 		 */
5618 		mutex_enter(&qlt->mq_req[qi].mq_lock);
5619 		req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
5620 		if (req == NULL) {
5621 			mutex_exit(&qlt->mq_req[qi].mq_lock);
5622 			return (FCT_BUSY);
5623 		}
5624 
5625 		/*
5626 		 * Common fields
5627 		 */
5628 		bzero(req, IOCB_SIZE);
5629 		req[0x00] = 0x12;
5630 		req[0x01] = 0x1;
5631 		req[0x02] = BIT_7;	/* indicate if it's a pure status req */
5632 		QMEM_WR32_REQ(qlt, qi, req + 0x04, cmd->cmd_handle);
5633 		QMEM_WR16_REQ(qlt, qi, req + 0x08, cmd->cmd_rp->rp_handle);
5634 		QMEM_WR32_REQ(qlt, qi, req + 0x10, cmd->cmd_rportid);
5635 		QMEM_WR32_REQ(qlt, qi, req + 0x14, qcmd->fw_xchg_addr);
5636 
5637 		/* handle TMF completion - !!! Important FIX */
5638 		if (task->task_mgmt_function) {
5639 			scsi_status =
5640 			    (uint16_t)(scsi_status | FCP_RESP_LEN_VALID);
5641 
5642 			/* no sense length, 4 bytes of resp info */
5643 			QMEM_WR16_REQ(qlt, qi, req + 0x24, 4);
5644 		}
5645 
5646 		/*
5647 		 * Mode-specific fields
5648 		 */
5649 		QMEM_WR16_REQ(qlt, qi, req + 0x1A, flags);
5650 		QMEM_WR32_REQ(qlt, qi, req + 0x1C, task->task_resid);
5651 		QMEM_WR16_REQ(qlt, qi, req + 0x20, cmd->cmd_oxid);
5652 		QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
5653 
5654 		/*
5655 		 * Trigger FW to send SCSI status out
5656 		 */
5657 		qlt_submit_req_entries(qlt, 1, qi);
5658 		mutex_exit(&qlt->mq_req[qi].mq_lock);
5659 		return (STMF_SUCCESS);
5660 	}
5661 
5662 	ASSERT(task->task_scsi_status == STATUS_CHECK);
5663 	/*
5664 	 * Decide the SCSI status mode, that should be used
5665 	 */
5666 	use_mode2 = (task->task_sense_length > 24);
5667 
5668 	/*
5669 	 * Prepare required information per the SCSI status mode
5670 	 */
5671 	flags = (uint16_t)(BIT_15 |
5672 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
5673 	if (use_mode2) {
5674 		flags = (uint16_t)(flags | BIT_7);
5675 
5676 		size = task->task_sense_length;
5677 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
5678 		    task->task_sense_length, &size, 0);
5679 		if (!qcmd->dbuf_rsp_iu) {
5680 			return (FCT_ALLOC_FAILURE);
5681 		}
5682 
5683 		/*
5684 		 * Start to construct FCP_RSP IU
5685 		 */
5686 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
5687 		bzero(fcp_rsp_iu, 24);
5688 
5689 		/*
5690 		 * FCP_RSP IU flags, byte10
5691 		 */
5692 		fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
5693 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
5694 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
5695 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
5696 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
5697 		}
5698 
5699 		/*
5700 		 * SCSI status code, byte11
5701 		 */
5702 		fcp_rsp_iu[11] = task->task_scsi_status;
5703 
5704 		/*
5705 		 * FCP_RESID (Overrun or underrun)
5706 		 */
5707 		fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
5708 		fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
5709 		fcp_rsp_iu[14] = (uint8_t)((task->task_resid >>  8) & 0xFF);
5710 		fcp_rsp_iu[15] = (uint8_t)((task->task_resid >>  0) & 0xFF);
5711 
5712 		/*
5713 		 * FCP_SNS_LEN
5714 		 */
5715 		fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
5716 		    0xFF);
5717 		fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
5718 		    0xFF);
5719 
5720 		/*
5721 		 * FCP_RSP_LEN
5722 		 */
5723 		/*
5724 		 * no FCP_RSP_INFO
5725 		 */
5726 		/*
5727 		 * FCP_SNS_INFO
5728 		 */
5729 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
5730 		    task->task_sense_length);
5731 
5732 		/*
5733 		 * Ensure dma data consistency
5734 		 */
5735 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
5736 	} else {
5737 		flags = (uint16_t)(flags | BIT_6);
5738 
5739 		scsi_status = (uint16_t)task->task_scsi_status;
5740 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
5741 			scsi_status = (uint16_t)(scsi_status | BIT_10);
5742 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
5743 			scsi_status = (uint16_t)(scsi_status | BIT_11);
5744 		}
5745 		if (task->task_sense_length) {
5746 			scsi_status = (uint16_t)(scsi_status | BIT_9);
5747 		}
5748 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
5749 		qcmd->dbuf_rsp_iu = NULL;
5750 	}
5751 
5752 	/*
5753 	 * Fillout CTIO type 7 IOCB
5754 	 */
5755 	mutex_enter(&qlt->mq_req[qi].mq_lock);
5756 	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
5757 	if (req == NULL) {
5758 		mutex_exit(&qlt->mq_req[qi].mq_lock);
5759 		if (use_mode2) {
5760 			qlt_dmem_free(cmd->cmd_port->port_fds,
5761 			    qcmd->dbuf_rsp_iu);
5762 			qcmd->dbuf_rsp_iu = NULL;
5763 		}
5764 		return (FCT_BUSY);
5765 	}
5766 
5767 	/*
5768 	 * Common fields
5769 	 */
5770 	bzero(req, IOCB_SIZE);
5771 	req[0x00] = 0x12;
5772 	req[0x01] = 0x1;
5773 	req[0x02] = BIT_7;	/* to indicate if it's a pure status req */
5774 	QMEM_WR32_REQ(qlt, qi, req + 0x04, cmd->cmd_handle);
5775 	QMEM_WR16_REQ(qlt, qi, req + 0x08, cmd->cmd_rp->rp_handle);
5776 	QMEM_WR16_REQ(qlt, qi, req + 0x0A, 0);	/* not timed by FW */
5777 	if (use_mode2) {
5778 		/* FCP RSP IU data field */
5779 		QMEM_WR16_REQ(qlt, qi, req+0x0C, 1);
5780 	}
5781 	QMEM_WR32_REQ(qlt, qi, req + 0x10, cmd->cmd_rportid);
5782 	QMEM_WR32_REQ(qlt, qi, req + 0x14, qcmd->fw_xchg_addr);
5783 
5784 	/*
5785 	 * Mode-specific fields
5786 	 */
5787 	if (!use_mode2) {
5788 		QMEM_WR16_REQ(qlt, qi, req + 0x18, task->task_sense_length);
5789 	}
5790 	QMEM_WR16_REQ(qlt, qi, req + 0x1A, flags);
5791 	QMEM_WR32_REQ(qlt, qi, req + 0x1C, task->task_resid);
5792 	QMEM_WR16_REQ(qlt, qi, req + 0x20, cmd->cmd_oxid);
5793 	if (use_mode2) {
5794 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
5795 		QMEM_WR32_REQ(qlt, qi, req + 0x2C,
5796 		    24 + task->task_sense_length);
5797 		QMEM_WR64_REQ(qlt, qi, req + 0x34, bctl->bctl_dev_addr);
5798 		QMEM_WR32_REQ(qlt, qi, req + 0x3C,
5799 		    24 + task->task_sense_length);
5800 	} else {
5801 		QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
5802 		psd = req+0x28;
5803 
5804 		/*
5805 		 * Data in sense buf is always big-endian, data in IOCB
5806 		 * should always be little-endian, so we must do swapping.
5807 		 */
5808 		size = ((task->task_sense_length + 3) & (~3));
5809 		for (ndx = 0; ndx < size; ndx += 4) {
5810 			psd[ndx + 0] = sensbuf[ndx + 3];
5811 			psd[ndx + 1] = sensbuf[ndx + 2];
5812 			psd[ndx + 2] = sensbuf[ndx + 1];
5813 			psd[ndx + 3] = sensbuf[ndx + 0];
5814 		}
5815 	}
5816 
5817 	/*
5818 	 * Trigger FW to send SCSI status out
5819 	 */
5820 	qlt_submit_req_entries(qlt, 1, qi);
5821 	mutex_exit(&qlt->mq_req[qi].mq_lock);
5822 
5823 	return (STMF_SUCCESS);
5824 }
5825 
fct_status_t
qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	qlt_cmd_t	*qcmd;
	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
	uint8_t *req, *addr;
	qlt_dmem_bctl_t *bctl;
	uint32_t minsize;
	uint8_t elsop, req1f;
	uint16_t qi = 0;	/* ELS responses always go out request queue 0 */

	addr = els->els_resp_payload;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;

	/*
	 * Stage the ELS response payload in DMA-able memory so the firmware
	 * can fetch it.  On allocation failure return FCT_BUSY so the
	 * framework retries later.
	 */
	minsize = els->els_resp_size;
	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
	if (qcmd->dbuf == NULL)
		return (FCT_BUSY);

	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;

	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);

	/*
	 * First byte of the response payload distinguishes LS_ACC (0x02)
	 * from a reject; the control bits below presumably tell the
	 * firmware which one this is -- confirm against the ISP24xx
	 * ELS Passthrough IOCB spec.
	 */
	if (addr[0] == 0x02) {	/* ACC */
		req1f = BIT_5;
	} else {
		req1f = BIT_6;
	}
	elsop = els->els_req_payload[0];
	/* Session-affecting ELS ops get an extra control bit (BIT_4) */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		/* No request queue space; free the staged buffer and retry */
		EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
		return (FCT_BUSY);
	}
	/* Build the ELS Passthrough IOCB (entry type 0x53) */
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	/* In N2N (point-to-point) topology the local S_ID is filled in too */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	/* Response length, single data-segment address and length */
	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);

	EL(qlt, "elsop=%xh req1f=%xh IOCB_TYPE_ELSPASS: rex1=%xh\n",
	    elsop, req1f, qcmd->fw_xchg_addr);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
5896 
/*
 * Send an ABTS accept/reject on the wire by replaying the saved inbound
 * ABTS IOCB (qcmd->buf) back to the firmware as an ABTS Response IOCB
 * (entry type 0x55), with the source/destination IDs swapped and the
 * response payload filled in.  'terminate' is stored in byte 2 of the
 * IOCB and also requests exchange termination via word 0xC.
 */
fct_status_t
qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
{
	qlt_abts_cmd_t *qcmd;
	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
	uint8_t *req;
	uint32_t lportid;
	uint32_t fctl;
	int i;
	uint16_t qi;
	uint32_t rex1, rex2;
	uint8_t temp[64];

	qi = 0;

	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		/*
		 * No request queue space.  Build the payload into a scratch
		 * buffer only so the exchange addresses can be logged, then
		 * let the caller retry (FCT_BUSY).
		 */
		bcopy(qcmd->buf, &temp, IOCB_SIZE);
		for (i = 0; i < 12; i += 4) {
			/* Take care of firmware's LE requirement */
			temp[0x2C+i] = abts->abts_resp_payload[i+3];
			temp[0x2C+i+1] = abts->abts_resp_payload[i+2];
			temp[0x2C+i+2] = abts->abts_resp_payload[i+1];
			temp[0x2C+i+3] = abts->abts_resp_payload[i];
		}
		rex1 = QMEM_RD32(qlt, &temp[0x10]);
		rex2 = QMEM_RD32(qlt, &temp[0x3C]);

		EL(qlt, "req = NULL, %xh %xh %p %xh %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, rex1, rex2);

		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}
	/* Start from the saved inbound ABTS entry and edit it in place */
	bcopy(qcmd->buf, req, IOCB_SIZE);
	/* Original D_ID (our port) becomes the response S_ID and vice versa */
	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
	fctl = QMEM_RD32(qlt, req+0x1C);
	/*
	 * Flip/clear exchange-context bits and set last-sequence bits in
	 * F_CTL -- NOTE(review): exact bit semantics per FC-FS F_CTL layout,
	 * confirm against the ISP24xx firmware spec.
	 */
	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	/* Use the logged-in handle if we have an rp, else the raw handle */
	if (cmd->cmd_rp)
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	else
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
	if (terminate) {
		QMEM_WR16(qlt, (&req[0xC]), 1);
	}
	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
	req[0x17] = abts->abts_resp_rctl;
	QMEM_WR32(qlt, req+0x18, lportid);
	QMEM_WR32(qlt, req+0x1C, fctl);
	req[0x23]++;	/* bump SEQ_ID for the response sequence */
	for (i = 0; i < 12; i += 4) {
		/* Take care of firmware's LE requirement */
		req[0x2C+i] = abts->abts_resp_payload[i+3];
		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
		req[0x2C+i+3] = abts->abts_resp_payload[i];
	}

	rex1 = QMEM_RD32(qlt, &req[0x10]);
	rex2 = QMEM_RD32(qlt, &req[0x3C]);

	EL(qlt, "%xh %xh %d %p %xh %xh\n",
	    QMEM_RD16(qlt, req+0x26), QMEM_RD16(qlt, req+0x24),
	    terminate, cmd, rex1, rex2);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
5972 
5973 static void
qlt_handle_inot(qlt_state_t * qlt,uint8_t * inot)5974 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
5975 {
5976 	int i;
5977 	uint32_t d;
5978 	caddr_t req;
5979 	uint16_t qi;
5980 	uint8_t *entry = inot;
5981 
5982 	qi = 0;
5983 
5984 	/* Just put it on the request queue */
5985 	mutex_enter(&qlt->mq_req[qi].mq_lock);
5986 	req = qlt_get_req_entries(qlt, 1, qi);
5987 	if (req == NULL) {
5988 		mutex_exit(&qlt->mq_req[qi].mq_lock);
5989 		stmf_trace(qlt->qlt_port_alias,
5990 		    "qlt_handle_inot: can't get a ReqQ entry");
5991 		EL(qlt, "req = NULL\n");
5992 		return;
5993 	}
5994 	for (i = 0; i < 16; i++) {
5995 		d = QMEM_RD32(qlt, inot);
5996 		inot += 4;
5997 		QMEM_WR32(qlt, req, d);
5998 		req += 4;
5999 	}
6000 	req -= 64;
6001 	req[0] = 0x0e;
6002 
6003 	QMEM_WR32(qlt, entry+0x3c, 0xdeadbeef);
6004 	EL(qlt, "Issue inot ack\n");
6005 
6006 	qlt_submit_req_entries(qlt, 1, qi);
6007 	mutex_exit(&qlt->mq_req[qi].mq_lock);
6008 }
6009 
6010 static uint16_t
qlt_get_queue_id(qlt_state_t * qlt,int id)6011 qlt_get_queue_id(qlt_state_t *qlt, int id)
6012 {
6013 	uint16_t	qid;
6014 
6015 	if ((!qlt->qlt_mq_enabled) || (qlt->qlt_queue_cnt == 1)) {
6016 		return (0);
6017 	}
6018 
6019 	mutex_enter(&qlt->qlock);
6020 	if ((id == 0) && (qlt->last_qi == 0)) {
6021 		qlt->last_qi++;
6022 	}
6023 	qid = qlt->last_qi;
6024 	qlt->last_qi++;
6025 
6026 	if (qlt->last_qi >= qlt->qlt_queue_cnt) {
6027 		qlt->last_qi -= qlt->qlt_queue_cnt;
6028 	}
6029 	mutex_exit(&qlt->qlock);
6030 
6031 	return (qid);
6032 }
6033 
6034 static fct_status_t
qlt_verify_atio_entry(qlt_state_t * qlt,uint8_t * atio)6035 qlt_verify_atio_entry(qlt_state_t *qlt, uint8_t *atio)
6036 {
6037 	uint32_t sig;
6038 	int i;
6039 	char info[160];
6040 
6041 
6042 	sig = QMEM_RD32(qlt, atio+0x3c);
6043 	for (i = 0; ((sig == 0xdeadbeef) &&
6044 	    (i < qlt_reprocess_attempt_cnt)); i++) {
6045 		(void) ddi_dma_sync(
6046 		    qlt->queue_mem_dma_handle,
6047 		    ATIO_QUEUE_OFFSET + (qlt->atio_ndx_to_fw << 6),
6048 		    IOCB_SIZE, DDI_DMA_SYNC_FORCPU);
6049 
6050 		qlt->qlt_atio_reproc_cnt++;
6051 		drv_usecwait(qlt_reprocess_delay);
6052 		sig = QMEM_RD32(qlt, atio+0x3c);
6053 	}
6054 
6055 	if (i) {
6056 		if (i >= qlt_reprocess_attempt_cnt) {
6057 			EL(qlt, "atio entry reprocess failed, %x\n",
6058 			    qlt->qlt_atio_reproc_cnt);
6059 			cmn_err(CE_WARN, "qlt%d: atio entry reprocess"
6060 			    " failed %x\n",
6061 			    qlt->instance, qlt->qlt_atio_reproc_cnt);
6062 			(void) snprintf(info, 160,
6063 			    "qlt_handle_ctio_completion: atio entry reprocess"
6064 			    " failed, %x rsp-%p",
6065 			    qlt->qlt_atio_reproc_cnt, (void *)atio);
6066 			info[159] = 0;
6067 			(void) fct_port_shutdown(qlt->qlt_port,
6068 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6069 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6070 			return (QLT_FAILURE);
6071 		} else {
6072 			EL(qlt, "atio entry reprocess succeeded, %x %x\n",
6073 			    i, qlt->qlt_atio_reproc_cnt);
6074 		}
6075 	}
6076 
6077 	return (QLT_SUCCESS);
6078 }
6079 
/*
 * Lookup table indexed by the low 3 bits of the FCP_CMND task-attribute
 * byte (see qlt_handle_atio) -- presumably maps the FCP task attribute
 * code to the corresponding STMF task_flags value; confirm against the
 * FCP-3/STMF definitions.
 */
uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
6081 static void
qlt_handle_atio(qlt_state_t * qlt,uint8_t * atio)6082 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
6083 {
6084 	fct_cmd_t	*cmd;
6085 	scsi_task_t	*task;
6086 	qlt_cmd_t	*qcmd;
6087 	uint32_t	rportid, fw_xchg_addr;
6088 	uint8_t		*p, *q, *req, tm;
6089 	uint16_t	cdb_size, flags, oxid;
6090 	char		info[160];
6091 	uint16_t	qi;
6092 
6093 	if (qlt_verify_atio_entry(qlt, atio) != QLT_SUCCESS)
6094 		return;
6095 
6096 	/*
6097 	 * If either bidirection xfer is requested of there is extended
6098 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
6099 	 */
6100 	cdb_size = 16;
6101 	if (atio[0x20 + 11] >= 3) {
6102 		uint8_t b = atio[0x20 + 11];
6103 		uint16_t b1;
6104 		if ((b & 3) == 3) {
6105 			EL(qlt, "bidirectional I/O not supported\n");
6106 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
6107 			    "received, dropping the cmd as bidirectional "
6108 			    " transfers are not yet supported", qlt->instance);
6109 			/* XXX abort the I/O */
6110 			return;
6111 		}
6112 		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
6113 		/*
6114 		 * Verify that we have enough entries. Without additional CDB
6115 		 * Everything will fit nicely within the same 64 bytes. So the
6116 		 * additional cdb size is essentially the # of additional bytes
6117 		 * we need.
6118 		 */
6119 		b1 = (uint16_t)b;
6120 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
6121 			EL(qlt, "extended cdb received\n");
6122 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
6123 			    " cdb (cdb size = %d bytes), however the firmware "
6124 			    " did not DMAed the entire FCP_CMD IU, entry count "
6125 			    " is %d while it should be %d", qlt->instance,
6126 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
6127 			/* XXX abort the I/O */
6128 			return;
6129 		}
6130 	}
6131 
6132 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
6133 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
6134 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
6135 	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
6136 
6137 	if (fw_xchg_addr == 0xFFFFFFFF) {
6138 		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
6139 		cmd = NULL;
6140 	} else {
6141 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
6142 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
6143 		if (cmd == NULL) {
6144 			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
6145 		}
6146 	}
6147 	if (cmd == NULL) {
6148 		qi = 0;	/* just use request queue 0 */
6149 
6150 		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
6151 		/* Abort this IO */
6152 		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
6153 
6154 		mutex_enter(&qlt->mq_req[qi].mq_lock);
6155 		req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
6156 		if (req == NULL) {
6157 			mutex_exit(&qlt->mq_req[0].mq_lock);
6158 
6159 			(void) snprintf(info, 160,
6160 			    "qlt_handle_atio: qlt-%p, can't "
6161 			    "allocate space for scsi_task", (void *)qlt);
6162 			info[159] = 0;
6163 			(void) fct_port_shutdown(qlt->qlt_port,
6164 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6165 			return;
6166 		}
6167 		bzero(req, IOCB_SIZE);
6168 		req[0] = 0x12; req[1] = 0x1;
6169 		QMEM_WR32(qlt, req+4, 0);
6170 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
6171 		    rportid));
6172 		QMEM_WR16(qlt, req+10, 60);
6173 		QMEM_WR32(qlt, req+0x10, rportid);
6174 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
6175 		QMEM_WR16(qlt, req+0x1A, flags);
6176 		QMEM_WR16(qlt, req+0x20, oxid);
6177 		qlt_submit_req_entries(qlt, 1, qi);
6178 		mutex_exit(&qlt->mq_req[qi].mq_lock);
6179 
6180 		return;
6181 	}
6182 	if (cmd == NULL) {
6183 		uint32_t res;
6184 		uint16_t scsi_status = 0;
6185 		uint16_t rphdl = 0;
6186 
6187 		qi = 0; /* always use request queue 0 */
6188 
6189 		rphdl = fct_get_rp_handle(qlt->qlt_port, rportid);
6190 		if ((rphdl != 0xFFFF) &&
6191 		    (rphdl >= qlt->qlt_port->port_max_logins)) {
6192 			rphdl = 0xFFFF;
6193 		}
6194 
6195 		mutex_enter(&qlt->mq_req[qi].mq_lock);
6196 		req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
6197 		if (req == NULL) {
6198 			EL(qlt, "cannot get reqq\n");
6199 			mutex_exit(&qlt->mq_req[qi].mq_lock);
6200 			(void) snprintf(info, 160,
6201 			    "qlt_handle_atio: qlt-%p, can't "
6202 			    "allocate space for termi-excg", (void *)qlt);
6203 			info[159] = 0;
6204 			(void) fct_port_shutdown(qlt->qlt_port,
6205 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6206 			return;
6207 		}
6208 
6209 		if (rphdl != 0xFFFF) {
6210 			/* Driver send scsi qfull status now */
6211 			flags = (uint16_t)(BIT_15 |
6212 			    ((uint16_t)(atio[0x3] & 0xF0) << 5));
6213 			/* always use SCSI status mode 1 */
6214 			flags = (uint16_t)(flags | BIT_6);
6215 
6216 			scsi_status |= (uint16_t)(0x28);
6217 
6218 			/* Build SCSI Status Mode 1, FCP_RSP IU 24-48 byte */
6219 			bzero(req, IOCB_SIZE);
6220 			req[0] = 0x12;
6221 			req[1] = 0x1;
6222 
6223 			/* allocate a special IOCB handle? or donot care  */
6224 			QMEM_WR32(qlt, req+4, 0);
6225 			QMEM_WR16(qlt, req+8, rphdl);
6226 			QMEM_WR16(qlt, req+10, 60);
6227 			QMEM_WR32(qlt, req+0x10, rportid);
6228 			QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
6229 
6230 			/* sense_length set to 0 */
6231 			QMEM_WR16(qlt, req+0x18, 0);
6232 
6233 			QMEM_WR16(qlt, req+0x1A, flags);
6234 
6235 			/* Residual transfer length */
6236 			res = QMEM_RD32(qlt, atio+0x3C);
6237 			BIG_ENDIAN_32(&res);
6238 			if (res != 0) {
6239 				scsi_status |= FCP_RESID_UNDER;
6240 			}
6241 			QMEM_WR32_REQ(qlt, qi, req + 0x1C, res);
6242 
6243 			QMEM_WR16(qlt, req+0x20, oxid);
6244 			QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
6245 
6246 			EL(qlt, "Send qfull (%Xh) (%Xh)(%Xh)(%Xh) from port "
6247 			    "(%Xh:%Xh)\n", scsi_status, fw_xchg_addr, flags,
6248 			    oxid, rportid, rphdl);
6249 		} else {
6250 			/* Terminate exchange because no remote port context */
6251 			flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
6252 
6253 			bzero(req, IOCB_SIZE);
6254 			req[0] = 0x12;
6255 			req[1] = 0x1;
6256 
6257 			QMEM_WR32(qlt, req+4, 0);
6258 			QMEM_WR16(qlt, req+8, rphdl);
6259 			QMEM_WR16(qlt, req+10, 60);
6260 			QMEM_WR32(qlt, req+0x10, rportid);
6261 			QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
6262 			QMEM_WR16(qlt, req+0x1A, flags);
6263 			QMEM_WR16(qlt, req+0x20, oxid);
6264 
6265 			EL(qlt, "Termi excg (%Xh)(%Xh)(%Xh) from port (%Xh)\n",
6266 			    fw_xchg_addr, flags, oxid, rportid);
6267 
6268 			EL(qlt, "Termi rp_handle (%Xh)\n", rphdl);
6269 		}
6270 
6271 		qlt_submit_req_entries(qlt, 1, qi);
6272 		mutex_exit(&qlt->mq_req[qi].mq_lock);
6273 		return;
6274 	}
6275 
6276 	qi = qlt_get_queue_id(qlt, 0);
6277 	task = (scsi_task_t *)cmd->cmd_specific;
6278 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
6279 	qcmd->fw_xchg_addr = fw_xchg_addr;
6280 	qcmd->param.atio_byte3 = atio[3];
6281 	qcmd->qid = qi;
6282 	cmd->cmd_oxid = oxid;
6283 	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
6284 	    atio[8+19]);
6285 	cmd->cmd_rportid = rportid;
6286 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
6287 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
6288 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
6289 	/* Dont do a 64 byte read as this is IOMMU */
6290 	q = atio+0x28;
6291 	/* XXX Handle fcp_cntl */
6292 	task->task_cmd_seq_no = (uint32_t)(*q++);
6293 	task->task_csn_size = 8;
6294 	task->task_flags = qlt_task_flags[(*q++) & 7];
6295 	tm = *q++;
6296 	if (tm) {
6297 		if (tm & BIT_1)
6298 			task->task_mgmt_function = TM_ABORT_TASK_SET;
6299 		else if (tm & BIT_2)
6300 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
6301 		else if (tm & BIT_4)
6302 			task->task_mgmt_function = TM_LUN_RESET;
6303 		else if (tm & BIT_5)
6304 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
6305 		else if (tm & BIT_6)
6306 			task->task_mgmt_function = TM_CLEAR_ACA;
6307 		else
6308 			task->task_mgmt_function = TM_ABORT_TASK;
6309 	}
6310 	task->task_max_nbufs = STMF_BUFS_MAX;
6311 	task->task_csn_size = 8;
6312 	task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
6313 	p = task->task_cdb;
6314 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
6315 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
6316 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
6317 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
6318 	if (cdb_size > 16) {
6319 		uint16_t xtra = (uint16_t)(cdb_size - 16);
6320 		uint16_t i;
6321 		uint8_t cb[4];
6322 
6323 		while (xtra) {
6324 			*p++ = *q++;
6325 			xtra--;
6326 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
6327 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
6328 				q = (uint8_t *)qlt->queue_mem_ptr +
6329 				    ATIO_QUEUE_OFFSET;
6330 			}
6331 		}
6332 		for (i = 0; i < 4; i++) {
6333 			cb[i] = *q++;
6334 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
6335 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
6336 				q = (uint8_t *)qlt->queue_mem_ptr +
6337 				    ATIO_QUEUE_OFFSET;
6338 			}
6339 		}
6340 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
6341 		    (((uint32_t)cb[1]) << 16) |
6342 		    (((uint32_t)cb[2]) << 8) | cb[3];
6343 	} else {
6344 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
6345 		    (((uint32_t)q[1]) << 16) |
6346 		    (((uint32_t)q[2]) << 8) | q[3];
6347 	}
6348 
6349 	QMEM_WR32(qlt, atio+0x3c, 0xdeadbeef);
6350 	fct_post_rcvd_cmd(cmd, 0);
6351 }
6352 
/*
 * Handle completion of an implicit logout (deregister remote port).
 * The thread that initiated the logout is blocked on rp_dereg_cv with
 * the port ID recorded in rp_id_in_dereg; this routine records the
 * result in rp_dereg_status and wakes it.  83xx/27xx chips serialize
 * this via the request-queue-0 lock, older chips via preq_lock.
 */
static void
qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	uint16_t status;
	uint32_t portid;
	uint32_t subcode1, subcode2;

	status = QMEM_RD16(qlt, rsp+8);
	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
	subcode1 = QMEM_RD32(qlt, rsp+0x14);
	subcode2 = QMEM_RD32(qlt, rsp+0x18);

	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_enter(&qlt->mq_req[0].mq_lock);
	} else {
		mutex_enter(&qlt->preq_lock);
	}
	/* Unsolicited completion -- nobody is waiting for this port ID */
	if (portid != qlt->rp_id_in_dereg) {
		int instance = ddi_get_instance(qlt->dip);

		EL(qlt, "implicit logout reveived portid = %xh\n", portid);
		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
		    " received when driver wasn't waiting for it",
		    instance, portid);
		if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
			mutex_exit(&qlt->mq_req[0].mq_lock);
		} else {
			mutex_exit(&qlt->preq_lock);
		}
		return;
	}

	if (status != 0) {
		EL(qlt, "implicit logout completed for %xh with status %xh, "
		    "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
		    subcode2);
		/*
		 * Status 0x31/subcode 0x0a is treated as success --
		 * presumably "port already logged out"; confirm against the
		 * firmware spec.
		 */
		if (status == 0x31 && subcode1 == 0x0a) {
			qlt->rp_dereg_status = FCT_SUCCESS;
		} else {
			EL(qlt, "implicit logout portid=%xh, status=%xh, "
			    "subcode1=%xh, subcode2=%xh\n", portid, status,
			    subcode1, subcode2);
			qlt->rp_dereg_status =
			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
		}
	} else {
		qlt->rp_dereg_status = FCT_SUCCESS;
	}
	/* Wake the thread blocked in the dereg path */
	cv_signal(&qlt->rp_dereg_cv);
	if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
		mutex_exit(&qlt->mq_req[0].mq_lock);
	} else {
		mutex_exit(&qlt->preq_lock);
	}
}
6408 
6409 /*
6410  * Note that when an ELS is aborted, the regular or aborted completion
6411  * (if any) gets posted before the abort IOCB comes back on response queue.
6412  */
6413 static void
qlt_handle_unsol_els_completion(qlt_state_t * qlt,uint8_t * rsp)6414 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
6415 {
6416 	char		info[160];
6417 	fct_cmd_t	*cmd;
6418 	qlt_cmd_t	*qcmd;
6419 	uint32_t	hndl;
6420 	uint32_t	subcode1, subcode2;
6421 	uint16_t	status;
6422 	uint8_t		elsop;
6423 
6424 	hndl = QMEM_RD32(qlt, rsp+4);
6425 	status = QMEM_RD16(qlt, rsp+8);
6426 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
6427 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
6428 	elsop = rsp[0x16];
6429 
6430 	if (!CMD_HANDLE_VALID(hndl)) {
6431 		EL(qlt, "handle = %xh\n", hndl);
6432 		/*
6433 		 * This cannot happen for unsol els completion. This can
6434 		 * only happen when abort for an unsol els completes.
6435 		 * This condition indicates a firmware bug.
6436 		 */
6437 		(void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
6438 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
6439 		    hndl, status, subcode1, subcode2, (void *)rsp);
6440 		info[159] = 0;
6441 		(void) fct_port_shutdown(qlt->qlt_port,
6442 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6443 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6444 		return;
6445 	}
6446 
6447 	if (status == 5) {
6448 		/*
6449 		 * When an unsolicited els is aborted, the abort is done
6450 		 * by a ELSPT iocb with abort control. This is the aborted IOCB
6451 		 * and not the abortee. We will do the cleanup when the
6452 		 * IOCB which caused the abort, returns.
6453 		 */
6454 		EL(qlt, "status = %xh\n", status);
6455 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
6456 		return;
6457 	}
6458 
6459 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
6460 	if (cmd == NULL) {
6461 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
6462 		/*
6463 		 * Now why would this happen ???
6464 		 */
6465 		(void) snprintf(info, 160,
6466 		    "qlt_handle_unsol_els_completion: can not "
6467 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
6468 		    (void *)rsp);
6469 		info[159] = 0;
6470 		(void) fct_port_shutdown(qlt->qlt_port,
6471 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6472 
6473 		return;
6474 	}
6475 
6476 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
6477 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
6478 	if (qcmd->flags & QLT_CMD_ABORTING) {
6479 		/*
6480 		 * This is the same case as "if (status == 5)" above. The
6481 		 * only difference is that in this case the firmware actually
6482 		 * finished sending the response. So the abort attempt will
6483 		 * come back with status ?. We will handle it there.
6484 		 */
6485 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
6486 		    "abort it");
6487 		return;
6488 	}
6489 
6490 	if (qcmd->dbuf != NULL) {
6491 		qlt_dmem_free(NULL, qcmd->dbuf);
6492 		qcmd->dbuf = NULL;
6493 	}
6494 
6495 	if (status == 0) {
6496 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
6497 
6498 		if ((elsop == ELS_OP_LOGO) &&
6499 		    (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT)) {
6500 			EL(qlt, "reset link since this is LOGO and N2N\n");
6501 			(void) snprintf(info, 80,
6502 			    "qlt_handle_unsol_els_completion: qlt-%p, "
6503 			    "trigger RFLAG_RESET to recover",
6504 			    (void *)qlt);
6505 
6506 			info[79] = 0;
6507 			(void) fct_port_shutdown(qlt->qlt_port,
6508 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
6509 			    info);
6510 		}
6511 	} else {
6512 		EL(qlt, "status (0xh) sucode1=%xh subconde2=%xh\n",
6513 		    status, subcode1, subcode2);
6514 		fct_send_response_done(cmd,
6515 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
6516 	}
6517 }
6518 
6519 static void
qlt_handle_unsol_els_abort_completion(qlt_state_t * qlt,uint8_t * rsp)6520 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
6521 {
6522 	char		info[160];
6523 	fct_cmd_t	*cmd;
6524 	qlt_cmd_t	*qcmd;
6525 	uint32_t	hndl;
6526 	uint32_t	subcode1, subcode2;
6527 	uint16_t	status;
6528 
6529 	hndl = QMEM_RD32(qlt, rsp+4);
6530 	status = QMEM_RD16(qlt, rsp+8);
6531 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
6532 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
6533 
6534 	if (!CMD_HANDLE_VALID(hndl)) {
6535 		EL(qlt, "handle = %xh\n", hndl);
6536 		ASSERT(hndl == 0);
6537 		/*
6538 		 * Someone has requested to abort it, but no one is waiting for
6539 		 * this completion.
6540 		 */
6541 		if ((status != 0) && (status != 8)) {
6542 			EL(qlt, "status = %xh\n", status);
6543 			/*
6544 			 * There could be exchange resource leakage, so
6545 			 * throw HBA fatal error event now
6546 			 */
6547 			(void) snprintf(info, 160,
6548 			    "qlt_handle_unsol_els_abort_completion: "
6549 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
6550 			    hndl, status, subcode1, subcode2, (void *)rsp);
6551 			info[159] = 0;
6552 			(void) fct_port_shutdown(qlt->qlt_port,
6553 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6554 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6555 			return;
6556 		}
6557 
6558 		return;
6559 	}
6560 
6561 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
6562 	if (cmd == NULL) {
6563 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
6564 		/*
6565 		 * Why would this happen ??
6566 		 */
6567 		(void) snprintf(info, 160,
6568 		    "qlt_handle_unsol_els_abort_completion: can not get "
6569 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
6570 		    (void *)rsp);
6571 		info[159] = 0;
6572 		(void) fct_port_shutdown(qlt->qlt_port,
6573 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6574 
6575 		return;
6576 	}
6577 
6578 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
6579 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
6580 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
6581 
6582 	if (qcmd->dbuf != NULL) {
6583 		qlt_dmem_free(NULL, qcmd->dbuf);
6584 		qcmd->dbuf = NULL;
6585 	}
6586 
6587 	if (status == 0) {
6588 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
6589 	} else if (status == 8) {
6590 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
6591 	} else {
6592 		fct_cmd_fca_aborted(cmd,
6593 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
6594 	}
6595 }
6596 
/*
 * Handle completion of a solicited ELS that the driver itself sent.
 * On success, copies the remote port's ELS response payload out of the
 * DMA buffer into els_resp_payload, then reports the result to the FCT
 * framework.  If the command is being aborted, cleanup is deferred to
 * the ABORT IO IOCB completion.
 */
static void
qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char		info[160];
	fct_cmd_t	*cmd;
	fct_els_t	*els;
	qlt_cmd_t	*qcmd;
	uint32_t	hndl;
	uint32_t	subcode1, subcode2;
	uint16_t	status;

	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);
	subcode1 = QMEM_RD32(qlt, rsp+0x24);
	subcode2 = QMEM_RD32(qlt, rsp+0x28);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * This cannot happen for sol els completion.
		 */
		(void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
		    hndl, status, subcode1, subcode2, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_sol_els_completion: can not "
		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* Remember the firmware exchange address for a possible abort */
	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when the ABORT IO IOCB returns.
		 */
		return;
	}

	if (qcmd->dbuf != NULL) {
		/* Copy the response out of DMA memory before freeing it */
		if (status == 0) {
			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
			    qcmd->param.resp_offset,
			    els->els_resp_payload, els->els_resp_size);
		}
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}

	if (status == 0) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd,
		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
	}
}
6672 
/*
 * Completion handler for a solicited CT pass-through IOCB.  Looks up the
 * originating fct_cmd_t via the firmware handle, copies the CT response
 * payload back to the FCT layer and completes the command.
 */
static void
qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	fct_cmd_t	*cmd;
	fct_sol_ct_t	*ct;
	qlt_cmd_t	*qcmd;
	uint32_t	 hndl;
	uint16_t	 status;
	char		 info[160];

	/* Decode the response IOCB: command handle and completion status */
	hndl = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		/*
		 * Solicited commands will always have a valid handle.
		 */
		(void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
		    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	EL(qlt, "cmd=%ph hndl=%xh status=%xh\n", cmd, hndl, status);
	if (cmd == NULL) {
		/* Handle no longer maps to a command; fatal, reset the port */
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_ct_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);

	if (qcmd->flags & QLT_CMD_ABORTING) {
		/*
		 * We will handle it when ABORT IO IOCB returns;
		 */
		return;
	}

	/*
	 * Both 0 and 0x15 are treated as successful transfers here and
	 * below (0x15 presumably a partial/underrun-style completion --
	 * TODO confirm against firmware documentation).
	 */
	ASSERT(qcmd->dbuf);
	if ((status == 0) || (status == 0x15)) {
		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
		    qcmd->param.resp_offset,
		    ct->ct_resp_payload, ct->ct_resp_size);
	}
	/* DMA buffer is released regardless of completion status */
	qlt_dmem_free(NULL, qcmd->dbuf);
	qcmd->dbuf = NULL;

	if ((status == 0) || (status == 0x15)) {
		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
	} else {
		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}
6742 
6743 static fct_status_t
qlt_verify_resp_entry(qlt_state_t * qlt,uint8_t * rsp,uint16_t qi)6744 qlt_verify_resp_entry(qlt_state_t *qlt, uint8_t *rsp, uint16_t qi)
6745 {
6746 	uint32_t sig;
6747 	int i;
6748 	char info[160];
6749 
6750 	sig = QMEM_RD32_RSPQ(qlt, qi, rsp+0x3c);
6751 	for (i = 0; ((sig == 0xdeadbeef) &&
6752 	    (i < qlt_reprocess_attempt_cnt)); i++) {
6753 		(void) ddi_dma_sync(
6754 		    qlt->mq_resp[qi].queue_mem_mq_dma_handle,
6755 		    (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
6756 		    IOCB_SIZE, DDI_DMA_SYNC_FORCPU);
6757 
6758 		qlt->qlt_resp_reproc_cnt++;
6759 		drv_usecwait(qlt_reprocess_delay);
6760 		sig = QMEM_RD32_RSPQ(qlt, qi, rsp+0x3c);
6761 	}
6762 
6763 	if (i) {
6764 		if (i >= qlt_reprocess_attempt_cnt) {
6765 			EL(qlt, "resp entry reprocess failed, %x\n",
6766 			    qlt->qlt_resp_reproc_cnt);
6767 			cmn_err(CE_WARN, "qlt%d: resp entry reprocess"
6768 			    " failed %x\n",
6769 			    qlt->instance, qlt->qlt_resp_reproc_cnt);
6770 			(void) snprintf(info, 160,
6771 			    "qlt_handle_ctio_completion: resp entry reprocess"
6772 			    " failed, %x rsp-%p",
6773 			    qlt->qlt_resp_reproc_cnt, (void *)rsp);
6774 			info[159] = 0;
6775 			(void) fct_port_shutdown(qlt->qlt_port,
6776 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
6777 			    info);
6778 			return (QLT_FAILURE);
6779 		} else {
6780 			EL(qlt, "resp entry reprocess succeeded, %x %x\n",
6781 			    i, qlt->qlt_resp_reproc_cnt);
6782 		}
6783 	}
6784 
6785 	return (QLT_SUCCESS);
6786 }
6787 
/*
 * Completion handler for a CTIO (continue target I/O) response entry on
 * response queue qi.  Validates the entry, maps the firmware handle back to
 * its fct_cmd_t, then completes the data phase, the status phase or an
 * abort through the FCT framework as indicated by the IOCB fields.
 */
static void
qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp, uint16_t qi)
{
	fct_cmd_t	*cmd;
	scsi_task_t	*task;
	qlt_cmd_t	*qcmd;
	stmf_data_buf_t	*dbuf;
	fct_status_t	fc_st;
	uint32_t	iof = 0;
	uint32_t	hndl;
	uint32_t	rex1;
	uint16_t	oxid;
	uint16_t	status;
	uint16_t	flags;
	uint8_t		abort_req;
	uint8_t		n;
	char		info[160];

	/* Make sure the firmware has fully written this entry */
	if (qlt_verify_resp_entry(qlt, rsp, qi) != QLT_SUCCESS)
		return;

	/* write a deadbeef in the last 4 bytes of the IOCB */
	QMEM_WR32_RSPQ(qlt, qi, rsp+0x3c, 0xdeadbeef);

	/* XXX: Check validity of the IOCB by checking 4th byte. */
	hndl = QMEM_RD32_RSPQ(qlt, qi, rsp+4);
	status = QMEM_RD16_RSPQ(qlt, qi, rsp+8);
	flags = QMEM_RD16_RSPQ(qlt, qi, rsp+0x1a);
	oxid = QMEM_RD16_RSPQ(qlt, qi, rsp+0x20);
	rex1 = QMEM_RD32_RSPQ(qlt, qi, rsp+0x14);
	n = rsp[2];	/* data buffer handle (BIT_7 set => status phase) */

	if (!CMD_HANDLE_VALID(hndl)) {
		EL(qlt, "handle = %xh\n", hndl);
		ASSERT(hndl == 0);
		/*
		 * Someone has requested to abort it, but no one is waiting for
		 * this completion.
		 */
		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
		    (void *)rsp);
		if ((status != 1) && (status != 2)) {
			EL(qlt, "status = %xh\n", status);
			if (status == 0x29) {
				uint8_t		*req;

				/*
				 * The qlt port received an ATIO request from
				 * remote port before it issued a plogi.
				 * The qlt fw returned the CTIO completion
				 * status 0x29 to inform driver to do cleanup
				 * (terminate the IO exchange). The subsequent
				 * ABTS from the initiator can be handled
				 * cleanly.
				 */
				qi = 0;
				mutex_enter(&qlt->mq_req[qi].mq_lock);
				req = (uint8_t *)
				    qlt_get_req_entries(qlt, 1, qi);

				if (req == NULL) {
					EL(qlt, "No reqq entry available to "
					    "termi exchg\n");
					mutex_exit(&qlt->mq_req[qi].mq_lock);

					(void) snprintf(info, 160,
					    "qlt_handle_ctio_completion: no "
					    "reqq entry available, status-%x,"
					    "rsp-%p", status, (void *)rsp);

					info[159] = 0;

					(void) fct_port_shutdown(qlt->qlt_port,
					    STMF_RFLAG_FATAL_ERROR |
					    STMF_RFLAG_RESET,
					    info);

					return;
				}

				/*
				 * Build a terminate-exchange IOCB (opcode
				 * 0x12), preserving attribute bits 9-12 from
				 * the completion flags and setting BIT_14.
				 */
				flags &= 0x1E00;
				flags |= BIT_14;

				bzero(req, IOCB_SIZE);
				req[0] = 0x12;
				req[1] = 0x1;

				QMEM_WR32(qlt, req+4, 0);
				QMEM_WR16(qlt, req+8, 0xFFFF);
				QMEM_WR16(qlt, req+10, 60);
				QMEM_WR32(qlt, req+0x14, rex1);
				QMEM_WR16(qlt, req+0x1A, flags);
				QMEM_WR16(qlt, req+0x20, oxid);

				EL(qlt, "Termi exchg (%Xh)(%Xh)(%Xh) "
				    "rphdl=0xFFFF\n", rex1, flags, oxid);

				qlt_submit_req_entries(qlt, 1, qi);
				mutex_exit(&qlt->mq_req[qi].mq_lock);
			} else {
				/*
				 * There could be exchange resource leakage,
				 * so throw HBA fatal error event now
				 */
				(void) snprintf(info, 160,
				    "qlt_handle_ctio_completion: hndl-%x, "
				    "status-%x, rsp-%p", hndl, status,
				    (void *)rsp);

				info[159] = 0;

				(void) fct_port_shutdown(qlt->qlt_port,
				    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
				    info);
			}
		}

		return;
	}

	/* BIT_14 in the flags marks this as an abort completion */
	if (flags & BIT_14) {
		abort_req = 1;
		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
		    (void *)rsp);
	} else {
		abort_req = 0;
	}

	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
	if (cmd == NULL) {
		/* Handle no longer maps to a command; fatal, reset the port */
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
		(void) snprintf(info, 160,
		    "qlt_handle_ctio_completion: cannot find "
		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
		    (void *)rsp);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    /* STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info); */
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);

		return;
	}

	task = (scsi_task_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	if (qcmd->dbuf_rsp_iu) {
		/* Free the response-IU buffer used for this CTIO */
		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
		qcmd->dbuf_rsp_iu = NULL;
	}

	/* Map firmware status to an FCT status; 1 and 2 are success codes */
	if ((status == 1) || (status == 2)) {
		if (abort_req) {
			fc_st = FCT_ABORT_SUCCESS;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = FCT_SUCCESS;
			/* BIT_15: firmware finished the whole exchange */
			if (flags & BIT_15) {
				iof = FCT_IOF_FCA_DONE;
			}
		}
	} else {
		EL(qlt, "status = %xh\n", status);
		if ((status == 8) && abort_req) {
			fc_st = FCT_NOT_FOUND;
			iof = FCT_IOF_FCA_DONE;
		} else {
			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
		}
	}
	dbuf = NULL;
	if (((n & BIT_7) == 0) && (!abort_req)) {
		/* A completion of data xfer */
		if (n == 0) {
			dbuf = qcmd->dbuf;
		} else {
			dbuf = stmf_handle_to_buf(task, n);
		}

		ASSERT(dbuf != NULL);
		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
		if (flags & BIT_15) {
			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
			    DB_STATUS_GOOD_SENT);
		}

		dbuf->db_xfer_status = fc_st;
		fct_scsi_data_xfer_done(cmd, dbuf, iof);
		return;
	}
	if (!abort_req) {
		/*
		 * This was just a pure status xfer.
		 */
		fct_send_response_done(cmd, fc_st, iof);
		return;
	}

	/* Abort completion path */
	fct_cmd_fca_aborted(cmd, fc_st, iof);

	EL(qlt, "(%d) (%p)(%xh,%xh),%x %x %x\n",
	    qi, cmd, cmd->cmd_oxid, cmd->cmd_rxid,
	    cmd->cmd_handle, qcmd->fw_xchg_addr,
	    fc_st);
}
6995 
/*
 * Completion handler for a solicited-command abort IOCB (issued by
 * qlt_abort_sol_cmd).  Frees the pending DMA buffer of the aborted
 * ELS/CT command and reports the abort result to the FCT framework.
 */
static void
qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	char		info[80];
	fct_cmd_t	*cmd;
	qlt_cmd_t	*qcmd;
	uint32_t	h;
	uint16_t	status;

	/* Decode the response IOCB: command handle and completion status */
	h = QMEM_RD32(qlt, rsp+4);
	status = QMEM_RD16(qlt, rsp+8);

	if (!CMD_HANDLE_VALID(h)) {
		EL(qlt, "handle = %xh\n", h);
		/*
		 * Solicited commands always have a valid handle.
		 */
		(void) snprintf(info, 80,
		    "qlt_handle_sol_abort_completion: hndl-"
		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
		info[79] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
		return;
	}
	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
	if (cmd == NULL) {
		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
		/*
		 * What happened to the cmd ??
		 */
		(void) snprintf(info, 80,
		    "qlt_handle_sol_abort_completion: cannot "
		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
		    (void *)rsp);
		info[79] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);

		return;
	}

	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
	    (cmd->cmd_type == FCT_CMD_SOL_CT));
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	/* The aborted command's DMA buffer is no longer needed */
	if (qcmd->dbuf != NULL) {
		qlt_dmem_free(NULL, qcmd->dbuf);
		qcmd->dbuf = NULL;
	}
	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
	EL(qlt, "status=%xh\n", status);
	/* 0 = aborted; 0x31 = firmware no longer has the exchange */
	if (status == 0) {
		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
	} else if (status == 0x31) {
		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
	} else {
		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
	}
}
7056 
/*
 * Handler for an unsolicited ABTS (abort sequence) received from a remote
 * port.  Allocates a new FCT_CMD_RCVD_ABTS command, copies the immediate
 * notify IOCB into it and posts it to the FCT framework for processing.
 */
static void
qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp, uint16_t qi)
{
	qlt_abts_cmd_t	*qcmd;
	fct_cmd_t	*cmd;
	uint32_t	remote_portid;
	uint32_t	rex1;
	uint32_t	rex2;
	char		info[160];

	/* 24-bit remote port id: 16 bits at 0x18 plus high byte at 0x1A */
	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
	    ((uint32_t)(resp[0x1A])) << 16;
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
	    sizeof (qlt_abts_cmd_t), 0);
	if (cmd == NULL) {
		/* Cannot allocate the command; fatal, reset the port */
		EL(qlt, "fct_alloc cmd==NULL\n");
		(void) snprintf(info, 160,
		    "qlt_handle_rcvd_abts: qlt-%p, can't "
		    "allocate space for fct_cmd", (void *)qlt);
		info[159] = 0;
		(void) fct_port_shutdown(qlt->qlt_port,
		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
		return;
	}

	/*
	 * Bytes 0xC-0xE are zeroed before the IOCB is saved in the command
	 * -- presumably to clear stale status fields before the copy is
	 * reused when building the ABTS response; TODO confirm.
	 */
	resp[0xC] = resp[0xD] = resp[0xE] = 0;
	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
	qcmd->qid = qi;
	/* Keep a private copy of the whole IOCB for the response path */
	bcopy(resp, qcmd->buf, IOCB_SIZE);
	cmd->cmd_port = qlt->qlt_port;
	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
	if (cmd->cmd_rp_handle == 0xFFFF)
		cmd->cmd_rp_handle = FCT_HANDLE_NONE;

	cmd->cmd_rportid = remote_portid;
	/* 24-bit local port id assembled the same way from 0x14/0x16 */
	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
	    ((uint32_t)(resp[0x16])) << 16;
	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));

	/* Exchange addresses, logged below for diagnosis */
	rex1 = QMEM_RD32(qlt, resp+0x10);
	rex2 = QMEM_RD32(qlt, resp+0x3C);

	EL(qlt, "(%d)(%xh %xh) (%xh)(%p) (%xh %xh) (%x)\n",
	    qi, cmd->cmd_oxid, cmd->cmd_rxid, remote_portid,
	    cmd, rex1, rex2, cmd->cmd_handle);

	fct_post_rcvd_cmd(cmd, 0);
}
7106 
7107 static void
qlt_handle_abts_completion(qlt_state_t * qlt,uint8_t * resp,uint16_t qi)7108 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp, uint16_t qi)
7109 {
7110 	uint16_t status;
7111 	char	info[80];
7112 
7113 	status = QMEM_RD16(qlt, resp+8);
7114 
7115 	if ((status == 0) || (status == 5)) {
7116 		EL(qlt, "qi(%d) status =%xh,(%xh %xh)\n",
7117 		    qi, status, QMEM_RD16(qlt, resp+0x26),
7118 		    QMEM_RD16(qlt, resp+0x24));
7119 		return;
7120 	}
7121 
7122 	EL(qlt, "ABTS status=%x/%x/%x resp_off %x",
7123 	    status, QMEM_RD32(qlt, resp+0x34),
7124 	    QMEM_RD32(qlt, resp+0x38),
7125 	    ((uint32_t)(qlt->mq_resp[0].mq_ndx_to_fw)) << 6);
7126 
7127 	(void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
7128 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
7129 	    ((uint32_t)(qlt->mq_resp[0].mq_ndx_to_fw)) << 6);
7130 	info[79] = 0;
7131 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
7132 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
7133 }
7134 
#ifdef	DEBUG
/*
 * Debug-only knob: while > 0, qlt_abort_cmd decrements this counter and,
 * when the decrement reaches 1, silently drops the abort (returns
 * FCT_SUCCESS without issuing it) -- used to exercise abort-retry paths.
 */
uint32_t qlt_drop_abort_counter = 0;
#endif
7138 
7139 fct_status_t
qlt_abort_cmd(struct fct_local_port * port,fct_cmd_t * cmd,uint32_t flags)7140 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
7141 {
7142 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
7143 
7144 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
7145 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
7146 		return (FCT_NOT_FOUND);
7147 	}
7148 
7149 #ifdef DEBUG
7150 	if (qlt_drop_abort_counter > 0) {
7151 		if (atomic_dec_32_nv(&qlt_drop_abort_counter) == 1)
7152 			return (FCT_SUCCESS);
7153 	}
7154 #endif
7155 
7156 	EL(qlt, "cmd_type = %x\n", cmd->cmd_type);
7157 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
7158 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
7159 	}
7160 
7161 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
7162 		cmd->cmd_handle = 0;
7163 	}
7164 
7165 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
7166 		/* this is retried ABTS, terminate it now */
7167 		return (qlt_send_abts_response(qlt, cmd, 1));
7168 	}
7169 
7170 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
7171 		return (qlt_abort_purex(qlt, cmd));
7172 	}
7173 
7174 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
7175 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
7176 		return (qlt_abort_sol_cmd(qlt, cmd));
7177 	}
7178 	EL(qlt, "cmd->cmd_type = %x\n", cmd->cmd_type);
7179 
7180 	ASSERT(0);
7181 	return (FCT_FAILURE);
7182 }
7183 
7184 fct_status_t
qlt_abort_sol_cmd(qlt_state_t * qlt,fct_cmd_t * cmd)7185 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
7186 {
7187 	uint8_t *req;
7188 	qlt_cmd_t *qcmd;
7189 	uint16_t qi;
7190 
7191 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
7192 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
7193 	qi = qcmd->qid;
7194 
7195 	EL(qlt, "fctcmd-%p, cmd_handle-%xh rportid=%xh\n",
7196 	    cmd, cmd->cmd_handle, cmd->cmd_rportid);
7197 
7198 	mutex_enter(&qlt->mq_req[qi].mq_lock);
7199 	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
7200 	if (req == NULL) {
7201 		EL(qlt, "req == NULL\n");
7202 		mutex_exit(&qlt->mq_req[qi].mq_lock);
7203 
7204 		return (FCT_BUSY);
7205 	}
7206 	bzero(req, IOCB_SIZE);
7207 	req[0] = 0x33; req[1] = 1;
7208 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
7209 	if (cmd->cmd_rp) {
7210 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
7211 	} else {
7212 		QMEM_WR16(qlt, req+8, 0xFFFF);
7213 	}
7214 
7215 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
7216 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
7217 	qlt_submit_req_entries(qlt, 1, qi);
7218 	mutex_exit(&qlt->mq_req[qi].mq_lock);
7219 
7220 	return (FCT_SUCCESS);
7221 }
7222 
/*
 * Abort a received (unsolicited) ELS by queueing an ELS pass-through IOCB
 * (opcode 0x53) that terminates the exchange.  Marks the command
 * QLT_CMD_ABORTING.  Returns FCT_BUSY if no request queue entry is free.
 */
fct_status_t
qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	qlt_cmd_t *qcmd;
	fct_els_t *els;
	uint8_t elsop, req1f;
	uint16_t qi;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qi = qcmd->qid;
	elsop = els->els_req_payload[0];
	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd,
	    cmd->cmd_handle, elsop);
	req1f = 0x60;	/* Terminate xchg */
	/* Session-affecting ELS ops additionally get BIT_4 set in byte 0x1f */
	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
		req1f = (uint8_t)(req1f | BIT_4);
	}

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		EL(qlt, "req == NULL\n");
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}

	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
	/* ELS pass-through IOCB: opcode 0x53 */
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
	req[0x16] = elsop; req[0x1f] = req1f;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	if (cmd->cmd_rp) {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
	} else {
		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
	}

	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
7272 
7273 fct_status_t
qlt_abort_unsol_scsi_cmd(qlt_state_t * qlt,fct_cmd_t * cmd)7274 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
7275 {
7276 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
7277 	uint8_t *req;
7278 	uint16_t flags;
7279 	uint16_t qi;
7280 
7281 	qi = qcmd->qid;
7282 
7283 	flags = (uint16_t)(BIT_14 |
7284 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
7285 
7286 	EL(qlt, "(%d) (%x) (%p) (%x)\n", qi, cmd->cmd_oxid,
7287 	    cmd, qcmd->fw_xchg_addr);
7288 
7289 	mutex_enter(&qlt->mq_req[qi].mq_lock);
7290 	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
7291 	if (req == NULL) {
7292 		EL(qlt, "req == NULL\n");
7293 		mutex_exit(&qlt->mq_req[qi].mq_lock);
7294 		return (FCT_BUSY);
7295 	}
7296 
7297 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
7298 	bzero(req, IOCB_SIZE);
7299 	req[0] = 0x12; req[1] = 0x1;
7300 	QMEM_WR32_REQ(qlt, qi, req+4, cmd->cmd_handle);
7301 	QMEM_WR16_REQ(qlt, qi, req+8, cmd->cmd_rp->rp_handle);
7302 	QMEM_WR16_REQ(qlt, qi, req+10, 60);	/* 60 seconds timeout */
7303 	QMEM_WR32_REQ(qlt, qi, req+0x10, cmd->cmd_rportid);
7304 	QMEM_WR32_REQ(qlt, qi, req+0x14, qcmd->fw_xchg_addr);
7305 	QMEM_WR16_REQ(qlt, qi, req+0x1A, flags);
7306 	QMEM_WR16_REQ(qlt, qi, req+0x20, cmd->cmd_oxid);
7307 	qlt_submit_req_entries(qlt, 1, qi);
7308 	mutex_exit(&qlt->mq_req[qi].mq_lock);
7309 
7310 	return (FCT_SUCCESS);
7311 }
7312 
7313 fct_status_t
qlt_send_cmd(fct_cmd_t * cmd)7314 qlt_send_cmd(fct_cmd_t *cmd)
7315 {
7316 	qlt_state_t *qlt;
7317 
7318 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
7319 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
7320 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
7321 		return (qlt_send_els(qlt, cmd));
7322 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
7323 		return (qlt_send_ct(qlt, cmd));
7324 	}
7325 	EL(qlt, "Unknown cmd->cmd_type = %xh\n", cmd->cmd_type);
7326 
7327 	ASSERT(0);
7328 	return (FCT_FAILURE);
7329 }
7330 
/*
 * Send a solicited ELS.  Allocates one DMA buffer holding the request
 * payload followed (8-byte aligned) by space for the response, then queues
 * an ELS pass-through IOCB (opcode 0x53) on request queue 0.  Returns
 * FCT_BUSY when the buffer or a request queue entry cannot be obtained.
 */
fct_status_t
qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_els_t *els;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;
	uint16_t qi;

	/* Solicited ELS always goes through request queue 0 */
	qi = 0;

	els = (fct_els_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area starts at the next 8-byte boundary after the request */
	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
	    els->els_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}
	/* ELS pass-through IOCB: opcode 0x53, entry count 1 */
	bzero(req, IOCB_SIZE);
	req[0] = 0x53; req[1] = 1;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
	QMEM_WR16(qlt, (&req[0x14]), 1);
	req[0x16] = els->els_req_payload[0];
	/* In point-to-point topology the local port id goes in bytes 0x1b-0x1d */
	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
	}
	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
	/* Request DSD, then response DSD at resp_offset into the same buffer */
	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset));
	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);

	EL(qlt, "ELS opcode %xh to %xh\n",
	    req[0x16], cmd->cmd_rp->rp_id);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
7399 
/*
 * Send a solicited CT command.  Allocates one DMA buffer holding the CT
 * request followed (8-byte aligned) by space for the response, then queues
 * a CT pass-through IOCB (opcode 0x29) on request queue 0.  Returns
 * FCT_BUSY when the buffer or a request queue entry cannot be obtained.
 */
fct_status_t
qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
{
	uint8_t *req;
	fct_sol_ct_t *ct;
	qlt_cmd_t *qcmd;
	stmf_data_buf_t *buf;
	qlt_dmem_bctl_t *bctl;
	uint32_t sz, minsz;
	uint16_t qi;

	/* Solicited CT always goes through request queue 0 */
	qi =  0;

	ct = (fct_sol_ct_t *)cmd->cmd_specific;
	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
	/* Response area starts at the next 8-byte boundary after the request */
	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
	if (buf == NULL) {
		return (FCT_BUSY);
	}
	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;

	qcmd->dbuf = buf;
	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
	    ct->ct_req_size);
	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);

	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
		    cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
		qlt_dmem_free(NULL, buf);
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return (FCT_BUSY);
	}
	/* CT pass-through IOCB: opcode 0x29, entry count 1 */
	bzero(req, IOCB_SIZE);
	req[0] = 0x29; req[1] = 1;
	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
	QMEM_WR16(qlt, (&req[0xC]), 1);
	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
	QMEM_WR16(qlt, (&req[0x14]), 1);

	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);

	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
	    qcmd->param.resp_offset));		/* RESPONSE DSD */
	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);

	EL(qlt, "%p cmd_hdl=%xh %xh %xh\n",
	    cmd, cmd->cmd_handle, ct->ct_req_size, ct->ct_resp_size);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);

	return (FCT_SUCCESS);
}
7463 
7464 /*ARGSUSED*/
7465 caddr_t
qlt_str_ptr(qlt_state_t * qlt,caddr_t bp,uint32_t * len)7466 qlt_str_ptr(qlt_state_t *qlt, caddr_t bp, uint32_t *len)
7467 {
7468 	caddr_t sp;
7469 	uint32_t i = 0;
7470 
7471 	sp = bp;
7472 	while (*sp++ != 0) i++;
7473 	if (i > *len || !(*len -= i)) {
7474 		EL(qlt, "full buffer\n");
7475 		return (NULL);
7476 	}
7477 	return (bp += i);
7478 }
7479 
7480 static fct_status_t
qlt_27xx_fw_dump(fct_local_port_t * port,stmf_state_change_info_t * ssci)7481 qlt_27xx_fw_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
7482 {
7483 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
7484 	qlt_dmp_template_t *template_buff;
7485 	uint32_t tsize, dsize, len;
7486 	uint32_t cnt, *dp, *bp;
7487 	uint8_t *fw;
7488 	caddr_t	sp;
7489 
7490 	EL(qlt, "enter...\n");
7491 
7492 	mutex_enter(&qlt->qlt_ioctl_lock);
7493 	/*
7494 	 * To make sure that there's no outstanding dumping task
7495 	 */
7496 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
7497 		mutex_exit(&qlt->qlt_ioctl_lock);
7498 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
7499 		    qlt->qlt_ioctl_flags);
7500 		return (FCT_FAILURE);
7501 	}
7502 
7503 	/*
7504 	 * To make sure not to overwrite existing dump
7505 	 */
7506 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
7507 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
7508 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
7509 		/*
7510 		 * If we have already one dump, but it's not triggered by user
7511 		 * and the user hasn't fetched it, we shouldn't dump again.
7512 		 * But if qlt force a fw dump, then we need to overwrite the
7513 		 * previous one anyway.
7514 		 */
7515 		mutex_exit(&qlt->qlt_ioctl_lock);
7516 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
7517 		    qlt->qlt_ioctl_flags);
7518 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
7519 		    "is one already outstanding.", qlt->instance);
7520 		return (FCT_FAILURE);
7521 	}
7522 
7523 	if (qlt->dmp_template_addr == NULL) {
7524 		mutex_exit(&qlt->qlt_ioctl_lock);
7525 		EL(qlt, "dmp_template_addr is NULL, can't "
7526 		    "perform firmware dump\n");
7527 		cmn_err(CE_WARN, "!qlt(%d) dmp_template_addr is NULL, can't "
7528 		    "perform firmware dump", qlt->instance);
7529 		return (FCT_FAILURE);
7530 	}
7531 
7532 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
7533 	if (ssci != NULL && (ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
7534 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
7535 	} else {
7536 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
7537 	}
7538 	mutex_exit(&qlt->qlt_ioctl_lock);
7539 
7540 	template_buff = (qlt_dmp_template_t *)qlt->dmp_template_addr;
7541 	tsize = template_buff->hdr.size_of_template;
7542 
7543 	if (qlt->fw_bin_dump_size == 0) {
7544 		qlt->fw_bin_dump_buf = kmem_zalloc(tsize, KM_NOSLEEP);
7545 		if (qlt->fw_bin_dump_buf == NULL) {
7546 			cmn_err(CE_WARN, "!qlt(%d) cannot alloc bin dump buf",
7547 			    qlt->instance);
7548 			return (FCT_FAILURE);
7549 		}
7550 		cnt = (uint32_t)(tsize / sizeof (uint32_t));
7551 		dp = (uint32_t *)qlt->fw_bin_dump_buf;
7552 		bp = (uint32_t *)&template_buff->hdr;
7553 		while (cnt--) {
7554 			*dp++ = ddi_get32(qlt->dmp_template_acc_handle, bp++);
7555 		}
7556 		qlt->fw_bin_dump_size = qlt_27xx_dmp_parse_template(qlt,
7557 		    (qlt_dt_hdr_t *)qlt->fw_bin_dump_buf, NULL, 0);
7558 		kmem_free(qlt->fw_bin_dump_buf, tsize);
7559 		qlt->fw_bin_dump_buf = NULL;
7560 
7561 		if (qlt->fw_bin_dump_size == 0) {
7562 			return (FCT_FAILURE);
7563 		}
7564 
7565 		/*
7566 		 * Determine ascii dump file size
7567 		 * 2 ascii bytes per binary byte + a space and
7568 		 * a newline every 16 binary bytes
7569 		 */
7570 		qlt->fw_ascii_dump_size = qlt->fw_bin_dump_size << 1;
7571 		qlt->fw_ascii_dump_size += qlt->fw_bin_dump_size;
7572 		qlt->fw_ascii_dump_size += qlt->fw_bin_dump_size / 16 + 1;
7573 
7574 		EL(qlt, "fw_bin_dump_size=%xh, "
7575 		    "fw_acsii_dump_size=%xh\n", qlt->fw_bin_dump_size,
7576 		    qlt->fw_ascii_dump_size);
7577 	}
7578 
7579 	if (qlt->fw_bin_dump_buf != NULL) {
7580 		/* overwrite the previous fw dump by qlt forced fw dump */
7581 		bzero((void *) qlt->fw_bin_dump_buf, qlt->fw_bin_dump_size);
7582 	} else {
7583 		qlt->fw_bin_dump_buf = kmem_zalloc(qlt->fw_bin_dump_size,
7584 		    KM_NOSLEEP);
7585 		if (qlt->fw_bin_dump_buf == NULL) {
7586 			qlt->fw_bin_dump_size = 0;
7587 			EL(qlt, "done, failed alloc bin dump buf\n");
7588 			return (FCT_FAILURE);
7589 		}
7590 	}
7591 
7592 	if ((qlt->fw_dump_size != 0) &&
7593 	    (qlt->fw_dump_size != qlt->fw_ascii_dump_size)) {
7594 		if (qlt->qlt_fwdump_buf != NULL) {
7595 			/* Release previously allocated buffer */
7596 			kmem_free(qlt->qlt_fwdump_buf, qlt->fw_dump_size);
7597 			qlt->qlt_fwdump_buf = NULL;
7598 		}
7599 	}
7600 
7601 	if (qlt->qlt_fwdump_buf == NULL) {
7602 		qlt->qlt_fwdump_buf = kmem_zalloc(qlt->fw_ascii_dump_size,
7603 		    KM_NOSLEEP);
7604 		if (qlt->qlt_fwdump_buf == NULL) {
7605 			EL(qlt, "done, failed alloc ascii fw dump buf\n");
7606 			return (FCT_FAILURE);
7607 		}
7608 		qlt->fw_dump_size = qlt->fw_ascii_dump_size;
7609 	}
7610 
7611 	/* Disable ISP interrupts. */
7612 	REG_WR32(qlt, 0xc, 0);
7613 
7614 	cnt = (uint32_t)(tsize / sizeof (uint32_t));
7615 	dp = (uint32_t *)qlt->fw_bin_dump_buf;
7616 	bp = (uint32_t *)&template_buff->hdr;
7617 	while (cnt--) {
7618 		*dp++ = ddi_get32(qlt->dmp_template_acc_handle, bp++);
7619 	}
7620 
7621 	(void) qlt_27xx_dmp_parse_template(qlt,
7622 	    (qlt_dt_hdr_t *)qlt->fw_bin_dump_buf,
7623 	    (uint8_t *)dp, qlt->fw_bin_dump_size);
7624 
7625 #ifdef _BIG_ENDIAN
7626 	cnt = (uint32_t)(tsize / sizeof (uint32_t));
7627 	dp = (uint32_t *)qlt->fw_bin_dump_buf;
7628 	while (cnt--) {
7629 		qlt_chg_endian((uint8_t *)dp, 4);
7630 		dp++;
7631 	}
7632 #endif
7633 
7634 	/*
7635 	 * Build ascii dump
7636 	 */
7637 	len = qlt->fw_ascii_dump_size;
7638 	dsize = qlt->fw_bin_dump_size;
7639 	fw = (uint8_t *)qlt->fw_bin_dump_buf;
7640 	sp = qlt->qlt_fwdump_buf;
7641 
7642 	EL(qlt, "fw_dump_buffer=%ph, fw=%ph, fw_ascii_dump_size=%xh, "
7643 	    "dsize=%xh\n", (void *)qlt->qlt_fwdump_buf, (void *)fw,
7644 	    len, dsize);
7645 
7646 	/*
7647 	 * 2 ascii bytes per binary byte + a space and
7648 	 * a newline every 16 binary bytes
7649 	 */
7650 	cnt = 0;
7651 	while (cnt < dsize) {
7652 		(void) snprintf(sp, len, "%02x ", *fw++);
7653 		if ((sp = qlt_str_ptr(qlt, sp, &len)) == NULL) {
7654 			break;
7655 		}
7656 		if (++cnt % 16 == 0) {
7657 			(void) snprintf(sp, len, "\n");
7658 			if ((sp = qlt_str_ptr(qlt, sp, &len)) == NULL) {
7659 				break;
7660 			}
7661 		}
7662 	}
7663 	if (cnt % 16 != 0) {
7664 		(void) snprintf(sp, len, "\n");
7665 		sp = qlt_str_ptr(qlt, sp, &len);
7666 	}
7667 
7668 	mutex_enter(&qlt->qlt_ioctl_lock);
7669 	qlt->qlt_ioctl_flags &=
7670 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
7671 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
7672 	mutex_exit(&qlt->qlt_ioctl_lock);
7673 
7674 	EL(qlt, "done...\n");
7675 	return (FCT_SUCCESS);
7676 }
7677 
7678 /*
7679  * All QLT_FIRMWARE_* will mainly be handled in this function
7680  * It can not be called in interrupt context
7681  *
7682  * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
7683  * and qlt_ioctl_lock
7684  */
7685 static fct_status_t
qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
7687 {
7688 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
7689 	int		i;
7690 	int		retries, n;
7691 	uint_t		size_left;
7692 	char		c = ' ';
7693 	uint32_t	addr, endaddr, words_to_read;
7694 	caddr_t		buf;
7695 	fct_status_t	ret;
7696 
7697 	if (qlt->qlt_27xx_chip) {
7698 		return (qlt_27xx_fw_dump(port, ssci));
7699 	}
7700 	mutex_enter(&qlt->qlt_ioctl_lock);
7701 	/*
7702 	 * To make sure that there's no outstanding dumping task
7703 	 */
7704 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
7705 		mutex_exit(&qlt->qlt_ioctl_lock);
7706 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
7707 		    qlt->qlt_ioctl_flags);
7708 		return (FCT_FAILURE);
7709 	}
7710 
7711 	/*
7712 	 * To make sure not to overwrite existing dump
7713 	 */
7714 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
7715 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
7716 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
7717 		/*
7718 		 * If we have already one dump, but it's not triggered by user
7719 		 * and the user hasn't fetched it, we shouldn't dump again.
7720 		 */
7721 		mutex_exit(&qlt->qlt_ioctl_lock);
7722 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
7723 		    qlt->qlt_ioctl_flags);
7724 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
7725 		    "is one already outstanding.", qlt->instance);
7726 		return (FCT_FAILURE);
7727 	}
7728 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
7729 	if ((ssci != NULL) && (ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
7730 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
7731 	} else {
7732 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
7733 	}
7734 	mutex_exit(&qlt->qlt_ioctl_lock);
7735 
7736 	size_left = QLT_FWDUMP_BUFSIZE;
7737 	if (qlt->qlt_mq_enabled && qlt->qlt_queue_cnt >= 8) {
7738 		size_left += 512 * 1024;
7739 	}
7740 	qlt->fw_dump_size = size_left;
7741 	if (!qlt->qlt_fwdump_buf) {
7742 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
7743 		/*
7744 		 * It's the only place that we allocate buf for dumping. After
7745 		 * it's allocated, we will use it until the port is detached.
7746 		 */
7747 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_NOSLEEP);
7748 		if (qlt->qlt_fwdump_buf == NULL) {
7749 			EL(qlt, "cannot alloc fwdump buffer\n");
7750 			cmn_err(CE_WARN, "!qlt(%d): cannot alloc fwdump buf",
7751 			    qlt->instance);
7752 			return (FCT_FAILURE);
7753 		}
7754 	}
7755 
7756 	EL(qlt, "starting firmware dump...\n");
7757 	cmn_err(CE_WARN, "!qlt(%d) starting firmware dump...",
7758 	    qlt->instance);
7759 
7760 	/*
7761 	 * Start to dump firmware
7762 	 */
7763 	buf = (caddr_t)qlt->qlt_fwdump_buf;
7764 
7765 	/*
7766 	 * Print the ISP firmware revision number and attributes information
7767 	 * Read the RISC to Host Status register
7768 	 */
7769 	n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
7770 	    "Attributes %04x\n\nR2H Status register\n%08x",
7771 	    qlt->fw_major, qlt->fw_minor,
7772 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
7773 	buf += n; size_left -= n;
7774 
7775 	/*
7776 	 * Before pausing the RISC, make sure no mailbox can execute
7777 	 */
7778 	mutex_enter(&qlt->mbox_lock);
7779 	if ((qlt->mbox_io_state != MBOX_STATE_UNKNOWN) &&
7780 	    (qlt->qlt_intr_enabled)) {
7781 		/*
7782 		 * Wait to grab the mailboxes
7783 		 */
7784 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
7785 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
7786 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
7787 			    ddi_get_lbolt() + drv_usectohz(1000000));
7788 			if (retries > 5) {
7789 				mutex_exit(&qlt->mbox_lock);
7790 				EL(qlt, "can't drain out mailbox commands\n");
7791 				goto dump_fail;
7792 			}
7793 		}
7794 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
7795 		cv_broadcast(&qlt->mbox_cv);
7796 	}
7797 	mutex_exit(&qlt->mbox_lock);
7798 
7799 	/*
7800 	 * Pause the RISC processor
7801 	 */
7802 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
7803 
7804 	/*
7805 	 * Wait for the RISC processor to pause
7806 	 */
7807 	for (i = 0; i < 200; i++) {
7808 		if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
7809 			break;
7810 		}
7811 		drv_usecwait(1000);
7812 	}
7813 	if (i == 200) {
7814 		EL(qlt, "can't pause\n");
7815 		return (FCT_FAILURE);
7816 	}
7817 
7818 	if (qlt->qlt_83xx_chip) {
7819 		/* Disable ECC checks in FB registers */
7820 		REG_WR32(qlt, 0x54, 0x6000);
7821 		REG_WR32(qlt, 0xC0, 0); /* 6000h */
7822 		REG_WR32(qlt, 0xCC, 0); /* 6003h */
7823 		REG_WR32(qlt, 0x54, 0x6010);
7824 		REG_WR32(qlt, 0xD4, 0); /* 6015h */
7825 
7826 		/* disable ECC detection in PCR whilst dumping */
7827 		REG_WR32(qlt, 0x54, 0xF70);
7828 		REG_WR32(qlt, 0xF0, 0x60000000);
7829 	}
7830 
7831 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
7832 	    (!qlt->qlt_83xx_chip) && (!qlt->qlt_27xx_chip)) {
7833 		goto over_25xx_specific_dump;
7834 	}
7835 	n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
7836 	buf += n; size_left -= n;
7837 	REG_WR32(qlt, 0x54, 0x7000);
7838 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7839 	buf += n; size_left -= n;
7840 	REG_WR32(qlt, 0x54, 0x7010);
7841 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7842 	buf += n; size_left -= n;
7843 	if (qlt->qlt_83xx_chip) {
7844 		REG_WR32(qlt, 0x54, 0x7040);
7845 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7846 		buf += n; size_left -= n;
7847 	}
7848 	REG_WR32(qlt, 0x54, 0x7C00);
7849 
7850 	n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
7851 	buf += n; size_left -= n;
7852 	REG_WR32(qlt, 0xC0, 0x1);
7853 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
7854 	buf += n; size_left -= n;
7855 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
7856 	buf += n; size_left -= n;
7857 	REG_WR32(qlt, 0xC0, 0x0);
7858 
7859 	/* don't need to do this for 83xx */
7860 	if ((!qlt->qlt_83xx_chip) && (qlt->qlt_mq_enabled)) {
7861 		uint16_t qi;
7862 
7863 		for (qi = 0; qi < qlt->qlt_queue_cnt; qi++) {
7864 
7865 			n = (int)snprintf(buf, size_left,
7866 			    "\n\nQueue Pointers #%x\n", qi);
7867 			buf += n; size_left -= n;
7868 
7869 			n = (int)snprintf(buf, size_left, "%08x ",
7870 			    MQBAR_RD32(qlt,
7871 			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN));
7872 			buf += n; size_left -= n;
7873 			n = (int)snprintf(buf, size_left, "%08x ",
7874 			    MQBAR_RD32(qlt,
7875 			    (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT));
7876 			buf += n; size_left -= n;
7877 			n = (int)snprintf(buf, size_left, "%08x ",
7878 			    MQBAR_RD32(qlt,
7879 			    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN));
7880 			buf += n; size_left -= n;
7881 			n = (int)snprintf(buf, size_left, "%08x",
7882 			    MQBAR_RD32(qlt,
7883 			    (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_OUT));
7884 			buf += n; size_left -= n;
7885 		}
7886 	}
7887 
7888 over_25xx_specific_dump:;
7889 	n = (int)snprintf(buf, size_left, "\n\nHost Interface registers\n");
7890 	buf += n; size_left -= n;
7891 	/*
7892 	 * Capture data from 32 registers
7893 	 */
7894 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
7895 	buf += n; size_left -= n;
7896 
7897 	/*
7898 	 * Disable interrupts
7899 	 */
7900 	REG_WR32(qlt, 0xc, 0);
7901 	EL(qlt, "Disable interrupt\n");
7902 
7903 	/*
7904 	 * Shadow registers
7905 	 */
7906 	n = (int)snprintf(buf, size_left, "\nShadow registers\n");
7907 	buf += n; size_left -= n;
7908 
7909 	REG_WR32(qlt, 0x54, 0xF70);
7910 	addr = 0xb0000000;
7911 	for (i = 0; i < 0xb; i++) {
7912 		if ((!qlt->qlt_25xx_chip) &&
7913 		    (!qlt->qlt_81xx_chip) &&
7914 		    (!qlt->qlt_83xx_chip) &&
7915 		    (i >= 7)) {
7916 			break;
7917 		}
7918 		if (i && ((i & 7) == 0)) {
7919 			n = (int)snprintf(buf, size_left, "\n");
7920 			buf += n; size_left -= n;
7921 		}
7922 		REG_WR32(qlt, 0xF0, addr);
7923 		n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
7924 		buf += n; size_left -= n;
7925 		addr += 0x100000;
7926 	}
7927 
7928 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
7929 	    (qlt->qlt_83xx_chip)) {
7930 		REG_WR32(qlt, 0x54, 0x10);
7931 		n = (int)snprintf(buf, size_left,
7932 		    "\n\nRISC IO register\n%08x", REG_RD32(qlt, 0xC0));
7933 		buf += n; size_left -= n;
7934 	}
7935 
7936 	/*
7937 	 * Mailbox registers
7938 	 */
7939 	n = (int)snprintf(buf, size_left, "\n\nMailbox registers\n");
7940 	buf += n; size_left -= n;
7941 	for (i = 0; i < 32; i += 2) {
7942 		if ((i + 2) & 15) {
7943 			c = ' ';
7944 		} else {
7945 			c = '\n';
7946 		}
7947 		n = (int)snprintf(buf, size_left, "%04x %04x%c",
7948 		    REG_RD16(qlt, 0x80 + (i << 1)),
7949 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
7950 		buf += n; size_left -= n;
7951 	}
7952 
7953 	/*
7954 	 * Transfer sequence registers
7955 	 */
7956 	n = (int)snprintf(buf, size_left, "\nXSEQ GP registers\n");
7957 	buf += n; size_left -= n;
7958 
7959 	if (qlt->qlt_83xx_chip) {
7960 		REG_WR32(qlt, 0x54, 0xBE00);
7961 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7962 		buf += n; size_left -= n;
7963 		REG_WR32(qlt, 0x54, 0xBE10);
7964 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7965 		buf += n; size_left -= n;
7966 		REG_WR32(qlt, 0x54, 0xBE20);
7967 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7968 		buf += n; size_left -= n;
7969 		REG_WR32(qlt, 0x54, 0xBE30);
7970 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7971 		buf += n; size_left -= n;
7972 		REG_WR32(qlt, 0x54, 0xBE40);
7973 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7974 		buf += n; size_left -= n;
7975 		REG_WR32(qlt, 0x54, 0xBE50);
7976 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7977 		buf += n; size_left -= n;
7978 		REG_WR32(qlt, 0x54, 0xBE60);
7979 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7980 		buf += n; size_left -= n;
7981 		REG_WR32(qlt, 0x54, 0xBE70);
7982 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7983 		buf += n; size_left -= n;
7984 	}
7985 	REG_WR32(qlt, 0x54, 0xBF00);
7986 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7987 	buf += n; size_left -= n;
7988 	REG_WR32(qlt, 0x54, 0xBF10);
7989 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7990 	buf += n; size_left -= n;
7991 	REG_WR32(qlt, 0x54, 0xBF20);
7992 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7993 	buf += n; size_left -= n;
7994 	REG_WR32(qlt, 0x54, 0xBF30);
7995 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7996 	buf += n; size_left -= n;
7997 	REG_WR32(qlt, 0x54, 0xBF40);
7998 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7999 	buf += n; size_left -= n;
8000 	REG_WR32(qlt, 0x54, 0xBF50);
8001 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8002 	buf += n; size_left -= n;
8003 	REG_WR32(qlt, 0x54, 0xBF60);
8004 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8005 	buf += n; size_left -= n;
8006 	REG_WR32(qlt, 0x54, 0xBF70);
8007 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8008 	buf += n; size_left -= n;
8009 	n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
8010 	buf += n; size_left -= n;
8011 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8012 	    (qlt->qlt_83xx_chip)) {
8013 		REG_WR32(qlt, 0x54, 0xBFC0);
8014 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8015 		buf += n; size_left -= n;
8016 		REG_WR32(qlt, 0x54, 0xBFD0);
8017 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8018 		buf += n; size_left -= n;
8019 	}
8020 	REG_WR32(qlt, 0x54, 0xBFE0);
8021 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8022 	buf += n; size_left -= n;
8023 	n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
8024 	buf += n; size_left -= n;
8025 	REG_WR32(qlt, 0x54, 0xBFF0);
8026 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8027 	buf += n; size_left -= n;
8028 
8029 	if (qlt->qlt_83xx_chip) {
8030 		n = (int)snprintf(buf, size_left, "\nXSEQ-2 registers\n");
8031 		buf += n; size_left -= n;
8032 		REG_WR32(qlt, 0x54, 0xBEF0);
8033 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8034 		buf += n; size_left -= n;
8035 	}
8036 
8037 	/*
8038 	 * Receive sequence registers
8039 	 */
8040 	n = (int)snprintf(buf, size_left, "\nRSEQ GP registers\n");
8041 	buf += n; size_left -= n;
8042 	if (qlt->qlt_83xx_chip) {
8043 		REG_WR32(qlt, 0x54, 0xFE00);
8044 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8045 		buf += n; size_left -= n;
8046 		REG_WR32(qlt, 0x54, 0xFE10);
8047 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8048 		buf += n; size_left -= n;
8049 		REG_WR32(qlt, 0x54, 0xFE20);
8050 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8051 		buf += n; size_left -= n;
8052 		REG_WR32(qlt, 0x54, 0xFE30);
8053 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8054 		buf += n; size_left -= n;
8055 		REG_WR32(qlt, 0x54, 0xFE40);
8056 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8057 		buf += n; size_left -= n;
8058 		REG_WR32(qlt, 0x54, 0xFE50);
8059 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8060 		buf += n; size_left -= n;
8061 		REG_WR32(qlt, 0x54, 0xFE60);
8062 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8063 		buf += n; size_left -= n;
8064 		REG_WR32(qlt, 0x54, 0xFE70);
8065 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8066 		buf += n; size_left -= n;
8067 	}
8068 	REG_WR32(qlt, 0x54, 0xFF00);
8069 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8070 	buf += n; size_left -= n;
8071 	REG_WR32(qlt, 0x54, 0xFF10);
8072 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8073 	buf += n; size_left -= n;
8074 	REG_WR32(qlt, 0x54, 0xFF20);
8075 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8076 	buf += n; size_left -= n;
8077 	REG_WR32(qlt, 0x54, 0xFF30);
8078 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8079 	buf += n; size_left -= n;
8080 	REG_WR32(qlt, 0x54, 0xFF40);
8081 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8082 	buf += n; size_left -= n;
8083 	REG_WR32(qlt, 0x54, 0xFF50);
8084 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8085 	buf += n; size_left -= n;
8086 	REG_WR32(qlt, 0x54, 0xFF60);
8087 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8088 	buf += n; size_left -= n;
8089 	REG_WR32(qlt, 0x54, 0xFF70);
8090 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8091 	buf += n; size_left -= n;
8092 	n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
8093 	buf += n; size_left -= n;
8094 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8095 	    (qlt->qlt_83xx_chip)) {
8096 		REG_WR32(qlt, 0x54, 0xFFC0);
8097 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8098 		buf += n; size_left -= n;
8099 	}
8100 	REG_WR32(qlt, 0x54, 0xFFD0);
8101 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8102 	buf += n; size_left -= n;
8103 	n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
8104 	buf += n; size_left -= n;
8105 	REG_WR32(qlt, 0x54, 0xFFE0);
8106 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8107 	buf += n; size_left -= n;
8108 	n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
8109 	buf += n; size_left -= n;
8110 	REG_WR32(qlt, 0x54, 0xFFF0);
8111 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8112 	buf += n; size_left -= n;
8113 	if (qlt->qlt_83xx_chip) {
8114 		n = (int)snprintf(buf, size_left, "\nRSEQ-3 registers\n");
8115 		buf += n; size_left -= n;
8116 		REG_WR32(qlt, 0x54, 0xFEF0);
8117 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8118 		buf += n; size_left -= n;
8119 	}
8120 
8121 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
8122 	    (!qlt->qlt_83xx_chip))
8123 		goto over_aseq_regs;
8124 
8125 	/*
8126 	 * Auxiliary sequencer registers
8127 	 */
8128 	n = (int)snprintf(buf, size_left, "\nASEQ GP registers\n");
8129 	buf += n; size_left -= n;
8130 	REG_WR32(qlt, 0x54, 0xB000);
8131 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8132 	buf += n; size_left -= n;
8133 	REG_WR32(qlt, 0x54, 0xB010);
8134 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8135 	buf += n; size_left -= n;
8136 	REG_WR32(qlt, 0x54, 0xB020);
8137 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8138 	buf += n; size_left -= n;
8139 	REG_WR32(qlt, 0x54, 0xB030);
8140 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8141 	buf += n; size_left -= n;
8142 	REG_WR32(qlt, 0x54, 0xB040);
8143 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8144 	buf += n; size_left -= n;
8145 	REG_WR32(qlt, 0x54, 0xB050);
8146 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8147 	buf += n; size_left -= n;
8148 	REG_WR32(qlt, 0x54, 0xB060);
8149 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8150 	buf += n; size_left -= n;
8151 	REG_WR32(qlt, 0x54, 0xB070);
8152 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8153 	buf += n; size_left -= n;
8154 	if (qlt->qlt_83xx_chip) {
8155 		REG_WR32(qlt, 0x54, 0xB100);
8156 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8157 		buf += n; size_left -= n;
8158 		REG_WR32(qlt, 0x54, 0xB110);
8159 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8160 		buf += n; size_left -= n;
8161 		REG_WR32(qlt, 0x54, 0xB120);
8162 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8163 		buf += n; size_left -= n;
8164 		REG_WR32(qlt, 0x54, 0xB130);
8165 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8166 		buf += n; size_left -= n;
8167 		REG_WR32(qlt, 0x54, 0xB140);
8168 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8169 		buf += n; size_left -= n;
8170 		REG_WR32(qlt, 0x54, 0xB150);
8171 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8172 		buf += n; size_left -= n;
8173 		REG_WR32(qlt, 0x54, 0xB160);
8174 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8175 		buf += n; size_left -= n;
8176 		REG_WR32(qlt, 0x54, 0xB170);
8177 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8178 		buf += n; size_left -= n;
8179 	}
8180 	n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
8181 	buf += n; size_left -= n;
8182 	REG_WR32(qlt, 0x54, 0xB0C0);
8183 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8184 	buf += n; size_left -= n;
8185 	REG_WR32(qlt, 0x54, 0xB0D0);
8186 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8187 	buf += n; size_left -= n;
8188 	n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
8189 	buf += n; size_left -= n;
8190 	REG_WR32(qlt, 0x54, 0xB0E0);
8191 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8192 	buf += n; size_left -= n;
8193 	n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
8194 	buf += n; size_left -= n;
8195 	REG_WR32(qlt, 0x54, 0xB0F0);
8196 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8197 	buf += n; size_left -= n;
8198 	if (qlt->qlt_83xx_chip) {
8199 		n = (int)snprintf(buf, size_left, "\nASEQ-3 registers\n");
8200 		buf += n; size_left -= n;
8201 		REG_WR32(qlt, 0x54, 0xB1F0);
8202 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8203 		buf += n; size_left -= n;
8204 	}
8205 
8206 over_aseq_regs:;
8207 
8208 	/*
8209 	 * Command DMA registers
8210 	 */
8211 	n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
8212 	buf += n; size_left -= n;
8213 	REG_WR32(qlt, 0x54, 0x7100);
8214 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8215 	buf += n; size_left -= n;
8216 	if (qlt->qlt_83xx_chip) {
8217 		REG_WR32(qlt, 0x54, 0x7120);
8218 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8219 		buf += n; size_left -= n;
8220 		REG_WR32(qlt, 0x54, 0x7130);
8221 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8222 		buf += n; size_left -= n;
8223 		REG_WR32(qlt, 0x54, 0x71F0);
8224 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8225 		buf += n; size_left -= n;
8226 	}
8227 
8228 	/*
8229 	 * Queues
8230 	 */
8231 	n = (int)snprintf(buf, size_left,
8232 	    "\nRequest0 Queue DMA Channel registers\n");
8233 	buf += n; size_left -= n;
8234 	REG_WR32(qlt, 0x54, 0x7200);
8235 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
8236 	buf += n; size_left -= n;
8237 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
8238 	buf += n; size_left -= n;
8239 
8240 	n = (int)snprintf(buf, size_left,
8241 	    "\n\nResponse0 Queue DMA Channel registers\n");
8242 	buf += n; size_left -= n;
8243 	REG_WR32(qlt, 0x54, 0x7300);
8244 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
8245 	buf += n; size_left -= n;
8246 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
8247 	buf += n; size_left -= n;
8248 
8249 	n = (int)snprintf(buf, size_left,
8250 	    "\n\nRequest1 Queue DMA Channel registers\n");
8251 	buf += n; size_left -= n;
8252 	REG_WR32(qlt, 0x54, 0x7400);
8253 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
8254 	buf += n; size_left -= n;
8255 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
8256 	buf += n; size_left -= n;
8257 
8258 	/*
8259 	 * Transmit DMA registers
8260 	 */
8261 	n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
8262 	buf += n; size_left -= n;
8263 	REG_WR32(qlt, 0x54, 0x7600);
8264 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8265 	buf += n; size_left -= n;
8266 	REG_WR32(qlt, 0x54, 0x7610);
8267 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8268 	buf += n; size_left -= n;
8269 	n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
8270 	buf += n; size_left -= n;
8271 	REG_WR32(qlt, 0x54, 0x7620);
8272 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8273 	buf += n; size_left -= n;
8274 	REG_WR32(qlt, 0x54, 0x7630);
8275 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8276 	buf += n; size_left -= n;
8277 	n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
8278 	buf += n; size_left -= n;
8279 	REG_WR32(qlt, 0x54, 0x7640);
8280 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8281 	buf += n; size_left -= n;
8282 	REG_WR32(qlt, 0x54, 0x7650);
8283 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8284 	buf += n; size_left -= n;
8285 	n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
8286 	buf += n; size_left -= n;
8287 	REG_WR32(qlt, 0x54, 0x7660);
8288 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8289 	buf += n; size_left -= n;
8290 	REG_WR32(qlt, 0x54, 0x7670);
8291 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8292 	buf += n; size_left -= n;
8293 	n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
8294 	buf += n; size_left -= n;
8295 	REG_WR32(qlt, 0x54, 0x7680);
8296 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8297 	buf += n; size_left -= n;
8298 	REG_WR32(qlt, 0x54, 0x7690);
8299 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8300 	buf += n; size_left -= n;
8301 	n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
8302 	buf += n; size_left -= n;
8303 	REG_WR32(qlt, 0x54, 0x76A0);
8304 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8305 	buf += n; size_left -= n;
8306 
8307 	/*
8308 	 * Receive DMA registers
8309 	 */
8310 	n = (int)snprintf(buf, size_left,
8311 	    "\nRCV Thread 0 Data DMA registers\n");
8312 	buf += n; size_left -= n;
8313 	REG_WR32(qlt, 0x54, 0x7700);
8314 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8315 	buf += n; size_left -= n;
8316 	REG_WR32(qlt, 0x54, 0x7710);
8317 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8318 	buf += n; size_left -= n;
8319 	n = (int)snprintf(buf, size_left,
8320 	    "\nRCV Thread 1 Data DMA registers\n");
8321 	buf += n; size_left -= n;
8322 	REG_WR32(qlt, 0x54, 0x7720);
8323 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8324 	buf += n; size_left -= n;
8325 	REG_WR32(qlt, 0x54, 0x7730);
8326 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8327 	buf += n; size_left -= n;
8328 
8329 	/*
8330 	 * RISC registers
8331 	 */
8332 	n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
8333 	buf += n; size_left -= n;
8334 	REG_WR32(qlt, 0x54, 0x0F00);
8335 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8336 	buf += n; size_left -= n;
8337 	REG_WR32(qlt, 0x54, 0x0F10);
8338 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8339 	buf += n; size_left -= n;
8340 	REG_WR32(qlt, 0x54, 0x0F20);
8341 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8342 	buf += n; size_left -= n;
8343 	REG_WR32(qlt, 0x54, 0x0F30);
8344 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8345 	buf += n; size_left -= n;
8346 	REG_WR32(qlt, 0x54, 0x0F40);
8347 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8348 	buf += n; size_left -= n;
8349 	REG_WR32(qlt, 0x54, 0x0F50);
8350 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8351 	buf += n; size_left -= n;
8352 	REG_WR32(qlt, 0x54, 0x0F60);
8353 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8354 	buf += n; size_left -= n;
8355 	REG_WR32(qlt, 0x54, 0x0F70);
8356 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8357 	buf += n; size_left -= n;
8358 
8359 	/*
8360 	 * Local memory controller registers
8361 	 */
8362 	n = (int)snprintf(buf, size_left, "\nLMC registers\n");
8363 	buf += n; size_left -= n;
8364 	REG_WR32(qlt, 0x54, 0x3000);
8365 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8366 	buf += n; size_left -= n;
8367 	REG_WR32(qlt, 0x54, 0x3010);
8368 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8369 	buf += n; size_left -= n;
8370 	REG_WR32(qlt, 0x54, 0x3020);
8371 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8372 	buf += n; size_left -= n;
8373 	REG_WR32(qlt, 0x54, 0x3030);
8374 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8375 	buf += n; size_left -= n;
8376 	REG_WR32(qlt, 0x54, 0x3040);
8377 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8378 	buf += n; size_left -= n;
8379 	REG_WR32(qlt, 0x54, 0x3050);
8380 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8381 	buf += n; size_left -= n;
8382 	REG_WR32(qlt, 0x54, 0x3060);
8383 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8384 	buf += n; size_left -= n;
8385 
8386 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8387 	    (qlt->qlt_83xx_chip)) {
8388 		REG_WR32(qlt, 0x54, 0x3070);
8389 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8390 		buf += n; size_left -= n;
8391 	}
8392 
8393 	/*
8394 	 * Fibre protocol module registers
8395 	 */
8396 	n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
8397 	buf += n; size_left -= n;
8398 	REG_WR32(qlt, 0x54, 0x4000);
8399 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8400 	buf += n; size_left -= n;
8401 	REG_WR32(qlt, 0x54, 0x4010);
8402 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8403 	buf += n; size_left -= n;
8404 	REG_WR32(qlt, 0x54, 0x4020);
8405 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8406 	buf += n; size_left -= n;
8407 	REG_WR32(qlt, 0x54, 0x4030);
8408 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8409 	buf += n; size_left -= n;
8410 	REG_WR32(qlt, 0x54, 0x4040);
8411 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8412 	buf += n; size_left -= n;
8413 	REG_WR32(qlt, 0x54, 0x4050);
8414 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8415 	buf += n; size_left -= n;
8416 	REG_WR32(qlt, 0x54, 0x4060);
8417 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8418 	buf += n; size_left -= n;
8419 	REG_WR32(qlt, 0x54, 0x4070);
8420 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8421 	buf += n; size_left -= n;
8422 	REG_WR32(qlt, 0x54, 0x4080);
8423 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8424 	buf += n; size_left -= n;
8425 	REG_WR32(qlt, 0x54, 0x4090);
8426 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8427 	buf += n; size_left -= n;
8428 	REG_WR32(qlt, 0x54, 0x40A0);
8429 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8430 	buf += n; size_left -= n;
8431 	REG_WR32(qlt, 0x54, 0x40B0);
8432 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8433 	buf += n; size_left -= n;
8434 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) {
8435 		REG_WR32(qlt, 0x54, 0x40C0);
8436 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8437 		buf += n; size_left -= n;
8438 		REG_WR32(qlt, 0x54, 0x40D0);
8439 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8440 		buf += n; size_left -= n;
8441 	}
8442 	if (qlt->qlt_83xx_chip) {
8443 		REG_WR32(qlt, 0x54, 0x40E0);
8444 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8445 		buf += n; size_left -= n;
8446 		REG_WR32(qlt, 0x54, 0x40F0);
8447 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8448 		buf += n; size_left -= n;
8449 
8450 		n = (int)snprintf(buf, size_left, "\nRQ0 Array registers\n");
8451 		buf += n; size_left -= n;
8452 		REG_WR32(qlt, 0x54, 0x5C00);
8453 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8454 		buf += n; size_left -= n;
8455 		REG_WR32(qlt, 0x54, 0x5C10);
8456 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8457 		buf += n; size_left -= n;
8458 		REG_WR32(qlt, 0x54, 0x5C20);
8459 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8460 		buf += n; size_left -= n;
8461 		REG_WR32(qlt, 0x54, 0x5C30);
8462 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8463 		buf += n; size_left -= n;
8464 		REG_WR32(qlt, 0x54, 0x5C40);
8465 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8466 		buf += n; size_left -= n;
8467 		REG_WR32(qlt, 0x54, 0x5C50);
8468 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8469 		buf += n; size_left -= n;
8470 		REG_WR32(qlt, 0x54, 0x5C60);
8471 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8472 		buf += n; size_left -= n;
8473 		REG_WR32(qlt, 0x54, 0x5C70);
8474 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8475 		buf += n; size_left -= n;
8476 		REG_WR32(qlt, 0x54, 0x5C80);
8477 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8478 		buf += n; size_left -= n;
8479 		REG_WR32(qlt, 0x54, 0x5C90);
8480 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8481 		buf += n; size_left -= n;
8482 		REG_WR32(qlt, 0x54, 0x5CA0);
8483 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8484 		buf += n; size_left -= n;
8485 		REG_WR32(qlt, 0x54, 0x5CB0);
8486 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8487 		buf += n; size_left -= n;
8488 		REG_WR32(qlt, 0x54, 0x5CC0);
8489 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8490 		buf += n; size_left -= n;
8491 		REG_WR32(qlt, 0x54, 0x5CD0);
8492 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8493 		buf += n; size_left -= n;
8494 		REG_WR32(qlt, 0x54, 0x5CE0);
8495 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8496 		buf += n; size_left -= n;
8497 		REG_WR32(qlt, 0x54, 0x5CF0);
8498 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8499 		buf += n; size_left -= n;
8500 
8501 		n = (int)snprintf(buf, size_left, "\nRQ1 Array registers\n");
8502 		buf += n; size_left -= n;
8503 		REG_WR32(qlt, 0x54, 0x5D00);
8504 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8505 		buf += n; size_left -= n;
8506 		REG_WR32(qlt, 0x54, 0x5D10);
8507 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8508 		buf += n; size_left -= n;
8509 		REG_WR32(qlt, 0x54, 0x5D20);
8510 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8511 		buf += n; size_left -= n;
8512 		REG_WR32(qlt, 0x54, 0x5D30);
8513 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8514 		buf += n; size_left -= n;
8515 		REG_WR32(qlt, 0x54, 0x5D40);
8516 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8517 		buf += n; size_left -= n;
8518 		REG_WR32(qlt, 0x54, 0x5D50);
8519 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8520 		buf += n; size_left -= n;
8521 		REG_WR32(qlt, 0x54, 0x5D60);
8522 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8523 		buf += n; size_left -= n;
8524 		REG_WR32(qlt, 0x54, 0x5D70);
8525 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8526 		buf += n; size_left -= n;
8527 		REG_WR32(qlt, 0x54, 0x5D80);
8528 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8529 		buf += n; size_left -= n;
8530 		REG_WR32(qlt, 0x54, 0x5D90);
8531 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8532 		buf += n; size_left -= n;
8533 		REG_WR32(qlt, 0x54, 0x5DA0);
8534 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8535 		buf += n; size_left -= n;
8536 		REG_WR32(qlt, 0x54, 0x5DB0);
8537 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8538 		buf += n; size_left -= n;
8539 		REG_WR32(qlt, 0x54, 0x5DC0);
8540 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8541 		buf += n; size_left -= n;
8542 		REG_WR32(qlt, 0x54, 0x5DD0);
8543 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8544 		buf += n; size_left -= n;
8545 		REG_WR32(qlt, 0x54, 0x5DE0);
8546 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8547 		buf += n; size_left -= n;
8548 		REG_WR32(qlt, 0x54, 0x5DF0);
8549 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8550 		buf += n; size_left -= n;
8551 
8552 		n = (int)snprintf(buf, size_left, "\nRP0 Array registers\n");
8553 		buf += n; size_left -= n;
8554 		REG_WR32(qlt, 0x54, 0x5E00);
8555 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8556 		buf += n; size_left -= n;
8557 		REG_WR32(qlt, 0x54, 0x5E10);
8558 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8559 		buf += n; size_left -= n;
8560 		REG_WR32(qlt, 0x54, 0x5E20);
8561 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8562 		buf += n; size_left -= n;
8563 		REG_WR32(qlt, 0x54, 0x5E30);
8564 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8565 		buf += n; size_left -= n;
8566 		REG_WR32(qlt, 0x54, 0x5E40);
8567 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8568 		buf += n; size_left -= n;
8569 		REG_WR32(qlt, 0x54, 0x5E50);
8570 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8571 		buf += n; size_left -= n;
8572 		REG_WR32(qlt, 0x54, 0x5E60);
8573 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8574 		buf += n; size_left -= n;
8575 		REG_WR32(qlt, 0x54, 0x5E70);
8576 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8577 		buf += n; size_left -= n;
8578 		REG_WR32(qlt, 0x54, 0x5E80);
8579 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8580 		buf += n; size_left -= n;
8581 		REG_WR32(qlt, 0x54, 0x5E90);
8582 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8583 		buf += n; size_left -= n;
8584 		REG_WR32(qlt, 0x54, 0x5EA0);
8585 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8586 		buf += n; size_left -= n;
8587 		REG_WR32(qlt, 0x54, 0x5EB0);
8588 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8589 		buf += n; size_left -= n;
8590 		REG_WR32(qlt, 0x54, 0x5EC0);
8591 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8592 		buf += n; size_left -= n;
8593 		REG_WR32(qlt, 0x54, 0x5ED0);
8594 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8595 		buf += n; size_left -= n;
8596 		REG_WR32(qlt, 0x54, 0x5EE0);
8597 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8598 		buf += n; size_left -= n;
8599 		REG_WR32(qlt, 0x54, 0x5EF0);
8600 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8601 		buf += n; size_left -= n;
8602 
8603 		n = (int)snprintf(buf, size_left, "\nRP1 Array registers\n");
8604 		buf += n; size_left -= n;
8605 		REG_WR32(qlt, 0x54, 0x5F00);
8606 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8607 		buf += n; size_left -= n;
8608 		REG_WR32(qlt, 0x54, 0x5F10);
8609 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8610 		buf += n; size_left -= n;
8611 		REG_WR32(qlt, 0x54, 0x5F20);
8612 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8613 		buf += n; size_left -= n;
8614 		REG_WR32(qlt, 0x54, 0x5F30);
8615 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8616 		buf += n; size_left -= n;
8617 		REG_WR32(qlt, 0x54, 0x5F40);
8618 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8619 		buf += n; size_left -= n;
8620 		REG_WR32(qlt, 0x54, 0x5F50);
8621 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8622 		buf += n; size_left -= n;
8623 		REG_WR32(qlt, 0x54, 0x5F60);
8624 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8625 		buf += n; size_left -= n;
8626 		REG_WR32(qlt, 0x54, 0x5F70);
8627 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8628 		buf += n; size_left -= n;
8629 		REG_WR32(qlt, 0x54, 0x5F80);
8630 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8631 		buf += n; size_left -= n;
8632 		REG_WR32(qlt, 0x54, 0x5F90);
8633 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8634 		buf += n; size_left -= n;
8635 		REG_WR32(qlt, 0x54, 0x5FA0);
8636 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8637 		buf += n; size_left -= n;
8638 		REG_WR32(qlt, 0x54, 0x5FB0);
8639 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8640 		buf += n; size_left -= n;
8641 		REG_WR32(qlt, 0x54, 0x5FC0);
8642 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8643 		buf += n; size_left -= n;
8644 		REG_WR32(qlt, 0x54, 0x5FD0);
8645 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8646 		buf += n; size_left -= n;
8647 		REG_WR32(qlt, 0x54, 0x5FE0);
8648 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8649 		buf += n; size_left -= n;
8650 		REG_WR32(qlt, 0x54, 0x5FF0);
8651 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8652 		buf += n; size_left -= n;
8653 
8654 		n = (int)snprintf(buf,
8655 		    size_left, "\nQueue Control Registers\n");
8656 		buf += n; size_left -= n;
8657 		REG_WR32(qlt, 0x54, 0x7800);
8658 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8659 		buf += n; size_left -= n;
8660 	}
8661 
8662 	/*
8663 	 * Fibre buffer registers
8664 	 */
8665 	n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
8666 	buf += n; size_left -= n;
8667 	REG_WR32(qlt, 0x54, 0x6000);
8668 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8669 	buf += n; size_left -= n;
8670 	REG_WR32(qlt, 0x54, 0x6010);
8671 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8672 	buf += n; size_left -= n;
8673 	REG_WR32(qlt, 0x54, 0x6020);
8674 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8675 	buf += n; size_left -= n;
8676 	REG_WR32(qlt, 0x54, 0x6030);
8677 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8678 	buf += n; size_left -= n;
8679 	REG_WR32(qlt, 0x54, 0x6040);
8680 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8681 	buf += n; size_left -= n;
8682 	if (qlt->qlt_83xx_chip) {
8683 		REG_WR32(qlt, 0x54, 0x6060);
8684 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8685 		buf += n; size_left -= n;
8686 		REG_WR32(qlt, 0x54, 0x6070);
8687 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8688 		buf += n; size_left -= n;
8689 	}
8690 	REG_WR32(qlt, 0x54, 0x6100);
8691 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8692 	buf += n; size_left -= n;
8693 	REG_WR32(qlt, 0x54, 0x6130);
8694 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8695 	buf += n; size_left -= n;
8696 	REG_WR32(qlt, 0x54, 0x6150);
8697 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8698 	buf += n; size_left -= n;
8699 	REG_WR32(qlt, 0x54, 0x6170);
8700 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8701 	buf += n; size_left -= n;
8702 	REG_WR32(qlt, 0x54, 0x6190);
8703 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8704 	buf += n; size_left -= n;
8705 	REG_WR32(qlt, 0x54, 0x61B0);
8706 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8707 	buf += n; size_left -= n;
8708 	if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) {
8709 		REG_WR32(qlt, 0x54, 0x61C0);
8710 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8711 		buf += n; size_left -= n;
8712 	}
8713 	if (qlt->qlt_83xx_chip) {
8714 		REG_WR32(qlt, 0x54, 0x6530);
8715 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8716 		buf += n; size_left -= n;
8717 		REG_WR32(qlt, 0x54, 0x6540);
8718 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8719 		buf += n; size_left -= n;
8720 		REG_WR32(qlt, 0x54, 0x6550);
8721 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8722 		buf += n; size_left -= n;
8723 		REG_WR32(qlt, 0x54, 0x6560);
8724 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8725 		buf += n; size_left -= n;
8726 		REG_WR32(qlt, 0x54, 0x6570);
8727 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8728 		buf += n; size_left -= n;
8729 		REG_WR32(qlt, 0x54, 0x6580);
8730 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8731 		buf += n; size_left -= n;
8732 		REG_WR32(qlt, 0x54, 0x6590);
8733 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8734 		buf += n; size_left -= n;
8735 		REG_WR32(qlt, 0x54, 0x65A0);
8736 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8737 		buf += n; size_left -= n;
8738 		REG_WR32(qlt, 0x54, 0x65B0);
8739 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8740 		buf += n; size_left -= n;
8741 		REG_WR32(qlt, 0x54, 0x65C0);
8742 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8743 		buf += n; size_left -= n;
8744 		REG_WR32(qlt, 0x54, 0x65D0);
8745 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8746 		buf += n; size_left -= n;
8747 		REG_WR32(qlt, 0x54, 0x65E0);
8748 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8749 		buf += n; size_left -= n;
8750 	}
8751 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8752 	    (qlt->qlt_83xx_chip)) {
8753 		REG_WR32(qlt, 0x54, 0x6F00);
8754 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8755 		buf += n; size_left -= n;
8756 	}
8757 
8758 	if (qlt->qlt_83xx_chip) {
8759 		n = (int)snprintf(buf, size_left, "\nAT0 Array registers\n");
8760 		buf += n; size_left -= n;
8761 		REG_WR32(qlt, 0x54, 0x7080);
8762 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8763 		buf += n; size_left -= n;
8764 		REG_WR32(qlt, 0x54, 0x7090);
8765 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8766 		buf += n; size_left -= n;
8767 		REG_WR32(qlt, 0x54, 0x70A0);
8768 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8769 		buf += n; size_left -= n;
8770 		REG_WR32(qlt, 0x54, 0x70B0);
8771 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8772 		buf += n; size_left -= n;
8773 		REG_WR32(qlt, 0x54, 0x70C0);
8774 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8775 		buf += n; size_left -= n;
8776 		REG_WR32(qlt, 0x54, 0x70D0);
8777 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8778 		buf += n; size_left -= n;
8779 		REG_WR32(qlt, 0x54, 0x70E0);
8780 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8781 		buf += n; size_left -= n;
8782 		REG_WR32(qlt, 0x54, 0x70F0);
8783 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8784 		buf += n; size_left -= n;
8785 	}
8786 
8787 	EL(qlt, "reset chip\n");
8788 	qlt->intr_sneak_counter = 10;
8789 	mutex_enter(&qlt->intr_lock);
8790 	if (qlt->qlt_mq_enabled) {
8791 		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
8792 			mutex_enter(&qlt->mq_resp[i].mq_lock);
8793 		}
8794 	}
8795 	(void) qlt_reset_chip(qlt);
8796 	drv_usecwait(20);
8797 	qlt->intr_sneak_counter = 0;
8798 	if (qlt->qlt_mq_enabled) {
8799 		for (i = 1; i < qlt->qlt_queue_cnt; i++) {
8800 			mutex_exit(&qlt->mq_resp[i].mq_lock);
8801 		}
8802 	}
8803 	mutex_exit(&qlt->intr_lock);
8804 	EL(qlt, "reset chip, done\n");
8805 
8806 	/*
8807 	 * Memory
8808 	 */
8809 	n = (int)snprintf(buf, size_left, "\nCode RAM\n");
8810 	buf += n; size_left -= n;
8811 
8812 	addr = 0x20000;
8813 	endaddr = (qlt->qlt_83xx_chip) ? 0x22400 : 0x22000;
8814 	words_to_read = 0;
8815 	while (addr < endaddr) {
8816 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
8817 		if ((words_to_read + addr) > endaddr) {
8818 			words_to_read = endaddr - addr;
8819 		}
8820 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
8821 		    QLT_SUCCESS) {
8822 			EL(qlt, "Error reading risc ram - CODE RAM status="
8823 			    "%llxh\n", ret);
8824 			goto dump_fail;
8825 		}
8826 
8827 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
8828 		buf += n; size_left -= n;
8829 
8830 		if (size_left < 100000) {
8831 			EL(qlt, "run out of space - CODE RAM size_left=%d\n",
8832 			    size_left);
8833 			goto dump_ok;
8834 		}
8835 		addr += words_to_read;
8836 	}
8837 
8838 	n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
8839 	buf += n; size_left -= n;
8840 
8841 	addr = 0x100000;
8842 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
8843 	endaddr++;
8844 	if (endaddr & 7) {
8845 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
8846 	}
8847 
8848 	words_to_read = 0;
8849 	while (addr < endaddr) {
8850 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
8851 		if ((words_to_read + addr) > endaddr) {
8852 			words_to_read = endaddr - addr;
8853 		}
8854 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
8855 		    QLT_SUCCESS) {
8856 			EL(qlt, "Error reading risc ram - EXT RAM status="
8857 			    "%llxh\n", ret);
8858 			goto dump_fail;
8859 		}
8860 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
8861 		buf += n; size_left -= n;
8862 		if (size_left < 100000) {
8863 			EL(qlt, "run out of space - EXT RAM\n");
8864 			goto dump_ok;
8865 		}
8866 		addr += words_to_read;
8867 	}
8868 
8869 	/*
8870 	 * Label the end tag
8871 	 */
8872 	n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
8873 	buf += n; size_left -= n;
8874 
8875 	/*
8876 	 * Queue dumping
8877 	 */
8878 	n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
8879 	buf += n; size_left -= n;
8880 
8881 	if (qlt->qlt_mq_enabled) {
8882 		for (i = 0; i < qlt->qlt_queue_cnt; i++) {
8883 			if (qlt->mq_req[i].queue_mem_mq_base_addr) {
8884 				n = (int)snprintf(buf, size_left,
8885 				    "\nQueue %d:\n", i);
8886 				buf += n; size_left -= n;
8887 				n = qlt_dump_queue(qlt,
8888 				    qlt->mq_req[i].queue_mem_mq_base_addr,
8889 				    REQUEST_QUEUE_MQ_ENTRIES,
8890 				    buf, size_left);
8891 				buf += n; size_left -= n;
8892 			}
8893 		}
8894 	} else {
8895 		n = (int)snprintf(buf, size_left, "\nQueue 0:\n");
8896 		buf += n; size_left -= n;
8897 		n = qlt_dump_queue(qlt,
8898 		    qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
8899 		    REQUEST_QUEUE_ENTRIES, buf, size_left);
8900 		buf += n; size_left -= n;
8901 	}
8902 
8903 	if (!qlt->qlt_83xx_chip) {
8904 		n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
8905 		buf += n; size_left -= n;
8906 		n = qlt_dump_queue(qlt,
8907 		    qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
8908 		    PRIORITY_QUEUE_ENTRIES, buf, size_left);
8909 		buf += n; size_left -= n;
8910 	}
8911 
8912 	n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
8913 	buf += n; size_left -= n;
8914 
8915 	if (qlt->qlt_mq_enabled) {
8916 		for (i = 0; i < qlt->qlt_queue_cnt; i++) {
8917 			if (qlt->mq_resp[i].queue_mem_mq_base_addr) {
8918 				n = (int)snprintf(buf, size_left,
8919 				    "\nQueue %d:\n", i);
8920 				buf += n; size_left -= n;
8921 				n = qlt_dump_queue(qlt,
8922 				    qlt->mq_resp[i].queue_mem_mq_base_addr,
8923 				    RESPONSE_QUEUE_MQ_ENTRIES,
8924 				    buf, size_left);
8925 				buf += n; size_left -= n;
8926 			}
8927 		}
8928 	} else {
8929 		n = (int)snprintf(buf, size_left, "\nQueue 0:\n");
8930 		buf += n; size_left -= n;
8931 		n = qlt_dump_queue(qlt,
8932 		    qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
8933 		    RESPONSE_QUEUE_ENTRIES, buf, size_left);
8934 		buf += n; size_left -= n;
8935 	}
8936 
8937 	n = (int)snprintf(buf, size_left, "\nATIO Queue\nQueue 0:\n");
8938 	buf += n; size_left -= n;
8939 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
8940 	    ATIO_QUEUE_ENTRIES, buf, size_left);
8941 	buf += n; size_left -= n;
8942 
8943 	/*
8944 	 * Label dump reason
8945 	 */
8946 	if (ssci != NULL) {
8947 		n = (int)snprintf(buf, size_left,
8948 		    "\nFirmware dump reason: %s-%s\n",
8949 		    qlt->qlt_port_alias, ssci->st_additional_info);
8950 	} else {
8951 		n = (int)snprintf(buf, size_left,
8952 		    "\nFirmware dump reason: %s-%s\n",
8953 		    qlt->qlt_port_alias, "no additional infor");
8954 	}
8955 	buf += n; size_left -= n;
8956 
8957 dump_ok:
8958 	EL(qlt, "left-%d\n", size_left);
8959 	mutex_enter(&qlt->qlt_ioctl_lock);
8960 	qlt->qlt_ioctl_flags &=
8961 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
8962 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
8963 	mutex_exit(&qlt->qlt_ioctl_lock);
8964 	return (FCT_SUCCESS);
8965 
8966 dump_fail:
8967 	EL(qlt, "dump not done\n");
8968 	mutex_enter(&qlt->qlt_ioctl_lock);
8969 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
8970 	mutex_exit(&qlt->qlt_ioctl_lock);
8971 	return (FCT_FAILURE);
8972 }
8973 
8974 static int
qlt_fwdump_dump_regs(qlt_state_t * qlt,caddr_t buf,int startaddr,int count,uint_t size_left)8975 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
8976     uint_t size_left)
8977 {
8978 	int		i;
8979 	int		n;
8980 	char		c = ' ';
8981 
8982 	for (i = 0, n = 0; i < count; i++) {
8983 		if ((i + 1) & 7) {
8984 			c = ' ';
8985 		} else {
8986 			c = '\n';
8987 		}
8988 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
8989 		    "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
8990 	}
8991 	return (n);
8992 }
8993 
8994 static int
qlt_dump_risc_ram(qlt_state_t * qlt,uint32_t addr,uint32_t words,caddr_t buf,uint_t size_left)8995 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
8996     caddr_t buf, uint_t size_left)
8997 {
8998 	int		i;
8999 	int		n;
9000 	char		c = ' ';
9001 	uint32_t	*ptr;
9002 
9003 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
9004 	for (i = 0, n = 0; i < words; i++) {
9005 		if ((i & 7) == 0) {
9006 			n = (int)(n + (int)snprintf(&buf[n],
9007 			    (uint_t)(size_left - n), "%08x: ", addr + i));
9008 		}
9009 		if ((i + 1) & 7) {
9010 			c = ' ';
9011 		} else {
9012 			c = '\n';
9013 		}
9014 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
9015 		    "%08x%c", ptr[i], c));
9016 	}
9017 	return (n);
9018 }
9019 
9020 static int
qlt_dump_queue(qlt_state_t * qlt,caddr_t qadr,int entries,caddr_t buf,uint_t size_left)9021 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
9022     uint_t size_left)
9023 {
9024 	int		i;
9025 	int		n;
9026 	char		c = ' ';
9027 	int		words;
9028 	uint32_t	*ptr;
9029 	uint32_t	w;
9030 
9031 	words = entries * 16;
9032 	ptr = (uint32_t *)qadr;
9033 	for (i = 0, n = 0; i < words; i++) {
9034 		if ((i & 7) == 0) {
9035 			n = (int)(n + (int)snprintf(&buf[n],
9036 			    (uint_t)(size_left - n), "%05x: ", i));
9037 		}
9038 		if ((i + 1) & 7) {
9039 			c = ' ';
9040 		} else {
9041 			c = '\n';
9042 		}
9043 		w = QMEM_RD32(qlt, &ptr[i]);
9044 		n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%08x%c",
9045 		    w, c));
9046 	}
9047 	return (n);
9048 }
9049 
9050 /*
 * Only called by the debug dump. Interrupts are disabled, and the mailboxes
 * along with the mailbox RAM are available.
9053  * Copy data from RISC RAM to system memory
9054  */
static fct_status_t
qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
{
	uint64_t	da;
	fct_status_t	ret;

	/* Mailbox command: dump RISC RAM, extended (32-bit) addressing. */
	REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
	/* DMA target is the mailbox staging area within queue memory. */
	da = qlt->queue_mem_cookie.dmac_laddress;
	da += MBOX_DMA_MEM_OFFSET;

	/* System destination address (64-bit, split across four mailboxes) */
	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));

	/* Length (in 32-bit words) */
	REG_WR16(qlt, REG_MBOX(5), LSW(words));
	REG_WR16(qlt, REG_MBOX(4), MSW(words));

	/* RISC source address */
	REG_WR16(qlt, REG_MBOX(1), LSW(addr));
	REG_WR16(qlt, REG_MBOX(8), MSW(addr));

	/* Poll the command to completion (interrupts are disabled here). */
	ret = qlt_raw_mailbox_command(qlt);
	/* Acknowledge/clear the RISC-to-PCI interrupt raised by the cmd. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret == QLT_SUCCESS) {
		/* Make the freshly DMA'd data visible to the CPU. */
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
	} else {
		EL(qlt, "qlt_raw_mailbox_command=0x0ch status=%llxh\n", ret);
	}
	return (ret);
}
9089 
static fct_status_t
qlt_mbx_mpi_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
    uint16_t direction)
{
	uint64_t	da;
	fct_status_t	ret;

	/* Mailbox command: MPI RAM access; mailbox 9 selects direction. */
	REG_WR16(qlt, REG_MBOX(0), MBC_MPI_RAM);
	/* Transfers are staged through the mailbox DMA area of queue mem. */
	da = qlt->queue_mem_cookie.dmac_laddress;
	da += MBOX_DMA_MEM_OFFSET;

	/* System destination address (64-bit, split across four mailboxes) */
	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));

	/* Length (in 32-bit words) */
	REG_WR16(qlt, REG_MBOX(5), LSW(words));
	REG_WR16(qlt, REG_MBOX(4), MSW(words));

	/* RISC source address */
	REG_WR16(qlt, REG_MBOX(1), LSW(addr));
	REG_WR16(qlt, REG_MBOX(8), MSW(addr));

	REG_WR16(qlt, REG_MBOX(9), direction);
	ret = qlt_raw_mailbox_command(qlt);
	/* Acknowledge/clear the RISC-to-PCI interrupt raised by the cmd. */
	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
	if (ret == QLT_SUCCESS) {
		/*
		 * Make the DMA'd data visible to the CPU.
		 * NOTE(review): the sync is FORCPU regardless of
		 * 'direction'; that fits a read from MPI RAM --
		 * confirm whether a write direction needs FORDEV first.
		 */
		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
	} else {
		EL(qlt, "qlt_raw_mailbox_command=0x05h status=%llxh\n", ret);
	}
	return (ret);
}
9126 
/*
 * qlt_verify_fw
 *	Build a verify-firmware IOCB and submit it on request queue 0.
 *	If no request-queue slot is free the request is logged and dropped.
 */
static void
qlt_verify_fw(qlt_state_t *qlt)
{
	caddr_t req;
	uint16_t qi = 0;	/* always request queue 0 */

	/* Just put it on the request queue */
	mutex_enter(&qlt->mq_req[qi].mq_lock);
	req = qlt_get_req_entries(qlt, 1, qi);
	if (req == NULL) {
		/* No free IOCB slot; best-effort request, just drop it. */
		EL(qlt, "req = NULL\n");
		mutex_exit(&qlt->mq_req[qi].mq_lock);
		return;
	}

	bzero(req, IOCB_SIZE);

	/*
	 * IOCB entry type 0x1b, entry count 1.  NOTE(review): 0x1b is
	 * presumably the verify-firmware entry type -- confirm against
	 * the ISP firmware interface spec.
	 */
	req[0] = 0x1b;
	req[1] = 1;

	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
	QMEM_WR16(qlt, (&req[0x8]), 1);    /*  options - don't update */
	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);

	qlt_submit_req_entries(qlt, 1, qi);
	mutex_exit(&qlt->mq_req[qi].mq_lock);
}
9154 
9155 static fct_status_t
qlt_mq_destroy(qlt_state_t * qlt)9156 qlt_mq_destroy(qlt_state_t *qlt)
9157 {
9158 	int idx;
9159 
9160 	for (idx = 1; idx < qlt->qlt_queue_cnt; idx++) {
9161 		(void) ddi_dma_unbind_handle(
9162 		    qlt->mq_req[idx].queue_mem_mq_dma_handle);
9163 		ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9164 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9165 		(void) ddi_dma_unbind_handle(
9166 		    qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9167 		ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9168 		ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9169 	}
9170 	return (QLT_SUCCESS);
9171 }
9172 
9173 static fct_status_t
qlt_mq_create(qlt_state_t * qlt,int idx)9174 qlt_mq_create(qlt_state_t *qlt, int idx)
9175 {
9176 	ddi_device_acc_attr_t dev_acc_attr;
9177 	size_t discard;
9178 	uint_t ncookies;
9179 
9180 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
9181 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
9182 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
9183 
9184 	/*
9185 	 * MQ Request queue
9186 	 */
9187 	if (ddi_dma_alloc_handle(qlt->dip, &qlt_queue_dma_attr_mq_req1,
9188 	    DDI_DMA_SLEEP, 0,
9189 	    &qlt->mq_req[idx].queue_mem_mq_dma_handle) != DDI_SUCCESS) {
9190 		return (QLT_FAILURE);
9191 	}
9192 	if (ddi_dma_mem_alloc(qlt->mq_req[idx].queue_mem_mq_dma_handle,
9193 	    REQUEST_QUEUE_MQ_SIZE,
9194 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9195 	    &qlt->mq_req[idx].queue_mem_mq_base_addr, &discard,
9196 	    &qlt->mq_req[idx].queue_mem_mq_acc_handle) != DDI_SUCCESS) {
9197 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9198 		return (QLT_FAILURE);
9199 	}
9200 	if (ddi_dma_addr_bind_handle(
9201 	    qlt->mq_req[idx].queue_mem_mq_dma_handle,
9202 	    NULL, qlt->mq_req[idx].queue_mem_mq_base_addr,
9203 	    REQUEST_QUEUE_MQ_SIZE,
9204 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9205 	    &qlt->mq_req[idx].queue_mem_mq_cookie,
9206 	    &ncookies) != DDI_SUCCESS) {
9207 		ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9208 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9209 		return (QLT_FAILURE);
9210 	}
9211 	if (ncookies != 1) {
9212 		(void) ddi_dma_unbind_handle(
9213 		    qlt->mq_req[idx].queue_mem_mq_dma_handle);
9214 		ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9215 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9216 		return (QLT_FAILURE);
9217 	}
9218 
9219 	/*
9220 	 * MQ Response queue
9221 	 */
9222 	if (ddi_dma_alloc_handle(qlt->dip, &qlt_queue_dma_attr_mq_rsp1,
9223 	    DDI_DMA_SLEEP, 0,
9224 	    &qlt->mq_resp[idx].queue_mem_mq_dma_handle) != DDI_SUCCESS) {
9225 		(void) ddi_dma_unbind_handle(
9226 		    qlt->mq_req[idx].queue_mem_mq_dma_handle);
9227 		ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9228 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9229 		return (QLT_FAILURE);
9230 	}
9231 	if (ddi_dma_mem_alloc(qlt->mq_resp[idx].queue_mem_mq_dma_handle,
9232 	    RESPONSE_QUEUE_MQ_SIZE,
9233 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9234 	    &qlt->mq_resp[idx].queue_mem_mq_base_addr, &discard,
9235 	    &qlt->mq_resp[idx].queue_mem_mq_acc_handle) != DDI_SUCCESS) {
9236 		(void) ddi_dma_unbind_handle(
9237 		    qlt->mq_req[idx].queue_mem_mq_dma_handle);
9238 		ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9239 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9240 		ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9241 		return (QLT_FAILURE);
9242 	}
9243 	if (ddi_dma_addr_bind_handle(
9244 	    qlt->mq_resp[idx].queue_mem_mq_dma_handle,
9245 	    NULL, qlt->mq_resp[idx].queue_mem_mq_base_addr,
9246 	    RESPONSE_QUEUE_MQ_SIZE,
9247 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9248 	    &qlt->mq_resp[idx].queue_mem_mq_cookie,
9249 	    &ncookies) != DDI_SUCCESS) {
9250 		(void) ddi_dma_unbind_handle(
9251 		    qlt->mq_req[idx].queue_mem_mq_dma_handle);
9252 		ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9253 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9254 		ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9255 		ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9256 		return (QLT_FAILURE);
9257 	}
9258 	if (ncookies != 1) {
9259 		(void) ddi_dma_unbind_handle(
9260 		    qlt->mq_req[idx].queue_mem_mq_dma_handle);
9261 		ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9262 		ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9263 		(void) ddi_dma_unbind_handle(
9264 		    qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9265 		ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9266 		ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9267 		return (QLT_FAILURE);
9268 	}
9269 
9270 	qlt->mq_req[idx].mq_ptr = qlt->mq_req[idx].queue_mem_mq_base_addr;
9271 	qlt->mq_req[idx].mq_ndx_to_fw = qlt->mq_req[idx].mq_ndx_from_fw = 0;
9272 	qlt->mq_req[idx].mq_available = REQUEST_QUEUE_MQ_ENTRIES - 1;
9273 	bzero(qlt->mq_req[idx].mq_ptr, REQUEST_QUEUE_MQ_SIZE);
9274 
9275 	qlt->mq_resp[idx].mq_ptr = qlt->mq_resp[idx].queue_mem_mq_base_addr;
9276 	qlt->mq_resp[idx].mq_ndx_to_fw = qlt->mq_resp[idx].mq_ndx_from_fw = 0;
9277 	bzero(qlt->mq_resp[idx].mq_ptr, RESPONSE_QUEUE_MQ_SIZE);
9278 
9279 	return (QLT_SUCCESS);
9280 }
9281 
/*
 * qlt_handle_verify_fw_completion
 *	Inspect the completion status of a verify-firmware IOCB response.
 */
static void
qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
{
	uint16_t	status;
	char		info[80];

	/* Completion status lives at offset 8 of the response entry. */
	status = QMEM_RD16(qlt, rsp+8);
	if (status != 0) {
		(void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
		    "status:%x, rsp:%p", status, (void *)rsp);
		if (status == 3) {
			uint16_t error_code;

			/* Status 3 carries an extra error code at 0xA. */
			error_code = QMEM_RD16(qlt, rsp+0xA);
			(void) snprintf(info, 80, "qlt_handle_verify_fw_"
			    "completion: error code:%x", error_code);
		}
	}
	/*
	 * NOTE(review): 'info' is formatted (and overwritten for status 3)
	 * but never logged or otherwise consumed -- presumably it was meant
	 * to be passed to a trace/logging routine; confirm the intent.
	 */
}
9301 
9302 /*
9303  * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
9304  *
9305  * Input:	Pointer to the adapter state structure.
9306  * Returns:	Success or Failure.
9307  * Context:	Kernel context.
9308  */
9309 static int
qlt_el_trace_desc_ctor(qlt_state_t * qlt)9310 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
9311 {
9312 	qlt_trace_entry_t	*entry;
9313 	size_t			maxsize;
9314 
9315 	qlt->qlt_trace_desc =
9316 	    (qlt_trace_desc_t *)kmem_zalloc(
9317 	    sizeof (qlt_trace_desc_t), KM_SLEEP);
9318 
9319 	qlt->qlt_log_entries = QL_LOG_ENTRIES;
9320 	maxsize = qlt->qlt_log_entries * sizeof (qlt_trace_entry_t);
9321 	entry = kmem_zalloc(maxsize, KM_SLEEP);
9322 
9323 	mutex_init(&qlt->qlt_trace_desc->mutex, NULL,
9324 	    MUTEX_DRIVER, NULL);
9325 
9326 	qlt->qlt_trace_desc->trace_buffer = entry;
9327 	qlt->qlt_trace_desc->trace_buffer_size = maxsize;
9328 	qlt->qlt_trace_desc->nindex = 0;
9329 
9330 	qlt->qlt_trace_desc->nentries = qlt->qlt_log_entries;
9331 	qlt->qlt_trace_desc->start = qlt->qlt_trace_desc->end = 0;
9332 	qlt->qlt_trace_desc->csize = 0;
9333 	qlt->qlt_trace_desc->count = 0;
9334 
9335 	return (DDI_SUCCESS);
9336 }
9337 
9338 /*
9339  * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
9340  *
9341  * Input:	Pointer to the adapter state structure.
9342  * Returns:	Success or Failure.
9343  * Context:	Kernel context.
9344  */
9345 static int
qlt_el_trace_desc_dtor(qlt_state_t * qlt)9346 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
9347 {
9348 	int	rval = DDI_SUCCESS;
9349 
9350 	if (qlt->qlt_trace_desc != NULL) {
9351 		if (qlt->qlt_trace_desc->trace_buffer != NULL) {
9352 			kmem_free(qlt->qlt_trace_desc->trace_buffer,
9353 			    qlt->qlt_trace_desc->trace_buffer_size);
9354 		}
9355 		mutex_destroy(&qlt->qlt_trace_desc->mutex);
9356 		kmem_free(qlt->qlt_trace_desc, sizeof (qlt_trace_desc_t));
9357 	}
9358 
9359 	return (rval);
9360 }
9361 
9362 /*
9363  * qlt_el_msg
9364  *	Extended logging message
9365  *
9366  * Input:
9367  *	qlt:	adapter state pointer.
9368  *	fn:	function name.
9369  *	ce:	level
9370  *	...:	Variable argument list.
9371  *
9372  * Context:
9373  *	Kernel/Interrupt context.
9374  */
9375 void
qlt_el_msg(qlt_state_t * qlt,const char * fn,int ce,...)9376 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
9377 {
9378 	char	*s, *fmt = 0, *fmt1 = 0;
9379 
9380 	/*
9381 	 * EL_BUFFER_RESERVE 256 is the max # of bytes
9382 	 * that driver's log could be collected.
9383 	 * add 3 more buytes for safely maniplulation.
9384 	 */
9385 	char	buf[EL_BUFFER_RESERVE + 3];
9386 	char	buf1[QL_LOG_LENGTH];
9387 	size_t	tmp;
9388 	size_t	rval, rval1;
9389 	va_list	vl;
9390 	qlt_trace_desc_t *desc = qlt->qlt_trace_desc;
9391 	qlt_trace_entry_t	*entry;
9392 	uint32_t	cindex;
9393 	timespec_t	time;
9394 	uint32_t	count;
9395 	size_t		left;
9396 
9397 	(void) bzero((void *)&buf[0], EL_BUFFER_RESERVE + 3);
9398 	fmt1 = &buf[0];
9399 
9400 	TRACE_BUFFER_LOCK(qlt);
9401 
9402 	/* locate the entry to be filled out */
9403 	cindex = desc->nindex;
9404 	entry = &desc->trace_buffer[cindex];
9405 
9406 	count = desc->count;
9407 
9408 	desc->end = desc->nindex;
9409 	desc->nindex++;
9410 	if (desc->nindex == desc->nentries) {
9411 		desc->nindex = 0;
9412 	}
9413 
9414 	if (desc->csize < desc->nentries) {
9415 		desc->csize ++;
9416 	} else {
9417 		/*
9418 		 * once wrapped, csize is fixed.
9419 		 * so we have to adjust start point
9420 		 */
9421 		desc->start = desc->nindex;
9422 	}
9423 
9424 	gethrestime(&time);
9425 
9426 	rval = snprintf(fmt1, (size_t)EL_BUFFER_RESERVE,
9427 	    QL_BANG "%d=>QEL %s(%d,%d):: %s, ", count, QL_NAME,
9428 	    qlt->instance, 0, fn);
9429 
9430 	rval1 = rval;
9431 
9432 	va_start(vl, ce);
9433 	s = va_arg(vl, char *);
9434 	fmt = fmt1 + rval;
9435 	tmp = vsnprintf(fmt,
9436 	    (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
9437 	va_end(vl);
9438 
9439 	rval += tmp;
9440 	if (rval > QL_LOG_LENGTH - 1) {
9441 		left = rval - (QL_LOG_LENGTH - 1);
9442 
9443 		/* store the remaining string */
9444 		(void) strncpy(buf1, fmt1 + (QL_LOG_LENGTH - 1), left);
9445 		(void) strncpy(entry->buf, fmt1, (QL_LOG_LENGTH - 1));
9446 		entry->buf[QL_LOG_LENGTH - 1] = '\n';
9447 
9448 		bcopy((void *)&time, (void *)&entry->hs_time,
9449 		    sizeof (timespec_t));
9450 
9451 		/*
9452 		 * remaining msg will be stored in the nex entry
9453 		 * with same timestamp and same sequence number
9454 		 */
9455 		cindex = desc->nindex;
9456 		entry = &desc->trace_buffer[cindex];
9457 
9458 		desc->end = desc->nindex;
9459 		desc->nindex++;
9460 		if (desc->nindex == desc->nentries) {
9461 			desc->nindex = 0;
9462 		}
9463 
9464 		if (desc->csize < desc->nentries) {
9465 			desc->csize ++;
9466 		} else {
9467 			desc->start = desc->nindex;
9468 		}
9469 
9470 		(void) strncpy(&entry->buf[0], fmt1, rval1);
9471 		(void) strncpy(&entry->buf[rval1], &buf1[0], left);
9472 		entry->buf[rval1 + left] = 0;
9473 
9474 		bcopy((void *)&time, (void *)&entry->hs_time,
9475 		    sizeof (timespec_t));
9476 
9477 		if (qlt->qlt_eel_level == 1) {
9478 			cmn_err(ce, fmt1);
9479 		}
9480 
9481 		desc->count++;
9482 
9483 		TRACE_BUFFER_UNLOCK(qlt);
9484 		return;
9485 	}
9486 
9487 	desc->count ++;
9488 	bcopy((void *)&time, (void *)&entry->hs_time,
9489 	    sizeof (timespec_t));
9490 
9491 	/*
9492 	 * Here we know that fmt1 will fit within QL_LOG_LENGTH due to the
9493 	 * check above, but smatch identifies a potential problem.
9494 	 */
9495 	(void) strncpy(entry->buf, fmt1, rval);
9496 	entry->buf[rval] = 0;
9497 
9498 	TRACE_BUFFER_UNLOCK(qlt);
9499 
9500 	if (qlt->qlt_eel_level == 1) {
9501 		cmn_err(ce, fmt1);
9502 	}
9503 }
9504 
9505 static int
qlt_read_int_prop(qlt_state_t * qlt,char * prop,int defval)9506 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
9507 {
9508 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
9509 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
9510 }
9511 
9512 static int
qlt_read_string_prop(qlt_state_t * qlt,char * prop,char ** prop_val)9513 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
9514 {
9515 	return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
9516 	    DDI_PROP_DONTPASS, prop, prop_val));
9517 }
9518 
9519 static int
qlt_read_int_instance_prop(qlt_state_t * qlt,char * prop,int defval)9520 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
9521 {
9522 	char		inst_prop[256];
9523 	int		val;
9524 
9525 	/*
9526 	 * Get adapter instance specific parameters. If the instance
9527 	 * specific parameter isn't there, try the global parameter.
9528 	 */
9529 
9530 	(void) sprintf(inst_prop, "hba%d-%s", qlt->instance, prop);
9531 
9532 	if ((val = qlt_read_int_prop(qlt, inst_prop, defval)) == defval) {
9533 		val = qlt_read_int_prop(qlt, prop, defval);
9534 	}
9535 
9536 	return (val);
9537 }
9538 
9539 static int
qlt_read_string_instance_prop(qlt_state_t * qlt,char * prop,char ** prop_val)9540 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
9541 {
9542 	char		instance_prop[256];
9543 
9544 	/* Get adapter instance specific parameter. */
9545 	(void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
9546 	return (qlt_read_string_prop(qlt, instance_prop, prop_val));
9547 }
9548 
9549 static int
qlt_convert_string_to_ull(char * prop,int radix,u_longlong_t * result)9550 qlt_convert_string_to_ull(char *prop, int radix,
9551     u_longlong_t *result)
9552 {
9553 	return (ddi_strtoull((const char *)prop, 0, radix, result));
9554 }
9555 
9556 static boolean_t
qlt_wwn_overload_prop(qlt_state_t * qlt)9557 qlt_wwn_overload_prop(qlt_state_t *qlt)
9558 {
9559 	char		*prop_val = NULL;
9560 	int		rval;
9561 	int		radix;
9562 	u_longlong_t	wwnn = 0, wwpn = 0;
9563 	boolean_t	overloaded = FALSE;
9564 
9565 	radix = 16;
9566 
9567 	rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
9568 	if (rval == DDI_PROP_SUCCESS) {
9569 		rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
9570 	}
9571 	if (rval == DDI_PROP_SUCCESS) {
9572 		rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
9573 		    &prop_val);
9574 		if (rval == DDI_PROP_SUCCESS) {
9575 			rval = qlt_convert_string_to_ull(prop_val, radix,
9576 			    &wwpn);
9577 		}
9578 	}
9579 	if (rval == DDI_PROP_SUCCESS) {
9580 		overloaded = TRUE;
9581 		/* Overload the current node/port name nvram copy */
9582 		bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
9583 		BIG_ENDIAN_64(qlt->nvram->node_name);
9584 		bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
9585 		BIG_ENDIAN_64(qlt->nvram->port_name);
9586 	}
9587 	return (overloaded);
9588 }
9589 
9590 /*
9591  * prop_text - Return a pointer to a string describing the status
9592  *
9593  * Input:	prop_status = the return status from a property function.
9594  * Returns:	pointer to a string.
9595  * Context:	Kernel context.
9596  */
9597 char *
prop_text(int prop_status)9598 prop_text(int prop_status)
9599 {
9600 	string_table_t *entry = &prop_status_tbl[0];
9601 
9602 	return (value2string(entry, prop_status, 0xFFFF));
9603 }
9604 
9605 /*
9606  * value2string	Return a pointer to a string associated with the value
9607  *
9608  * Input:	entry = the value to string table
9609  *		value = the value
9610  * Returns:	pointer to a string.
9611  * Context:	Kernel context.
9612  */
9613 char *
value2string(string_table_t * entry,int value,int delimiter)9614 value2string(string_table_t *entry, int value, int delimiter)
9615 {
9616 	for (; entry->value != delimiter; entry++) {
9617 		if (entry->value == value) {
9618 			break;
9619 		}
9620 	}
9621 	return (entry->string);
9622 }
9623 
9624 /*
9625  * qlt_chg_endian Change endianess of byte array.
9626  *
9627  * Input:	buf = array pointer.
9628  *		size = size of array in bytes.
9629  *
9630  * Context:	Interrupt or Kernel context.
9631  */
9632 void
qlt_chg_endian(uint8_t buf[],size_t size)9633 qlt_chg_endian(uint8_t buf[], size_t size)
9634 {
9635 	uint8_t byte;
9636 	size_t  cnt1;
9637 	size_t  cnt;
9638 
9639 	cnt1 = size - 1;
9640 	for (cnt = 0; cnt < size / 2; cnt++) {
9641 		byte = buf[cnt1];
9642 		buf[cnt1] = buf[cnt];
9643 		buf[cnt] = byte;
9644 		cnt1--;
9645 	}
9646 }
9647 
9648 /*
9649  * ql_mps_reset
9650  *	Reset MPS for FCoE functions.
9651  *
9652  * Input:
9653  *	ha = virtual adapter state pointer.
9654  *
9655  * Context:
9656  *	Kernel context.
9657  */
9658 static void
qlt_mps_reset(qlt_state_t * qlt)9659 qlt_mps_reset(qlt_state_t *qlt)
9660 {
9661 	uint32_t	data, dctl = 1000;
9662 
9663 	do {
9664 		if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
9665 		    QLT_SUCCESS) {
9666 			EL(qlt, "qlt_mps_reset: semaphore request fail,"
9667 			    " cnt=%d\n", dctl);
9668 			return;
9669 		}
9670 		if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
9671 		    QLT_SUCCESS) {
9672 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
9673 			EL(qlt, "qlt_mps_reset: semaphore read fail,"
9674 			    " cnt=%d\n", dctl);
9675 			return;
9676 		}
9677 	} while (!(data & BIT_0));
9678 
9679 	if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
9680 		dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
9681 		if ((data & 0xe0) != (dctl & 0xe0)) {
9682 			data &= 0xff1f;
9683 			data |= dctl & 0xe0;
9684 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
9685 		}
9686 	} else {
9687 		EL(qlt, "qlt_mps_reset: read 0x7a15 failed.\n");
9688 	}
9689 	(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
9690 }
9691 
9692 /*
9693  * qlt_raw_wrt_risc_ram_word
9694  *	Write RISC RAM word.
9695  *
9696  * Input:	qlt:		adapter state pointer.
9697  *		risc_address:	risc ram word address.
9698  *		data:		data.
9699  *
9700  * Returns:	qlt local function return status code.
9701  *
9702  * Context:	Kernel context.
9703  */
9704 static fct_status_t
qlt_raw_wrt_risc_ram_word(qlt_state_t * qlt,uint32_t risc_address,uint32_t data)9705 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
9706     uint32_t data)
9707 {
9708 	fct_status_t	ret;
9709 
9710 	REG_WR16(qlt, REG_MBOX(0), MBC_WRITE_RAM_EXTENDED);
9711 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
9712 	REG_WR16(qlt, REG_MBOX(2), LSW(data));
9713 	REG_WR16(qlt, REG_MBOX(3), MSW(data));
9714 	REG_WR16(qlt, REG_MBOX(8), MSW(risc_address));
9715 	ret = qlt_raw_mailbox_command(qlt);
9716 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
9717 	if (ret != QLT_SUCCESS) {
9718 		EL(qlt, "qlt_raw_mailbox_command=MBC_WRITE_RAM_EXTENDED status"
9719 		    "=%llxh\n", ret);
9720 	}
9721 	return (ret);
9722 }
9723 
9724 /*
9725  * ql_raw_rd_risc_ram_word
9726  *	Read RISC RAM word.
9727  *
9728  * Input:	qlt:		adapter state pointer.
9729  *		risc_address:	risc ram word address.
9730  *		data:		data pointer.
9731  *
9732  * Returns:	ql local function return status code.
9733  *
9734  * Context:	Kernel context.
9735  */
9736 static fct_status_t
qlt_raw_rd_risc_ram_word(qlt_state_t * qlt,uint32_t risc_address,uint32_t * data)9737 qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
9738     uint32_t *data)
9739 {
9740 	fct_status_t	ret;
9741 
9742 	REG_WR16(qlt, REG_MBOX(0), MBC_READ_RAM_EXTENDED);
9743 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
9744 	REG_WR16(qlt, REG_MBOX(2), MSW(risc_address));
9745 	ret = qlt_raw_mailbox_command(qlt);
9746 	*data = REG_RD16(qlt, REG_MBOX(2));
9747 	*data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
9748 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
9749 	if (ret != QLT_SUCCESS) {
9750 		EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
9751 		    "=%llxh\n", ret);
9752 	}
9753 	return (ret);
9754 }
9755 
9756 static void
qlt_properties(qlt_state_t * qlt)9757 qlt_properties(qlt_state_t *qlt)
9758 {
9759 	int32_t		cnt = 0;
9760 	int32_t		defval = 0xffff;
9761 
9762 	if (qlt_wwn_overload_prop(qlt) == TRUE) {
9763 		EL(qlt, "wwnn overloaded.\n");
9764 	}
9765 
9766 	/* configure extended logging from conf file */
9767 	if ((cnt = qlt_read_int_instance_prop(qlt, "extended-logging",
9768 	    defval)) != defval) {
9769 		qlt->qlt_eel_level = (uint8_t)(cnt & 0xff);
9770 		EL(qlt, "extended error logging=%d\n", cnt);
9771 	}
9772 
9773 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
9774 	    defval) {
9775 		qlt->qlt_bucketcnt[0] = cnt;
9776 		EL(qlt, "2k bucket o/l=%d\n", cnt);
9777 	}
9778 
9779 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
9780 	    defval) {
9781 		qlt->qlt_bucketcnt[1] = cnt;
9782 		EL(qlt, "8k bucket o/l=%d\n", cnt);
9783 	}
9784 
9785 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
9786 	    defval) {
9787 		qlt->qlt_bucketcnt[2] = cnt;
9788 		EL(qlt, "64k bucket o/l=%d\n", cnt);
9789 	}
9790 
9791 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
9792 	    defval) {
9793 		qlt->qlt_bucketcnt[3] = cnt;
9794 		EL(qlt, "128k bucket o/l=%d\n", cnt);
9795 	}
9796 
9797 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
9798 	    defval) {
9799 		qlt->qlt_bucketcnt[4] = cnt;
9800 		EL(qlt, "256k bucket o/l=%d\n", cnt);
9801 	}
9802 }
9803 
9804 /* ******************************************************************* */
9805 /* ****************** 27xx Dump Template Functions ******************* */
9806 /* ******************************************************************* */
9807 
9808 /*
9809  * qlt_get_dmp_template
9810  *	Get dump template from firmware module
9811  *
9812  * Input:
9813  *	qlt:	qlt_state_t pointer.
9814  *
9815  * Returns:
9816  *	qlt local function return status code.
9817  *
9818  * Context:
9819  *	Kernel context.
9820  */
9821 static fct_status_t
qlt_27xx_get_dmp_template(qlt_state_t * qlt)9822 qlt_27xx_get_dmp_template(qlt_state_t *qlt)
9823 {
9824 	ddi_device_acc_attr_t dev_acc_attr;
9825 	dev_info_t *dip = qlt->dip;
9826 	uint_t ncookies;
9827 	size_t discard;
9828 	uint32_t word_count, cnt, *bp, *dp;
9829 
9830 	if (qlt->dmp_template_dma_handle != NULL) {
9831 		(void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
9832 		if (qlt->dmp_template_acc_handle != NULL) {
9833 			ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9834 		}
9835 		ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9836 	}
9837 
9838 	if ((word_count = tmplt2700_length01) == 0) {
9839 		EL(qlt, "No dump template, length=0\n");
9840 		return (QLT_FAILURE);
9841 	}
9842 
9843 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
9844 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
9845 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
9846 
9847 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr,
9848 	    DDI_DMA_SLEEP, 0, &qlt->dmp_template_dma_handle) !=
9849 	    DDI_SUCCESS) {
9850 		EL(qlt, "Unable to allocate template handle");
9851 		return (QLT_FAILURE);
9852 	}
9853 
9854 	if (ddi_dma_mem_alloc(qlt->dmp_template_dma_handle,
9855 	    (word_count << 2), &dev_acc_attr, DDI_DMA_CONSISTENT,
9856 	    DDI_DMA_SLEEP, 0, &qlt->dmp_template_addr, &discard,
9857 	    &qlt->dmp_template_acc_handle) != DDI_SUCCESS) {
9858 		ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9859 		EL(qlt, "Unable to allocate template buffer");
9860 		return (QLT_FAILURE);
9861 	}
9862 
9863 	if (ddi_dma_addr_bind_handle(qlt->dmp_template_dma_handle, NULL,
9864 	    qlt->dmp_template_addr, (word_count << 2),
9865 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9866 	    &qlt->dmp_template_cookie, &ncookies) != DDI_SUCCESS) {
9867 		ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9868 		ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9869 		EL(qlt, "Unable to bind template handle");
9870 		return (QLT_FAILURE);
9871 	}
9872 
9873 	if (ncookies != 1) {
9874 		(void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
9875 		ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9876 		ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9877 		EL(qlt, "cookies (%d) > 1.\n", ncookies);
9878 		return (QLT_FAILURE);
9879 	}
9880 
9881 	/* Get big endian template. */
9882 	bp = (uint32_t *)qlt->dmp_template_addr;
9883 	dp = (uint32_t *)tmplt2700_code01;
9884 	for (cnt = 0; cnt < word_count; cnt++) {
9885 		ddi_put32(qlt->dmp_template_acc_handle, bp, *dp++);
9886 		if (cnt > 6) {
9887 			qlt_chg_endian((uint8_t *)bp, 4);
9888 		}
9889 		bp++;
9890 	}
9891 
9892 	return (QLT_SUCCESS);
9893 }
9894 
/*
 * qlt_27xx_dt_riob1 - 27xx dump template: read an I/O-bus register block.
 *
 * Two-pass protocol shared by all dt_* handlers: when dbuff is NULL this
 * is a sizing pass and only the byte count is returned; otherwise the
 * data is captured into dbuff.  Each captured element is a 4-byte
 * little-endian address followed by reg_size bytes read through the
 * IOBUS window at pci_offset.
 *
 * Returns: number of bytes produced (0 when skipped for lack of space).
 */
static int
qlt_27xx_dt_riob1(qlt_state_t *qlt, qlt_dt_riob1_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint32_t i, cnt;
	uint8_t	 *bp = dbuff;
	uint32_t addr = entry->addr;
	uint32_t reg = entry->pci_offset;

	cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
	esize = cnt * 4;		/* addr */
	esize += cnt * entry->reg_size;	/* data */

	/* Sizing pass. */
	if (dbuff == NULL) {
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select the IOBUS window, then read cnt elements. */
	REG_WR32(qlt, REG_IOBUS_BASE_ADDR, addr);
	while (cnt--) {
		/* Emit the element address, little endian. */
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < entry->reg_size; i++) {
			*bp++ = REG_RD8(qlt, reg++);
		}
		addr++;
	}

	return (esize);
}
9933 
9934 static void
qlt_27xx_dt_wiob1(qlt_state_t * qlt,qlt_dt_wiob1_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)9935 qlt_27xx_dt_wiob1(qlt_state_t *qlt, qlt_dt_wiob1_t *entry,
9936     uint8_t *dbuff, uint8_t *dbuff_end)
9937 {
9938 	uint32_t reg = entry->pci_offset;
9939 
9940 	if (dbuff == NULL) {
9941 		return;
9942 	}
9943 	if (dbuff >= dbuff_end) {
9944 		EL(qlt, "skipped, no buffer space, needed=0\n");
9945 		entry->h.driver_flags = (uint8_t)
9946 		    (entry->h.driver_flags | SKIPPED_FLAG);
9947 		return;
9948 	}
9949 
9950 	REG_WR32(qlt, REG_IOBUS_BASE_ADDR, entry->addr);
9951 	REG_WR32(qlt, reg, entry->data);
9952 }
9953 
/*
 * qlt_27xx_dt_riob2 - 27xx dump template: read an I/O-bus register block
 * behind a bank-select register.
 *
 * Same two-pass protocol as qlt_27xx_dt_riob1 (dbuff == NULL sizes, else
 * captures), but programs entry->reg_bank into the bank-select register
 * before reading.  Each element is a 4-byte little-endian address plus
 * reg_size data bytes.
 *
 * Returns: number of bytes produced (0 when skipped for lack of space).
 */
static int
qlt_27xx_dt_riob2(qlt_state_t *qlt, qlt_dt_riob2_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint32_t i, cnt;
	uint8_t	 *bp = dbuff;
	uint32_t reg = entry->pci_offset;
	uint32_t addr = entry->addr;

	cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
	esize = cnt * 4;		/* addr */
	esize += cnt * entry->reg_size;	/* data */

	/* Sizing pass. */
	if (dbuff == NULL) {
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select the IOBUS window and register bank, then read. */
	REG_WR32(qlt, REG_IOBUS_BASE_ADDR, addr);
	REG_WR32(qlt, entry->bank_sel_offset, entry->reg_bank);
	while (cnt--) {
		/* Emit the element address, little endian. */
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		for (i = 0; i < entry->reg_size; i++) {
			*bp++ = REG_RD8(qlt, reg++);
		}
		addr++;
	}

	return (esize);
}
9993 
9994 static void
qlt_27xx_dt_wiob2(qlt_state_t * qlt,qlt_dt_wiob2_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)9995 qlt_27xx_dt_wiob2(qlt_state_t *qlt, qlt_dt_wiob2_t *entry,
9996     uint8_t *dbuff, uint8_t *dbuff_end)
9997 {
9998 	uint16_t data;
9999 	uint32_t reg = entry->pci_offset;
10000 
10001 	if (dbuff == NULL) {
10002 		return;
10003 	}
10004 	if (dbuff >= dbuff_end) {
10005 		EL(qlt, "skipped, no buffer space, needed=0\n");
10006 		entry->h.driver_flags = (uint8_t)
10007 		    (entry->h.driver_flags | SKIPPED_FLAG);
10008 		return;
10009 	}
10010 
10011 	data = CHAR_TO_SHORT(entry->data_l, entry->data_h);
10012 
10013 	REG_WR32(qlt, REG_IOBUS_BASE_ADDR, entry->addr);
10014 	REG_WR32(qlt, entry->bank_sel_offset, entry->reg_bank);
10015 	REG_WR16(qlt, reg, data);
10016 }
10017 
10018 static int
qlt_27xx_dt_rpci(qlt_state_t * qlt,qlt_dt_rpci_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10019 qlt_27xx_dt_rpci(qlt_state_t *qlt, qlt_dt_rpci_t *entry, uint8_t *dbuff,
10020     uint8_t *dbuff_end)
10021 {
10022 	int esize;
10023 	uint32_t i;
10024 	uint8_t	*bp = dbuff;
10025 	uint32_t reg = entry->addr;
10026 
10027 	esize = 4;	/* addr */
10028 	esize += 4;	/* data */
10029 
10030 	if (dbuff == NULL) {
10031 		return (esize);
10032 	}
10033 	if (esize + dbuff >= dbuff_end) {
10034 		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10035 		entry->h.driver_flags = (uint8_t)
10036 		    (entry->h.driver_flags | SKIPPED_FLAG);
10037 		return (0);
10038 	}
10039 
10040 	*bp++ = LSB(LSW(entry->addr));
10041 	*bp++ = MSB(LSW(entry->addr));
10042 	*bp++ = LSB(MSW(entry->addr));
10043 	*bp++ = MSB(MSW(entry->addr));
10044 	for (i = 0; i < 4; i++) {
10045 		*bp++ = REG_RD8(qlt, reg++);
10046 	}
10047 
10048 	return (esize);
10049 }
10050 
10051 static void
qlt_27xx_dt_wpci(qlt_state_t * qlt,qlt_dt_wpci_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10052 qlt_27xx_dt_wpci(qlt_state_t *qlt, qlt_dt_wpci_t *entry,
10053     uint8_t *dbuff, uint8_t *dbuff_end)
10054 {
10055 	uint32_t reg = entry->addr;
10056 
10057 	if (dbuff == NULL) {
10058 		return;
10059 	}
10060 	if (dbuff >= dbuff_end) {
10061 		EL(qlt, "skipped, no buffer space, needed=0\n");
10062 		entry->h.driver_flags = (uint8_t)
10063 		    (entry->h.driver_flags | SKIPPED_FLAG);
10064 		return;
10065 	}
10066 
10067 	REG_WR32(qlt, reg, entry->data);
10068 }
10069 
/*
 * qlt_27xx_dt_rram - 27xx dump template: dump a firmware RAM region.
 *
 * ram_area selects the range: 1 = use the template's start/end as-is,
 * 2 = extended memory (end overridden), 3 = shared RAM, 4 = DDR RAM;
 * anything else is skipped.  Two-pass: dbuff == NULL returns the byte
 * count only; otherwise the region is dumped via MBC_DUMP_RAM_EXTENDED
 * and the template entry is updated with the resolved start/end.
 *
 * Returns: number of bytes produced (0 when skipped or the dump fails).
 */
static int
qlt_27xx_dt_rram(qlt_state_t *qlt, qlt_dt_rram_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize, rval;
	uint32_t start = entry->start_addr;
	uint32_t end = entry->end_addr;

	if (entry->ram_area == 2) {
		end = qlt->fw_ext_memory_end;
	} else if (entry->ram_area == 3) {
		start = qlt->fw_shared_ram_start;
		end = qlt->fw_shared_ram_end;
	} else if (entry->ram_area == 4) {
		start = qlt->fw_ddr_ram_start;
		end = qlt->fw_ddr_ram_end;
	} else if (entry->ram_area != 1) {
		EL(qlt, "skipped, unknown RAM_AREA %d\n", entry->ram_area);
		start = 0;
		end = 0;
	}
	/*
	 * end - start is the word span minus one (inclusive bounds), so
	 * +1 gives the word count and *4 converts words to bytes.
	 */
	esize = end > start ? end - start : 0;
	if (esize) {
		esize = (esize + 1) * 4;
	}

	/* Sizing pass. */
	if (dbuff == NULL) {
		return (esize);
	}
	if (esize == 0 || esize + dbuff >= dbuff_end) {
		if (esize != 0) {
			EL(qlt, "skipped, no buffer space, needed=%xh\n",
			    esize);
		} else {
			EL(qlt, "skipped, no ram_area=%xh, start=%xh "
			    "end=%xh\n", entry->ram_area, start, end);
		}
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}
	/* Record the resolved range back into the template entry. */
	entry->end_addr = end;
	entry->start_addr = start;

	/*
	 * NOTE(review): on dump failure the entry is not marked with
	 * SKIPPED_FLAG even though 0 bytes are produced — confirm whether
	 * the template consumer tolerates that.
	 */
	if ((rval = qlt_27xx_dump_ram(qlt, MBC_DUMP_RAM_EXTENDED,
	    start, esize / 4, dbuff)) != QLT_SUCCESS) {
		EL(qlt, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
		    "esize=0\n", rval, start, esize / 4);
		return (0);
	}

	return (esize);
}
10123 
10124 static int
qlt_27xx_dt_gque(qlt_state_t * qlt,qlt_dt_gque_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10125 qlt_27xx_dt_gque(qlt_state_t *qlt, qlt_dt_gque_t *entry,
10126     uint8_t *dbuff, uint8_t *dbuff_end)
10127 {
10128 	int esize;
10129 	uint32_t cnt, q_cnt, e_cnt, i;
10130 	uint8_t	*bp = dbuff, *dp;
10131 
10132 	if (entry->queue_type == 1) {
10133 		e_cnt = qlt->qlt_queue_cnt;
10134 		esize = e_cnt * 2;	/* queue number */
10135 		esize += e_cnt * 2;	/* queue entries */
10136 
10137 		/* queue size */
10138 		esize += REQUEST_QUEUE_ENTRIES * IOCB_SIZE;
10139 		for (q_cnt = 1; q_cnt < qlt->qlt_queue_cnt; q_cnt++) {
10140 			esize += REQUEST_QUEUE_MQ_ENTRIES * IOCB_SIZE;
10141 		}
10142 
10143 		if (dbuff == NULL) {
10144 			return (esize);
10145 		}
10146 		if (esize + dbuff >= dbuff_end) {
10147 			EL(qlt, "skipped, no buffer space, needed=%xh\n",
10148 			    esize);
10149 			entry->h.driver_flags = (uint8_t)
10150 			    (entry->h.driver_flags | SKIPPED_FLAG);
10151 			return (0);
10152 		}
10153 		entry->num_queues = e_cnt;
10154 
10155 		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10156 			e_cnt = q_cnt == 0 ?
10157 			    REQUEST_QUEUE_ENTRIES : REQUEST_QUEUE_MQ_ENTRIES;
10158 			dp = (uint8_t *)qlt->mq_req[q_cnt].mq_ptr;
10159 			*bp++ = LSB(q_cnt);
10160 			*bp++ = MSB(q_cnt);
10161 			*bp++ = LSB(e_cnt);
10162 			*bp++ = MSB(e_cnt);
10163 			for (cnt = 0; cnt < e_cnt; cnt++) {
10164 				for (i = 0; i < IOCB_SIZE; i++) {
10165 					*bp++ = *dp++;
10166 				}
10167 			}
10168 		}
10169 	} else if (entry->queue_type == 2) {
10170 
10171 		e_cnt = qlt->qlt_queue_cnt;
10172 		esize = e_cnt * 2;	/* queue number */
10173 		esize += e_cnt * 2;	/* queue entries */
10174 
10175 		/* queue size */
10176 		esize += RESPONSE_QUEUE_ENTRIES * IOCB_SIZE;
10177 		for (q_cnt = 1; q_cnt < qlt->qlt_queue_cnt; q_cnt++) {
10178 			esize += RESPONSE_QUEUE_MQ_ENTRIES * IOCB_SIZE;
10179 		}
10180 
10181 		if (dbuff == NULL) {
10182 			return (esize);
10183 		}
10184 		if (esize + dbuff >= dbuff_end) {
10185 			EL(qlt, "skipped2, no buffer space, needed=%xh\n",
10186 			    esize);
10187 			entry->h.driver_flags = (uint8_t)
10188 			    (entry->h.driver_flags | SKIPPED_FLAG);
10189 			return (0);
10190 		}
10191 		entry->num_queues = e_cnt;
10192 
10193 		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10194 			e_cnt = q_cnt == 0 ?
10195 			    RESPONSE_QUEUE_ENTRIES : RESPONSE_QUEUE_MQ_ENTRIES;
10196 			dp = (uint8_t *)qlt->mq_resp[q_cnt].mq_ptr;
10197 			*bp++ = LSB(q_cnt);
10198 			*bp++ = MSB(q_cnt);
10199 			*bp++ = LSB(e_cnt);
10200 			*bp++ = MSB(e_cnt);
10201 			for (cnt = 0; cnt < e_cnt; cnt++) {
10202 				for (i = 0; i < IOCB_SIZE; i++) {
10203 					*bp++ = *dp++;
10204 				}
10205 			}
10206 		}
10207 	} else if (entry->queue_type == 3) {
10208 		e_cnt = 1;
10209 		esize = e_cnt * 2;	/* queue number */
10210 		esize += e_cnt * 2;	/* queue entries */
10211 
10212 		/* queue size */
10213 		esize += RESPONSE_QUEUE_ENTRIES * IOCB_SIZE;
10214 
10215 		if (dbuff == NULL) {
10216 			return (esize);
10217 		}
10218 		if (esize + dbuff >= dbuff_end) {
10219 			EL(qlt, "skipped2, no buffer space, needed=%xh\n",
10220 			    esize);
10221 			entry->h.driver_flags = (uint8_t)
10222 			    (entry->h.driver_flags | SKIPPED_FLAG);
10223 			return (0);
10224 		}
10225 		entry->num_queues = e_cnt;
10226 
10227 		for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10228 			e_cnt = ATIO_QUEUE_ENTRIES;
10229 			dp = (uint8_t *)qlt->atio_ptr;
10230 			*bp++ = LSB(q_cnt);
10231 			*bp++ = MSB(q_cnt);
10232 			*bp++ = LSB(e_cnt);
10233 			*bp++ = MSB(e_cnt);
10234 			for (cnt = 0; cnt < e_cnt; cnt++) {
10235 				for (i = 0; i < IOCB_SIZE; i++) {
10236 					*bp++ = *dp++;
10237 				}
10238 			}
10239 		}
10240 	} else {
10241 		EL(qlt, "skipped, unknown queue_type %d, esize=0\n",
10242 		    entry->queue_type);
10243 		if (dbuff != NULL) {
10244 			entry->h.driver_flags = (uint8_t)
10245 			    (entry->h.driver_flags | SKIPPED_FLAG);
10246 		}
10247 		return (0);
10248 	}
10249 
10250 	return (esize);
10251 }
10252 
10253 /*ARGSUSED*/
10254 static int
qlt_27xx_dt_gfce(qlt_state_t * qlt,qlt_dt_gfce_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10255 qlt_27xx_dt_gfce(qlt_state_t *qlt, qlt_dt_gfce_t *entry,
10256     uint8_t *dbuff, uint8_t *dbuff_end)
10257 {
10258 	if (dbuff != NULL) {
10259 		entry->h.driver_flags = (uint8_t)
10260 		    (entry->h.driver_flags | SKIPPED_FLAG);
10261 	}
10262 
10263 	return (0);
10264 }
10265 
/*
 * qlt_27xx_dt_prisc - 27xx dump template: pause the RISC processor.
 *
 * No-op on the sizing pass (dbuff == NULL); produces no dump bytes.
 * On the capture pass, issues the pause command via HCCR and polls
 * RISC_STATUS BIT_8 (paused) for up to 30000 * 100us, logging progress
 * every second.
 */
static void
qlt_27xx_dt_prisc(qlt_state_t *qlt, qlt_dt_prisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t	timer;

	if (dbuff == NULL) {
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Pause RISC. */
	if ((REG_RD32(qlt, REG_RISC_STATUS) & BIT_8) == 0) {
		/* 0x30000000: HCCR pause-RISC command. */
		REG_WR32(qlt, REG_HCCR, 0x30000000);
		for (timer = 30000;
		    (REG_RD32(qlt, REG_RISC_STATUS) & BIT_8) == 0;
		    timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(qlt, "risc pause %d\n", timer);
				}
			} else {
				EL(qlt, "risc pause timeout\n");
				break;
			}
		}
	}
}
10300 
/*
 * qlt_27xx_dt_rrisc - 27xx dump template: reset the RISC processor.
 *
 * No-op on the sizing pass (dbuff == NULL); produces no dump bytes.
 * On the capture pass: stops DMA, soft-resets the chip, waits for the
 * ROM firmware and the reset bit to settle, then disables interrupts.
 * All waits are bounded polls (30000 * 100us).
 */
static void
qlt_27xx_dt_rrisc(qlt_state_t *qlt, qlt_dt_rrisc_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	clock_t	timer;
	uint16_t rom_status;

	if (dbuff == NULL) {
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Shutdown DMA. */
	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL);

	/* Wait for DMA to stop. */
	for (timer = 0; timer < 30000; timer++) {
		if (!(REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS)) {
			break;
		}
		drv_usecwait(100);
	}

	/* Reset the chip. */
	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET);
	drv_usecwait(200);

	/* Wait for RISC to recover from reset. */
	for (timer = 30000; timer; timer--) {
		rom_status = REG_RD16(qlt, REG_MBOX0);
		if ((rom_status & MBS_ROM_STATUS_MASK) != MBS_ROM_BUSY) {
			break;
		}
		drv_usecwait(100);
	}

	/* Wait for reset to finish. */
	for (timer = 30000; timer; timer--) {
		if (!(REG_RD32(qlt, REG_CTRL_STATUS) & CHIP_SOFT_RESET)) {
			break;
		}
		drv_usecwait(100);
	}

	/* XXX: Disable Interrupts (Probably not needed) */
	REG_WR32(qlt, REG_INTR_CTRL, 0);

	qlt->qlt_intr_enabled = 0;
}
10355 
10356 static void
qlt_27xx_dt_dint(qlt_state_t * qlt,qlt_dt_dint_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10357 qlt_27xx_dt_dint(qlt_state_t *qlt, qlt_dt_dint_t *entry,
10358     uint8_t *dbuff, uint8_t *dbuff_end)
10359 {
10360 	if (dbuff == NULL) {
10361 		return;
10362 	}
10363 	if (dbuff >= dbuff_end) {
10364 		EL(qlt, "skipped, no buffer space, needed=0\n");
10365 		entry->h.driver_flags = (uint8_t)
10366 		    (entry->h.driver_flags | SKIPPED_FLAG);
10367 		return;
10368 	}
10369 
10370 	PCICFG_WR32(qlt, entry->pci_offset, entry->data);
10371 }
10372 
10373 /*ARGSUSED*/
10374 static int
qlt_27xx_dt_ghbd(qlt_state_t * qlt,qlt_dt_ghbd_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10375 qlt_27xx_dt_ghbd(qlt_state_t *qlt, qlt_dt_ghbd_t *entry,
10376     uint8_t *dbuff, uint8_t *dbuff_end)
10377 {
10378 	if (dbuff != NULL) {
10379 		entry->h.driver_flags = (uint8_t)
10380 		    (entry->h.driver_flags | SKIPPED_FLAG);
10381 	}
10382 
10383 	return (0);
10384 }
10385 
10386 /*ARGSUSED*/
10387 static int
qlt_27xx_dt_scra(qlt_state_t * qlt,qlt_dt_scra_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10388 qlt_27xx_dt_scra(qlt_state_t *qlt, qlt_dt_scra_t *entry,
10389     uint8_t *dbuff, uint8_t *dbuff_end)
10390 {
10391 	if (dbuff != NULL) {
10392 		entry->h.driver_flags = (uint8_t)
10393 		    (entry->h.driver_flags | SKIPPED_FLAG);
10394 	}
10395 
10396 	return (0);
10397 }
10398 
/*
 * qlt_27xx_dt_rrreg
 *	Dump-template "read remote register" entry: reads entry->count
 *	32-bit registers through the indirect-access window and records
 *	each as <address, data> (8 bytes per register, little-endian).
 *
 * Returns:
 *	Number of dump bytes produced (also on the dbuff == NULL sizing
 *	pass), or 0 when the entry is skipped for lack of buffer space.
 */
static int
qlt_27xx_dt_rrreg(qlt_state_t *qlt, qlt_dt_rrreg_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint32_t i;
	uint8_t	*bp = dbuff;
	uint32_t addr = entry->addr;
	uint32_t cnt = entry->count;

	esize = cnt * 4;	/* addr */
	esize += cnt * 4;	/* data */

	if (dbuff == NULL) {
		/* Sizing pass: report the space this entry will consume. */
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Select the indirect register-access I/O bus window. */
	REG_WR32(qlt, REG_IOBUS_BASE_ADDR, 0x40);
	while (cnt--) {
		/*
		 * Latch the target address (bit 31 presumably triggers the
		 * read — NOTE(review): confirm against chip documentation),
		 * then store the address little-endian...
		 */
		REG_WR32(qlt, 0xc0, addr | 0x80000000);
		*bp++ = LSB(LSW(addr));
		*bp++ = MSB(LSW(addr));
		*bp++ = LSB(MSW(addr));
		*bp++ = MSB(MSW(addr));
		/*
		 * ...followed by the four data bytes, read from BAR
		 * offsets 0-3 of the selected window.
		 */
		for (i = 0; i < 4; i++) {
			*bp++ = REG_RD8(qlt, i);
		}
		addr += 4;
	}

	return (esize);
}
10437 
/*
 * qlt_27xx_dt_wrreg
 *	Dump-template "write remote register" entry: writes entry->data to
 *	the register at entry->addr through the indirect-access window.
 *	Contributes no bytes to the dump; dbuff == NULL is the sizing pass.
 */
static void
qlt_27xx_dt_wrreg(qlt_state_t *qlt, qlt_dt_wrreg_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	if (dbuff == NULL) {
		return;
	}
	if (dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=0\n");
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return;
	}

	/* Select the indirect register-access I/O bus window. */
	REG_WR32(qlt, REG_IOBUS_BASE_ADDR, 0x40);
	/*
	 * Data is staged at 0xc4 before the address write to 0xc0 —
	 * presumably the address write triggers the actual register
	 * update, so this ordering is intentional (do not reorder).
	 */
	REG_WR32(qlt, 0xc4, entry->data);
	REG_WR32(qlt, 0xc0, entry->addr);
}
10456 
10457 static int
qlt_27xx_dt_rrram(qlt_state_t * qlt,qlt_dt_rrram_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10458 qlt_27xx_dt_rrram(qlt_state_t *qlt, qlt_dt_rrram_t *entry,
10459     uint8_t *dbuff, uint8_t *dbuff_end)
10460 {
10461 	int rval, esize;
10462 
10463 	esize = entry->count * 4;	/* data */
10464 
10465 	if (dbuff == NULL) {
10466 		return (esize);
10467 	}
10468 	if (esize + dbuff >= dbuff_end) {
10469 		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10470 		entry->h.driver_flags = (uint8_t)
10471 		    (entry->h.driver_flags | SKIPPED_FLAG);
10472 		return (0);
10473 	}
10474 
10475 	if ((rval = qlt_27xx_dump_ram(qlt, MBC_MPI_RAM, entry->addr,
10476 	    entry->count, dbuff)) != QLT_SUCCESS) {
10477 		EL(qlt, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
10478 		    "esize=0\n", rval, entry->addr, entry->count);
10479 		return (0);
10480 	}
10481 
10482 	return (esize);
10483 }
10484 
10485 static int
qlt_27xx_dt_rpcic(qlt_state_t * qlt,qlt_dt_rpcic_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10486 qlt_27xx_dt_rpcic(qlt_state_t *qlt, qlt_dt_rpcic_t *entry,
10487     uint8_t *dbuff, uint8_t *dbuff_end)
10488 {
10489 	int esize;
10490 	uint32_t i;
10491 	uint8_t	*bp = dbuff;
10492 	uint32_t addr = entry->addr;
10493 	uint32_t cnt = entry->count;
10494 
10495 	esize = cnt * 4;	/* addr */
10496 	esize += cnt * 4;	/* data */
10497 
10498 	if (dbuff == NULL) {
10499 		return (esize);
10500 	}
10501 	if (esize + dbuff >= dbuff_end) {
10502 		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10503 		entry->h.driver_flags = (uint8_t)
10504 		    (entry->h.driver_flags | SKIPPED_FLAG);
10505 		return (0);
10506 	}
10507 
10508 	while (cnt--) {
10509 		*bp++ = LSB(LSW(addr));
10510 		*bp++ = MSB(LSW(addr));
10511 		*bp++ = LSB(MSW(addr));
10512 		*bp++ = MSB(MSW(addr));
10513 		for (i = 0; i < 4; i++) {
10514 			*bp++ = PCICFG_RD8(qlt, addr++);
10515 		}
10516 	}
10517 
10518 	return (esize);
10519 }
10520 
10521 /*ARGSUSED*/
10522 static int
qlt_27xx_dt_gques(qlt_state_t * qlt,qlt_dt_gques_t * entry,uint8_t * dbuff,uint8_t * dbuff_end)10523 qlt_27xx_dt_gques(qlt_state_t *qlt, qlt_dt_gques_t *entry,
10524     uint8_t *dbuff, uint8_t *dbuff_end)
10525 {
10526 	if (entry->queue_type == 1) {
10527 		EL(qlt, "skipped, no request queue shadowing, esize=0\n");
10528 		if (dbuff != NULL) {
10529 			entry->num_queues = 0;
10530 			entry->h.driver_flags = (uint8_t)
10531 			    (entry->h.driver_flags | SKIPPED_FLAG);
10532 		}
10533 		return (0);
10534 	} else if (entry->queue_type == 2) {
10535 		EL(qlt, "skipped, no response queue shadowing, esize=0\n");
10536 		if (dbuff != NULL) {
10537 			entry->num_queues = 0;
10538 			entry->h.driver_flags = (uint8_t)
10539 			    (entry->h.driver_flags | SKIPPED_FLAG);
10540 		}
10541 		return (0);
10542 	} else if (entry->queue_type == 3) {
10543 		EL(qlt, "skipped, no ATIO queue, esize=0\n");
10544 		if (dbuff != NULL) {
10545 			entry->num_queues = 0;
10546 			entry->h.driver_flags = (uint8_t)
10547 			    (entry->h.driver_flags | SKIPPED_FLAG);
10548 		}
10549 		return (0);
10550 	} else {
10551 		EL(qlt, "skipped, unknown queue_type %d, esize=0\n",
10552 		    entry->queue_type);
10553 		if (dbuff != NULL) {
10554 			entry->h.driver_flags = (uint8_t)
10555 			    (entry->h.driver_flags | SKIPPED_FLAG);
10556 		}
10557 		return (0);
10558 	}
10559 }
10560 
/*
 * qlt_27xx_dt_wdmp
 *	Dump-template "write dump" entry: copies entry->length units of
 *	template-supplied data (entry->data) into the dump buffer,
 *	little-endian.
 *
 * Returns:
 *	esize (also on the dbuff == NULL sizing pass), or 0 when skipped
 *	for lack of buffer space.
 */
static int
qlt_27xx_dt_wdmp(qlt_state_t *qlt, qlt_dt_wdmp_t *entry,
    uint8_t *dbuff, uint8_t *dbuff_end)
{
	int esize;
	uint8_t *bp = dbuff;
	uint32_t data, cnt = entry->length, *dp = entry->data;

	/*
	 * NOTE(review): esize is reported as cnt bytes, but the loop below
	 * emits 4 bytes per iteration (4 * cnt total).  These cannot both
	 * be right: if entry->length counts 32-bit words, esize should be
	 * cnt * 4; if it counts bytes (as the analogous entry is treated
	 * elsewhere), the loop should iterate cnt / 4 times.  Either way
	 * the buffer-space check and the caller's dbuff advancement are
	 * inconsistent with the bytes actually written — confirm against
	 * the 27xx dump-template specification before relying on this.
	 */
	esize = cnt;
	if (dbuff == NULL) {
		/* Sizing pass: report the space this entry will consume. */
		return (esize);
	}
	if (esize + dbuff >= dbuff_end) {
		EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
		entry->h.driver_flags = (uint8_t)
		    (entry->h.driver_flags | SKIPPED_FLAG);
		return (0);
	}

	/* Copy the template-supplied words out little-endian. */
	while (cnt--) {
		data = *dp++;
		*bp++ = LSB(LSW(data));
		*bp++ = MSB(LSW(data));
		*bp++ = LSB(MSW(data));
		*bp++ = MSB(MSW(data));
	}

	return (esize);
}
10590 
10591 /*
10592  * qlt_27xx_dump_ram
10593  *	Dumps RAM.
10594  *	Risc interrupts must be disabled when this routine is called.
10595  *
10596  * Input:
10597  *	pi:		port info pointer.
10598  *	cmd:		MBC_DUMP_RAM_EXTENDED/MBC_MPI_RAM.
10599  *	risc_address:	RISC code start address.
10600  *	len:		Number of words.
10601  *	bp:		buffer pointer.
10602  *
10603  * Returns:
10604  *	qlt local function return status code.
10605  *
10606  * Context:
10607  *	Interrupt or Kernel context, no mailbox commands allowed.
10608  */
10609 /*ARGSUSED*/
10610 static int
qlt_27xx_dump_ram(qlt_state_t * qlt,uint16_t cmd,uint32_t risc_address,uint32_t len,uint8_t * bp)10611 qlt_27xx_dump_ram(qlt_state_t *qlt, uint16_t cmd, uint32_t risc_address,
10612     uint32_t len, uint8_t *bp)
10613 {
10614 	uint8_t *dp;
10615 	uint32_t words_to_read, endaddr;
10616 	uint32_t i;
10617 	int rval = QLT_SUCCESS;
10618 
10619 	endaddr = risc_address + len;
10620 	words_to_read = 0;
10621 	while (risc_address < endaddr) {
10622 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
10623 		if ((words_to_read + risc_address) > endaddr) {
10624 			words_to_read = endaddr - risc_address;
10625 		}
10626 
10627 		if (cmd == MBC_DUMP_RAM_EXTENDED) {
10628 			rval = qlt_read_risc_ram(qlt, risc_address,
10629 			    words_to_read);
10630 		} else {
10631 			rval = qlt_mbx_mpi_ram(qlt, risc_address,
10632 			    words_to_read, 0);
10633 		}
10634 
10635 		if (rval != QLT_SUCCESS) {
10636 			EL(qlt, "Error reading risc ram = %xh len = %x\n",
10637 			    risc_address, words_to_read);
10638 			return (rval);
10639 		}
10640 
10641 		dp = (uint8_t *)(qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
10642 		for (i = 0; i < (words_to_read * 4); i++) {
10643 			*bp++ = *dp++;
10644 		}
10645 		risc_address += words_to_read;
10646 	}
10647 
10648 	return (rval);
10649 }
10650 
/*
 * qlt_27xx_dmp_parse_template
 *	Walks a 27xx firmware dump template, dispatching each entry to its
 *	handler.  Called twice: once with dump_buff == NULL to compute the
 *	required dump size, then again with a real buffer to collect data.
 *
 * Input:
 *	qlt:		adapter state pointer.
 *	template_hdr:	dump template header (type must be DT_THDR).
 *	dump_buff:	output buffer, or NULL for size determination.
 *	buff_size:	size of dump_buff in bytes.
 *
 * Returns:
 *	Total dump size in bytes (template size plus all entry data),
 *	or 0 if the template header is invalid.
 */
static uint32_t
qlt_27xx_dmp_parse_template(qlt_state_t *qlt, qlt_dt_hdr_t *template_hdr,
    uint8_t *dump_buff, uint32_t buff_size)
{
	int e_cnt, esize, num_of_entries;
	uint32_t bsize;
	time_t time;
	uint8_t *dbuff, *dbuff_end;
	qlt_dt_entry_t *entry;
	int sane_end = 0;	/* counts DT_TEND entries; expect exactly 1 */

	dbuff = dump_buff;	/* dbuff = NULL	size determination. */
	dbuff_end = dump_buff + buff_size;

	if (template_hdr->type != DT_THDR) {
		EL(qlt, "Template header not found\n");
		return (0);
	}
	if (dbuff != NULL) {
		/* Stamp the template with the capture time (real pass only). */
		(void) drv_getparm(TIME, &time);
		template_hdr->driver_timestamp = LSD(time);
	}

	num_of_entries = template_hdr->num_of_entries;
	entry = (qlt_dt_entry_t *)((caddr_t)template_hdr +
	    template_hdr->first_entry_offset);

	/* The dump begins with the template itself. */
	bsize = template_hdr->size_of_template;
	for (e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
		/*
		 * Decode the entry type and process it accordingly
		 */
		esize = 0;
		switch (entry->h.type) {
		case DT_NOP:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			break;
		case DT_TEND:
			if (dbuff != NULL) {
				entry->h.driver_flags = (uint8_t)
				    (entry->h.driver_flags | SKIPPED_FLAG);
			}
			sane_end++;
			break;
		case DT_RIOB1:
			esize = qlt_27xx_dt_riob1(qlt, (qlt_dt_riob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB1:
			qlt_27xx_dt_wiob1(qlt, (qlt_dt_wiob1_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RIOB2:
			esize = qlt_27xx_dt_riob2(qlt, (qlt_dt_riob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WIOB2:
			qlt_27xx_dt_wiob2(qlt, (qlt_dt_wiob2_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCI:
			esize = qlt_27xx_dt_rpci(qlt, (qlt_dt_rpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WPCI:
			qlt_27xx_dt_wpci(qlt, (qlt_dt_wpci_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRAM:
			esize = qlt_27xx_dt_rram(qlt, (qlt_dt_rram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUE:
			esize = qlt_27xx_dt_gque(qlt, (qlt_dt_gque_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GFCE:
			esize = qlt_27xx_dt_gfce(qlt, (qlt_dt_gfce_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_PRISC:
			qlt_27xx_dt_prisc(qlt, (qlt_dt_prisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRISC:
			qlt_27xx_dt_rrisc(qlt, (qlt_dt_rrisc_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_DINT:
			qlt_27xx_dt_dint(qlt, (qlt_dt_dint_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GHBD:
			esize = qlt_27xx_dt_ghbd(qlt, (qlt_dt_ghbd_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_SCRA:
			esize = qlt_27xx_dt_scra(qlt, (qlt_dt_scra_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRREG:
			esize = qlt_27xx_dt_rrreg(qlt, (qlt_dt_rrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WRREG:
			qlt_27xx_dt_wrreg(qlt, (qlt_dt_wrreg_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RRRAM:
			esize = qlt_27xx_dt_rrram(qlt, (qlt_dt_rrram_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_RPCIC:
			esize = qlt_27xx_dt_rpcic(qlt, (qlt_dt_rpcic_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_GQUES:
			esize = qlt_27xx_dt_gques(qlt, (qlt_dt_gques_t *)entry,
			    dbuff, dbuff_end);
			break;
		case DT_WDMP:
			esize = qlt_27xx_dt_wdmp(qlt, (qlt_dt_wdmp_t *)entry,
			    dbuff, dbuff_end);
			break;
		default:
			entry->h.driver_flags = (uint8_t)
			    (entry->h.driver_flags | SKIPPED_FLAG);
			EL(qlt, "Entry ID=%d, type=%d unknown\n", e_cnt,
			    entry->h.type);
			break;
		}
		/* Advance the output cursor by whatever the entry produced. */
		if (dbuff != NULL && esize) {
			dbuff += esize;
		}
		bsize += esize;
		/* next entry in the template */
		entry = (qlt_dt_entry_t *)((caddr_t)entry + entry->h.size);
	}
	/* More than one end-of-template entry means the template is bad. */
	if (sane_end > 1) {
		EL(qlt, "Template configuration error. Check Template\n");
	}

	return (bsize);
}
10798