1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
29  */
30 
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/stat.h>
34 #include <sys/pci.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/file.h>
38 #include <sys/cred.h>
39 #include <sys/byteorder.h>
40 #include <sys/atomic.h>
41 #include <sys/scsi/scsi.h>
42 
43 #include <sys/stmf_defines.h>
44 #include <sys/fct_defines.h>
45 #include <sys/stmf.h>
46 #include <sys/stmf_ioctl.h>
47 #include <sys/portif.h>
48 #include <sys/fct.h>
49 
50 #include "qlt.h"
51 #include "qlt_dma.h"
52 #include "qlt_ioctl.h"
53 #include "qlt_open.h"
54 
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static void qlt_enable_intr(qlt_state_t *);
58 static void qlt_disable_intr(qlt_state_t *);
59 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
60 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
61 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
62     uint32_t word_count, uint32_t risc_addr);
63 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
64 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
65 					uint32_t dma_size);
66 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
67 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
68 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
69 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
70     stmf_state_change_info_t *ssci);
71 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
72 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
73 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
74 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
77 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
78 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
79     uint8_t *rsp);
80 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
81 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
82 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
83 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
84 static void qlt_verify_fw(qlt_state_t *qlt);
85 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
86 fct_status_t qlt_port_start(caddr_t arg);
87 fct_status_t qlt_port_stop(caddr_t arg);
88 fct_status_t qlt_port_online(qlt_state_t *qlt);
89 fct_status_t qlt_port_offline(qlt_state_t *qlt);
90 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
91     fct_link_info_t *li);
92 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
93 static fct_status_t qlt_force_lip(qlt_state_t *);
94 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
95 						fct_flogi_xchg_t *fx);
96 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
97 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
98 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
99     fct_remote_port_t *rp, fct_cmd_t *login);
100 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
101     fct_remote_port_t *rp);
102 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
103 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
104 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
105     fct_cmd_t *cmd, int terminate);
106 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
107 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
108 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
109     fct_cmd_t *cmd, uint32_t flags);
110 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
111 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
113 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
114 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
115 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
116 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
117     stmf_data_buf_t *dbuf, uint32_t ioflags);
118 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
119 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
120 static void qlt_release_intr(qlt_state_t *qlt);
121 static int qlt_setup_interrupts(qlt_state_t *qlt);
122 static void qlt_destroy_mutex(qlt_state_t *qlt);
123 
124 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
125     uint32_t words);
126 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
127     caddr_t buf, uint_t size_left);
128 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
129     caddr_t buf, uint_t size_left);
130 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
131     int count, uint_t size_left);
132 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
133     cred_t *credp, int *rval);
134 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
135 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
136 
137 static int qlt_setup_msi(qlt_state_t *qlt);
138 static int qlt_setup_msix(qlt_state_t *qlt);
139 
140 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
141 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
142 static int qlt_validate_trace_desc(qlt_state_t *qlt);
143 static char *qlt_find_trace_start(qlt_state_t *qlt);
144 
145 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
146 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
147 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
148     char **prop_val);
149 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
150 static int qlt_convert_string_to_ull(char *prop, int radix,
151     u_longlong_t *result);
152 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
153 static int qlt_quiesce(dev_info_t *dip);
154 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
155     uint32_t);
156 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
157     uint32_t *);
158 static void qlt_mps_reset(qlt_state_t *qlt);
159 static void qlt_properties(qlt_state_t *qlt);
160 
161 
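/*
 * SETELSBIT sets the bit for ELS opcode "els" in a 32-byte (256-bit)
 * bitmap: byte index = (els >> 3) & 0x1F, bit index = els & 7.
 * For example (pure arithmetic, illustrative only): opcode 0x52 lands in
 * byte 0x0A, bit 2.  qlt_port_online() uses this to tell the firmware
 * which unsolicited ELS frames to pass up via PUREX.
 */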
162 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
163 	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
164 
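/*
 * Interrupt-type tunables: MSI-X is disabled and MSI is enabled by
 * default.  These are plain globals, so they could be overridden before
 * attach (for example via /etc/system); qlt_setup_interrupts() consults
 * them when picking an interrupt type.
 */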
165 int qlt_enable_msix = 0;
166 int qlt_enable_msi = 1;
167 
168 
169 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
170 
171 /* Array to quickly calculate next free buf index to use */
172 #if 0
173 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
174 #endif
175 
176 static struct cb_ops qlt_cb_ops = {
177 	qlt_open,
178 	qlt_close,
179 	nodev,
180 	nodev,
181 	nodev,
182 	nodev,
183 	nodev,
184 	qlt_ioctl,
185 	nodev,
186 	nodev,
187 	nodev,
188 	nochpoll,
189 	ddi_prop_op,
190 	0,
191 	D_MP | D_NEW
192 };
193 
194 static struct dev_ops qlt_ops = {
195 	DEVO_REV,
196 	0,
197 	nodev,
198 	nulldev,
199 	nulldev,
200 	qlt_attach,
201 	qlt_detach,
202 	nodev,
203 	&qlt_cb_ops,
204 	NULL,
205 	ddi_power,
206 	qlt_quiesce
207 };
208 
209 #ifndef	PORT_SPEED_10G
210 #define	PORT_SPEED_10G		16
211 #endif
212 
213 static struct modldrv modldrv = {
214 	&mod_driverops,
215 	QLT_NAME" "QLT_VERSION,
216 	&qlt_ops,
217 };
218 
219 static struct modlinkage modlinkage = {
220 	MODREV_1, &modldrv, NULL
221 };
222 
223 void *qlt_state = NULL;
224 kmutex_t qlt_global_lock;
225 static uint32_t qlt_loaded_counter = 0;
226 
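/*
 * PCI/PCI-X bus speed strings, indexed by bits 11:8 of the ISP2422
 * control/status register (see the 0x2422 handling in qlt_attach()).
 */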
227 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
228 			"-X Mode 1 133", "--Invalid--",
229 			"-X Mode 2 66", "-X Mode 2 100",
230 			"-X Mode 2 133", " 66" };
231 
232 /* Always use 64 bit DMA. */
233 static ddi_dma_attr_t qlt_queue_dma_attr = {
234 	DMA_ATTR_V0,		/* dma_attr_version */
235 	0,			/* low DMA address range */
236 	0xffffffffffffffff,	/* high DMA address range */
237 	0xffffffff,		/* DMA counter register */
238 	64,			/* DMA address alignment */
239 	0xff,			/* DMA burstsizes */
240 	1,			/* min effective DMA size */
241 	0xffffffff,		/* max DMA xfer size */
242 	0xffffffff,		/* segment boundary */
243 	1,			/* s/g list length */
244 	1,			/* granularity of device */
245 	0			/* DMA transfer flags */
246 };
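/*
 * Note: the s/g list length of 1 above forces a single physically
 * contiguous cookie for the queue memory, which is why qlt_attach()
 * fails the attach if ddi_dma_addr_bind_handle() returns more than
 * one cookie.
 */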
247 
248 /* qlogic logging */
249 int enable_extended_logging = 0;
250 
251 static char qlt_provider_name[] = "qlt";
252 static struct stmf_port_provider *qlt_pp;
253 
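/*
 * _init() registers qlt as an STMF port provider before calling
 * mod_install(); _fini() refuses to unload (EBUSY) while
 * qlt_loaded_counter is non-zero.
 */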
254 int
255 _init(void)
256 {
257 	int ret;
258 
259 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
260 	if (ret == 0) {
261 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
262 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
263 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
264 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
265 		qlt_pp->pp_name = qlt_provider_name;
266 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
267 			stmf_free(qlt_pp);
268 			mutex_destroy(&qlt_global_lock);
269 			ddi_soft_state_fini(&qlt_state);
270 			return (EIO);
271 		}
272 		ret = mod_install(&modlinkage);
273 		if (ret != 0) {
274 			(void) stmf_deregister_port_provider(qlt_pp);
275 			stmf_free(qlt_pp);
276 			mutex_destroy(&qlt_global_lock);
277 			ddi_soft_state_fini(&qlt_state);
278 		}
279 	}
280 	return (ret);
281 }
282 
283 int
284 _fini(void)
285 {
286 	int ret;
287 
288 	if (qlt_loaded_counter)
289 		return (EBUSY);
290 	ret = mod_remove(&modlinkage);
291 	if (ret == 0) {
292 		(void) stmf_deregister_port_provider(qlt_pp);
293 		stmf_free(qlt_pp);
294 		mutex_destroy(&qlt_global_lock);
295 		ddi_soft_state_fini(&qlt_state);
296 	}
297 	return (ret);
298 }
299 
300 int
301 _info(struct modinfo *modinfop)
302 {
303 	return (mod_info(&modlinkage, modinfop));
304 }
305 
306 
307 static int
308 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
309 {
310 	int		instance;
311 	qlt_state_t	*qlt;
312 	ddi_device_acc_attr_t	dev_acc_attr;
313 	uint16_t	did;
314 	uint16_t	val;
315 	uint16_t	mr;
316 	size_t		discard;
317 	uint_t		ncookies;
318 	int		max_read_size;
319 	int		max_payload_size;
320 	fct_status_t	ret;
321 
322 	/* No support for suspend/resume yet */
323 	if (cmd != DDI_ATTACH)
324 		return (DDI_FAILURE);
325 	instance = ddi_get_instance(dip);
326 
327 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
328 		return (DDI_FAILURE);
329 	}
330 
331 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
332 	    NULL) {
333 		goto attach_fail_1;
334 	}
335 
336 	qlt->instance = instance;
337 
338 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
339 	qlt->dip = dip;
340 
341 	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
342 		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
343 		goto attach_fail_1;
344 	}
345 
346 	EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);
347 
348 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
349 		goto attach_fail_2;
350 	}
351 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
352 	if ((did != 0x2422) && (did != 0x2432) &&
353 	    (did != 0x8432) && (did != 0x2532) &&
354 	    (did != 0x8001)) {
355 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
356 		    instance, did);
357 		goto attach_fail_4;
358 	}
359 
360 	if ((did & 0xFF00) == 0x8000)
361 		qlt->qlt_81xx_chip = 1;
362 	else if ((did & 0xFF00) == 0x2500)
363 		qlt->qlt_25xx_chip = 1;
364 
365 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
366 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
367 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
368 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
369 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
370 		goto attach_fail_4;
371 	}
372 	if (did == 0x2422) {
373 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
374 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
375 		pci_bits >>= 8;
376 		pci_bits &= 0xf;
377 		if ((pci_bits == 3) || (pci_bits == 7)) {
378 			cmn_err(CE_NOTE,
379 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
380 			    instance, pci_speeds[pci_bits], pci_bits);
381 		} else {
382 			cmn_err(CE_WARN,
383 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
384 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
385 			    "(Invalid)", ((pci_bits == 0) ||
386 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
387 			    "32 bit slot ") : "", pci_bits);
388 		}
389 	}
390 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
391 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
392 		    (unsigned long long)ret);
393 		goto attach_fail_5;
394 	}
395 
396 	qlt_properties(qlt);
397 
398 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
399 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
400 		goto attach_fail_5;
401 	}
402 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
403 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
404 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
405 	    DDI_SUCCESS) {
406 		goto attach_fail_6;
407 	}
408 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
409 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
410 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
411 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
412 		goto attach_fail_7;
413 	}
414 	if (ncookies != 1)
415 		goto attach_fail_8;
416 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
417 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
418 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
419 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
420 
421 	/* mutexes are initialized in this function */
422 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
423 		goto attach_fail_8;
424 
425 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
426 	    "qlt%d", instance);
427 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
428 	    "%s,0", qlt->qlt_minor_name);
429 
430 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
431 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
432 		goto attach_fail_9;
433 	}
434 
435 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
436 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
437 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
438 
439 	/* Setup PCI cfg space registers */
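	/*
	 * Both tunables below come from qlt.conf; 11 is the "not set"
	 * default passed to qlt_read_int_prop().  Illustrative qlt.conf
	 * entries (not shipped defaults) would look like:
	 *	pci-max-read-request=2048;
	 *	pcie-max-payload-size=512;
	 */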
440 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
441 	if (max_read_size == 11)
442 		goto over_max_read_xfer_setting;
443 	if (did == 0x2422) {
444 		if (max_read_size == 512)
445 			val = 0;
446 		else if (max_read_size == 1024)
447 			val = 1;
448 		else if (max_read_size == 2048)
449 			val = 2;
450 		else if (max_read_size == 4096)
451 			val = 3;
452 		else {
453 			cmn_err(CE_WARN, "qlt(%d) malformed "
454 			    "pci-max-read-request in qlt.conf. Valid values "
455 			    "for this HBA are 512/1024/2048/4096", instance);
456 			goto over_max_read_xfer_setting;
457 		}
458 		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
459 		mr = (uint16_t)(mr & 0xfff3);
460 		mr = (uint16_t)(mr | (val << 2));
461 		PCICFG_WR16(qlt, 0x4E, mr);
462 	} else if ((did == 0x2432) || (did == 0x8432) ||
463 	    (did == 0x2532) || (did == 0x8001)) {
464 		if (max_read_size == 128)
465 			val = 0;
466 		else if (max_read_size == 256)
467 			val = 1;
468 		else if (max_read_size == 512)
469 			val = 2;
470 		else if (max_read_size == 1024)
471 			val = 3;
472 		else if (max_read_size == 2048)
473 			val = 4;
474 		else if (max_read_size == 4096)
475 			val = 5;
476 		else {
477 			cmn_err(CE_WARN, "qlt(%d) malformed "
478 			    "pci-max-read-request in qlt.conf. Valid values "
479 			    "for this HBA are 128/256/512/1024/2048/4096",
480 			    instance);
481 			goto over_max_read_xfer_setting;
482 		}
483 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
484 		mr = (uint16_t)(mr & 0x8fff);
485 		mr = (uint16_t)(mr | (val << 12));
486 		PCICFG_WR16(qlt, 0x54, mr);
487 	} else {
488 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
489 		    "pci-max-read-request for this device (%x)",
490 		    instance, did);
491 	}
492 over_max_read_xfer_setting:;
493 
494 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
495 	if (max_payload_size == 11)
496 		goto over_max_payload_setting;
497 	if ((did == 0x2432) || (did == 0x8432) ||
498 	    (did == 0x2532) || (did == 0x8001)) {
499 		if (max_payload_size == 128)
500 			val = 0;
501 		else if (max_payload_size == 256)
502 			val = 1;
503 		else if (max_payload_size == 512)
504 			val = 2;
505 		else if (max_payload_size == 1024)
506 			val = 3;
507 		else {
508 			cmn_err(CE_WARN, "qlt(%d) malformed "
509 			    "pcie-max-payload-size in qlt.conf. Valid values "
510 			    "for this HBA are 128/256/512/1024",
511 			    instance);
512 			goto over_max_payload_setting;
513 		}
514 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
515 		mr = (uint16_t)(mr & 0xff1f);
516 		mr = (uint16_t)(mr | (val << 5));
517 		PCICFG_WR16(qlt, 0x54, mr);
518 	} else {
519 		cmn_err(CE_WARN, "qlt(%d): dont know how to set "
520 		    "pcie-max-payload-size for this device (%x)",
521 		    instance, did);
522 	}
523 
524 over_max_payload_setting:;
525 
526 	qlt_enable_intr(qlt);
527 
528 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
529 		goto attach_fail_10;
530 
531 	ddi_report_dev(dip);
532 	return (DDI_SUCCESS);
533 
534 attach_fail_10:;
535 	mutex_destroy(&qlt->qlt_ioctl_lock);
536 	cv_destroy(&qlt->mbox_cv);
537 	cv_destroy(&qlt->rp_dereg_cv);
538 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
539 attach_fail_9:;
540 	qlt_destroy_mutex(qlt);
541 	qlt_release_intr(qlt);
542 attach_fail_8:;
543 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
544 attach_fail_7:;
545 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
546 attach_fail_6:;
547 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
548 attach_fail_5:;
549 	ddi_regs_map_free(&qlt->regs_acc_handle);
550 attach_fail_4:;
551 	pci_config_teardown(&qlt->pcicfg_acc_handle);
552 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
553 	(void) qlt_el_trace_desc_dtor(qlt);
554 attach_fail_2:;
555 attach_fail_1:;
556 	ddi_soft_state_free(qlt_state, instance);
557 	return (DDI_FAILURE);
558 }
559 
560 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
561 
562 /* ARGSUSED */
563 static int
564 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
565 {
566 	qlt_state_t *qlt;
567 
568 	int instance;
569 
570 	instance = ddi_get_instance(dip);
571 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
572 	    NULL) {
573 		return (DDI_FAILURE);
574 	}
575 
576 	if (qlt->fw_code01) {
577 		return (DDI_FAILURE);
578 	}
579 
580 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
581 	    qlt->qlt_state_not_acked) {
582 		return (DDI_FAILURE);
583 	}
584 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
585 		return (DDI_FAILURE);
586 	}
587 
588 	qlt_disable_intr(qlt);
589 
590 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
591 	qlt_destroy_mutex(qlt);
592 	qlt_release_intr(qlt);
593 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
594 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
595 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
596 	ddi_regs_map_free(&qlt->regs_acc_handle);
597 	pci_config_teardown(&qlt->pcicfg_acc_handle);
598 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
599 	cv_destroy(&qlt->mbox_cv);
600 	cv_destroy(&qlt->rp_dereg_cv);
601 	(void) qlt_el_trace_desc_dtor(qlt);
602 	ddi_soft_state_free(qlt_state, instance);
603 
604 	return (DDI_SUCCESS);
605 }
606 
607 /*
608  * qlt_quiesce	quiesce a device attached to the system.
609  */
610 static int
611 qlt_quiesce(dev_info_t *dip)
612 {
613 	qlt_state_t	*qlt;
614 	uint32_t	timer;
615 	uint32_t	stat;
616 
617 	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
618 	if (qlt == NULL) {
619 		/* Oh well.... */
620 		return (DDI_SUCCESS);
621 	}
622 
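	/*
	 * Tell the RISC to stop the firmware using the raw mailbox
	 * registers (no interrupt handlers at quiesce time), then poll
	 * the RISC status register below for the handshake to complete.
	 */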
623 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
624 	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
625 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
626 	for (timer = 0; timer < 30000; timer++) {
627 		stat = REG_RD32(qlt, REG_RISC_STATUS);
628 		if (stat & RISC_HOST_INTR_REQUEST) {
629 			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
630 				REG_WR32(qlt, REG_HCCR,
631 				    HCCR_CMD(CLEAR_RISC_PAUSE));
632 				break;
633 			}
634 			REG_WR32(qlt, REG_HCCR,
635 			    HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
636 		}
637 		drv_usecwait(100);
638 	}
639 	/* Reset the chip. */
640 	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
641 	    PCI_X_XFER_CTRL);
642 	drv_usecwait(100);
643 
644 	qlt_disable_intr(qlt);
645 
646 	return (DDI_SUCCESS);
647 }
648 
649 static void
650 qlt_enable_intr(qlt_state_t *qlt)
651 {
652 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
653 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
654 	} else {
655 		int i;
656 		for (i = 0; i < qlt->intr_cnt; i++)
657 			(void) ddi_intr_enable(qlt->htable[i]);
658 	}
659 	qlt->qlt_intr_enabled = 1;
660 }
661 
662 static void
663 qlt_disable_intr(qlt_state_t *qlt)
664 {
665 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
666 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
667 	} else {
668 		int i;
669 		for (i = 0; i < qlt->intr_cnt; i++)
670 			(void) ddi_intr_disable(qlt->htable[i]);
671 	}
672 	qlt->qlt_intr_enabled = 0;
673 }
674 
675 static void
676 qlt_release_intr(qlt_state_t *qlt)
677 {
678 	if (qlt->htable) {
679 		int i;
680 		for (i = 0; i < qlt->intr_cnt; i++) {
681 			(void) ddi_intr_remove_handler(qlt->htable[i]);
682 			(void) ddi_intr_free(qlt->htable[i]);
683 		}
684 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
685 	}
686 	qlt->htable = NULL;
687 	qlt->intr_pri = 0;
688 	qlt->intr_cnt = 0;
689 	qlt->intr_size = 0;
690 	qlt->intr_cap = 0;
691 }
692 
693 
694 static void
695 qlt_init_mutex(qlt_state_t *qlt)
696 {
697 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
698 	    INT2PTR(qlt->intr_pri, void *));
699 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
700 	    INT2PTR(qlt->intr_pri, void *));
701 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
702 	    INT2PTR(qlt->intr_pri, void *));
703 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
704 	    INT2PTR(qlt->intr_pri, void *));
705 }
706 
707 static void
708 qlt_destroy_mutex(qlt_state_t *qlt)
709 {
710 	mutex_destroy(&qlt->req_lock);
711 	mutex_destroy(&qlt->preq_lock);
712 	mutex_destroy(&qlt->mbox_lock);
713 	mutex_destroy(&qlt->intr_lock);
714 }
715 
716 
717 static int
718 qlt_setup_msix(qlt_state_t *qlt)
719 {
720 	int count, avail, actual;
721 	int ret;
722 	int itype = DDI_INTR_TYPE_MSIX;
723 	int i;
724 
725 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
726 	if (ret != DDI_SUCCESS || count == 0) {
727 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
728 		    count);
729 		return (DDI_FAILURE);
730 	}
731 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
732 	if (ret != DDI_SUCCESS || avail == 0) {
733 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
734 		    avail);
735 		return (DDI_FAILURE);
736 	}
737 	if (avail < count) {
738 		stmf_trace(qlt->qlt_port_alias,
739 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
740 	}
741 
742 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
743 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
744 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
745 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
746 	/* we need at least 2 interrupt vectors */
747 	if (ret != DDI_SUCCESS || actual < 2) {
748 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
749 		    actual);
750 		ret = DDI_FAILURE;
751 		goto release_intr;
752 	}
753 	if (actual < count) {
754 		EL(qlt, "requested: %d, received: %d\n", count, actual);
755 	}
756 
757 	qlt->intr_cnt = actual;
758 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
759 	if (ret != DDI_SUCCESS) {
760 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
761 		ret = DDI_FAILURE;
762 		goto release_intr;
763 	}
764 	qlt_init_mutex(qlt);
765 	for (i = 0; i < actual; i++) {
766 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
767 		    qlt, INT2PTR((uint_t)i, void *));
768 		if (ret != DDI_SUCCESS) {
769 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
770 			goto release_mutex;
771 		}
772 	}
773 
774 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
775 	qlt->intr_flags |= QLT_INTR_MSIX;
776 	return (DDI_SUCCESS);
777 
778 release_mutex:
779 	qlt_destroy_mutex(qlt);
780 release_intr:
781 	for (i = 0; i < actual; i++)
782 		(void) ddi_intr_free(qlt->htable[i]);
783 #if 0
784 free_mem:
785 #endif
786 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
787 	qlt->htable = NULL;
788 	qlt_release_intr(qlt);
789 	return (ret);
790 }
791 
792 
793 static int
794 qlt_setup_msi(qlt_state_t *qlt)
795 {
796 	int count, avail, actual;
797 	int itype = DDI_INTR_TYPE_MSI;
798 	int ret;
799 	int i;
800 
801 	/* get the # of interrupts */
802 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
803 	if (ret != DDI_SUCCESS || count == 0) {
804 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
805 		    count);
806 		return (DDI_FAILURE);
807 	}
808 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
809 	if (ret != DDI_SUCCESS || avail == 0) {
810 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
811 		    avail);
812 		return (DDI_FAILURE);
813 	}
814 	if (avail < count) {
815 		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
816 	}
817 	/* MSI requires only 1 interrupt. */
818 	count = 1;
819 
820 	/* allocate interrupt */
821 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
822 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
823 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
824 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
825 	if (ret != DDI_SUCCESS || actual == 0) {
826 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
827 		    actual);
828 		ret = DDI_FAILURE;
829 		goto free_mem;
830 	}
831 	if (actual < count) {
832 		EL(qlt, "requested: %d, received: %d\n", count, actual);
833 	}
834 	qlt->intr_cnt = actual;
835 
836 	/*
837 	 * Get priority for first msi, assume remaining are all the same.
838 	 */
839 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
840 	if (ret != DDI_SUCCESS) {
841 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
842 		ret = DDI_FAILURE;
843 		goto release_intr;
844 	}
845 	qlt_init_mutex(qlt);
846 
847 	/* add handler */
848 	for (i = 0; i < actual; i++) {
849 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
850 		    qlt, INT2PTR((uint_t)i, void *));
851 		if (ret != DDI_SUCCESS) {
852 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
853 			goto release_mutex;
854 		}
855 	}
856 
857 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
858 	qlt->intr_flags |= QLT_INTR_MSI;
859 	return (DDI_SUCCESS);
860 
861 release_mutex:
862 	qlt_destroy_mutex(qlt);
863 release_intr:
864 	for (i = 0; i < actual; i++)
865 		(void) ddi_intr_free(qlt->htable[i]);
866 free_mem:
867 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
868 	qlt->htable = NULL;
869 	qlt_release_intr(qlt);
870 	return (ret);
871 }
872 
873 static int
874 qlt_setup_fixed(qlt_state_t *qlt)
875 {
876 	int count;
877 	int actual;
878 	int ret;
879 	int itype = DDI_INTR_TYPE_FIXED;
880 
881 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
882 	/* Fixed interrupts can only have one interrupt. */
883 	if (ret != DDI_SUCCESS || count != 1) {
884 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
885 		    count);
886 		return (DDI_FAILURE);
887 	}
888 
889 	qlt->intr_size = sizeof (ddi_intr_handle_t);
890 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
891 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
892 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
893 	if (ret != DDI_SUCCESS || actual != 1) {
894 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
895 		    actual);
896 		ret = DDI_FAILURE;
897 		goto free_mem;
898 	}
899 
900 	qlt->intr_cnt = actual;
901 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
902 	if (ret != DDI_SUCCESS) {
903 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
904 		ret = DDI_FAILURE;
905 		goto release_intr;
906 	}
907 	qlt_init_mutex(qlt);
908 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
909 	if (ret != DDI_SUCCESS) {
910 		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
911 		goto release_mutex;
912 	}
913 
914 	qlt->intr_flags |= QLT_INTR_FIXED;
915 	return (DDI_SUCCESS);
916 
917 release_mutex:
918 	qlt_destroy_mutex(qlt);
919 release_intr:
920 	(void) ddi_intr_free(qlt->htable[0]);
921 free_mem:
922 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
923 	qlt->htable = NULL;
924 	qlt_release_intr(qlt);
925 	return (ret);
926 }
927 
928 static int
929 qlt_setup_interrupts(qlt_state_t *qlt)
930 {
931 	int itypes = 0;
932 
933 /*
934  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
935  */
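/*
 * On non-SPARC platforms MSI/MSI-X is attempted only when qlt_enable_msi
 * is set; in all other cases (and on any setup failure) the driver falls
 * back to a FIXED interrupt via qlt_setup_fixed().
 */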
936 #ifndef __sparc
937 	if (qlt_enable_msi != 0) {
938 #endif
939 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
940 		itypes = DDI_INTR_TYPE_FIXED;
941 	}
942 
943 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
944 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
945 			return (DDI_SUCCESS);
946 	}
947 
948 	if (itypes & DDI_INTR_TYPE_MSI) {
949 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
950 			return (DDI_SUCCESS);
951 	}
952 #ifndef __sparc
953 	}
954 #endif
955 	return (qlt_setup_fixed(qlt));
956 }
957 
958 /*
959  * Fill in the HBA attributes.
960  */
961 void
962 qlt_populate_hba_fru_details(struct fct_local_port *port,
963     struct fct_port_attrs *port_attrs)
964 {
965 	caddr_t	bufp;
966 	int len;
967 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
968 
969 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
970 	    "QLogic Corp.");
971 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
972 	    "%s", QLT_NAME);
973 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
974 	    "%s", QLT_VERSION);
975 	port_attrs->serial_number[0] = '\0';
976 	port_attrs->hardware_version[0] = '\0';
977 
978 	(void) snprintf(port_attrs->firmware_version,
979 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
980 	    qlt->fw_minor, qlt->fw_subminor);
981 
982 	/* Get FCode version */
983 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
984 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
985 	    (int *)&len) == DDI_PROP_SUCCESS) {
986 		(void) snprintf(port_attrs->option_rom_version,
987 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
988 		kmem_free(bufp, (uint_t)len);
989 		bufp = NULL;
990 	} else {
991 #ifdef __sparc
992 		(void) snprintf(port_attrs->option_rom_version,
993 		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
994 #else
995 		(void) snprintf(port_attrs->option_rom_version,
996 		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
997 #endif
998 	}
999 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
1000 	    qlt->nvram->subsystem_vendor_id[1] << 8;
1001 
1002 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1003 	    qlt->nvram->max_frame_length[0];
1004 
1005 	port_attrs->supported_cos = 0x10000000;
1006 	port_attrs->supported_speed = PORT_SPEED_1G |
1007 	    PORT_SPEED_2G | PORT_SPEED_4G;
1008 	if (qlt->qlt_25xx_chip)
1009 		port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
1010 		    PORT_SPEED_8G;
1011 	if (qlt->qlt_81xx_chip)
1012 		port_attrs->supported_speed = PORT_SPEED_10G;
1013 
1014 	/* limit string length to nvr model_name length */
1015 	len = (qlt->qlt_81xx_chip) ? 16 : 8;
1016 	(void) snprintf(port_attrs->model,
1017 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1018 	    "%s", qlt->nvram->model_name);
1019 
1020 	(void) snprintf(port_attrs->model_description,
1021 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1022 	    FCHBA_MODEL_DESCRIPTION_LEN),
1023 	    "%s", qlt->nvram->model_name);
1024 }
1025 
1026 /* ARGSUSED */
1027 fct_status_t
1028 qlt_info(uint32_t cmd, fct_local_port_t *port,
1029     void *arg, uint8_t *buf, uint32_t *bufsizep)
1030 {
1031 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
1032 	mbox_cmd_t	*mcp;
1033 	fct_status_t	ret = FCT_SUCCESS;
1034 	uint8_t		*p;
1035 	fct_port_link_status_t	*link_status;
1036 
1037 	switch (cmd) {
1038 	case FC_TGT_PORT_RLS:
1039 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1040 			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1041 			    "fct_port_link_status_t=%xh\n", *bufsizep,
1042 			    sizeof (fct_port_link_status_t));
1043 			ret = FCT_FAILURE;
1044 			break;
1045 		}
1046 		/* send mailbox command to get link status */
1047 		mcp = qlt_alloc_mailbox_command(qlt, 156);
1048 		if (mcp == NULL) {
1049 			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1050 			ret = FCT_ALLOC_FAILURE;
1051 			break;
1052 		}
1053 
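		/*
		 * Mailbox command 6Dh returns the link status block into
		 * the DMA buffer; the first six 32-bit little-endian words
		 * map to the RLS counters decoded below.
		 */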
1054 		/* GET LINK STATUS count */
1055 		mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
1056 		mcp->to_fw[8] = 156/4;
1057 		mcp->to_fw_mask |= BIT_1 | BIT_8;
1058 		mcp->from_fw_mask |= BIT_1 | BIT_2;
1059 
1060 		ret = qlt_mailbox_command(qlt, mcp);
1061 		if (ret != QLT_SUCCESS) {
1062 			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1063 			qlt_free_mailbox_command(qlt, mcp);
1064 			break;
1065 		}
1066 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1067 
1068 		p = mcp->dbuf->db_sglist[0].seg_addr;
1069 		link_status = (fct_port_link_status_t *)buf;
1070 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1071 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1072 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1073 		link_status->PrimitiveSeqProtocolErrorCount =
1074 		    LE_32(*((uint32_t *)(p + 12)));
1075 		link_status->InvalidTransmissionWordCount =
1076 		    LE_32(*((uint32_t *)(p + 16)));
1077 		link_status->InvalidCRCCount =
1078 		    LE_32(*((uint32_t *)(p + 20)));
1079 
1080 		qlt_free_mailbox_command(qlt, mcp);
1081 		break;
1082 	default:
1083 		EL(qlt, "Unknown cmd=%xh\n", cmd);
1084 		ret = FCT_FAILURE;
1085 		break;
1086 	}
1087 	return (ret);
1088 }
1089 
1090 fct_status_t
1091 qlt_port_start(caddr_t arg)
1092 {
1093 	qlt_state_t *qlt = (qlt_state_t *)arg;
1094 	fct_local_port_t *port;
1095 	fct_dbuf_store_t *fds;
1096 	fct_status_t ret;
1097 
1098 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1099 		return (FCT_FAILURE);
1100 	}
1101 	/* Initialize the ddi_dma_handle free pool */
1102 	qlt_dma_handle_pool_init(qlt);
1103 
1104 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1105 	if (port == NULL) {
1106 		goto qlt_pstart_fail_1;
1107 	}
1108 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1109 	if (fds == NULL) {
1110 		goto qlt_pstart_fail_2;
1111 	}
1112 	qlt->qlt_port = port;
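	/*
	 * Wire the dbuf store to this driver's DMA buffer pool and
	 * dbuf setup/teardown routines.
	 */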
1113 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
1114 	fds->fds_free_data_buf = qlt_dmem_free;
1115 	fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
1116 	fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
1117 	fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
1118 	fds->fds_copy_threshold = MMU_PAGESIZE;
1119 	fds->fds_fca_private = (void *)qlt;
1120 	/*
1121 	 * Since we keep everything in the state struct and don't allocate any
1122 	 * port private area, just use that pointer to point to the
1123 	 * state struct.
1124 	 */
1125 	port->port_fca_private = qlt;
1126 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
1127 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1128 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1129 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1130 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1131 	port->port_default_alias = qlt->qlt_port_alias;
1132 	port->port_pp = qlt_pp;
1133 	port->port_fds = fds;
1134 	port->port_max_logins = QLT_MAX_LOGINS;
1135 	port->port_max_xchges = QLT_MAX_XCHGES;
1136 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1137 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1138 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1139 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1140 	port->port_get_link_info = qlt_get_link_info;
1141 	port->port_register_remote_port = qlt_register_remote_port;
1142 	port->port_deregister_remote_port = qlt_deregister_remote_port;
1143 	port->port_send_cmd = qlt_send_cmd;
1144 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1145 	port->port_send_cmd_response = qlt_send_cmd_response;
1146 	port->port_abort_cmd = qlt_abort_cmd;
1147 	port->port_ctl = qlt_ctl;
1148 	port->port_flogi_xchg = qlt_do_flogi;
1149 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1150 	port->port_info = qlt_info;
1151 	port->port_fca_version = FCT_FCA_MODREV_1;
1152 
1153 	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1154 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1155 		goto qlt_pstart_fail_2_5;
1156 	}
1157 
1158 	return (QLT_SUCCESS);
1159 #if 0
1160 qlt_pstart_fail_3:
1161 	(void) fct_deregister_local_port(port);
1162 #endif
1163 qlt_pstart_fail_2_5:
1164 	fct_free(fds);
1165 qlt_pstart_fail_2:
1166 	fct_free(port);
1167 	qlt->qlt_port = NULL;
1168 qlt_pstart_fail_1:
1169 	qlt_dma_handle_pool_fini(qlt);
1170 	qlt_dmem_fini(qlt);
1171 	return (QLT_FAILURE);
1172 }
1173 
1174 fct_status_t
1175 qlt_port_stop(caddr_t arg)
1176 {
1177 	qlt_state_t *qlt = (qlt_state_t *)arg;
1178 	fct_status_t ret;
1179 
1180 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1181 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1182 		return (QLT_FAILURE);
1183 	}
1184 	fct_free(qlt->qlt_port->port_fds);
1185 	fct_free(qlt->qlt_port);
1186 	qlt->qlt_port = NULL;
1187 	qlt_dma_handle_pool_fini(qlt);
1188 	qlt_dmem_fini(qlt);
1189 	return (QLT_SUCCESS);
1190 }
1191 
1192 /*
1193  * Called by the framework to init the HBA.
1194  * Can be called in the middle of I/O (why?), so make sure the chip is
1195  * in a sane state both before and after the initialization.
1196  */
1197 fct_status_t
1198 qlt_port_online(qlt_state_t *qlt)
1199 {
1200 	uint64_t	da;
1201 	int		instance, i;
1202 	fct_status_t	ret;
1203 	uint16_t	rcount;
1204 	caddr_t		icb;
1205 	mbox_cmd_t	*mcp;
1206 	uint8_t		*elsbmp;
1207 
1208 	instance = ddi_get_instance(qlt->dip);
1209 
1210 	/* XXX Make sure a sane state */
1211 
1212 	if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
1213 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1214 		return (ret);
1215 	}
1216 
1217 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1218 
1219 	/* Get resource count */
1220 	REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
1221 	ret = qlt_raw_mailbox_command(qlt);
1222 	rcount = REG_RD16(qlt, REG_MBOX(3));
1223 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1224 	if (ret != QLT_SUCCESS) {
1225 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1226 		return (ret);
1227 	}
1228 
1229 	/* Enable PUREX */
1230 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
1231 	REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
1232 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1233 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1234 	ret = qlt_raw_mailbox_command(qlt);
1235 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1236 	if (ret != QLT_SUCCESS) {
1237 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1238 		cmn_err(CE_NOTE, "Enable PUREX failed");
1239 		return (ret);
1240 	}
1241 
1242 	/* Pass ELS bitmap to fw */
1243 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
1244 	REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
1245 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1246 	bzero(elsbmp, 32);
1247 	da = qlt->queue_mem_cookie.dmac_laddress;
1248 	da += MBOX_DMA_MEM_OFFSET;
1249 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
1250 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
1251 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
1252 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
1253 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1254 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1255 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1256 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1257 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1258 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1259 	SETELSBIT(elsbmp, ELS_OP_SCN);
1260 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1261 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1262 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1263 	SETELSBIT(elsbmp, ELS_OP_RSCN);
1264 	SETELSBIT(elsbmp, ELS_OP_RNID);
1265 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1266 	    DDI_DMA_SYNC_FORDEV);
1267 	ret = qlt_raw_mailbox_command(qlt);
1268 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1269 	if (ret != QLT_SUCCESS) {
1270 		EL(qlt, "qlt_raw_mailbox_command=59h status=llxh\n", ret);
1271 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1272 		    "elsbmp0=%x elabmp1=%x", (long long)ret, elsbmp[0],
1273 		    elsbmp[1]);
1274 		return (ret);
1275 	}
1276 
1277 	/* Init queue pointers */
1278 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1279 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1280 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1281 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1282 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1283 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1284 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1285 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1286 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1287 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1288 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1289 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1290 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1291 
1292 	/*
1293 	 * XXX support for tunables. Also should we cache icb ?
1294 	 */
1295 	if (qlt->qlt_81xx_chip) {
1296 		/* allocate extra 64 bytes for Extended init control block */
1297 		mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1298 	} else {
1299 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1300 	}
1301 	if (mcp == NULL) {
1302 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1303 		return (STMF_ALLOC_FAILURE);
1304 	}
1305 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1306 	if (qlt->qlt_81xx_chip) {
1307 		bzero(icb, 0xC0);
1308 	} else {
1309 		bzero(icb, 0x80);
1310 	}
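	/*
	 * Build the init control block (ICB) in the mailbox DMA buffer.
	 * The fixed offsets written below (WWNs, queue sizes, queue base
	 * addresses, firmware options) are consumed by the firmware when
	 * MBC_INITIALIZE_FIRMWARE is issued further down.
	 */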
1311 	da = qlt->queue_mem_cookie.dmac_laddress;
1312 	DMEM_WR16(qlt, icb, 1);		/* Version */
1313 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1314 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1315 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1316 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1317 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1318 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1319 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1320 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1321 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1322 	if (!qlt->qlt_81xx_chip) {
1323 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1324 	}
1325 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1326 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1327 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1328 	DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1329 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1330 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1331 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1332 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1333 	if (qlt->qlt_81xx_chip) {
1334 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1335 
1336 		DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1337 		DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1338 		DMEM_WR32(qlt, icb+0x70,
1339 		    qlt81nvr->enode_mac[0] |
1340 		    (qlt81nvr->enode_mac[1] << 8) |
1341 		    (qlt81nvr->enode_mac[2] << 16) |
1342 		    (qlt81nvr->enode_mac[3] << 24));
1343 		DMEM_WR16(qlt, icb+0x74,
1344 		    qlt81nvr->enode_mac[4] |
1345 		    (qlt81nvr->enode_mac[5] << 8));
1346 	} else {
1347 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1348 		    BIT_2 | BIT_1 | BIT_0);
1349 		DMEM_WR32(qlt, icb+0x60, BIT_5);
1350 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1351 		    BIT_4);
1352 	}
1353 
1354 	if (qlt->qlt_81xx_chip) {
1355 		qlt_dmem_bctl_t		*bctl;
1356 		uint32_t		index;
1357 		caddr_t			src;
1358 		caddr_t			dst;
1359 		qlt_nvram_81xx_t	*qlt81nvr;
1360 
1361 		dst = icb+0x80;
1362 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1363 		src = (caddr_t)&qlt81nvr->ext_blk;
1364 		index = sizeof (qlt_ext_icb_81xx_t);
1365 
1366 		/* Use defaults for cases where we find nothing in NVR */
1367 		if (*src == 0) {
1368 			EL(qlt, "nvram eicb=null\n");
1369 			cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1370 			    instance);
1371 			qlt81nvr->ext_blk.version[0] = 1;
1372 /*
1373  * not yet, for !FIP firmware at least
1374  *
1375  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1376  */
1377 #ifdef _LITTLE_ENDIAN
1378 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1379 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1380 #else
1381 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1382 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1383 #endif
1384 		}
1385 
1386 		while (index--) {
1387 			*dst++ = *src++;
1388 		}
1389 
1390 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1391 		da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1392 
1393 		mcp->to_fw[11] = LSW(LSD(da));
1394 		mcp->to_fw[10] = MSW(LSD(da));
1395 		mcp->to_fw[13] = LSW(MSD(da));
1396 		mcp->to_fw[12] = MSW(MSD(da));
1397 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1398 		    0xffff);
1399 
1400 		/* eicb enable */
1401 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1402 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1403 		    BIT_1;
1404 	}
1405 
1406 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1407 	mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
1408 
1409 	/*
1410 	 * This is the 1st command after adapter initialize which will
1411 	 * use interrupts and regular mailbox interface.
1412 	 */
1413 	qlt->mbox_io_state = MBOX_STATE_READY;
1414 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1415 	/* Issue mailbox to firmware */
1416 	ret = qlt_mailbox_command(qlt, mcp);
1417 	if (ret != QLT_SUCCESS) {
1418 		EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1419 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1420 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1421 	}
1422 
1423 	mcp->to_fw_mask = BIT_0;
1424 	mcp->from_fw_mask = BIT_0 | BIT_1;
1425 	mcp->to_fw[0] = 0x28;
1426 	ret = qlt_mailbox_command(qlt, mcp);
1427 	if (ret != QLT_SUCCESS) {
1428 		EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1429 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1430 		    (long long)ret);
1431 	}
1432 
1433 	/*
1434 	 * Report FW versions for 81xx - MPI rev is useful
1435 	 */
1436 	if (qlt->qlt_81xx_chip) {
1437 		mcp->to_fw_mask = BIT_0;
1438 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1439 		    BIT_0;
1440 		mcp->to_fw[0] = 0x8;
1441 		ret = qlt_mailbox_command(qlt, mcp);
1442 		if (ret != QLT_SUCCESS) {
1443 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
1444 		} else {
1445 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1446 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1447 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1448 			    mcp->from_fw[11] & 0xff);
1449 		}
1450 	}
1451 
1452 	qlt_free_mailbox_command(qlt, mcp);
1453 
1454 	for (i = 0; i < 5; i++) {
1455 		qlt->qlt_bufref[i] = 0;
1456 	}
1457 	qlt->qlt_bumpbucket = 0;
1458 	qlt->qlt_pmintry = 0;
1459 	qlt->qlt_pmin_ok = 0;
1460 
1461 	if (ret != QLT_SUCCESS)
1462 		return (ret);
1463 	return (FCT_SUCCESS);
1464 }
1465 
1466 fct_status_t
1467 qlt_port_offline(qlt_state_t *qlt)
1468 {
1469 	int		retries;
1470 
1471 	mutex_enter(&qlt->mbox_lock);
1472 
1473 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1474 		mutex_exit(&qlt->mbox_lock);
1475 		goto poff_mbox_done;
1476 	}
1477 
1478 	/* Wait to grab the mailboxes */
1479 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1480 	    retries++) {
1481 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1482 		if ((retries > 5) ||
1483 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1484 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1485 			mutex_exit(&qlt->mbox_lock);
1486 			goto poff_mbox_done;
1487 		}
1488 	}
1489 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1490 	mutex_exit(&qlt->mbox_lock);
1491 poff_mbox_done:;
1492 	qlt->intr_sneak_counter = 10;
1493 	mutex_enter(&qlt->intr_lock);
1494 	(void) qlt_reset_chip(qlt);
1495 	drv_usecwait(20);
1496 	qlt->intr_sneak_counter = 0;
1497 	mutex_exit(&qlt->intr_lock);
1498 
1499 	return (FCT_SUCCESS);
1500 }
1501 
1502 static fct_status_t
1503 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1504 {
1505 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1506 	mbox_cmd_t *mcp;
1507 	fct_status_t fc_ret;
1508 	fct_status_t ret;
1509 	clock_t et;
1510 
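	/* Allow up to 5 seconds for the firmware to report the port ID. */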
1511 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1512 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1513 link_info_retry:
1514 	mcp->to_fw[0] = MBC_GET_ID;
1515 	mcp->to_fw[9] = 0;
1516 	mcp->to_fw_mask |= BIT_0 | BIT_9;
1517 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1518 	/* Issue mailbox to firmware */
1519 	ret = qlt_mailbox_command(qlt, mcp);
1520 	if (ret != QLT_SUCCESS) {
1521 		EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1522 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1523 			/* Firmware is not ready */
1524 			if (ddi_get_lbolt() < et) {
1525 				delay(drv_usectohz(50000));
1526 				goto link_info_retry;
1527 			}
1528 		}
1529 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1530 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1531 		fc_ret = FCT_FAILURE;
1532 	} else {
1533 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1534 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1535 
1536 		li->port_speed = qlt->link_speed;
1537 		switch (mcp->from_fw[6]) {
1538 		case 1:
1539 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1540 			li->port_fca_flogi_done = 1;
1541 			break;
1542 		case 0:
1543 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1544 			li->port_no_fct_flogi = 1;
1545 			break;
1546 		case 3:
1547 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1548 			li->port_fca_flogi_done = 1;
1549 			break;
1550 		case 2: /*FALLTHROUGH*/
1551 		case 4:
1552 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1553 			li->port_fca_flogi_done = 1;
1554 			break;
1555 		default:
1556 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1557 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1558 		}
1559 		qlt->cur_topology = li->port_topology;
1560 		fc_ret = FCT_SUCCESS;
1561 	}
1562 	qlt_free_mailbox_command(qlt, mcp);
1563 
1564 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1565 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1566 		mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
1567 		mcp->to_fw[1] = 0x7FE;
1568 		mcp->to_fw[9] = 0;
1569 		mcp->to_fw[10] = 0;
1570 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1571 		fc_ret = qlt_mailbox_command(qlt, mcp);
1572 		if (fc_ret != QLT_SUCCESS) {
1573 			EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1574 			    fc_ret);
1575 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1576 			    "database for F_port failed, ret = %llx", fc_ret);
1577 		} else {
1578 			uint8_t *p;
1579 
1580 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1581 			p = mcp->dbuf->db_sglist[0].seg_addr;
1582 			bcopy(p + 0x18, li->port_rpwwn, 8);
1583 			bcopy(p + 0x20, li->port_rnwwn, 8);
1584 		}
1585 		qlt_free_mailbox_command(qlt, mcp);
1586 	}
1587 	return (fc_ret);
1588 }
1589 
1590 static int
1591 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1592 {
1593 	int		instance;
1594 	qlt_state_t	*qlt;
1595 
1596 	if (otype != OTYP_CHR) {
1597 		return (EINVAL);
1598 	}
1599 
1600 	/*
1601 	 * Since this is for debugging only, restrict ioctl access to root for now.
1602 	 */
1603 	if (drv_priv(credp)) {
1604 		return (EPERM);
1605 	}
1606 
1607 	instance = (int)getminor(*devp);
1608 	qlt = ddi_get_soft_state(qlt_state, instance);
1609 	if (qlt == NULL) {
1610 		return (ENXIO);
1611 	}
1612 
1613 	mutex_enter(&qlt->qlt_ioctl_lock);
1614 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1615 		/*
1616 		 * It is already open for exclusive access.
1617 		 * So shut the door on this caller.
1618 		 */
1619 		mutex_exit(&qlt->qlt_ioctl_lock);
1620 		return (EBUSY);
1621 	}
1622 
1623 	if (flag & FEXCL) {
1624 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1625 			/*
1626 			 * Exclusive operation not possible
1627 			 * as it is already opened
1628 			 */
1629 			mutex_exit(&qlt->qlt_ioctl_lock);
1630 			return (EBUSY);
1631 		}
1632 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1633 	}
1634 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1635 	mutex_exit(&qlt->qlt_ioctl_lock);
1636 
1637 	return (0);
1638 }
1639 
1640 /* ARGSUSED */
1641 static int
1642 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1643 {
1644 	int		instance;
1645 	qlt_state_t	*qlt;
1646 
1647 	if (otype != OTYP_CHR) {
1648 		return (EINVAL);
1649 	}
1650 
1651 	instance = (int)getminor(dev);
1652 	qlt = ddi_get_soft_state(qlt_state, instance);
1653 	if (qlt == NULL) {
1654 		return (ENXIO);
1655 	}
1656 
1657 	mutex_enter(&qlt->qlt_ioctl_lock);
1658 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1659 		mutex_exit(&qlt->qlt_ioctl_lock);
1660 		return (ENODEV);
1661 	}
1662 
1663 	/*
1664 	 * There is one hole here: there could be several concurrent shared
1665 	 * open sessions, but we never check for this case.
1666 	 * It will not hurt too much, so disregard it for now.
1667 	 */
1668 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1669 	mutex_exit(&qlt->qlt_ioctl_lock);
1670 
1671 	return (0);
1672 }
1673 
1674 /*
1675  * All of these ioctls are unstable interfaces which are meant to be used
1676  * in a controlled lab environment. No formal testing will be (or needs to
1677  * be) done for these ioctls. Especially note that running with an additional
1678  * uploaded firmware is not supported and is provided here for test
1679  * purposes only.
1680  */
1681 /* ARGSUSED */
1682 static int
1683 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1684     cred_t *credp, int *rval)
1685 {
1686 	qlt_state_t	*qlt;
1687 	int		ret = 0;
1688 #ifdef _LITTLE_ENDIAN
1689 	int		i;
1690 #endif
1691 	stmf_iocdata_t	*iocd;
1692 	void		*ibuf = NULL;
1693 	void		*obuf = NULL;
1694 	uint32_t	*intp;
1695 	qlt_fw_info_t	*fwi;
1696 	mbox_cmd_t	*mcp;
1697 	fct_status_t	st;
1698 	char		info[QLT_INFO_LEN];
1699 	fct_status_t	ret2;
1700 
1701 	if (drv_priv(credp) != 0)
1702 		return (EPERM);
1703 
1704 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1705 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1706 	if (ret)
1707 		return (ret);
1708 	iocd->stmf_error = 0;
1709 
1710 	switch (cmd) {
1711 	case QLT_IOCTL_FETCH_FWDUMP:
1712 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1713 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1714 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1715 			ret = EINVAL;
1716 			break;
1717 		}
1718 		mutex_enter(&qlt->qlt_ioctl_lock);
1719 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1720 			mutex_exit(&qlt->qlt_ioctl_lock);
1721 			ret = ENODATA;
1722 			EL(qlt, "no fwdump\n");
1723 			iocd->stmf_error = QLTIO_NO_DUMP;
1724 			break;
1725 		}
1726 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1727 			mutex_exit(&qlt->qlt_ioctl_lock);
1728 			ret = EBUSY;
1729 			EL(qlt, "fwdump inprogress\n");
1730 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1731 			break;
1732 		}
1733 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1734 			mutex_exit(&qlt->qlt_ioctl_lock);
1735 			ret = EEXIST;
1736 			EL(qlt, "fwdump already fetched\n");
1737 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1738 			break;
1739 		}
1740 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1741 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1742 		mutex_exit(&qlt->qlt_ioctl_lock);
1743 
1744 		break;
1745 
1746 	case QLT_IOCTL_TRIGGER_FWDUMP:
1747 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1748 			ret = EACCES;
1749 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1750 			break;
1751 		}
1752 		(void) snprintf(info, sizeof (info), "qlt_ioctl: qlt-%p, "
1753 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1754 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1755 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1756 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1757 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1758 			    "%llxh\n", ret2);
1759 			ret = EIO;
1760 		}
1761 		break;
1762 	case QLT_IOCTL_UPLOAD_FW:
1763 		if ((iocd->stmf_ibuf_size < 1024) ||
1764 		    (iocd->stmf_ibuf_size & 3)) {
1765 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1766 			    iocd->stmf_ibuf_size);
1767 			ret = EINVAL;
1768 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1769 			break;
1770 		}
1771 		intp = (uint32_t *)ibuf;
1772 #ifdef _LITTLE_ENDIAN
1773 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1774 			intp[i] = BSWAP_32(intp[i]);
1775 		}
1776 #endif
1777 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1778 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1779 		    iocd->stmf_ibuf_size)) {
1780 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1781 			    iocd->stmf_ibuf_size);
1782 			ret = EINVAL;
1783 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1784 			break;
1785 		}
1786 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1787 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1788 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1789 		    ((intp[8] & 3) == 0))) {
1790 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1791 			ret = EACCES;
1792 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1793 			break;
1794 		}
1795 
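		/*
		 * Layout of the uploaded image, as implied by the size checks
		 * above and the copy below (all values are 32-bit words):
		 *   intp[2]            - load address of segment 1
		 *   intp[3]            - word count of segment 1
		 *   intp[intp[3] + 2]  - load address of segment 2
		 *   intp[intp[3] + 3]  - word count of segment 2
		 * The two word counts must add up to the total ibuf size.
		 */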
1796 		/* Everything looks OK, let's copy this firmware */
1797 		if (qlt->fw_code01) {
1798 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1799 			    qlt->fw_length02) << 2);
1800 			qlt->fw_code01 = NULL;
1801 		} else {
1802 			atomic_inc_32(&qlt_loaded_counter);
1803 		}
1804 		qlt->fw_length01 = intp[3];
1805 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1806 		    KM_SLEEP);
1807 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1808 		qlt->fw_addr01 = intp[2];
1809 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1810 		qlt->fw_addr02 = qlt->fw_code02[2];
1811 		qlt->fw_length02 = qlt->fw_code02[3];
1812 		break;
1813 
1814 	case QLT_IOCTL_CLEAR_FW:
1815 		if (qlt->fw_code01) {
1816 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1817 			    qlt->fw_length02) << 2);
1818 			qlt->fw_code01 = NULL;
1819 			atomic_dec_32(&qlt_loaded_counter);
1820 		}
1821 		break;
1822 
1823 	case QLT_IOCTL_GET_FW_INFO:
1824 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1825 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1826 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1827 			ret = EINVAL;
1828 			break;
1829 		}
1830 		fwi = (qlt_fw_info_t *)obuf;
1831 		if (qlt->qlt_stay_offline) {
1832 			fwi->fwi_stay_offline = 1;
1833 		}
1834 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1835 			fwi->fwi_port_active = 1;
1836 		}
1837 		fwi->fwi_active_major = qlt->fw_major;
1838 		fwi->fwi_active_minor = qlt->fw_minor;
1839 		fwi->fwi_active_subminor = qlt->fw_subminor;
1840 		fwi->fwi_active_attr = qlt->fw_attr;
1841 		if (qlt->fw_code01) {
1842 			fwi->fwi_fw_uploaded = 1;
1843 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1844 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1845 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1846 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1847 		}
1848 		if (qlt->qlt_81xx_chip) {
1849 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1850 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1851 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1852 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1853 		} else if (qlt->qlt_25xx_chip) {
1854 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1855 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1856 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1857 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1858 		} else {
1859 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1860 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1861 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1862 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1863 		}
1864 		break;
1865 
1866 	case QLT_IOCTL_STAY_OFFLINE:
1867 		if (!iocd->stmf_ibuf_size) {
1868 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1869 			    iocd->stmf_ibuf_size);
1870 			ret = EINVAL;
1871 			break;
1872 		}
1873 		if (*((char *)ibuf)) {
1874 			qlt->qlt_stay_offline = 1;
1875 		} else {
1876 			qlt->qlt_stay_offline = 0;
1877 		}
1878 		break;
1879 
1880 	case QLT_IOCTL_MBOX:
1881 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1882 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1883 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1884 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1885 			ret = EINVAL;
1886 			break;
1887 		}
1888 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1889 		if (mcp == NULL) {
1890 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1891 			ret = ENOMEM;
1892 			break;
1893 		}
1894 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1895 		st = qlt_mailbox_command(qlt, mcp);
1896 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1897 		qlt_free_mailbox_command(qlt, mcp);
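		/*
		 * A QLT_MBOX_FAILED status carries the firmware's mailbox-0
		 * completion code in its low 16 bits. The raw mailboxes were
		 * already copied back to obuf above, so treat this as success
		 * at the ioctl level and let the caller inspect the returned
		 * mailbox values.
		 */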
1898 		if (st != QLT_SUCCESS) {
1899 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1900 				st = QLT_SUCCESS;
1901 		}
1902 		if (st != QLT_SUCCESS) {
1903 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1904 			ret = EIO;
1905 			switch (st) {
1906 			case QLT_MBOX_NOT_INITIALIZED:
1907 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1908 				break;
1909 			case QLT_MBOX_BUSY:
1910 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1911 				break;
1912 			case QLT_MBOX_TIMEOUT:
1913 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1914 				break;
1915 			case QLT_MBOX_ABORTED:
1916 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1917 				break;
1918 			}
1919 		}
1920 		break;
1921 
1922 	case QLT_IOCTL_ELOG:
1923 		qlt_dump_el_trace_buffer(qlt);
1924 		break;
1925 
1926 	default:
1927 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
1928 		ret = ENOTTY;
1929 	}
1930 
1931 	if (ret == 0) {
1932 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1933 	} else if (iocd->stmf_error) {
1934 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1935 	}
1936 	if (obuf) {
1937 		kmem_free(obuf, iocd->stmf_obuf_size);
1938 		obuf = NULL;
1939 	}
1940 	if (ibuf) {
1941 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1942 		ibuf = NULL;
1943 	}
1944 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1945 	return (ret);
1946 }
1947 
1948 static fct_status_t
1949 qlt_force_lip(qlt_state_t *qlt)
1950 {
1951 	mbox_cmd_t	*mcp;
1952 	fct_status_t	 rval;
1953 
1954 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1955 	mcp->to_fw[0] = 0x0072;
1956 	mcp->to_fw[1] = BIT_4;
1957 	mcp->to_fw[3] = 1;
1958 	mcp->to_fw_mask |= BIT_1 | BIT_3;
1959 	rval = qlt_mailbox_command(qlt, mcp);
1960 	if (rval != FCT_SUCCESS) {
1961 		EL(qlt, "qlt force lip MB failed: rval=%x", rval);
1962 	} else {
1963 		if (mcp->from_fw[0] != 0x4000) {
1964 			QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1965 			    mcp->from_fw[0]);
1966 			rval = FCT_FAILURE;
1967 		}
1968 	}
1969 	qlt_free_mailbox_command(qlt, mcp);
1970 	return (rval);
1971 }
1972 
1973 static void
1974 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1975 {
1976 	stmf_change_status_t		st;
1977 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1978 	qlt_state_t			*qlt;
1979 	fct_status_t			ret;
1980 
1981 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1982 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1983 	    (cmd == FCT_CMD_FORCE_LIP) ||
1984 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1985 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1986 
1987 	qlt = (qlt_state_t *)port->port_fca_private;
1988 	st.st_completion_status = FCT_SUCCESS;
1989 	st.st_additional_info = NULL;
1990 
1991 	switch (cmd) {
1992 	case FCT_CMD_PORT_ONLINE:
1993 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1994 			st.st_completion_status = STMF_ALREADY;
1995 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1996 			st.st_completion_status = FCT_FAILURE;
1997 		if (st.st_completion_status == FCT_SUCCESS) {
1998 			qlt->qlt_state = FCT_STATE_ONLINING;
1999 			qlt->qlt_state_not_acked = 1;
2000 			st.st_completion_status = qlt_port_online(qlt);
2001 			if (st.st_completion_status != STMF_SUCCESS) {
2002 				EL(qlt, "PORT_ONLINE status=%xh\n",
2003 				    st.st_completion_status);
2004 				qlt->qlt_state = FCT_STATE_OFFLINE;
2005 				qlt->qlt_state_not_acked = 0;
2006 			} else {
2007 				qlt->qlt_state = FCT_STATE_ONLINE;
2008 			}
2009 		}
2010 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
2011 		qlt->qlt_change_state_flags = 0;
2012 		break;
2013 
2014 	case FCT_CMD_PORT_OFFLINE:
2015 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
2016 			st.st_completion_status = STMF_ALREADY;
2017 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
2018 			st.st_completion_status = FCT_FAILURE;
2019 		}
2020 		if (st.st_completion_status == FCT_SUCCESS) {
2021 			qlt->qlt_state = FCT_STATE_OFFLINING;
2022 			qlt->qlt_state_not_acked = 1;
2023 
2024 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
2025 				(void) qlt_firmware_dump(port, ssci);
2026 			}
2027 			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
2028 			st.st_completion_status = qlt_port_offline(qlt);
2029 			if (st.st_completion_status != STMF_SUCCESS) {
2030 				EL(qlt, "PORT_OFFLINE status=%xh\n",
2031 				    st.st_completion_status);
2032 				qlt->qlt_state = FCT_STATE_ONLINE;
2033 				qlt->qlt_state_not_acked = 0;
2034 			} else {
2035 				qlt->qlt_state = FCT_STATE_OFFLINE;
2036 			}
2037 		}
2038 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
2039 		break;
2040 
2041 	case FCT_ACK_PORT_ONLINE_COMPLETE:
2042 		qlt->qlt_state_not_acked = 0;
2043 		break;
2044 
2045 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
2046 		qlt->qlt_state_not_acked = 0;
2047 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
2048 		    (qlt->qlt_stay_offline == 0)) {
2049 			if ((ret = fct_port_initialize(port,
2050 			    qlt->qlt_change_state_flags,
2051 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
2052 			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
2053 				EL(qlt, "fct_port_initialize status=%llxh\n",
2054 				    ret);
2055 				cmn_err(CE_WARN, "qlt_ctl: "
2056 				    "fct_port_initialize failed, please use "
2057 				    "stmfstate to start the port-%s manually",
2058 				    qlt->qlt_port_alias);
2059 			}
2060 		}
2061 		break;
2062 
2063 	case FCT_CMD_FORCE_LIP:
2064 		if (qlt->qlt_81xx_chip) {
2065 			EL(qlt, "force lip is an unsupported command "
2066 			    "for this adapter type\n");
2067 		} else {
2068 			*((fct_status_t *)arg) = qlt_force_lip(qlt);
2069 			EL(qlt, "forcelip done\n");
2070 		}
2071 		break;
2072 
2073 	default:
2074 		EL(qlt, "unsupported cmd - 0x%02X", cmd);
2075 		break;
2076 	}
2077 }
2078 
2079 /* ARGSUSED */
2080 static fct_status_t
2081 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2082 {
2083 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
2084 
2085 	EL(qlt, "FLOGI requested not supported\n");
2086 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2087 	return (FCT_FAILURE);
2088 }
2089 
2090 /*
2091  * Return a pointer to n entries in the request queue. Assumes that the
2092  * request queue lock is held. Does a very short busy wait if fewer than
2093  * the requested number of entries (or none) are available. Returns NULL
2094  * if it still cannot fulfill the request.
2095  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2096  */
2097 caddr_t
2098 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
2099 {
2100 	int try = 0;
2101 
2102 	while (qlt->req_available < n) {
2103 		uint32_t val1, val2, val3;
2104 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2105 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2106 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2107 		if ((val1 != val2) || (val2 != val3))
2108 			continue;
2109 
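		/*
		 * Ring arithmetic: one slot is always kept free so that a
		 * full queue can be distinguished from an empty one.
		 */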
2110 		qlt->req_ndx_from_fw = val1;
2111 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2112 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2113 		    (REQUEST_QUEUE_ENTRIES - 1));
2114 		if (qlt->req_available < n) {
2115 			if (try < 2) {
2116 				drv_usecwait(100);
2117 				try++;
2118 				continue;
2119 			} else {
2120 				stmf_trace(qlt->qlt_port_alias,
2121 				    "Req Q is full");
2122 				return (NULL);
2123 			}
2124 		}
2125 		break;
2126 	}
2127 	/* We don't change anything until the entries are submitted */
2128 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2129 }
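
/*
 * Typical pattern for using qlt_get_req_entries()/qlt_submit_req_entries()
 * (a sketch; see e.g. the IDC ACK handling in qlt_isr() below):
 *
 *	mutex_enter(&qlt->req_lock);
 *	req = qlt_get_req_entries(qlt, 1);
 *	if (req != NULL) {
 *		bzero(req, IOCB_SIZE);
 *		(fill in the IOCB)
 *		qlt_submit_req_entries(qlt, 1);
 *	}
 *	mutex_exit(&qlt->req_lock);
 */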
2130 
2131 /*
2132  * Updates the req in pointer to the fw. Assumes that the req lock is held.
2133  */
2134 void
2135 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2136 {
2137 	ASSERT(n >= 1);
2138 	qlt->req_ndx_to_fw += n;
2139 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2140 	qlt->req_available -= n;
2141 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2142 }
2143 
2144 
2145 /*
2146  * Return a pointer to n entries in the priority request queue. Assumes that
2147  * the priority request queue lock is held. Does a very short busy wait
2148  * if fewer than the requested number of entries (or none) are available.
2149  * Returns NULL if it still cannot fulfill the request.
2150  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2151  */
2152 caddr_t
2153 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2154 {
2155 	int try = 0;
2156 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2157 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2158 	    (PRIORITY_QUEUE_ENTRIES - 1));
2159 
2160 	while (req_available < n) {
2161 		uint32_t val1, val2, val3;
2162 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2163 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2164 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2165 		if ((val1 != val2) || (val2 != val3))
2166 			continue;
2167 
2168 		qlt->preq_ndx_from_fw = val1;
2169 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2170 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2171 		    (PRIORITY_QUEUE_ENTRIES - 1));
2172 		if (req_available < n) {
2173 			if (try < 2) {
2174 				drv_usecwait(100);
2175 				try++;
2176 				continue;
2177 			} else {
2178 				return (NULL);
2179 			}
2180 		}
2181 		break;
2182 	}
2183 	/* We don't change anything until the entries are submitted */
2184 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2185 }
2186 
2187 /*
2188  * Updates the preq in pointer to the fw. Assumes that the preq lock is held.
2189  */
2190 void
2191 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2192 {
2193 	ASSERT(n >= 1);
2194 	qlt->preq_ndx_to_fw += n;
2195 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2196 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2197 }
2198 
2199 /*
2200  * - Should not be called from Interrupt.
2201  * - A very hardware specific function. Does not touch driver state.
2202  * - Assumes that interrupts are disabled or not there.
2203  * - Expects that the caller makes sure that all activity has stopped
2204  *   and it's OK now to go ahead and reset the chip. Also the caller
2205  *   takes care of post reset damage control.
2206  * - Called by initialize_adapter() and dump_fw() (for reset only).
2207  * - During attach() nothing much is happening and during initialize_adapter()
2208  *   the function (caller) does all the housekeeping so that this function
2209  *   can execute in peace.
2210  * - Returns 0 on success.
2211  */
2212 static fct_status_t
2213 qlt_reset_chip(qlt_state_t *qlt)
2214 {
2215 	int cntr;
2216 
2217 	EL(qlt, "initiated\n");
2218 
2219 	/* XXX: Switch off LEDs */
2220 
2221 	/* Disable Interrupts */
2222 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2223 	(void) REG_RD32(qlt, REG_INTR_CTRL);
2224 	/* Stop DMA */
2225 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2226 
2227 	/* Wait for DMA to be stopped */
2228 	cntr = 0;
2229 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2230 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2231 		cntr++;
2232 		/* 3 sec should be more than enough */
2233 		if (cntr == 300)
2234 			return (QLT_DMA_STUCK);
2235 	}
2236 
2237 	/* Reset the Chip */
2238 	REG_WR32(qlt, REG_CTRL_STATUS,
2239 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2240 
2241 	qlt->qlt_link_up = 0;
2242 
2243 	drv_usecwait(100);
2244 
2245 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2246 	cntr = 0;
2247 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2248 		delay(drv_usectohz(10000));
2249 		cntr++;
2250 		/* 3 sec should be more than enough */
2251 		if (cntr == 300)
2252 			return (QLT_ROM_STUCK);
2253 	}
2254 	/* Disable Interrupts (Probably not needed) */
2255 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2256 
2257 	return (QLT_SUCCESS);
2258 }
2259 /*
2260  * - Should not be called from Interrupt.
2261  * - A very hardware specific function. Does not touch driver state.
2262  * - Assumes that interrupts are disabled or not there.
2263  * - Expects that the caller makes sure that all activity has stopped
2264  *   and it's OK now to go ahead and reset the chip. Also the caller
2265  *   takes care of post reset damage control.
2266  * - Called by initialize_adapter() and dump_fw() (for reset only).
2267  * - During attach() nothing much is happening and during initialize_adapter()
2268  *   the function (caller) does all the housekeeping so that this function
2269  *   can execute in peace.
2270  * - Returns 0 on success.
2271  */
2272 static fct_status_t
2273 qlt_download_fw(qlt_state_t *qlt)
2274 {
2275 	uint32_t start_addr;
2276 	fct_status_t ret;
2277 
2278 	EL(qlt, "initiated\n");
2279 
2280 	(void) qlt_reset_chip(qlt);
2281 
2282 	if (qlt->qlt_81xx_chip) {
2283 		qlt_mps_reset(qlt);
2284 	}
2285 
2286 	/* Load the two segments */
2287 	if (qlt->fw_code01 != NULL) {
2288 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2289 		    qlt->fw_addr01);
2290 		if (ret == QLT_SUCCESS) {
2291 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2292 			    qlt->fw_length02, qlt->fw_addr02);
2293 		}
2294 		start_addr = qlt->fw_addr01;
2295 	} else if (qlt->qlt_81xx_chip) {
2296 		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2297 		    fw8100_addr01);
2298 		if (ret == QLT_SUCCESS) {
2299 			ret = qlt_load_risc_ram(qlt, fw8100_code02,
2300 			    fw8100_length02, fw8100_addr02);
2301 		}
2302 		start_addr = fw8100_addr01;
2303 	} else if (qlt->qlt_25xx_chip) {
2304 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2305 		    fw2500_addr01);
2306 		if (ret == QLT_SUCCESS) {
2307 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
2308 			    fw2500_length02, fw2500_addr02);
2309 		}
2310 		start_addr = fw2500_addr01;
2311 	} else {
2312 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2313 		    fw2400_addr01);
2314 		if (ret == QLT_SUCCESS) {
2315 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
2316 			    fw2400_length02, fw2400_addr02);
2317 		}
2318 		start_addr = fw2400_addr01;
2319 	}
2320 	if (ret != QLT_SUCCESS) {
2321 		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2322 		return (ret);
2323 	}
2324 
2325 	/* Verify Checksum */
2326 	REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
2327 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2328 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2329 	ret = qlt_raw_mailbox_command(qlt);
2330 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2331 	if (ret != QLT_SUCCESS) {
2332 		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2333 		return (ret);
2334 	}
2335 
2336 	/* Execute firmware */
2337 	REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
2338 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2339 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2340 	REG_WR16(qlt, REG_MBOX(3), 0);
2341 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
2342 	ret = qlt_raw_mailbox_command(qlt);
2343 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2344 	if (ret != QLT_SUCCESS) {
2345 		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2346 		return (ret);
2347 	}
2348 
2349 	/* Get revisions (About Firmware) */
2350 	REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
2351 	ret = qlt_raw_mailbox_command(qlt);
2352 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2353 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2354 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2355 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2356 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2357 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2358 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2359 	if (ret != QLT_SUCCESS) {
2360 		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2361 		return (ret);
2362 	}
2363 
2364 	return (QLT_SUCCESS);
2365 }
2366 
2367 /*
2368  * Used only from qlt_download_fw().
2369  */
2370 static fct_status_t
2371 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2372     uint32_t word_count, uint32_t risc_addr)
2373 {
2374 	uint32_t words_sent = 0;
2375 	uint32_t words_being_sent;
2376 	uint32_t *cur_host_addr;
2377 	uint32_t cur_risc_addr;
2378 	uint64_t da;
2379 	fct_status_t ret;
2380 
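	/*
	 * Stream the image into RISC RAM in chunks of at most
	 * TOTAL_DMA_MEM_SIZE / 4 words, using the queue memory as a bounce
	 * buffer and one MBC_LOAD_RAM_EXTENDED mailbox command per chunk.
	 */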
2381 	while (words_sent < word_count) {
2382 		cur_host_addr = &(host_addr[words_sent]);
2383 		cur_risc_addr = risc_addr + (words_sent << 2);
2384 		words_being_sent = min(word_count - words_sent,
2385 		    TOTAL_DMA_MEM_SIZE >> 2);
2386 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2387 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2388 		    DDI_DEV_AUTOINCR);
2389 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2390 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2391 		da = qlt->queue_mem_cookie.dmac_laddress;
2392 		REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
2393 		REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
2394 		REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
2395 		REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2396 		REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2397 		REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2398 		REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2399 		REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
2400 		REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
2401 		ret = qlt_raw_mailbox_command(qlt);
2402 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2403 		if (ret != QLT_SUCCESS) {
2404 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2405 			    ret);
2406 			return (ret);
2407 		}
2408 		words_sent += words_being_sent;
2409 	}
2410 	return (QLT_SUCCESS);
2411 }
2412 
2413 /*
2414  * Not used during normal operation. Only during driver init.
2415  * Assumes that interrupts are disabled and mailboxes are loaded.
2416  * Just triggers the mailbox command and waits for the completion.
2417  * Also expects that there is nothing else going on and we will only
2418  * get back a mailbox completion from firmware.
2419  * ---DOES NOT CLEAR INTERRUPT---
2420  * Used only from the code path originating from
2421  * qlt_reset_chip_and_download_fw()
2422  */
2423 static fct_status_t
2424 qlt_raw_mailbox_command(qlt_state_t *qlt)
2425 {
2426 	int cntr = 0;
2427 	uint32_t status;
2428 
2429 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
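	/*
	 * Poll for the RISC-to-PCI interrupt request; 100 iterations of
	 * 10ms each gives roughly a one second budget before giving up.
	 */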
2430 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
2431 		cntr++;
2432 		if (cntr == 100) {
2433 			return (QLT_MAILBOX_STUCK);
2434 		}
2435 		delay(drv_usectohz(10000));
2436 	}
2437 	status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);
2438 
2439 	if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
2440 	    (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
2441 	    (status == MBX_CMD_SUCCESSFUL) ||
2442 	    (status == MBX_CMD_NOT_SUCCESSFUL)) {
2443 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2444 		if (mbox0 == QLT_MBX_CMD_SUCCESS) {
2445 			return (QLT_SUCCESS);
2446 		} else {
2447 			return (QLT_MBOX_FAILED | mbox0);
2448 		}
2449 	}
2450 	/* This is unexpected, dump a message */
2451 	cmn_err(CE_WARN, "qlt(%d): Unexpected intr status %llx",
2452 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2453 	return (QLT_UNEXPECTED_RESPONSE);
2454 }
2455 
2456 static mbox_cmd_t *
2457 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2458 {
2459 	mbox_cmd_t *mcp;
2460 
2461 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2462 	if (dma_size) {
2463 		qlt_dmem_bctl_t *bctl;
2464 		uint64_t da;
2465 
2466 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2467 		if (mcp->dbuf == NULL) {
2468 			kmem_free(mcp, sizeof (*mcp));
2469 			return (NULL);
2470 		}
2471 		mcp->dbuf->db_data_size = dma_size;
2472 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2473 
2474 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2475 		da = bctl->bctl_dev_addr;
2476 		/* This is the most common initialization of dma ptrs */
2477 		mcp->to_fw[3] = LSW(LSD(da));
2478 		mcp->to_fw[2] = MSW(LSD(da));
2479 		mcp->to_fw[7] = LSW(MSD(da));
2480 		mcp->to_fw[6] = MSW(MSD(da));
2481 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2482 	}
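	/*
	 * Mailbox 0 always carries the command going out and the completion
	 * status coming back, so include it in both masks.
	 */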
2483 	mcp->to_fw_mask |= BIT_0;
2484 	mcp->from_fw_mask |= BIT_0;
2485 	return (mcp);
2486 }
2487 
2488 void
2489 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2490 {
2491 	if (mcp->dbuf)
2492 		qlt_i_dmem_free(qlt, mcp->dbuf);
2493 	kmem_free(mcp, sizeof (*mcp));
2494 }
2495 
2496 /*
2497  * This can sleep. Should never be called from interrupt context.
2498  */
2499 static fct_status_t
2500 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2501 {
2502 	int	retries;
2503 	int	i;
2504 	char	info[QLT_INFO_LEN];
2505 
2506 	if (curthread->t_flag & T_INTR_THREAD) {
2507 		ASSERT(0);
2508 		return (QLT_MBOX_FAILED);
2509 	}
2510 
2511 	mutex_enter(&qlt->mbox_lock);
2512 	/* See if mailboxes are still uninitialized */
2513 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2514 		mutex_exit(&qlt->mbox_lock);
2515 		return (QLT_MBOX_NOT_INITIALIZED);
2516 	}
2517 
2518 	/* Wait to grab the mailboxes */
2519 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2520 	    retries++) {
2521 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2522 		if ((retries > 5) ||
2523 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2524 			mutex_exit(&qlt->mbox_lock);
2525 			return (QLT_MBOX_BUSY);
2526 		}
2527 	}
2528 	/* Make sure we always ask for mailbox 0 */
2529 	mcp->from_fw_mask |= BIT_0;
2530 
2531 	/* Load mailboxes, set state and generate RISC interrupt */
2532 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2533 	qlt->mcp = mcp;
2534 	for (i = 0; i < MAX_MBOXES; i++) {
2535 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2536 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2537 	}
2538 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2539 
2540 qlt_mbox_wait_loop:;
2541 	/* Wait for mailbox command completion */
2542 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2543 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2544 		(void) snprintf(info, sizeof (info),
2545 		    "qlt_mailbox_command: qlt-%p, "
2546 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2547 		qlt->mcp = NULL;
2548 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2549 		mutex_exit(&qlt->mbox_lock);
2550 
2551 		/*
2552 		 * XXX Throw HBA fatal error event
2553 		 */
2554 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2555 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2556 		return (QLT_MBOX_TIMEOUT);
2557 	}
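	/*
	 * A wakeup while the command is still marked as running is treated
	 * as spurious; go back and wait again (the timeout is re-armed).
	 */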
2558 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2559 		goto qlt_mbox_wait_loop;
2560 
2561 	qlt->mcp = NULL;
2562 
2563 	/* Make sure it's a completion */
2564 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2565 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2566 		mutex_exit(&qlt->mbox_lock);
2567 		return (QLT_MBOX_ABORTED);
2568 	}
2569 
2570 	/* MBox command completed. Clear state, return based on mbox 0 */
2571 	/* Mailboxes are already loaded by the interrupt routine */
2572 	qlt->mbox_io_state = MBOX_STATE_READY;
2573 	mutex_exit(&qlt->mbox_lock);
2574 	if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS)
2575 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2576 
2577 	return (QLT_SUCCESS);
2578 }
2579 
2580 /*
2581  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2582  */
2583 /* ARGSUSED */
2584 static uint_t
2585 qlt_isr(caddr_t arg, caddr_t arg2)
2586 {
2587 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2588 	uint32_t	risc_status, intr_type;
2589 	int		i;
2590 	int		intr_loop_count;
2591 	char		info[QLT_INFO_LEN];
2592 
2593 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2594 	if (!mutex_tryenter(&qlt->intr_lock)) {
2595 		/*
2596 		 * Normally we will always get this lock. If tryenter is
2597 		 * failing then it means that the driver is trying to do
2598 		 * some cleanup and is masking the intr, but some intr
2599 		 * has sneaked in between. See if our device has generated
2600 		 * this intr. If so then wait a bit and return claimed.
2601 		 * If not then return claimed if this is the 1st instance
2602 		 * of an interrupt after the driver has grabbed the lock.
2603 		 */
2604 		if (risc_status & BIT_15) {
2605 			drv_usecwait(10);
2606 			return (DDI_INTR_CLAIMED);
2607 		} else if (qlt->intr_sneak_counter) {
2608 			qlt->intr_sneak_counter--;
2609 			return (DDI_INTR_CLAIMED);
2610 		} else {
2611 			return (DDI_INTR_UNCLAIMED);
2612 		}
2613 	}
2614 	if (((risc_status & BIT_15) == 0) ||
2615 	    (qlt->qlt_intr_enabled == 0)) {
2616 		/*
2617 		 * This might be pure coincidence that we are operating
2618 		 * in an interrupt-disabled mode and another device
2619 		 * sharing the interrupt line has generated an interrupt
2620 		 * while an interrupt from our device might be pending. Just
2621 		 * ignore it and let the code handling the interrupt
2622 		 * disabled mode handle it.
2623 		 */
2624 		mutex_exit(&qlt->intr_lock);
2625 		return (DDI_INTR_UNCLAIMED);
2626 	}
2627 
2628 	/*
2629 	 * XXX take care of the MSI case (disable intrs).
2630 	 * It's going to be complicated because of the max iterations,
2631 	 * as the HBA will have posted an intr which did not go out on PCI,
2632 	 * but we did not service it either because of the max iterations.
2633 	 * Maybe offload the intr to a different thread.
2634 	 */
2635 	intr_loop_count = 0;
2636 
2637 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2638 
2639 intr_again:;
2640 
2641 	/* check for risc pause */
2642 	if (risc_status & BIT_8) {
2643 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
2644 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2645 		    qlt->instance, risc_status);
2646 		(void) snprintf(info, sizeof (info), "Risc Pause %08x",
2647 		    risc_status);
2648 		(void) fct_port_shutdown(qlt->qlt_port,
2649 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2650 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2651 	}
2652 
2653 	/* First check for high performance path */
2654 	intr_type = risc_status & 0xff;
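	/*
	 * Interrupt status codes as handled below: 0x1D = ATIO and response
	 * queue updates, 0x1C = ATIO queue update only, 0x13 = response
	 * queue update only, 0x12 = asynchronous event, 0x10/0x11 = mailbox
	 * command completion.
	 */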
2655 	if (intr_type == 0x1D) {
2656 		qlt->atio_ndx_from_fw = (uint16_t)
2657 		    REG_RD32(qlt, REG_ATIO_IN_PTR);
2658 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2659 		qlt->resp_ndx_from_fw = risc_status >> 16;
2660 		qlt_handle_atio_queue_update(qlt);
2661 		qlt_handle_resp_queue_update(qlt);
2662 	} else if (intr_type == 0x1C) {
2663 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2664 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2665 		qlt_handle_atio_queue_update(qlt);
2666 	} else if (intr_type == 0x13) {
2667 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2668 		qlt->resp_ndx_from_fw = risc_status >> 16;
2669 		qlt_handle_resp_queue_update(qlt);
2670 	} else if (intr_type == 0x12) {
2671 		uint16_t code = (uint16_t)(risc_status >> 16);
2672 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2673 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2674 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2675 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2676 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2677 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2678 
2679 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2680 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2681 		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2682 		    mbox5, mbox6);
2683 		EL(qlt, "Async event %x mb1=%x mb2=%x, mb3=%x, mb5=%x, mb6=%x",
2684 		    code, mbox1, mbox2, mbox3, mbox5, mbox6);
2685 
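		/*
		 * Per the handlers below: 0x8010/0x8013/0x8030 are treated as
		 * a link reset (only if the link was already up), 0x8012 as
		 * link down, and 0x8011 as link up with the speed reported in
		 * mailbox 1.
		 */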
2686 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2687 			if (qlt->qlt_link_up) {
2688 				fct_handle_event(qlt->qlt_port,
2689 				    FCT_EVENT_LINK_RESET, 0, 0);
2690 			}
2691 		} else if (code == 0x8012) {
2692 			qlt->qlt_link_up = 0;
2693 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2694 			    0, 0);
2695 		} else if (code == 0x8011) {
2696 			switch (mbox1) {
2697 			case 0: qlt->link_speed = PORT_SPEED_1G;
2698 				break;
2699 			case 1: qlt->link_speed = PORT_SPEED_2G;
2700 				break;
2701 			case 3: qlt->link_speed = PORT_SPEED_4G;
2702 				break;
2703 			case 4: qlt->link_speed = PORT_SPEED_8G;
2704 				break;
2705 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
2706 				break;
2707 			default:
2708 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2709 			}
2710 			qlt->qlt_link_up = 1;
2711 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2712 			    0, 0);
2713 		} else if ((code == 0x8002) || (code == 0x8003) ||
2714 		    (code == 0x8004) || (code == 0x8005)) {
2715 			(void) snprintf(info, sizeof (info),
2716 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2717 			    code, mbox1, mbox2, mbox5, mbox6);
2718 			(void) fct_port_shutdown(qlt->qlt_port,
2719 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2720 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2721 		} else if (code == 0x800F) {
2722 			(void) snprintf(info, sizeof (info),
2723 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
2724 			    mbox1, mbox2, mbox3);
2725 
2726 			if (mbox1 != 1) {
2727 				/* issue "verify fw" */
2728 				qlt_verify_fw(qlt);
2729 			}
2730 		} else if (code == 0x8101) {
2731 			(void) snprintf(info, sizeof (info),
2732 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2733 			    code, mbox1, mbox2, mbox3);
2734 
2735 			/* check if "ACK" is required (timeout != 0) */
2736 			if (mbox1 & 0x0f00) {
2737 				caddr_t	req;
2738 
2739 				/*
2740 				 * Ack the request (queue work to do it?)
2741 				 * using a mailbox iocb
2742 				 */
2743 				mutex_enter(&qlt->req_lock);
2744 				req = qlt_get_req_entries(qlt, 1);
2745 				if (req) {
2746 					bzero(req, IOCB_SIZE);
2747 					req[0] = 0x39; req[1] = 1;
2748 					QMEM_WR16(qlt, req+8, 0x101);
2749 					QMEM_WR16(qlt, req+10, mbox1);
2750 					QMEM_WR16(qlt, req+12, mbox2);
2751 					QMEM_WR16(qlt, req+14, mbox3);
2752 					QMEM_WR16(qlt, req+16, mbox4);
2753 					QMEM_WR16(qlt, req+18, mbox5);
2754 					QMEM_WR16(qlt, req+20, mbox6);
2755 					qlt_submit_req_entries(qlt, 1);
2756 				} else {
2757 					(void) snprintf(info, sizeof (info),
2758 					    "IDC ACK failed");
2759 				}
2760 				mutex_exit(&qlt->req_lock);
2761 			}
2762 		}
2763 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2764 		/* Handle mailbox completion */
2765 		mutex_enter(&qlt->mbox_lock);
2766 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2767 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2768 			    " when driver wasn't waiting for it %d",
2769 			    qlt->instance, qlt->mbox_io_state);
2770 		} else {
2771 			for (i = 0; i < MAX_MBOXES; i++) {
2772 				if (qlt->mcp->from_fw_mask &
2773 				    (((uint32_t)1) << i)) {
2774 					qlt->mcp->from_fw[i] =
2775 					    REG_RD16(qlt, REG_MBOX(i));
2776 				}
2777 			}
2778 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2779 		}
2780 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2781 		cv_broadcast(&qlt->mbox_cv);
2782 		mutex_exit(&qlt->mbox_lock);
2783 	} else {
2784 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2785 		    qlt->instance, intr_type);
2786 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2787 	}
2788 
2789 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2790 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2791 	if ((risc_status & BIT_15) &&
2792 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2793 		goto intr_again;
2794 	}
2795 
2796 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2797 
2798 	mutex_exit(&qlt->intr_lock);
2799 	return (DDI_INTR_CLAIMED);
2800 }
2801 
2802 /* **************** NVRAM Functions ********************** */
2803 
2804 fct_status_t
2805 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2806 {
2807 	uint32_t	timer;
2808 
2809 	/* Clear access error flag */
2810 	REG_WR32(qlt, REG_CTRL_STATUS,
2811 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2812 
2813 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2814 
2815 	/* Wait for READ cycle to complete. */
2816 	for (timer = 3000; timer; timer--) {
2817 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2818 			break;
2819 		}
2820 		drv_usecwait(10);
2821 	}
2822 	if (timer == 0) {
2823 		EL(qlt, "flash timeout\n");
2824 		return (QLT_FLASH_TIMEOUT);
2825 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2826 		EL(qlt, "flash access error\n");
2827 		return (QLT_FLASH_ACCESS_ERROR);
2828 	}
2829 
2830 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2831 
2832 	return (QLT_SUCCESS);
2833 }
2834 
2835 fct_status_t
2836 qlt_read_nvram(qlt_state_t *qlt)
2837 {
2838 	uint32_t		index, addr, chksum;
2839 	uint32_t		val, *ptr;
2840 	fct_status_t		ret;
2841 	qlt_nvram_t		*nv;
2842 	uint64_t		empty_node_name = 0;
2843 
2844 	if (qlt->qlt_81xx_chip) {
2845 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2846 		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2847 	} else if (qlt->qlt_25xx_chip) {
2848 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2849 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2850 	} else {
2851 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2852 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2853 	}
2854 	mutex_enter(&qlt_global_lock);
2855 
2856 	/* Pause RISC. */
2857 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
2858 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2859 
2860 	/* Get NVRAM data and calculate checksum. */
2861 	ptr = (uint32_t *)qlt->nvram;
2862 	chksum = 0;
2863 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2864 		ret = qlt_read_flash_word(qlt, addr++, &val);
2865 		if (ret != QLT_SUCCESS) {
2866 			EL(qlt, "qlt_read_flash_word, status=%llxh\n",