1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2015 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 /*
29  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
30  * Fibre Channel Adapter (FCA) driver IOCTL source file.
31  *
32  * ***********************************************************************
33  * *									**
34  * *				NOTICE					**
35  * *		COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION		**
36  * *			ALL RIGHTS RESERVED				**
37  * *									**
38  * ***********************************************************************
39  *
40  */
41 
42 #include <ql_apps.h>
43 #include <ql_api.h>
44 #include <ql_debug.h>
45 #include <ql_init.h>
46 #include <ql_ioctl.h>
47 #include <ql_mbx.h>
48 #include <ql_nx.h>
49 #include <ql_xioctl.h>
50 
51 /*
52  * Local Function Prototypes.
53  */
54 static int ql_busy_notification(ql_adapter_state_t *);
55 static int ql_idle_notification(ql_adapter_state_t *);
56 static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features);
57 static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features);
58 static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha);
59 static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr,
60     uint16_t value);
61 static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t);
62 static int ql_adm_op(ql_adapter_state_t *, void *, int);
63 static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int);
64 static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *);
65 static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int);
66 static int ql_adm_update_properties(ql_adapter_state_t *);
67 static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int);
68 static int ql_adm_loop_reset(ql_adapter_state_t *);
69 static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int);
70 static int ql_adm_fw_t_dump(ql_adapter_state_t *);
71 static int ql_adm_beacon(ql_adapter_state_t *, ql_adm_op_t *);
72 static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
73 static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int);
74 static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int);
75 static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
76 static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int);
77 static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int);
78 static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int);
79 static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *);
80 
81 /* ************************************************************************ */
82 /*				cb_ops functions			    */
83 /* ************************************************************************ */
84 
85 /*
86  * ql_open
87  *	opens device
88  *
89  * Input:
90  *	dev_p = device pointer
91  *	flags = open flags
92  *	otype = open type
93  *	cred_p = credentials pointer
94  *
95  * Returns:
96  *	0 = success
97  *
98  * Context:
99  *	Kernel context.
100  */
101 /* ARGSUSED */
102 int
ql_open(dev_t * dev_p,int flags,int otyp,cred_t * cred_p)103 ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
104 {
105 	ql_adapter_state_t	*ha;
106 	int			rval = 0;
107 
108 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
109 	if (ha == NULL) {
110 		QL_PRINT_2(NULL, "failed, no adapter\n");
111 		return (ENXIO);
112 	}
113 
114 	QL_PRINT_3(ha, "started\n");
115 
116 	/* Allow only character opens */
117 	if (otyp != OTYP_CHR) {
118 		QL_PRINT_2(ha, "failed, open type\n");
119 		return (EINVAL);
120 	}
121 
122 	ADAPTER_STATE_LOCK(ha);
123 	if (flags & FEXCL && ha->flags & QL_OPENED) {
124 		ADAPTER_STATE_UNLOCK(ha);
125 		rval = EBUSY;
126 	} else {
127 		ha->flags |= QL_OPENED;
128 		ADAPTER_STATE_UNLOCK(ha);
129 	}
130 
131 	if (rval != 0) {
132 		EL(ha, "failed, rval = %xh\n", rval);
133 	} else {
134 		/*EMPTY*/
135 		QL_PRINT_3(ha, "done\n");
136 	}
137 	return (rval);
138 }
139 
140 /*
141  * ql_close
142  *	opens device
143  *
144  * Input:
145  *	dev_p = device pointer
146  *	flags = open flags
147  *	otype = open type
148  *	cred_p = credentials pointer
149  *
150  * Returns:
151  *	0 = success
152  *
153  * Context:
154  *	Kernel context.
155  */
156 /* ARGSUSED */
157 int
ql_close(dev_t dev,int flags,int otyp,cred_t * cred_p)158 ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
159 {
160 	ql_adapter_state_t	*ha;
161 	int			rval = 0;
162 
163 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
164 	if (ha == NULL) {
165 		QL_PRINT_2(ha, "failed, no adapter\n");
166 		return (ENXIO);
167 	}
168 
169 	QL_PRINT_3(ha, "started\n");
170 
171 	if (otyp != OTYP_CHR) {
172 		QL_PRINT_2(ha, "failed, open type\n");
173 		return (EINVAL);
174 	}
175 
176 	ADAPTER_STATE_LOCK(ha);
177 	ha->flags &= ~QL_OPENED;
178 	ADAPTER_STATE_UNLOCK(ha);
179 
180 	if (rval != 0) {
181 		EL(ha, "failed, rval = %xh\n", rval);
182 	} else {
183 		/*EMPTY*/
184 		QL_PRINT_3(ha, "done\n");
185 	}
186 	return (rval);
187 }
188 
189 /*
190  * ql_ioctl
191  *	control a character device
192  *
193  * Input:
194  *	dev = device number
195  *	cmd = function to perform
196  *	arg = data type varies with request
197  *	mode = flags
198  *	cred_p = credentials pointer
199  *	rval_p = pointer to result value
200  *
201  * Returns:
202  *	0 = success
203  *
204  * Context:
205  *	Kernel context.
206  */
207 /* ARGSUSED */
208 int
ql_ioctl(dev_t dev,int cmd,intptr_t arg,int mode,cred_t * cred_p,int * rval_p)209 ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
210     int *rval_p)
211 {
212 	ql_adapter_state_t	*ha;
213 	int			rval = 0;
214 
215 	if (ddi_in_panic()) {
216 		QL_PRINT_2(NULL, "ql_ioctl: ddi_in_panic exit\n");
217 		return (ENOPROTOOPT);
218 	}
219 
220 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
221 	if (ha == NULL)	{
222 		QL_PRINT_2(ha, "failed, no adapter\n");
223 		return (ENXIO);
224 	}
225 
226 	QL_PRINT_3(ha, "started\n");
227 
228 	/*
229 	 * Quick clean exit for qla2x00 foapi calls which are
230 	 * not supported in qlc.
231 	 */
232 	if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
233 		QL_PRINT_9(ha, "failed, fo api not supported\n");
234 		return (ENOTTY);
235 	}
236 
237 	/* PWR management busy. */
238 	rval = ql_busy_notification(ha);
239 	if (rval != FC_SUCCESS)	 {
240 		EL(ha, "failed, ql_busy_notification\n");
241 		return (ENXIO);
242 	}
243 
244 	rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
245 	if (rval == ENOPROTOOPT || rval == EINVAL) {
246 		switch (cmd) {
247 		case QL_GET_ADAPTER_FEATURE_BITS: {
248 			uint16_t bits;
249 
250 			rval = ql_get_feature_bits(ha, &bits);
251 
252 			if (!rval && ddi_copyout((void *)&bits, (void *)arg,
253 			    sizeof (bits), mode)) {
254 				rval = EFAULT;
255 			}
256 			break;
257 		}
258 
259 		case QL_SET_ADAPTER_FEATURE_BITS: {
260 			uint16_t bits;
261 
262 			if (ddi_copyin((void *)arg, (void *)&bits,
263 			    sizeof (bits), mode)) {
264 				rval = EFAULT;
265 				break;
266 			}
267 
268 			rval = ql_set_feature_bits(ha, bits);
269 			break;
270 		}
271 
272 		case QL_SET_ADAPTER_NVRAM_DEFAULTS:
273 			rval = ql_set_nvram_adapter_defaults(ha);
274 			break;
275 
276 		case QL_UTIL_LOAD:
277 			rval = ql_nv_util_load(ha, (void *)arg, mode);
278 			break;
279 
280 		case QL_UTIL_DUMP:
281 			rval = ql_nv_util_dump(ha, (void *)arg, mode);
282 			break;
283 
284 		case QL_ADM_OP:
285 			rval = ql_adm_op(ha, (void *)arg, mode);
286 			break;
287 
288 		default:
289 			EL(ha, "unknown command = %d\n", cmd);
290 			rval = ENOTTY;
291 			break;
292 		}
293 	}
294 
295 	/* PWR management idle. */
296 	(void) ql_idle_notification(ha);
297 
298 	if (rval != 0) {
299 		/*
300 		 * Don't show failures caused by pps polling for
301 		 * non-existant virtual ports.
302 		 */
303 		if (cmd != EXT_CC_VPORT_CMD) {
304 			EL(ha, "failed, cmd=%d rval=%d\n", cmd, rval);
305 		}
306 	} else {
307 		/*EMPTY*/
308 		QL_PRINT_9(ha, "done\n");
309 	}
310 	return (rval);
311 }
312 
313 /*
314  * ql_busy_notification
315  *	Adapter busy notification.
316  *
317  * Input:
318  *	ha = adapter state pointer.
319  *
320  * Returns:
321  *	FC_SUCCESS
322  *	FC_FAILURE
323  *
324  * Context:
325  *	Kernel context.
326  */
327 static int
ql_busy_notification(ql_adapter_state_t * ha)328 ql_busy_notification(ql_adapter_state_t *ha)
329 {
330 	if (!ha->pm_capable) {
331 		return (FC_SUCCESS);
332 	}
333 
334 	QL_PRINT_9(ha, "started\n");
335 
336 	QL_PM_LOCK(ha);
337 	ha->pm_busy++;
338 	QL_PM_UNLOCK(ha);
339 
340 	if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
341 		QL_PM_LOCK(ha);
342 		if (ha->pm_busy) {
343 			ha->pm_busy--;
344 		}
345 		QL_PM_UNLOCK(ha);
346 
347 		EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
348 		return (FC_FAILURE);
349 	}
350 
351 	QL_PM_LOCK(ha);
352 	if (ha->power_level != PM_LEVEL_D0) {
353 		QL_PM_UNLOCK(ha);
354 		if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
355 			QL_PM_LOCK(ha);
356 			if (ha->pm_busy) {
357 				ha->pm_busy--;
358 			}
359 			QL_PM_UNLOCK(ha);
360 			return (FC_FAILURE);
361 		}
362 	} else {
363 		QL_PM_UNLOCK(ha);
364 	}
365 
366 	QL_PRINT_9(ha, "done\n");
367 
368 	return (FC_SUCCESS);
369 }
370 
371 /*
372  * ql_idle_notification
373  *	Adapter idle notification.
374  *
375  * Input:
376  *	ha = adapter state pointer.
377  *
378  * Returns:
379  *	FC_SUCCESS
380  *	FC_FAILURE
381  *
382  * Context:
383  *	Kernel context.
384  */
385 static int
ql_idle_notification(ql_adapter_state_t * ha)386 ql_idle_notification(ql_adapter_state_t *ha)
387 {
388 	if (!ha->pm_capable) {
389 		return (FC_SUCCESS);
390 	}
391 
392 	QL_PRINT_9(ha, "started\n");
393 
394 	if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) {
395 		EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE);
396 		return (FC_FAILURE);
397 	}
398 
399 	QL_PM_LOCK(ha);
400 	if (ha->pm_busy) {
401 		ha->pm_busy--;
402 	}
403 	QL_PM_UNLOCK(ha);
404 
405 	QL_PRINT_9(ha, "done\n");
406 
407 	return (FC_SUCCESS);
408 }
409 
410 /*
411  * Get adapter feature bits from NVRAM
412  */
413 static int
ql_get_feature_bits(ql_adapter_state_t * ha,uint16_t * features)414 ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features)
415 {
416 	int			count;
417 	volatile uint16_t	data;
418 	uint32_t		nv_cmd;
419 	uint32_t		start_addr;
420 	int			rval;
421 	uint32_t		offset = offsetof(nvram_t, adapter_features);
422 
423 	QL_PRINT_9(ha, "started\n");
424 
425 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
426 		EL(ha, "Not supported for 24xx\n");
427 		return (EINVAL);
428 	}
429 
430 	/*
431 	 * The offset can't be greater than max of 8 bits and
432 	 * the following code breaks if the offset isn't at
433 	 * 2 byte boundary.
434 	 */
435 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
436 	if (rval != QL_SUCCESS) {
437 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
438 		return (EIO);
439 	}
440 
441 	/*
442 	 * Have the most significant 3 bits represent the read operation
443 	 * followed by the 8 bits representing the offset at which we
444 	 * are going to perform the read operation
445 	 */
446 	offset >>= 1;
447 	offset += start_addr;
448 	nv_cmd = (offset << 16) | NV_READ_OP;
449 	nv_cmd <<= 5;
450 
451 	/*
452 	 * Select the chip and feed the command and address
453 	 */
454 	for (count = 0; count < 11; count++) {
455 		if (nv_cmd & BIT_31) {
456 			ql_nv_write(ha, NV_DATA_OUT);
457 		} else {
458 			ql_nv_write(ha, 0);
459 		}
460 		nv_cmd <<= 1;
461 	}
462 
463 	*features = 0;
464 	for (count = 0; count < 16; count++) {
465 		WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK);
466 		ql_nv_delay();
467 
468 		data = RD16_IO_REG(ha, nvram);
469 		*features <<= 1;
470 		if (data & NV_DATA_IN) {
471 			*features = (uint16_t)(*features | 0x1);
472 		}
473 
474 		WRT16_IO_REG(ha, nvram, NV_SELECT);
475 		ql_nv_delay();
476 	}
477 
478 	/*
479 	 * Deselect the chip
480 	 */
481 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
482 
483 	ql_release_nvram(ha);
484 
485 	QL_PRINT_9(ha, "done\n");
486 
487 	return (0);
488 }
489 
490 /*
491  * Set adapter feature bits in NVRAM
492  */
493 static int
ql_set_feature_bits(ql_adapter_state_t * ha,uint16_t features)494 ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features)
495 {
496 	int		rval;
497 	uint32_t	count;
498 	nvram_t		*nv;
499 	uint16_t	*wptr;
500 	uint8_t		*bptr;
501 	uint8_t		csum;
502 	uint32_t	start_addr;
503 
504 	QL_PRINT_9(ha, "started\n");
505 
506 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
507 		EL(ha, "Not supported for 24xx\n");
508 		return (EINVAL);
509 	}
510 
511 	nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
512 	if (nv == NULL) {
513 		EL(ha, "failed, kmem_zalloc\n");
514 		return (ENOMEM);
515 	}
516 
517 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
518 	if (rval != QL_SUCCESS) {
519 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
520 		kmem_free(nv, sizeof (*nv));
521 		return (EIO);
522 	}
523 	rval = 0;
524 
525 	/*
526 	 * Read off the whole NVRAM
527 	 */
528 	wptr = (uint16_t *)nv;
529 	csum = 0;
530 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
531 		*wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
532 		csum = (uint8_t)(csum + (uint8_t)*wptr);
533 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
534 		wptr++;
535 	}
536 
537 	/*
538 	 * If the checksum is BAD then fail it right here.
539 	 */
540 	if (csum) {
541 		kmem_free(nv, sizeof (*nv));
542 		ql_release_nvram(ha);
543 		return (EBADF);
544 	}
545 
546 	nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
547 	nv->adapter_features[1] = (uint8_t)(features & 0xFF);
548 
549 	/*
550 	 * Recompute the chesksum now
551 	 */
552 	bptr = (uint8_t *)nv;
553 	for (count = 0; count < sizeof (nvram_t) - 1; count++) {
554 		csum = (uint8_t)(csum + *bptr++);
555 	}
556 	csum = (uint8_t)(~csum + 1);
557 	nv->checksum = csum;
558 
559 	/*
560 	 * Now load the NVRAM
561 	 */
562 	wptr = (uint16_t *)nv;
563 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
564 		ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
565 	}
566 
567 	/*
568 	 * Read NVRAM and verify the contents
569 	 */
570 	wptr = (uint16_t *)nv;
571 	csum = 0;
572 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
573 		if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
574 			rval = EIO;
575 			break;
576 		}
577 		csum = (uint8_t)(csum + (uint8_t)*wptr);
578 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
579 		wptr++;
580 	}
581 
582 	if (csum) {
583 		rval = EINVAL;
584 	}
585 
586 	kmem_free(nv, sizeof (*nv));
587 	ql_release_nvram(ha);
588 
589 	QL_PRINT_9(ha, "done\n");
590 
591 	return (rval);
592 }
593 
594 /*
595  * Fix this function to update just feature bits and checksum in NVRAM
596  */
597 static int
ql_set_nvram_adapter_defaults(ql_adapter_state_t * ha)598 ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
599 {
600 	int		rval;
601 	uint32_t	count;
602 	uint32_t	start_addr;
603 
604 	QL_PRINT_9(ha, "started\n");
605 
606 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
607 	if (rval != QL_SUCCESS) {
608 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
609 		return (EIO);
610 	}
611 	rval = 0;
612 
613 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
614 		nvram_24xx_t	*nv;
615 		uint32_t	*longptr;
616 		uint32_t	csum = 0;
617 
618 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
619 		if (nv == NULL) {
620 			EL(ha, "failed, kmem_zalloc\n");
621 			return (ENOMEM);
622 		}
623 
624 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
625 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
626 
627 		nv->version[0] = 1;
628 		nv->max_frame_length[1] = 8;
629 		nv->execution_throttle[0] = 16;
630 		nv->login_retry_count[0] = 8;
631 
632 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
633 		nv->firmware_options_1[1] = BIT_5;
634 		nv->firmware_options_2[0] = BIT_5;
635 		nv->firmware_options_2[1] = BIT_4;
636 		nv->firmware_options_3[1] = BIT_6;
637 
638 		/*
639 		 * Set default host adapter parameters
640 		 */
641 		nv->host_p[0] = BIT_4 | BIT_1;
642 		nv->host_p[1] = BIT_3 | BIT_2;
643 		nv->reset_delay = 5;
644 		nv->max_luns_per_target[0] = 128;
645 		nv->port_down_retry_count[0] = 30;
646 		nv->link_down_timeout[0] = 30;
647 
648 		/*
649 		 * compute the chesksum now
650 		 */
651 		longptr = (uint32_t *)nv;
652 		csum = 0;
653 		for (count = 0; count < (sizeof (nvram_24xx_t) / 4) - 1;
654 		    count++) {
655 			csum += *longptr;
656 			longptr++;
657 		}
658 		csum = (uint32_t)(~csum + 1);
659 		LITTLE_ENDIAN_32((long)csum);
660 		*longptr = csum;
661 
662 		/*
663 		 * Now load the NVRAM
664 		 */
665 		longptr = (uint32_t *)nv;
666 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
667 			(void) ql_24xx_load_nvram(ha,
668 			    (uint32_t)(count + start_addr), *longptr++);
669 		}
670 
671 		/*
672 		 * Read NVRAM and verify the contents
673 		 */
674 		csum = 0;
675 		longptr = (uint32_t *)nv;
676 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
677 			rval = ql_24xx_read_flash(ha, count + start_addr,
678 			    longptr);
679 			if (rval != QL_SUCCESS) {
680 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
681 				break;
682 			}
683 			csum += *longptr;
684 		}
685 
686 		if (csum) {
687 			rval = EINVAL;
688 		}
689 		kmem_free(nv, sizeof (nvram_24xx_t));
690 	} else {
691 		nvram_t		*nv;
692 		uint16_t	*wptr;
693 		uint8_t		*bptr;
694 		uint8_t		csum;
695 
696 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
697 		if (nv == NULL) {
698 			EL(ha, "failed, kmem_zalloc\n");
699 			return (ENOMEM);
700 		}
701 		/*
702 		 * Set default initialization control block.
703 		 */
704 		nv->parameter_block_version = ICB_VERSION;
705 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
706 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
707 
708 		nv->max_frame_length[1] = 4;
709 		nv->max_iocb_allocation[1] = 1;
710 		nv->execution_throttle[0] = 16;
711 		nv->login_retry_count = 8;
712 		nv->port_name[0] = 33;
713 		nv->port_name[3] = 224;
714 		nv->port_name[4] = 139;
715 		nv->login_timeout = 4;
716 
717 		/*
718 		 * Set default host adapter parameters
719 		 */
720 		nv->host_p[0] = BIT_1;
721 		nv->host_p[1] = BIT_2;
722 		nv->reset_delay = 5;
723 		nv->port_down_retry_count = 8;
724 		nv->maximum_luns_per_target[0] = 8;
725 
726 		/*
727 		 * compute the chesksum now
728 		 */
729 		bptr = (uint8_t *)nv;
730 		csum = 0;
731 		for (count = 0; count < sizeof (nvram_t) - 1; count++) {
732 			csum = (uint8_t)(csum + *bptr++);
733 		}
734 		csum = (uint8_t)(~csum + 1);
735 		nv->checksum = csum;
736 
737 		/*
738 		 * Now load the NVRAM
739 		 */
740 		wptr = (uint16_t *)nv;
741 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
742 			ql_load_nvram(ha, (uint8_t)(count + start_addr),
743 			    *wptr++);
744 		}
745 
746 		/*
747 		 * Read NVRAM and verify the contents
748 		 */
749 		wptr = (uint16_t *)nv;
750 		csum = 0;
751 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
752 			if (ql_get_nvram_word(ha, count + start_addr) !=
753 			    *wptr) {
754 				rval = EIO;
755 				break;
756 			}
757 			csum = (uint8_t)(csum + (uint8_t)*wptr);
758 			csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
759 			wptr++;
760 		}
761 		if (csum) {
762 			rval = EINVAL;
763 		}
764 		kmem_free(nv, sizeof (*nv));
765 	}
766 	ql_release_nvram(ha);
767 
768 	QL_PRINT_9(ha, "done\n");
769 
770 	return (rval);
771 }
772 
773 static void
ql_load_nvram(ql_adapter_state_t * ha,uint8_t addr,uint16_t value)774 ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
775 {
776 	int			count;
777 	volatile uint16_t	word;
778 	volatile uint32_t	nv_cmd;
779 
780 	ql_nv_write(ha, NV_DATA_OUT);
781 	ql_nv_write(ha, 0);
782 	ql_nv_write(ha, 0);
783 
784 	for (word = 0; word < 8; word++) {
785 		ql_nv_write(ha, NV_DATA_OUT);
786 	}
787 
788 	/*
789 	 * Deselect the chip
790 	 */
791 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
792 	ql_nv_delay();
793 
794 	/*
795 	 * Erase Location
796 	 */
797 	nv_cmd = (addr << 16) | NV_ERASE_OP;
798 	nv_cmd <<= 5;
799 	for (count = 0; count < 11; count++) {
800 		if (nv_cmd & BIT_31) {
801 			ql_nv_write(ha, NV_DATA_OUT);
802 		} else {
803 			ql_nv_write(ha, 0);
804 		}
805 		nv_cmd <<= 1;
806 	}
807 
808 	/*
809 	 * Wait for Erase to Finish
810 	 */
811 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
812 	ql_nv_delay();
813 	WRT16_IO_REG(ha, nvram, NV_SELECT);
814 	word = 0;
815 	while ((word & NV_DATA_IN) == 0) {
816 		ql_nv_delay();
817 		word = RD16_IO_REG(ha, nvram);
818 	}
819 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
820 	ql_nv_delay();
821 
822 	/*
823 	 * Write data now
824 	 */
825 	nv_cmd = (addr << 16) | NV_WRITE_OP;
826 	nv_cmd |= value;
827 	nv_cmd <<= 5;
828 	for (count = 0; count < 27; count++) {
829 		if (nv_cmd & BIT_31) {
830 			ql_nv_write(ha, NV_DATA_OUT);
831 		} else {
832 			ql_nv_write(ha, 0);
833 		}
834 		nv_cmd <<= 1;
835 	}
836 
837 	/*
838 	 * Wait for NVRAM to become ready
839 	 */
840 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
841 	ql_nv_delay();
842 	WRT16_IO_REG(ha, nvram, NV_SELECT);
843 	word = 0;
844 	while ((word & NV_DATA_IN) == 0) {
845 		ql_nv_delay();
846 		word = RD16_IO_REG(ha, nvram);
847 	}
848 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
849 	ql_nv_delay();
850 
851 	/*
852 	 * Disable writes
853 	 */
854 	ql_nv_write(ha, NV_DATA_OUT);
855 	for (count = 0; count < 10; count++) {
856 		ql_nv_write(ha, 0);
857 	}
858 
859 	/*
860 	 * Deselect the chip now
861 	 */
862 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
863 }
864 
865 /*
866  * ql_24xx_load_nvram
867  *	Enable NVRAM and writes a 32bit word to ISP24xx NVRAM.
868  *
869  * Input:
870  *	ha:	adapter state pointer.
871  *	addr:	NVRAM address.
872  *	value:	data.
873  *
874  * Returns:
875  *	ql local function return status code.
876  *
877  * Context:
878  *	Kernel context.
879  */
880 static int
ql_24xx_load_nvram(ql_adapter_state_t * ha,uint32_t addr,uint32_t value)881 ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
882 {
883 	int	rval;
884 
885 	/* Enable flash write. */
886 	if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
887 		WRT32_IO_REG(ha, ctrl_status,
888 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
889 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
890 	}
891 
892 	/* Disable NVRAM write-protection. */
893 	if (CFG_IST(ha, CFG_CTRL_24XX)) {
894 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
895 	} else {
896 		if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
897 			EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
898 			return (rval);
899 		}
900 	}
901 
902 	/* Write to flash. */
903 	rval = ql_24xx_write_flash(ha, addr, value);
904 
905 	/* Enable NVRAM write-protection. */
906 	if (CFG_IST(ha, CFG_CTRL_24XX)) {
907 		/* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
908 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
909 	} else {
910 		ql_24xx_protect_flash(ha);
911 	}
912 
913 	/* Disable flash write. */
914 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
915 		WRT32_IO_REG(ha, ctrl_status,
916 		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
917 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
918 	}
919 
920 	return (rval);
921 }
922 
923 /*
924  * ql_nv_util_load
925  *	Loads NVRAM from application.
926  *
927  * Input:
928  *	ha = adapter state pointer.
929  *	bp = user buffer address.
930  *
931  * Returns:
932  *
933  * Context:
934  *	Kernel context.
935  */
936 int
ql_nv_util_load(ql_adapter_state_t * ha,void * bp,int mode)937 ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode)
938 {
939 	uint8_t		cnt;
940 	void		*nv;
941 	uint16_t	*wptr;
942 	uint16_t	data;
943 	uint32_t	start_addr, *lptr, data32;
944 	nvram_t		*nptr;
945 	int		rval;
946 
947 	QL_PRINT_9(ha, "started\n");
948 
949 	if ((nv = kmem_zalloc(ha->nvram_cache->size, KM_SLEEP)) == NULL) {
950 		EL(ha, "failed, kmem_zalloc\n");
951 		return (ENOMEM);
952 	}
953 
954 	if (ddi_copyin(bp, nv, ha->nvram_cache->size, mode) != 0) {
955 		EL(ha, "Buffer copy failed\n");
956 		kmem_free(nv, ha->nvram_cache->size);
957 		return (EFAULT);
958 	}
959 
960 	/* See if the buffer passed to us looks sane */
961 	nptr = (nvram_t *)nv;
962 	if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' ||
963 	    nptr->id[3] != ' ') {
964 		EL(ha, "failed, buffer sanity check\n");
965 		kmem_free(nv, ha->nvram_cache->size);
966 		return (EINVAL);
967 	}
968 
969 	/* Quiesce I/O */
970 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
971 		EL(ha, "ql_stall_driver failed\n");
972 		kmem_free(nv, ha->nvram_cache->size);
973 		return (EBUSY);
974 	}
975 
976 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
977 	if (rval != QL_SUCCESS) {
978 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
979 		kmem_free(nv, ha->nvram_cache->size);
980 		ql_restart_driver(ha);
981 		return (EIO);
982 	}
983 
984 	/* Load NVRAM. */
985 	if (CFG_IST(ha, CFG_CTRL_252780818283)) {
986 		GLOBAL_HW_UNLOCK();
987 		start_addr &= ~ha->flash_data_addr;
988 		start_addr <<= 2;
989 		if ((rval = ql_r_m_w_flash(ha, bp, ha->nvram_cache->size,
990 		    start_addr, mode)) != QL_SUCCESS) {
991 			EL(ha, "nvram load failed, rval = %0xh\n", rval);
992 		}
993 		GLOBAL_HW_LOCK();
994 	} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
995 		lptr = (uint32_t *)nv;
996 		for (cnt = 0; cnt < ha->nvram_cache->size / 4; cnt++) {
997 			data32 = *lptr++;
998 			LITTLE_ENDIAN_32(&data32);
999 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1000 			    data32);
1001 			if (rval != QL_SUCCESS) {
1002 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1003 				break;
1004 			}
1005 		}
1006 	} else {
1007 		wptr = (uint16_t *)nv;
1008 		for (cnt = 0; cnt < ha->nvram_cache->size / 2; cnt++) {
1009 			data = *wptr++;
1010 			LITTLE_ENDIAN_16(&data);
1011 			ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data);
1012 		}
1013 	}
1014 	/* switch to the new one */
1015 	kmem_free(ha->nvram_cache->cache, ha->nvram_cache->size);
1016 	ha->nvram_cache->cache = (void *)nptr;
1017 
1018 	ql_release_nvram(ha);
1019 	ql_restart_driver(ha);
1020 
1021 	QL_PRINT_9(ha, "done\n");
1022 
1023 	if (rval == QL_SUCCESS) {
1024 		return (0);
1025 	}
1026 
1027 	return (EFAULT);
1028 }
1029 
1030 /*
1031  * ql_nv_util_dump
1032  *	Dumps NVRAM to application.
1033  *
1034  * Input:
1035  *	ha = adapter state pointer.
1036  *	bp = user buffer address.
1037  *
1038  * Returns:
1039  *
1040  * Context:
1041  *	Kernel context.
1042  */
1043 int
ql_nv_util_dump(ql_adapter_state_t * ha,void * bp,int mode)1044 ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode)
1045 {
1046 	uint32_t	start_addr;
1047 	int		rval2, rval = 0;
1048 
1049 	QL_PRINT_9(ha, "started\n");
1050 
1051 	if (ha->nvram_cache == NULL ||
1052 	    ha->nvram_cache->size == 0 ||
1053 	    ha->nvram_cache->cache == NULL) {
1054 		EL(ha, "failed, kmem_zalloc\n");
1055 		return (ENOMEM);
1056 	} else if (ha->nvram_cache->valid != 1) {
1057 
1058 		/* Quiesce I/O */
1059 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1060 			EL(ha, "ql_stall_driver failed\n");
1061 			return (EBUSY);
1062 		}
1063 
1064 		rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
1065 		if (rval2 != QL_SUCCESS) {
1066 			EL(ha, "failed, ql_lock_nvram=%xh\n", rval2);
1067 			ql_restart_driver(ha);
1068 			return (EIO);
1069 		}
1070 		rval2 = ql_get_nvram(ha, ha->nvram_cache->cache,
1071 		    start_addr, ha->nvram_cache->size);
1072 		if (rval2 != QL_SUCCESS) {
1073 			rval = rval2;
1074 		} else {
1075 			ha->nvram_cache->valid = 1;
1076 			EL(ha, "nvram cache now valid.");
1077 		}
1078 
1079 		ql_release_nvram(ha);
1080 		ql_restart_driver(ha);
1081 
1082 		if (rval != 0) {
1083 			EL(ha, "failed to dump nvram, rval=%x\n", rval);
1084 			return (rval);
1085 		}
1086 	}
1087 
1088 	if (ddi_copyout(ha->nvram_cache->cache, bp,
1089 	    ha->nvram_cache->size, mode) != 0) {
1090 		EL(ha, "Buffer copy failed\n");
1091 		return (EFAULT);
1092 	}
1093 
1094 	QL_PRINT_9(ha, "done\n");
1095 
1096 	return (0);
1097 }
1098 
1099 int
ql_get_nvram(ql_adapter_state_t * ha,void * dest_addr,uint32_t src_addr,uint32_t size)1100 ql_get_nvram(ql_adapter_state_t *ha, void *dest_addr, uint32_t src_addr,
1101     uint32_t size)
1102 {
1103 	int rval = QL_SUCCESS;
1104 	int cnt;
1105 	/* Dump NVRAM. */
1106 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
1107 		uint32_t	*lptr = (uint32_t *)dest_addr;
1108 
1109 		for (cnt = 0; cnt < size / 4; cnt++) {
1110 			rval = ql_24xx_read_flash(ha, src_addr++, lptr);
1111 			if (rval != QL_SUCCESS) {
1112 				EL(ha, "read_flash failed=%xh\n", rval);
1113 				rval = EAGAIN;
1114 				break;
1115 			}
1116 			LITTLE_ENDIAN_32(lptr);
1117 			lptr++;
1118 		}
1119 	} else {
1120 		uint16_t	data;
1121 		uint16_t	*wptr = (uint16_t *)dest_addr;
1122 
1123 		for (cnt = 0; cnt < size / 2; cnt++) {
1124 			data = (uint16_t)ql_get_nvram_word(ha, cnt +
1125 			    src_addr);
1126 			LITTLE_ENDIAN_16(&data);
1127 			*wptr++ = data;
1128 		}
1129 	}
1130 	return (rval);
1131 }
1132 
1133 /*
1134  * ql_vpd_load
1135  *	Loads VPD from application.
1136  *
1137  * Input:
1138  *	ha = adapter state pointer.
1139  *	bp = user buffer address.
1140  *
1141  * Returns:
1142  *
1143  * Context:
1144  *	Kernel context.
1145  */
1146 int
ql_vpd_load(ql_adapter_state_t * ha,void * bp,int mode)1147 ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode)
1148 {
1149 	uint8_t		cnt;
1150 	uint8_t		*vpd, *vpdptr, *vbuf;
1151 	uint32_t	start_addr, vpd_size, *lptr, data32;
1152 	int		rval;
1153 
1154 	QL_PRINT_9(ha, "started\n");
1155 
1156 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
1157 		EL(ha, "unsupported adapter feature\n");
1158 		return (ENOTSUP);
1159 	}
1160 
1161 	vpd_size = QL_24XX_VPD_SIZE;
1162 
1163 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1164 		EL(ha, "failed, kmem_zalloc\n");
1165 		return (ENOMEM);
1166 	}
1167 
1168 	if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) {
1169 		EL(ha, "Buffer copy failed\n");
1170 		kmem_free(vpd, vpd_size);
1171 		return (EFAULT);
1172 	}
1173 
1174 	/* Sanity check the user supplied data via checksum */
1175 	if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) {
1176 		EL(ha, "vpd RV tag missing\n");
1177 		kmem_free(vpd, vpd_size);
1178 		return (EINVAL);
1179 	}
1180 
1181 	vpdptr += 3;
1182 	cnt = 0;
1183 	vbuf = vpd;
1184 	while (vbuf <= vpdptr) {
1185 		cnt += *vbuf++;
1186 	}
1187 	if (cnt != 0) {
1188 		EL(ha, "mismatched checksum, cal=%xh, passed=%xh\n",
1189 		    (uint8_t)cnt, (uintptr_t)vpdptr);
1190 		kmem_free(vpd, vpd_size);
1191 		return (EINVAL);
1192 	}
1193 
1194 	/* Quiesce I/O */
1195 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1196 		EL(ha, "ql_stall_driver failed\n");
1197 		kmem_free(vpd, vpd_size);
1198 		return (EBUSY);
1199 	}
1200 
1201 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1202 	if (rval != QL_SUCCESS) {
1203 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1204 		kmem_free(vpd, vpd_size);
1205 		ql_restart_driver(ha);
1206 		return (EIO);
1207 	}
1208 
1209 	/* Load VPD. */
1210 	if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1211 		GLOBAL_HW_UNLOCK();
1212 		start_addr &= ~ha->flash_data_addr;
1213 		start_addr <<= 2;
1214 		if ((rval = ql_r_m_w_flash(ha, bp, vpd_size, start_addr,
1215 		    mode)) != QL_SUCCESS) {
1216 			EL(ha, "vpd load error: %xh\n", rval);
1217 		}
1218 		GLOBAL_HW_LOCK();
1219 	} else {
1220 		lptr = (uint32_t *)vpd;
1221 		for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1222 			data32 = *lptr++;
1223 			LITTLE_ENDIAN_32(&data32);
1224 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1225 			    data32);
1226 			if (rval != QL_SUCCESS) {
1227 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1228 				break;
1229 			}
1230 		}
1231 	}
1232 
1233 	kmem_free(vpd, vpd_size);
1234 
1235 	/* Update the vcache */
1236 	if (rval != QL_SUCCESS) {
1237 		EL(ha, "failed, load\n");
1238 	} else if ((ha->vcache == NULL) && ((ha->vcache =
1239 	    kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) {
1240 		EL(ha, "failed, kmem_zalloc2\n");
1241 	} else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) {
1242 		EL(ha, "Buffer copy2 failed\n");
1243 		kmem_free(ha->vcache, vpd_size);
1244 		ha->vcache = NULL;
1245 	}
1246 
1247 	ql_release_nvram(ha);
1248 	ql_restart_driver(ha);
1249 
1250 	QL_PRINT_9(ha, "done\n");
1251 
1252 	if (rval == QL_SUCCESS) {
1253 		return (0);
1254 	}
1255 
1256 	return (EFAULT);
1257 }
1258 
1259 /*
1260  * ql_vpd_dump
1261  *	Dumps VPD to application buffer.
1262  *
1263  * Input:
1264  *	ha = adapter state pointer.
1265  *	bp = user buffer address.
1266  *
1267  * Returns:
1268  *
1269  * Context:
1270  *	Kernel context.
1271  */
1272 int
ql_vpd_dump(ql_adapter_state_t * ha,void * bp,int mode)1273 ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
1274 {
1275 	uint8_t		cnt;
1276 	void		*vpd;
1277 	uint32_t	start_addr, vpd_size, *lptr;
1278 	int		rval = 0;
1279 
1280 	QL_PRINT_3(ha, "started\n");
1281 
1282 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
1283 		EL(ha, "unsupported adapter feature\n");
1284 		return (EACCES);
1285 	}
1286 
1287 	vpd_size = QL_24XX_VPD_SIZE;
1288 
1289 	if (ha->vcache != NULL) {
1290 		/* copy back the vpd cache data */
1291 		if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
1292 			EL(ha, "Buffer copy failed\n");
1293 			rval = EFAULT;
1294 		}
1295 		return (rval);
1296 	}
1297 
1298 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1299 		EL(ha, "failed, kmem_zalloc\n");
1300 		return (ENOMEM);
1301 	}
1302 
1303 	/* Quiesce I/O */
1304 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1305 		EL(ha, "ql_stall_driver failed\n");
1306 		kmem_free(vpd, vpd_size);
1307 		return (EBUSY);
1308 	}
1309 
1310 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1311 	if (rval != QL_SUCCESS) {
1312 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1313 		kmem_free(vpd, vpd_size);
1314 		ql_restart_driver(ha);
1315 		return (EIO);
1316 	}
1317 
1318 	/* Dump VPD. */
1319 	lptr = (uint32_t *)vpd;
1320 
1321 	for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1322 		rval = ql_24xx_read_flash(ha, start_addr++, lptr);
1323 		if (rval != QL_SUCCESS) {
1324 			EL(ha, "read_flash failed=%xh\n", rval);
1325 			rval = EAGAIN;
1326 			break;
1327 		}
1328 		LITTLE_ENDIAN_32(lptr);
1329 		lptr++;
1330 	}
1331 
1332 	ql_release_nvram(ha);
1333 	ql_restart_driver(ha);
1334 
1335 	if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
1336 		EL(ha, "Buffer copy failed\n");
1337 		kmem_free(vpd, vpd_size);
1338 		return (EFAULT);
1339 	}
1340 
1341 	ha->vcache = vpd;
1342 
1343 	QL_PRINT_3(ha, "done\n");
1344 
1345 	if (rval != QL_SUCCESS) {
1346 		return (EFAULT);
1347 	} else {
1348 		return (0);
1349 	}
1350 }
1351 
1352 /*
1353  * ql_vpd_findtag
1354  *	Search the passed vpd buffer for the requested VPD tag type.
1355  *
1356  * Input:
1357  *	ha	= adapter state pointer.
1358  *	vpdbuf	= Pointer to start of the buffer to search
1359  *	op	= VPD opcode to find (must be NULL terminated).
1360  *
1361  * Returns:
1362  *	Pointer to the opcode in the buffer if opcode found.
1363  *	NULL if opcode is not found.
1364  *
1365  * Context:
1366  *	Kernel context.
1367  */
1368 static uint8_t *
ql_vpd_findtag(ql_adapter_state_t * ha,uint8_t * vpdbuf,int8_t * opcode)1369 ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode)
1370 {
1371 	uint8_t		*vpd = vpdbuf;
1372 	uint8_t		*end = vpdbuf + QL_24XX_VPD_SIZE;
1373 	uint32_t	found = 0;
1374 
1375 	QL_PRINT_3(ha, "started\n");
1376 
1377 	if (vpdbuf == NULL || opcode == NULL) {
1378 		EL(ha, "null parameter passed!\n");
1379 		return (NULL);
1380 	}
1381 
1382 	while (vpd < end) {
1383 
1384 		/* check for end of vpd */
1385 		if (vpd[0] == VPD_TAG_END) {
1386 			if (opcode[0] == VPD_TAG_END) {
1387 				found = 1;
1388 			} else {
1389 				found = 0;
1390 			}
1391 			break;
1392 		}
1393 
1394 		/* check opcode */
1395 		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
1396 			/* found opcode requested */
1397 			found = 1;
1398 			break;
1399 		}
1400 
1401 		/*
1402 		 * Didn't find the opcode, so calculate start of
1403 		 * next tag. Depending on the current tag type,
1404 		 * the length field can be 1 or 2 bytes
1405 		 */
1406 		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
1407 			vpd += (vpd[2] << 8) + vpd[1] + 3;
1408 		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
1409 			vpd += 3;
1410 		} else {
1411 			vpd += vpd[2] +3;
1412 		}
1413 	}
1414 
1415 	QL_PRINT_3(ha, "done\n");
1416 
1417 	return (found == 1 ? vpd : NULL);
1418 }
1419 
1420 /*
1421  * ql_vpd_lookup
1422  *	Return the VPD data for the request VPD tag
1423  *
1424  * Input:
1425  *	ha	= adapter state pointer.
1426  *	opcode	= VPD opcode to find (must be NULL terminated).
1427  *	bp	= Pointer to returned data buffer.
1428  *	bplen	= Length of returned data buffer.
1429  *
1430  * Returns:
1431  *	Length of data copied into returned data buffer.
1432  *		>0 = VPD data field (NULL terminated)
1433  *		 0 = no data.
1434  *		-1 = Could not find opcode in vpd buffer / error.
1435  *
1436  * Context:
1437  *	Kernel context.
1438  *
1439  * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1440  *
1441  */
1442 int32_t
ql_vpd_lookup(ql_adapter_state_t * ha,uint8_t * opcode,uint8_t * bp,int32_t bplen)1443 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp,
1444     int32_t bplen)
1445 {
1446 	uint8_t		*vpd;
1447 	uint8_t		*vpdbuf;
1448 	int32_t		len = -1;
1449 
1450 	QL_PRINT_3(ha, "started\n");
1451 
1452 	if (opcode == NULL || bp == NULL || bplen < 1) {
1453 		EL(ha, "invalid parameter passed: opcode=%ph, "
1454 		    "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1455 		return (len);
1456 	}
1457 
1458 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
1459 		return (len);
1460 	}
1461 
1462 	if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE,
1463 	    KM_SLEEP)) == NULL) {
1464 		EL(ha, "unable to allocate vpd memory\n");
1465 		return (len);
1466 	}
1467 
1468 	if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) {
1469 		kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1470 		EL(ha, "unable to retrieve VPD data\n");
1471 		return (len);
1472 	}
1473 
1474 	if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) {
1475 		/*
1476 		 * Found the tag
1477 		 */
1478 		if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1479 		    *opcode == VPD_TAG_LRTC) {
1480 			/*
1481 			 * we found it, but the tag doesn't have a data
1482 			 * field.
1483 			 */
1484 			len = 0;
1485 		} else if (!(strncmp((char *)vpd, (char *)
1486 		    VPD_TAG_PRODID, 1))) {
1487 			len = vpd[2] << 8;
1488 			len += vpd[1];
1489 		} else {
1490 			len = vpd[2];
1491 		}
1492 
1493 		/*
1494 		 * make sure that the vpd len doesn't exceed the
1495 		 * vpd end
1496 		 */
1497 		if (vpd + len > vpdbuf + QL_24XX_VPD_SIZE) {
1498 			EL(ha, "vpd tag len (%xh) exceeds vpd buffer "
1499 			    "length\n", len);
1500 			len = -1;
1501 		}
1502 	}
1503 
1504 	if (len >= 0) {
1505 		/*
1506 		 * make sure we don't exceed callers buffer space len
1507 		 */
1508 		if (len > bplen) {
1509 			len = bplen - 1;
1510 		}
1511 
1512 		/* copy the data back */
1513 		(void) strncpy((int8_t *)bp, (int8_t *)(vpd + 3), (int64_t)len);
1514 		bp[len] = 0;
1515 	} else {
1516 		/* error -- couldn't find tag */
1517 		bp[0] = 0;
1518 		if (opcode[1] != 0) {
1519 			EL(ha, "unable to find tag '%s'\n", opcode);
1520 		} else {
1521 			EL(ha, "unable to find tag '%xh'\n", opcode[0]);
1522 		}
1523 	}
1524 
1525 	kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1526 
1527 	QL_PRINT_3(ha, "done\n");
1528 
1529 	return (len);
1530 }
1531 
1532 /*
1533  * ql_r_m_w_flash
1534  *	Read modify write from user space to flash.
1535  *
1536  * Input:
1537  *	ha:	adapter state pointer.
1538  *	dp:	source byte pointer.
1539  *	bc:	byte count.
1540  *	faddr:	flash byte address.
1541  *	mode:	flags.
1542  *
1543  * Returns:
1544  *	ql local function return status code.
1545  *
1546  * Context:
1547  *	Kernel context.
1548  */
1549 int
ql_r_m_w_flash(ql_adapter_state_t * ha,caddr_t dp,uint32_t bc,uint32_t faddr,int mode)1550 ql_r_m_w_flash(ql_adapter_state_t *ha, caddr_t dp, uint32_t bc, uint32_t faddr,
1551     int mode)
1552 {
1553 	uint8_t		*bp;
1554 	uint32_t	xfer, bsize, saddr, ofst;
1555 	int		rval = 0;
1556 
1557 	QL_PRINT_9(ha, "started, dp=%ph, faddr=%xh, bc=%xh\n",
1558 	    (void *)dp, faddr, bc);
1559 
1560 	bsize = ha->xioctl->fdesc.block_size;
1561 	saddr = faddr & ~(bsize - 1);
1562 	ofst = faddr & (bsize - 1);
1563 
1564 	if ((bp = kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
1565 		EL(ha, "kmem_zalloc=null\n");
1566 		return (QL_MEMORY_ALLOC_FAILED);
1567 	}
1568 
1569 	while (bc) {
1570 		xfer = bc > bsize ? bsize : bc;
1571 		if (ofst + xfer > bsize) {
1572 			xfer = bsize - ofst;
1573 		}
1574 		QL_PRINT_9(ha, "dp=%ph, saddr=%xh, bc=%xh, "
1575 		    "ofst=%xh, xfer=%xh\n", (void *)dp, saddr,
1576 		    bc, ofst, xfer);
1577 
1578 		if (ofst || xfer < bsize) {
1579 			/* Dump Flash sector. */
1580 			if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) !=
1581 			    QL_SUCCESS) {
1582 				EL(ha, "dump_flash status=%x\n", rval);
1583 				break;
1584 			}
1585 		}
1586 
1587 		/* Set new data. */
1588 		if ((rval = ddi_copyin(dp, (caddr_t)(bp + ofst), xfer,
1589 		    mode)) != 0) {
1590 			EL(ha, "ddi_copyin status=%xh, dp=%ph, ofst=%xh, "
1591 			    "xfer=%xh\n", rval, (void *)dp, ofst, xfer);
1592 			rval = QL_FUNCTION_FAILED;
1593 			break;
1594 		}
1595 
1596 		/* Write to flash. */
1597 		if ((rval = ql_load_fcode(ha, bp, bsize, saddr)) !=
1598 		    QL_SUCCESS) {
1599 			EL(ha, "load_flash status=%x\n", rval);
1600 			break;
1601 		}
1602 		bc -= xfer;
1603 		dp += xfer;
1604 		saddr += bsize;
1605 		ofst = 0;
1606 	}
1607 
1608 	kmem_free(bp, bsize);
1609 
1610 	QL_PRINT_9(ha, "done\n");
1611 
1612 	return (rval);
1613 }
1614 
1615 /*
1616  * ql_adm_op
1617  *	Performs qladm utility operations
1618  *
1619  * Input:
1620  *	ha:	adapter state pointer.
1621  *	arg:	driver_op_t structure pointer.
1622  *	mode:	flags.
1623  *
1624  * Returns:
1625  *
1626  * Context:
1627  *	Kernel context.
1628  */
1629 static int
ql_adm_op(ql_adapter_state_t * ha,void * arg,int mode)1630 ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode)
1631 {
1632 	ql_adm_op_t		dop;
1633 	int			rval = 0;
1634 
1635 	if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) {
1636 		EL(ha, "failed, driver_op_t ddi_copyin\n");
1637 		return (EFAULT);
1638 	}
1639 
1640 	QL_PRINT_9(ha, "started, cmd=%xh, buffer=%llx,"
1641 	    " length=%xh, option=%xh\n", dop.cmd, dop.buffer,
1642 	    dop.length, dop.option);
1643 
1644 	switch (dop.cmd) {
1645 	case QL_ADAPTER_INFO:
1646 		rval = ql_adm_adapter_info(ha, &dop, mode);
1647 		break;
1648 
1649 	case QL_EXTENDED_LOGGING:
1650 		rval = ql_adm_extended_logging(ha, &dop);
1651 		break;
1652 
1653 	case QL_LOOP_RESET:
1654 		rval = ql_adm_loop_reset(ha);
1655 		break;
1656 
1657 	case QL_DEVICE_LIST:
1658 		rval = ql_adm_device_list(ha, &dop, mode);
1659 		break;
1660 
1661 	case QL_PROP_UPDATE_INT:
1662 		rval = ql_adm_prop_update_int(ha, &dop, mode);
1663 		break;
1664 
1665 	case QL_UPDATE_PROPERTIES:
1666 		rval = ql_adm_update_properties(ha);
1667 		break;
1668 
1669 	case QL_FW_DUMP:
1670 		rval = ql_adm_fw_dump(ha, &dop, arg, mode);
1671 		break;
1672 
1673 	case QL_FW_DUMP_TRIGGER:
1674 		rval = ql_adm_fw_t_dump(ha);
1675 		break;
1676 
1677 	case QL_BEACON_ENABLE:
1678 	case QL_BEACON_DISABLE:
1679 		rval = ql_adm_beacon(ha, &dop);
1680 		break;
1681 
1682 	case QL_NVRAM_LOAD:
1683 		rval = ql_adm_nvram_load(ha, &dop, mode);
1684 		break;
1685 
1686 	case QL_NVRAM_DUMP:
1687 		rval = ql_adm_nvram_dump(ha, &dop, mode);
1688 		break;
1689 
1690 	case QL_FLASH_LOAD:
1691 		rval = ql_adm_flash_load(ha, &dop, mode);
1692 		break;
1693 
1694 	case QL_VPD_LOAD:
1695 		rval = ql_adm_vpd_load(ha, &dop, mode);
1696 		break;
1697 
1698 	case QL_VPD_DUMP:
1699 		rval = ql_adm_vpd_dump(ha, &dop, mode);
1700 		break;
1701 
1702 	case QL_VPD_GETTAG:
1703 		rval = ql_adm_vpd_gettag(ha, &dop, mode);
1704 		break;
1705 
1706 	case QL_UPD_FWMODULE:
1707 		rval = ql_adm_updfwmodule(ha, &dop, mode);
1708 		break;
1709 
1710 	default:
1711 		EL(ha, "unsupported driver op cmd: %x\n", dop.cmd);
1712 		return (EINVAL);
1713 	}
1714 
1715 	QL_PRINT_9(ha, "done\n");
1716 
1717 	return (rval);
1718 }
1719 
1720 /*
1721  * ql_adm_adapter_info
1722  *	Performs qladm QL_ADAPTER_INFO command
1723  *
1724  * Input:
1725  *	ha:	adapter state pointer.
1726  *	dop:	ql_adm_op_t structure pointer.
1727  *	mode:	flags.
1728  *
1729  * Returns:
1730  *
1731  * Context:
1732  *	Kernel context.
1733  */
1734 static int
ql_adm_adapter_info(ql_adapter_state_t * ha,ql_adm_op_t * dop,int mode)1735 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1736 {
1737 	ql_adapter_info_t	hba;
1738 	uint8_t			*dp;
1739 	uint32_t		length;
1740 	int			rval, i;
1741 
1742 	QL_PRINT_9(ha, "started\n");
1743 
1744 	hba.device_id = ha->device_id;
1745 
1746 	dp = ha->loginparams.nport_ww_name.raw_wwn;
1747 	bcopy(dp, hba.wwpn, 8);
1748 
1749 	hba.d_id = ha->d_id.b24;
1750 
1751 	if (ha->xioctl->fdesc.flash_size == 0 &&
1752 	    !(CFG_IST(ha, CFG_CTRL_22XX) && !ha->subven_id)) {
1753 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1754 			EL(ha, "ql_stall_driver failed\n");
1755 			return (EBUSY);
1756 		}
1757 
1758 		if ((rval = ql_setup_fcache(ha)) != QL_SUCCESS) {
1759 			EL(ha, "ql_setup_flash failed=%xh\n", rval);
1760 			if (rval == QL_FUNCTION_TIMEOUT) {
1761 				return (EBUSY);
1762 			}
1763 			return (EIO);
1764 		}
1765 
1766 		/* Resume I/O */
1767 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
1768 			ql_restart_driver(ha);
1769 		} else {
1770 			EL(ha, "isp_abort_needed for restart\n");
1771 			ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
1772 			    DRIVER_STALL);
1773 		}
1774 	}
1775 	hba.flash_size = ha->xioctl->fdesc.flash_size;
1776 
1777 	(void) strcpy(hba.driver_ver, QL_VERSION);
1778 
1779 	(void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version,
1780 	    ha->fw_minor_version, ha->fw_subminor_version);
1781 
1782 	bzero(hba.fcode_ver, sizeof (hba.fcode_ver));
1783 
1784 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
1785 	rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
1786 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i);
1787 	length = i;
1788 	if (rval != DDI_PROP_SUCCESS) {
1789 		EL(ha, "failed, ddi_getlongprop=%xh\n", rval);
1790 	} else {
1791 		if (length > (uint32_t)sizeof (hba.fcode_ver)) {
1792 			length = sizeof (hba.fcode_ver) - 1;
1793 		}
1794 		bcopy((void *)dp, (void *)hba.fcode_ver, length);
1795 		kmem_free(dp, length);
1796 	}
1797 
1798 	if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer,
1799 	    dop->length, mode) != 0) {
1800 		EL(ha, "failed, ddi_copyout\n");
1801 		return (EFAULT);
1802 	}
1803 
1804 	QL_PRINT_9(ha, "done\n");
1805 
1806 	return (0);
1807 }
1808 
1809 /*
1810  * ql_adm_extended_logging
1811  *	Performs qladm QL_EXTENDED_LOGGING command
1812  *
1813  * Input:
1814  *	ha:	adapter state pointer.
1815  *	dop:	ql_adm_op_t structure pointer.
1816  *
1817  * Returns:
1818  *
1819  * Context:
1820  *	Kernel context.
1821  */
1822 static int
ql_adm_extended_logging(ql_adapter_state_t * ha,ql_adm_op_t * dop)1823 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop)
1824 {
1825 	char	prop_name[MAX_PROP_LENGTH];
1826 	int	rval;
1827 
1828 	QL_PRINT_9(ha, "started\n");
1829 
1830 	(void) sprintf(prop_name, "hba%d-extended-logging", ha->instance);
1831 
1832 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1833 	rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
1834 	    (int)dop->option);
1835 	if (rval != DDI_PROP_SUCCESS) {
1836 		EL(ha, "failed, prop_update = %xh\n", rval);
1837 		return (EINVAL);
1838 	} else {
1839 		dop->option ?
1840 		    (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) :
1841 		    (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING);
1842 	}
1843 
1844 	QL_PRINT_9(ha, "done\n");
1845 
1846 	return (0);
1847 }
1848 
1849 /*
1850  * ql_adm_loop_reset
1851  *	Performs qladm QL_LOOP_RESET command
1852  *
1853  * Input:
1854  *	ha:	adapter state pointer.
1855  *
1856  * Returns:
1857  *
1858  * Context:
1859  *	Kernel context.
1860  */
1861 static int
ql_adm_loop_reset(ql_adapter_state_t * ha)1862 ql_adm_loop_reset(ql_adapter_state_t *ha)
1863 {
1864 	int	rval;
1865 
1866 	QL_PRINT_9(ha, "started\n");
1867 
1868 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
1869 		rval = ql_8021_fw_reload(ha);
1870 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
1871 		if (rval != QL_SUCCESS) {
1872 			EL(ha, "failed, ql_8021_fw_reload=%xh\n", rval);
1873 			return (EIO);
1874 		}
1875 	} else {
1876 		if (ha->task_daemon_flags & LOOP_DOWN) {
1877 			(void) ql_full_login_lip(ha);
1878 		} else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) {
1879 			EL(ha, "failed, ql_initiate_lip=%xh\n", rval);
1880 			return (EIO);
1881 		}
1882 	}
1883 
1884 	QL_PRINT_9(ha, "done\n");
1885 
1886 	return (0);
1887 }
1888 
1889 /*
1890  * ql_adm_device_list
1891  *	Performs qladm QL_DEVICE_LIST command
1892  *
1893  * Input:
1894  *	ha:	adapter state pointer.
1895  *	dop:	ql_adm_op_t structure pointer.
1896  *	mode:	flags.
1897  *
1898  * Returns:
1899  *
1900  * Context:
1901  *	Kernel context.
1902  */
1903 static int
ql_adm_device_list(ql_adapter_state_t * ha,ql_adm_op_t * dop,int mode)1904 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1905 {
1906 	ql_device_info_t	dev;
1907 	ql_link_t		*link;
1908 	ql_tgt_t		*tq;
1909 	uint32_t		index, cnt;
1910 
1911 	QL_PRINT_9(ha, "started\n");
1912 
1913 	cnt = 0;
1914 	dev.address = 0xffffffff;
1915 
1916 	/* Scan port list for requested target and fill in the values */
1917 	for (link = NULL, index = 0;
1918 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1919 		for (link = ha->dev[index].first; link != NULL;
1920 		    link = link->next) {
1921 			tq = link->base_address;
1922 
1923 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1924 				continue;
1925 			}
1926 			if (cnt != dop->option) {
1927 				cnt++;
1928 				continue;
1929 			}
1930 			/* fill in the values */
1931 			bcopy(tq->port_name, dev.wwpn, 8);
1932 			dev.address = tq->d_id.b24;
1933 			dev.loop_id = tq->loop_id;
1934 			if (tq->flags & TQF_TAPE_DEVICE) {
1935 				dev.type = FCT_TAPE;
1936 			} else if (tq->flags & TQF_INITIATOR_DEVICE) {
1937 				dev.type = FCT_INITIATOR;
1938 			} else {
1939 				dev.type = FCT_TARGET;
1940 			}
1941 			break;
1942 		}
1943 	}
1944 
1945 	if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer,
1946 	    dop->length, mode) != 0) {
1947 		EL(ha, "failed, ddi_copyout\n");
1948 		return (EFAULT);
1949 	}
1950 
1951 	QL_PRINT_9(ha, "done\n");
1952 
1953 	return (0);
1954 }
1955 
1956 /*
1957  * ql_adm_update_properties
1958  *	Performs qladm QL_UPDATE_PROPERTIES command
1959  *
1960  * Input:
1961  *	ha:	adapter state pointer.
1962  *
1963  * Returns:
1964  *
1965  * Context:
1966  *	Kernel context.
1967  */
1968 static int
ql_adm_update_properties(ql_adapter_state_t * ha)1969 ql_adm_update_properties(ql_adapter_state_t *ha)
1970 {
1971 	ql_comb_init_cb_t	init_ctrl_blk;
1972 	ql_comb_ip_init_cb_t	ip_init_ctrl_blk;
1973 
1974 	QL_PRINT_9(ha, "started\n");
1975 
1976 	/* Stall driver instance. */
1977 	(void) ql_stall_driver(ha, 0);
1978 
1979 	/* Save init control blocks. */
1980 	bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t));
1981 	bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1982 	    sizeof (ql_comb_ip_init_cb_t));
1983 
1984 	/* Update PCI configration. */
1985 	(void) ql_pci_sbus_config(ha);
1986 
1987 	/* Get configuration properties. */
1988 	(void) ql_nvram_config(ha);
1989 
1990 	/* Check for init firmware required. */
1991 	if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk,
1992 	    sizeof (ql_comb_init_cb_t)) != 0 ||
1993 	    bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1994 	    sizeof (ql_comb_ip_init_cb_t)) != 0) {
1995 
1996 		EL(ha, "isp_abort_needed\n");
1997 		ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1998 		TASK_DAEMON_LOCK(ha);
1999 		ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED;
2000 		TASK_DAEMON_UNLOCK(ha);
2001 	}
2002 
2003 	/* Update AEN queue. */
2004 	if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
2005 		ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
2006 	}
2007 
2008 	/* Restart driver instance. */
2009 	ql_restart_driver(ha);
2010 
2011 	QL_PRINT_9(ha, "done\n");
2012 
2013 	return (0);
2014 }
2015 
2016 /*
2017  * ql_adm_prop_update_int
2018  *	Performs qladm QL_PROP_UPDATE_INT command
2019  *
2020  * Input:
2021  *	ha:	adapter state pointer.
2022  *	dop:	ql_adm_op_t structure pointer.
2023  *	mode:	flags.
2024  *
2025  * Returns:
2026  *
2027  * Context:
2028  *	Kernel context.
2029  */
2030 static int
ql_adm_prop_update_int(ql_adapter_state_t * ha,ql_adm_op_t * dop,int mode)2031 ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2032 {
2033 	char	*prop_name;
2034 	int	rval;
2035 
2036 	QL_PRINT_9(ha, "started\n");
2037 
2038 	prop_name = kmem_zalloc(dop->length, KM_SLEEP);
2039 	if (prop_name == NULL) {
2040 		EL(ha, "failed, kmem_zalloc\n");
2041 		return (ENOMEM);
2042 	}
2043 
2044 	if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length,
2045 	    mode) != 0) {
2046 		EL(ha, "failed, prop_name ddi_copyin\n");
2047 		kmem_free(prop_name, dop->length);
2048 		return (EFAULT);
2049 	}
2050 
2051 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2052 	if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
2053 	    (int)dop->option)) != DDI_PROP_SUCCESS) {
2054 		EL(ha, "failed, prop_update=%xh\n", rval);
2055 		kmem_free(prop_name, dop->length);
2056 		return (EINVAL);
2057 	}
2058 
2059 	kmem_free(prop_name, dop->length);
2060 
2061 	QL_PRINT_9(ha, "done\n");
2062 
2063 	return (0);
2064 }
2065 
2066 /*
2067  * ql_adm_fw_dump
2068  *	Performs qladm QL_FW_DUMP command
2069  *
2070  * Input:
2071  *	ha:	adapter state pointer.
2072  *	dop:	ql_adm_op_t structure pointer.
2073  *	udop:	user space ql_adm_op_t structure pointer.
2074  *	mode:	flags.
2075  *
2076  * Returns:
2077  *
2078  * Context:
2079  *	Kernel context.
2080  */
2081 static int
ql_adm_fw_dump(ql_adapter_state_t * ha,ql_adm_op_t * dop,void * udop,int mode)2082 ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode)
2083 {
2084 	caddr_t	dmp;
2085 
2086 	QL_PRINT_9(ha, "started\n");
2087 
2088 	if (dop->length < ha->risc_dump_size) {
2089 		EL(ha, "failed, incorrect length=%xh, size=%xh\n",
2090 		    dop->length, ha->risc_dump_size);
2091 		return (EINVAL);
2092 	}
2093 
2094 	if (ha->ql_dump_state & QL_DUMP_VALID) {
2095 		dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);
2096 		if (dmp == NULL) {
2097 			EL(ha, "failed, kmem_zalloc\n");
2098 			return (ENOMEM);
2099 		}
2100 
2101 		dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp);
2102 		if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer,
2103 		    dop->length, mode) != 0) {
2104 			EL(ha, "failed, ddi_copyout\n");
2105 			kmem_free(dmp, ha->risc_dump_size);
2106 			return (EFAULT);
2107 		}
2108 
2109 		kmem_free(dmp, ha->risc_dump_size);
2110 		ha->ql_dump_state |= QL_DUMP_UPLOADED;
2111 
2112 	} else {
2113 		EL(ha, "failed, no dump file\n");
2114 		dop->length = 0;
2115 	}
2116 
2117 	if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) {
2118 		EL(ha, "failed, driver_op_t ddi_copyout\n");
2119 		return (EFAULT);
2120 	}
2121 
2122 	QL_PRINT_9(ha, "done\n");
2123 
2124 	return (0);
2125 }
2126 
2127 /*
2128  * ql_adm_fw_t_dump
2129  *	Performs qladm QL_FW_DUMP_TRIGGER command
2130  *
2131  * Input:
2132  *	ha:	adapter state pointer.
2133  *
2134  * Returns:
2135  *
2136  * Context:
2137  *	Kernel context.
2138  */
2139 static int
ql_adm_fw_t_dump(ql_adapter_state_t * ha)2140 ql_adm_fw_t_dump(ql_adapter_state_t *ha)
2141 {
2142 	int	rval;
2143 
2144 	QL_PRINT_9(ha, "started\n");
2145 
2146 	if (ha->ql_dump_state & QL_DUMP_VALID) {
2147 		EL(ha, "Already contains a dump file\n");
2148 		return (EINVAL);
2149 	}
2150 	rval = ql_dump_firmware(ha);
2151 
2152 	QL_PRINT_9(ha, "done\n");
2153 
2154 	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
2155 		return (0);
2156 	}
2157 	return (EFAULT);
2158 }
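
/*
 * Illustrative user-space sketch (not part of the driver): trigger a
 * firmware dump and then retrieve the ASCII-formatted dump produced by
 * ql_adm_fw_dump() above.  Same header and ql_adm_op_t/"req" assumptions
 * as the QL_PROP_UPDATE_INT sketch earlier.  The caller must supply a
 * buffer at least as large as the adapter's risc_dump_size.
 */
#if 0	/* example only; never compiled with the driver */
#include <errno.h>

static int
ql_example_fw_dump(int fd, int req, char *buf, uint32_t buflen)
{
	ql_adm_op_t	op;

	/* Ask the driver to capture a dump; EINVAL means one is pending. */
	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_FW_DUMP_TRIGGER;
	if (ioctl(fd, req, &op) != 0 && errno != EINVAL) {
		perror("QL_FW_DUMP_TRIGGER");
		return (-1);
	}

	/* Retrieve the ASCII dump; the driver rewrites op.length. */
	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_FW_DUMP;
	op.buffer = (uint64_t)(uintptr_t)buf;
	op.length = buflen;		/* must be >= risc_dump_size */
	if (ioctl(fd, req, &op) != 0) {
		perror("QL_FW_DUMP");
		return (-1);
	}
	return ((int)op.length);	/* 0 means no dump was available */
}
#endif	/* example only */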
2159 
2160 /*
2161  * ql_adm_beacon
2162  *      Performs qladm QL_BEACON_ENABLE/DISABLE command
2163  *
2164  * Input:
2165  *      ha:     adapter state pointer.
2166  *	dop:	ql_adm_op_t structure pointer.
2167  *
2168  * Returns:
2169  *	0 = success; EIO on failure.
2170  * Context:
2171  *      Kernel context.
2172  */
2173 static int
2174 ql_adm_beacon(ql_adapter_state_t *ha, ql_adm_op_t *dop)
2175 {
2176 	int		rval;
2177 	ql_mbx_data_t	mr;
2178 
2179 	if (CFG_IST(ha, CFG_CTRL_82XX)) {
2180 		return (EIO);
2181 	}
2182 
2183 	rval = ql_diag_beacon(ha, dop->cmd, &mr);
2184 
2185 	if (rval == QL_SUCCESS) {
2186 		return (0);
2187 	}
2188 
2189 	return (EIO);
2190 }
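
/*
 * Illustrative user-space sketch (not part of the driver): toggle the
 * adapter's locator beacon through ql_adm_beacon() above.  Same
 * ql_adm_op_t/"req" assumptions as the earlier sketches.
 */
#if 0	/* example only; never compiled with the driver */
static int
ql_example_beacon(int fd, int req, int enable)
{
	ql_adm_op_t	op;

	(void) memset(&op, 0, sizeof (op));
	op.cmd = enable ? QL_BEACON_ENABLE : QL_BEACON_DISABLE;

	return (ioctl(fd, req, &op) == 0 ? 0 : -1);
}
#endif	/* example only */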
2191 
2192 
2193 /*
2194  * ql_adm_nvram_dump
2195  *	Performs qladm QL_NVRAM_DUMP command
2196  *
2197  * Input:
2198  *	ha:	adapter state pointer.
2199  *	dop:	ql_adm_op_t structure pointer.
2200  *	mode:	flags.
2201  *
2202  * Returns:
2203  *	0 = success; EINVAL or ql_nv_util_dump() error on failure.
2204  * Context:
2205  *	Kernel context.
2206  */
2207 static int
2208 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2209 {
2210 	int		rval;
2211 
2212 	QL_PRINT_9(ha, "started\n");
2213 
2214 	if (dop->length < ha->nvram_cache->size) {
2215 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2216 		    ha->nvram_cache->size);
2217 		return (EINVAL);
2218 	}
2219 
2220 	if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer,
2221 	    mode)) != 0) {
2222 		EL(ha, "failed, ql_nv_util_dump\n");
2223 	} else {
2224 		/*EMPTY*/
2225 		QL_PRINT_9(ha, "done\n");
2226 	}
2227 
2228 	return (rval);
2229 }
2230 
2231 /*
2232  * ql_adm_nvram_load
2233  *	Performs qladm QL_NVRAM_LOAD command
2234  *
2235  * Input:
2236  *	ha:	adapter state pointer.
2237  *	dop:	ql_adm_op_t structure pointer.
2238  *	mode:	flags.
2239  *
2240  * Returns:
2241  *	0 = success; EINVAL or ql_nv_util_load() error on failure.
2242  * Context:
2243  *	Kernel context.
2244  */
2245 static int
2246 ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2247 {
2248 	int		rval;
2249 
2250 	QL_PRINT_9(ha, "started\n");
2251 
2252 	if (dop->length < ha->nvram_cache->size) {
2253 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2254 		    ha->nvram_cache->size);
2255 		return (EINVAL);
2256 	}
2257 
2258 	if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer,
2259 	    mode)) != 0) {
2260 		EL(ha, "failed, ql_nv_util_load\n");
2261 	} else {
2262 		/*EMPTY*/
2263 		QL_PRINT_9(ha, "done\n");
2264 	}
2265 
2266 	return (rval);
2267 }
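
/*
 * Illustrative user-space sketch (not part of the driver): save and
 * restore the adapter NVRAM image through the QL_NVRAM_DUMP and
 * QL_NVRAM_LOAD handlers above.  Same ql_adm_op_t/"req" assumptions as
 * the earlier sketches; "buf" must be at least the adapter's NVRAM size
 * or the driver rejects the request with EINVAL.
 */
#if 0	/* example only; never compiled with the driver */
static int
ql_example_nvram_save_restore(int fd, int req, uint8_t *buf, uint32_t bufsize)
{
	ql_adm_op_t	op;

	/* Dump the current NVRAM image into buf. */
	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_NVRAM_DUMP;
	op.buffer = (uint64_t)(uintptr_t)buf;
	op.length = bufsize;
	if (ioctl(fd, req, &op) != 0) {
		perror("QL_NVRAM_DUMP");
		return (-1);
	}

	/* ... edit the image in buf here if desired ... */

	/* Write the image back to NVRAM. */
	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_NVRAM_LOAD;
	op.buffer = (uint64_t)(uintptr_t)buf;
	op.length = bufsize;
	if (ioctl(fd, req, &op) != 0) {
		perror("QL_NVRAM_LOAD");
		return (-1);
	}
	return (0);
}
#endif	/* example only */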
2268 
2269 /*
2270  * ql_adm_flash_load
2271  *	Performs qladm QL_FLASH_LOAD command
2272  *
2273  * Input:
2274  *	ha:	adapter state pointer.
2275  *	dop:	ql_adm_op_t structure pointer.
2276  *	mode:	flags.
2277  *
2278  * Returns:
2279  *	0 = success; ENOMEM, EFAULT, EBUSY or EIO on failure.
2280  * Context:
2281  *	Kernel context.
2282  */
2283 static int
2284 ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2285 {
2286 	uint8_t	*dp;
2287 	int	rval;
2288 
2289 	QL_PRINT_9(ha, "started\n");
2290 
2291 	if ((dp = kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2292 		EL(ha, "failed, kmem_zalloc\n");
2293 		return (ENOMEM);
2294 	}
2295 
2296 	if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length,
2297 	    mode) != 0) {
2298 		EL(ha, "ddi_copyin failed\n");
2299 		kmem_free(dp, dop->length);
2300 		return (EFAULT);
2301 	}
2302 
2303 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
2304 		EL(ha, "ql_stall_driver failed\n");
2305 		kmem_free(dp, dop->length);
2306 		return (EBUSY);
2307 	}
2308 
2309 	rval = (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
2310 	    ql_24xx_load_flash(ha, dp, dop->length, dop->option) :
2311 	    ql_load_flash(ha, dp, dop->length));
2312 
2313 	ql_restart_driver(ha);
2314 
2315 	kmem_free(dp, dop->length);
2316 
2317 	if (rval != QL_SUCCESS) {
2318 		EL(ha, "failed\n");
2319 		return (EIO);
2320 	}
2321 
2322 	QL_PRINT_9(ha, "done\n");
2323 
2324 	return (0);
2325 }
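
/*
 * Illustrative user-space sketch (not part of the driver): read a flash
 * image from a file and hand it to the QL_FLASH_LOAD handler above.  Same
 * ql_adm_op_t/"req" assumptions as the earlier sketches.  The "option"
 * argument is passed through to ql_24xx_load_flash() on ISP24xx-class
 * adapters.
 */
#if 0	/* example only; never compiled with the driver */
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int
ql_example_flash_load(int fd, int req, const char *path, uint32_t option)
{
	ql_adm_op_t	op;
	struct stat	st;
	uint8_t		*image;
	int		imgfd, rc = -1;

	if ((imgfd = open(path, O_RDONLY)) < 0 || fstat(imgfd, &st) != 0) {
		perror(path);
		if (imgfd >= 0) {
			(void) close(imgfd);
		}
		return (-1);
	}
	if ((image = malloc((size_t)st.st_size)) == NULL ||
	    read(imgfd, image, (size_t)st.st_size) != st.st_size) {
		perror(path);
		free(image);
		(void) close(imgfd);
		return (-1);
	}
	(void) close(imgfd);

	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_FLASH_LOAD;
	op.buffer = (uint64_t)(uintptr_t)image;
	op.length = (uint32_t)st.st_size;
	op.option = option;
	if (ioctl(fd, req, &op) == 0) {
		rc = 0;
	} else {
		perror("QL_FLASH_LOAD");
	}
	free(image);
	return (rc);
}
#endif	/* example only */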
2326 
2327 /*
2328  * ql_adm_vpd_dump
2329  *	Performs qladm QL_VPD_DUMP command
2330  *
2331  * Input:
2332  *	ha:	adapter state pointer.
2333  *	dop:	ql_adm_op_t structure pointer.
2334  *	mode:	flags.
2335  *
2336  * Returns:
2337  *	0 = success; EINVAL or ql_vpd_dump() error on failure.
2338  * Context:
2339  *	Kernel context.
2340  */
2341 static int
2342 ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2343 {
2344 	int		rval;
2345 
2346 	QL_PRINT_9(ha, "started\n");
2347 
2348 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2349 		EL(ha, "hba does not support VPD\n");
2350 		return (EINVAL);
2351 	}
2352 
2353 	if (dop->length < QL_24XX_VPD_SIZE) {
2354 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2355 		    QL_24XX_VPD_SIZE);
2356 		return (EINVAL);
2357 	}
2358 
2359 	if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode))
2360 	    != 0) {
2361 		EL(ha, "failed, ql_vpd_dump\n");
2362 	} else {
2363 		/*EMPTY*/
2364 		QL_PRINT_9(ha, "done\n");
2365 	}
2366 
2367 	return (rval);
2368 }
2369 
2370 /*
2371  * ql_adm_vpd_load
2372  *	Performs qladm QL_VPD_LOAD command
2373  *
2374  * Input:
2375  *	ha:	adapter state pointer.
2376  *	dop:	ql_adm_op_t structure pointer.
2377  *	mode:	flags.
2378  *
2379  * Returns:
2380  *	0 = success; EINVAL or ql_vpd_load() error on failure.
2381  * Context:
2382  *	Kernel context.
2383  */
2384 static int
2385 ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2386 {
2387 	int		rval;
2388 
2389 	QL_PRINT_9(ha, "started\n");
2390 
2391 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2392 		EL(ha, "hba does not support VPD\n");
2393 		return (EINVAL);
2394 	}
2395 
2396 	if (dop->length < QL_24XX_VPD_SIZE) {
2397 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2398 		    QL_24XX_VPD_SIZE);
2399 		return (EINVAL);
2400 	}
2401 
2402 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode))
2403 	    != 0) {
2404 		EL(ha, "failed, ql_vpd_load\n");
2405 	} else {
2406 		/*EMPTY*/
2407 		QL_PRINT_9(ha, "done\n");
2408 	}
2409 
2410 	return (rval);
2411 }
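
/*
 * Illustrative user-space sketch (not part of the driver): dump the VPD
 * image through the QL_VPD_DUMP handler above; QL_VPD_LOAD is symmetric
 * and mirrors the NVRAM save/restore sketch earlier.  The buffer must be
 * at least QL_24XX_VPD_SIZE bytes, and only adapters with type-2 firmware
 * support VPD.  Same ql_adm_op_t/"req" assumptions as the earlier sketches.
 */
#if 0	/* example only; never compiled with the driver */
static int
ql_example_vpd_dump(int fd, int req, uint8_t *buf, uint32_t bufsize)
{
	ql_adm_op_t	op;

	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_VPD_DUMP;
	op.buffer = (uint64_t)(uintptr_t)buf;
	op.length = bufsize;	/* must be >= QL_24XX_VPD_SIZE */

	return (ioctl(fd, req, &op) == 0 ? 0 : -1);
}
#endif	/* example only */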
2412 
2413 /*
2414  * ql_adm_vpd_gettag
2415  *	Performs qladm QL_VPD_GETTAG command
2416  *
2417  * Input:
2418  *	ha:	adapter state pointer.
2419  *	dop:	ql_adm_op_t structure pointer.
2420  *	mode:	flags.
2421  *
2422  * Returns:
2423  *	0 = success; EFAULT or ql_vpd_lookup() failure status otherwise.
2424  * Context:
2425  *	Kernel context.
2426  */
2427 static int
2428 ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2429 {
2430 	int		rval = 0;
2431 	uint8_t		*lbuf;
2432 
2433 	QL_PRINT_9(ha, "started\n");
2434 
2435 	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2436 		EL(ha, "hba does not support VPD\n");
2437 		return (EINVAL);
2438 	}
2439 
2440 	if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2441 		EL(ha, "mem alloc failure of %xh bytes\n", dop->length);
2442 		rval = EFAULT;
2443 	} else {
2444 		if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf,
2445 		    dop->length, mode) != 0) {
2446 			EL(ha, "ddi_copyin failed\n");
2447 			kmem_free(lbuf, dop->length);
2448 			return (EFAULT);
2449 		}
2450 
2451 		if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t)
2452 		    dop->length)) < 0) {
2453 			EL(ha, "failed vpd_lookup\n");
2454 		} else {
2455 			if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer,
2456 			    strlen((int8_t *)lbuf) + 1, mode) != 0) {
2457 				EL(ha, "failed, ddi_copyout\n");
2458 				rval = EFAULT;
2459 			} else {
2460 				rval = 0;
2461 			}
2462 		}
2463 		kmem_free(lbuf, dop->length);
2464 	}
2465 
2466 	QL_PRINT_9(ha, "done\n");
2467 
2468 	return (rval);
2469 }
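
/*
 * Illustrative user-space sketch (not part of the driver): look up a VPD
 * tag through the QL_VPD_GETTAG handler above.  The tag keyword (for
 * example the standard part-number keyword "PN") is copied into the
 * buffer, and the driver overwrites the buffer with the tag's value
 * string.  Same ql_adm_op_t/"req" assumptions as the earlier sketches.
 */
#if 0	/* example only; never compiled with the driver */
static int
ql_example_vpd_gettag(int fd, int req, const char *tag, char *val,
    uint32_t valsize)
{
	ql_adm_op_t	op;

	(void) strlcpy(val, tag, valsize);	/* keyword in ... */

	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_VPD_GETTAG;
	op.buffer = (uint64_t)(uintptr_t)val;
	op.length = valsize;
	if (ioctl(fd, req, &op) != 0) {
		perror("QL_VPD_GETTAG");
		return (-1);
	}
	return (0);				/* ... value string out */
}
#endif	/* example only */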
2470 
2471 /*
2472  * ql_adm_updfwmodule
2473  *	Performs qladm QL_UPD_FWMODULE command
2474  *
2475  * Input:
2476  *	ha:	adapter state pointer.
2477  *	dop:	ql_adm_op_t structure pointer.
2478  *	mode:	flags.
2479  *
2480  * Returns:
2481  *	0 = success; EFAULT or ddi_modclose() status on failure.
2482  * Context:
2483  *	Kernel context.
2484  */
2485 /* ARGSUSED */
2486 static int
2487 ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2488 {
2489 	int			rval = DDI_SUCCESS;
2490 	ql_link_t		*link;
2491 	ql_adapter_state_t	*ha2 = NULL;
2492 	uint16_t		fw_class = (uint16_t)dop->option;
2493 
2494 	QL_PRINT_9(ha, "started\n");
2495 
2496 	/* zero the firmware module reference count */
2497 	for (link = ql_hba.first; link != NULL; link = link->next) {
2498 		ha2 = link->base_address;
2499 		if (fw_class == ha2->fw_class) {
2500 			if ((rval = ddi_modclose(ha2->fw_module)) !=
2501 			    DDI_SUCCESS) {
2502 				EL(ha2, "modclose rval=%xh\n", rval);
2503 				break;
2504 			}
2505 			ha2->fw_module = NULL;
2506 		}
2507 	}
2508 
2509 	/* reload the f/w modules */
2510 	for (link = ql_hba.first; link != NULL; link = link->next) {
2511 		ha2 = link->base_address;
2512 
2513 		if ((fw_class == ha2->fw_class) && (ha2->fw_module == NULL)) {
2514 			if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) !=
2515 			    QL_SUCCESS) {
2516 				EL(ha2, "unable to load f/w module: '%x' "
2517 				    "(rval=%xh)\n", ha2->fw_class, rval);
2518 				rval = EFAULT;
2519 			} else {
2520 				EL(ha2, "f/w module updated: '%x'\n",
2521 				    ha2->fw_class);
2522 			}
2523 
2524 			EL(ha2, "isp abort needed (%d)\n", ha2->instance);
2525 
2526 			ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0);
2527 
2528 			rval = 0;
2529 		}
2530 	}
2531 
2532 	QL_PRINT_9(ha, "done\n");
2533 
2534 	return (rval);
2535 }
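
/*
 * Illustrative user-space sketch (not part of the driver): ask the driver
 * to close and re-resolve the firmware module for one firmware class via
 * the QL_UPD_FWMODULE handler above.  The class value goes in op.option;
 * same ql_adm_op_t/"req" assumptions as the earlier sketches.
 */
#if 0	/* example only; never compiled with the driver */
static int
ql_example_upd_fwmodule(int fd, int req, uint16_t fw_class)
{
	ql_adm_op_t	op;

	(void) memset(&op, 0, sizeof (op));
	op.cmd = QL_UPD_FWMODULE;
	op.option = fw_class;

	return (ioctl(fd, req, &op) == 0 ? 0 : -1);
}
#endif	/* example only */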
2536