1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 /*
 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver IOCTL source file.
31  *
32  * ***********************************************************************
33  * *									**
34  * *				NOTICE					**
35  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
36  * *			ALL RIGHTS RESERVED				**
37  * *									**
38  * ***********************************************************************
39  *
40  */
41 
42 #include <ql_apps.h>
43 #include <ql_api.h>
44 #include <ql_debug.h>
45 #include <ql_init.h>
46 #include <ql_ioctl.h>
47 #include <ql_mbx.h>
48 #include <ql_xioctl.h>
49 
50 /*
51  * Local Function Prototypes.
52  */
53 static int ql_busy_notification(ql_adapter_state_t *);
54 static int ql_idle_notification(ql_adapter_state_t *);
55 static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features);
56 static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features);
57 static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha);
58 static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr,
59     uint16_t value);
60 static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t);
61 static int ql_adm_op(ql_adapter_state_t *, void *, int);
62 static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int);
63 static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *);
64 static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int);
65 static int ql_adm_update_properties(ql_adapter_state_t *);
66 static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int);
67 static int ql_adm_loop_reset(ql_adapter_state_t *);
68 static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int);
69 static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
70 static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int);
71 static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int);
72 static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
73 static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int);
74 static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int);
75 static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int);
76 static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *);
77 
78 /* ************************************************************************ */
79 /*				cb_ops functions			    */
80 /* ************************************************************************ */
81 
82 /*
83  * ql_open
84  *	opens device
85  *
86  * Input:
87  *	dev_p = device pointer
88  *	flags = open flags
 *	otyp = open type
90  *	cred_p = credentials pointer
91  *
92  * Returns:
93  *	0 = success
94  *
95  * Context:
96  *	Kernel context.
97  */
98 /* ARGSUSED */
99 int
100 ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
101 {
102 	ql_adapter_state_t	*ha;
103 	int			rval = 0;
104 
105 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
106 	if (ha == NULL) {
107 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
108 		return (ENXIO);
109 	}
110 
111 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
112 
113 	/* Allow only character opens */
114 	if (otyp != OTYP_CHR) {
115 		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
116 		    ha->instance);
117 		return (EINVAL);
118 	}
119 
120 	ADAPTER_STATE_LOCK(ha);
121 	if (flags & FEXCL && ha->flags & QL_OPENED) {
122 		ADAPTER_STATE_UNLOCK(ha);
123 		rval = EBUSY;
124 	} else {
125 		ha->flags |= QL_OPENED;
126 		ADAPTER_STATE_UNLOCK(ha);
127 	}
128 
129 	if (rval != 0) {
130 		EL(ha, "failed, rval = %xh\n", rval);
131 	} else {
132 		/*EMPTY*/
133 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
134 	}
135 	return (rval);
136 }
137 
138 /*
139  * ql_close
 *	closes device
 *
 * Input:
 *	dev = device number
 *	flags = open flags
 *	otyp = open type
146  *	cred_p = credentials pointer
147  *
148  * Returns:
149  *	0 = success
150  *
151  * Context:
152  *	Kernel context.
153  */
154 /* ARGSUSED */
155 int
156 ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
157 {
158 	ql_adapter_state_t	*ha;
159 	int			rval = 0;
160 
161 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
162 	if (ha == NULL) {
163 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
164 		return (ENXIO);
165 	}
166 
167 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
168 
169 	if (otyp != OTYP_CHR) {
170 		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
171 		    ha->instance);
172 		return (EINVAL);
173 	}
174 
175 	ADAPTER_STATE_LOCK(ha);
176 	ha->flags &= ~QL_OPENED;
177 	ADAPTER_STATE_UNLOCK(ha);
178 
179 	if (rval != 0) {
180 		EL(ha, "failed, rval = %xh\n", rval);
181 	} else {
182 		/*EMPTY*/
183 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
184 	}
185 	return (rval);
186 }
187 
188 /*
189  * ql_ioctl
190  *	control a character device
191  *
192  * Input:
193  *	dev = device number
194  *	cmd = function to perform
195  *	arg = data type varies with request
196  *	mode = flags
197  *	cred_p = credentials pointer
198  *	rval_p = pointer to result value
199  *
200  * Returns:
201  *	0 = success
202  *
203  * Context:
204  *	Kernel context.
205  */
206 /* ARGSUSED */
207 int
208 ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
209     int *rval_p)
210 {
211 	ql_adapter_state_t	*ha;
212 	int			rval = 0;
213 
214 	if (ddi_in_panic()) {
215 		QL_PRINT_2(CE_CONT, "ql_ioctl: ddi_in_panic exit\n");
216 		return (ENOPROTOOPT);
217 	}
218 
219 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
220 	if (ha == NULL)	{
221 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
222 		return (ENXIO);
223 	}
224 
225 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
226 
227 	/*
228 	 * Quick clean exit for qla2x00 foapi calls which are
229 	 * not supported in qlc.
230 	 */
231 	if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
232 		QL_PRINT_9(CE_CONT, "failed, fo api not supported\n");
233 		return (ENOTTY);
234 	}
235 
236 	/* PWR management busy. */
237 	rval = ql_busy_notification(ha);
238 	if (rval != FC_SUCCESS)	 {
239 		EL(ha, "failed, ql_busy_notification\n");
240 		return (ENXIO);
241 	}
242 
243 	rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
244 	if (rval == ENOPROTOOPT || rval == EINVAL) {
245 		switch (cmd) {
246 		case QL_GET_ADAPTER_FEATURE_BITS: {
247 			uint16_t bits;
248 
249 			rval = ql_get_feature_bits(ha, &bits);
250 
251 			if (!rval && ddi_copyout((void *)&bits, (void *)arg,
252 			    sizeof (bits), mode)) {
253 				rval = EFAULT;
254 			}
255 			break;
256 		}
257 
258 		case QL_SET_ADAPTER_FEATURE_BITS: {
259 			uint16_t bits;
260 
261 			if (ddi_copyin((void *)arg, (void *)&bits,
262 			    sizeof (bits), mode)) {
263 				rval = EFAULT;
264 				break;
265 			}
266 
267 			rval = ql_set_feature_bits(ha, bits);
268 			break;
269 		}
270 
271 		case QL_SET_ADAPTER_NVRAM_DEFAULTS:
272 			rval = ql_set_nvram_adapter_defaults(ha);
273 			break;
274 
275 		case QL_UTIL_LOAD:
276 			rval = ql_nv_util_load(ha, (void *)arg, mode);
277 			break;
278 
279 		case QL_UTIL_DUMP:
280 			rval = ql_nv_util_dump(ha, (void *)arg, mode);
281 			break;
282 
283 		case QL_ADM_OP:
284 			rval = ql_adm_op(ha, (void *)arg, mode);
285 			break;
286 
287 		default:
288 			EL(ha, "unknown command = %d\n", cmd);
289 			rval = ENOTTY;
290 			break;
291 		}
292 	}
293 
294 	/* PWR management idle. */
295 	(void) ql_idle_notification(ha);
296 
297 	if (rval != 0) {
298 		/*
299 		 * Don't show failures caused by pps polling for
		 * non-existent virtual ports.
301 		 */
302 		if (cmd != EXT_CC_VPORT_CMD) {
303 			EL(ha, "failed, cmd=%d rval=%d\n", cmd, rval);
304 		}
305 	} else {
306 		/*EMPTY*/
307 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
308 	}
309 	return (rval);
310 }
311 
312 /*
313  * ql_busy_notification
314  *	Adapter busy notification.
315  *
316  * Input:
317  *	ha = adapter state pointer.
318  *
319  * Returns:
320  *	FC_SUCCESS
321  *	FC_FAILURE
322  *
323  * Context:
324  *	Kernel context.
325  */
326 static int
327 ql_busy_notification(ql_adapter_state_t *ha)
328 {
329 	if (!ha->pm_capable) {
330 		return (FC_SUCCESS);
331 	}
332 
333 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
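	/*
	 * ha->busy counts callers holding the adapter busy for power
	 * management: pm_busy_component() keeps the framework from
	 * powering the device down, and if the adapter is not already
	 * at PM_LEVEL_D0 it is raised back to full power before the
	 * ioctl proceeds.
	 */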
334 
335 	QL_PM_LOCK(ha);
336 	ha->busy++;
337 	QL_PM_UNLOCK(ha);
338 
339 	if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
340 		QL_PM_LOCK(ha);
341 		ha->busy--;
342 		QL_PM_UNLOCK(ha);
343 
344 		EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
345 		return (FC_FAILURE);
346 	}
347 
348 	QL_PM_LOCK(ha);
349 	if (ha->power_level != PM_LEVEL_D0) {
350 		QL_PM_UNLOCK(ha);
351 		if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
352 			QL_PM_LOCK(ha);
353 			ha->busy--;
354 			QL_PM_UNLOCK(ha);
355 			return (FC_FAILURE);
356 		}
357 	} else {
358 		QL_PM_UNLOCK(ha);
359 	}
360 
361 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
362 
363 	return (FC_SUCCESS);
364 }
365 
366 /*
367  * ql_idle_notification
368  *	Adapter idle notification.
369  *
370  * Input:
371  *	ha = adapter state pointer.
372  *
373  * Returns:
374  *	FC_SUCCESS
375  *	FC_FAILURE
376  *
377  * Context:
378  *	Kernel context.
379  */
380 static int
381 ql_idle_notification(ql_adapter_state_t *ha)
382 {
383 	if (!ha->pm_capable) {
384 		return (FC_SUCCESS);
385 	}
386 
387 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
388 
389 	if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) {
390 		EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE);
391 		return (FC_FAILURE);
392 	}
393 
394 	QL_PM_LOCK(ha);
395 	ha->busy--;
396 	QL_PM_UNLOCK(ha);
397 
398 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
399 
400 	return (FC_SUCCESS);
401 }
402 
403 /*
404  * Get adapter feature bits from NVRAM
405  */
406 static int
407 ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features)
408 {
409 	int			count;
410 	volatile uint16_t	data;
411 	uint32_t		nv_cmd;
412 	uint32_t		start_addr;
413 	int			rval;
414 	uint32_t		offset = offsetof(nvram_t, adapter_features);
415 
416 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
417 
418 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
419 		EL(ha, "Not supported for 24xx\n");
420 		return (EINVAL);
421 	}
422 
423 	/*
	 * The offset must fit within 8 bits, and the following code
	 * assumes the offset lies on a 2-byte boundary.
427 	 */
428 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
429 	if (rval != QL_SUCCESS) {
430 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
431 		return (EIO);
432 	}
433 
434 	/*
	 * Build the serial command: the most significant 3 bits hold the
	 * read opcode, followed by the 8 bits giving the offset at which
	 * the read is to be performed.
438 	 */
439 	offset >>= 1;
440 	offset += start_addr;
441 	nv_cmd = (offset << 16) | NV_READ_OP;
442 	nv_cmd <<= 5;
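	/*
	 * The 11 command bits now occupy the top of nv_cmd; the loop
	 * below clocks them out to the NVRAM serially, MSB first, one
	 * bit per iteration via BIT_31.
	 */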
443 
444 	/*
445 	 * Select the chip and feed the command and address
446 	 */
447 	for (count = 0; count < 11; count++) {
448 		if (nv_cmd & BIT_31) {
449 			ql_nv_write(ha, NV_DATA_OUT);
450 		} else {
451 			ql_nv_write(ha, 0);
452 		}
453 		nv_cmd <<= 1;
454 	}
455 
456 	*features = 0;
457 	for (count = 0; count < 16; count++) {
458 		WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK);
459 		ql_nv_delay();
460 
461 		data = RD16_IO_REG(ha, nvram);
462 		*features <<= 1;
463 		if (data & NV_DATA_IN) {
464 			*features = (uint16_t)(*features | 0x1);
465 		}
466 
467 		WRT16_IO_REG(ha, nvram, NV_SELECT);
468 		ql_nv_delay();
469 	}
470 
471 	/*
472 	 * Deselect the chip
473 	 */
474 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
475 
476 	ql_release_nvram(ha);
477 
478 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
479 
480 	return (0);
481 }
482 
483 /*
484  * Set adapter feature bits in NVRAM
485  */
486 static int
487 ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features)
488 {
489 	int		rval;
490 	uint32_t	count;
491 	nvram_t		*nv;
492 	uint16_t	*wptr;
493 	uint8_t		*bptr;
494 	uint8_t		csum;
495 	uint32_t	start_addr;
496 
497 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
498 
499 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
500 		EL(ha, "Not supported for 24xx\n");
501 		return (EINVAL);
502 	}
503 
504 	nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
505 	if (nv == NULL) {
506 		EL(ha, "failed, kmem_zalloc\n");
507 		return (ENOMEM);
508 	}
509 
510 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
511 	if (rval != QL_SUCCESS) {
512 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
513 		kmem_free(nv, sizeof (*nv));
514 		return (EIO);
515 	}
516 	rval = 0;
517 
518 	/*
519 	 * Read off the whole NVRAM
520 	 */
521 	wptr = (uint16_t *)nv;
522 	csum = 0;
523 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
524 		*wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
525 		csum = (uint8_t)(csum + (uint8_t)*wptr);
526 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
527 		wptr++;
528 	}
529 
530 	/*
	 * If the checksum is bad, fail right here.
532 	 */
533 	if (csum) {
534 		kmem_free(nv, sizeof (*nv));
535 		ql_release_nvram(ha);
536 		return (EBADF);
537 	}
538 
539 	nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
540 	nv->adapter_features[1] = (uint8_t)(features & 0xFF);
541 
542 	/*
	 * Recompute the checksum now
544 	 */
545 	bptr = (uint8_t *)nv;
546 	for (count = 0; count < sizeof (nvram_t) - 1; count++) {
547 		csum = (uint8_t)(csum + *bptr++);
548 	}
549 	csum = (uint8_t)(~csum + 1);
550 	nv->checksum = csum;
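	/*
	 * The checksum byte is the two's complement of the sum of all
	 * preceding bytes, so the byte-wise sum of the entire nvram_t,
	 * checksum included, is zero; the read-back verification below
	 * relies on that (csum == 0 means the image is good).
	 */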
551 
552 	/*
553 	 * Now load the NVRAM
554 	 */
555 	wptr = (uint16_t *)nv;
556 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
557 		ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
558 	}
559 
560 	/*
561 	 * Read NVRAM and verify the contents
562 	 */
563 	wptr = (uint16_t *)nv;
564 	csum = 0;
565 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
566 		if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
567 			rval = EIO;
568 			break;
569 		}
570 		csum = (uint8_t)(csum + (uint8_t)*wptr);
571 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
572 		wptr++;
573 	}
574 
575 	if (csum) {
576 		rval = EINVAL;
577 	}
578 
579 	kmem_free(nv, sizeof (*nv));
580 	ql_release_nvram(ha);
581 
582 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
583 
584 	return (rval);
585 }
586 
587 /*
588  * Fix this function to update just feature bits and checksum in NVRAM
589  */
590 static int
591 ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
592 {
593 	int		rval;
594 	uint32_t	count;
595 	uint32_t	start_addr;
596 
597 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
598 
599 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
600 	if (rval != QL_SUCCESS) {
601 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
602 		return (EIO);
603 	}
604 	rval = 0;
605 
606 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
607 		nvram_24xx_t	*nv;
608 		uint32_t	*longptr;
609 		uint32_t	csum = 0;
610 
611 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
612 		if (nv == NULL) {
613 			EL(ha, "failed, kmem_zalloc\n");
614 			return (ENOMEM);
615 		}
616 
617 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
618 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
619 
620 		nv->version[0] = 1;
621 		nv->max_frame_length[1] = 8;
622 		nv->execution_throttle[0] = 16;
623 		nv->login_retry_count[0] = 8;
624 
625 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
626 		nv->firmware_options_1[1] = BIT_5;
627 		nv->firmware_options_2[0] = BIT_5;
628 		nv->firmware_options_2[1] = BIT_4;
629 		nv->firmware_options_3[1] = BIT_6;
630 
631 		/*
632 		 * Set default host adapter parameters
633 		 */
634 		nv->host_p[0] = BIT_4 | BIT_1;
635 		nv->host_p[1] = BIT_3 | BIT_2;
636 		nv->reset_delay = 5;
637 		nv->max_luns_per_target[0] = 128;
638 		nv->port_down_retry_count[0] = 30;
639 		nv->link_down_timeout[0] = 30;
640 
641 		/*
		 * Compute the checksum now
643 		 */
644 		longptr = (uint32_t *)nv;
645 		csum = 0;
646 		for (count = 0; count < (sizeof (nvram_24xx_t)/4)-1; count++) {
647 			csum += *longptr;
648 			longptr++;
649 		}
650 		csum = (uint32_t)(~csum + 1);
		LITTLE_ENDIAN_32(&csum);
652 		*longptr = csum;
653 
654 		/*
655 		 * Now load the NVRAM
656 		 */
657 		longptr = (uint32_t *)nv;
658 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
659 			(void) ql_24xx_load_nvram(ha,
660 			    (uint32_t)(count + start_addr), *longptr++);
661 		}
662 
663 		/*
664 		 * Read NVRAM and verify the contents
665 		 */
666 		csum = 0;
667 		longptr = (uint32_t *)nv;
668 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
669 			rval = ql_24xx_read_flash(ha, count + start_addr,
670 			    longptr);
671 			if (rval != QL_SUCCESS) {
672 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
673 				break;
674 			}
675 			csum += *longptr;
676 		}
677 
678 		if (csum) {
679 			rval = EINVAL;
680 		}
681 		kmem_free(nv, sizeof (nvram_24xx_t));
682 	} else {
683 		nvram_t		*nv;
684 		uint16_t	*wptr;
685 		uint8_t		*bptr;
686 		uint8_t		csum;
687 
688 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
689 		if (nv == NULL) {
690 			EL(ha, "failed, kmem_zalloc\n");
691 			return (ENOMEM);
692 		}
693 		/*
694 		 * Set default initialization control block.
695 		 */
696 		nv->parameter_block_version = ICB_VERSION;
697 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
698 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
699 
700 		nv->max_frame_length[1] = 4;
701 		nv->max_iocb_allocation[1] = 1;
702 		nv->execution_throttle[0] = 16;
703 		nv->login_retry_count = 8;
704 		nv->port_name[0] = 33;
705 		nv->port_name[3] = 224;
706 		nv->port_name[4] = 139;
707 		nv->login_timeout = 4;
708 
709 		/*
710 		 * Set default host adapter parameters
711 		 */
712 		nv->host_p[0] = BIT_1;
713 		nv->host_p[1] = BIT_2;
714 		nv->reset_delay = 5;
715 		nv->port_down_retry_count = 8;
716 		nv->maximum_luns_per_target[0] = 8;
717 
718 		/*
		 * Compute the checksum now
720 		 */
721 		bptr = (uint8_t *)nv;
722 		csum = 0;
723 		for (count = 0; count < sizeof (nvram_t) - 1; count++) {
724 			csum = (uint8_t)(csum + *bptr++);
725 		}
726 		csum = (uint8_t)(~csum + 1);
727 		nv->checksum = csum;
728 
729 		/*
730 		 * Now load the NVRAM
731 		 */
732 		wptr = (uint16_t *)nv;
733 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
734 			ql_load_nvram(ha, (uint8_t)(count + start_addr),
735 			    *wptr++);
736 		}
737 
738 		/*
739 		 * Read NVRAM and verify the contents
740 		 */
741 		wptr = (uint16_t *)nv;
742 		csum = 0;
743 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
744 			if (ql_get_nvram_word(ha, count + start_addr) !=
745 			    *wptr) {
746 				rval = EIO;
747 				break;
748 			}
749 			csum = (uint8_t)(csum + (uint8_t)*wptr);
750 			csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
751 			wptr++;
752 		}
753 		if (csum) {
754 			rval = EINVAL;
755 		}
756 		kmem_free(nv, sizeof (*nv));
757 	}
758 	ql_release_nvram(ha);
759 
760 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
761 
762 	return (rval);
763 }
764 
765 static void
766 ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
767 {
768 	int			count;
769 	volatile uint16_t	word;
770 	volatile uint32_t	nv_cmd;
771 
772 	ql_nv_write(ha, NV_DATA_OUT);
773 	ql_nv_write(ha, 0);
774 	ql_nv_write(ha, 0);
775 
776 	for (word = 0; word < 8; word++) {
777 		ql_nv_write(ha, NV_DATA_OUT);
778 	}
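	/*
	 * The bits just clocked out (1, 0, 0 followed by eight 1s)
	 * form what appears to be the serial NVRAM erase/write-enable
	 * command; the matching write-disable sequence is issued near
	 * the end of this routine.
	 */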
779 
780 	/*
781 	 * Deselect the chip
782 	 */
783 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
784 	ql_nv_delay();
785 
786 	/*
787 	 * Erase Location
788 	 */
789 	nv_cmd = (addr << 16) | NV_ERASE_OP;
790 	nv_cmd <<= 5;
791 	for (count = 0; count < 11; count++) {
792 		if (nv_cmd & BIT_31) {
793 			ql_nv_write(ha, NV_DATA_OUT);
794 		} else {
795 			ql_nv_write(ha, 0);
796 		}
797 		nv_cmd <<= 1;
798 	}
799 
800 	/*
801 	 * Wait for Erase to Finish
802 	 */
803 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
804 	ql_nv_delay();
805 	WRT16_IO_REG(ha, nvram, NV_SELECT);
806 	word = 0;
807 	while ((word & NV_DATA_IN) == 0) {
808 		ql_nv_delay();
809 		word = RD16_IO_REG(ha, nvram);
810 	}
811 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
812 	ql_nv_delay();
813 
814 	/*
815 	 * Write data now
816 	 */
817 	nv_cmd = (addr << 16) | NV_WRITE_OP;
818 	nv_cmd |= value;
819 	nv_cmd <<= 5;
820 	for (count = 0; count < 27; count++) {
821 		if (nv_cmd & BIT_31) {
822 			ql_nv_write(ha, NV_DATA_OUT);
823 		} else {
824 			ql_nv_write(ha, 0);
825 		}
826 		nv_cmd <<= 1;
827 	}
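	/*
	 * 27 bits were clocked out above: the 3-bit write opcode, the
	 * 8-bit word address and the 16-bit data value, MSB first.
	 */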
828 
829 	/*
830 	 * Wait for NVRAM to become ready
831 	 */
832 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
833 	ql_nv_delay();
834 	WRT16_IO_REG(ha, nvram, NV_SELECT);
835 	word = 0;
836 	while ((word & NV_DATA_IN) == 0) {
837 		ql_nv_delay();
838 		word = RD16_IO_REG(ha, nvram);
839 	}
840 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
841 	ql_nv_delay();
842 
843 	/*
844 	 * Disable writes
845 	 */
846 	ql_nv_write(ha, NV_DATA_OUT);
847 	for (count = 0; count < 10; count++) {
848 		ql_nv_write(ha, 0);
849 	}
850 
851 	/*
852 	 * Deselect the chip now
853 	 */
854 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
855 }
856 
857 /*
858  * ql_24xx_load_nvram
 *	Enables NVRAM writes and writes a 32-bit word to ISP24xx NVRAM.
860  *
861  * Input:
862  *	ha:	adapter state pointer.
863  *	addr:	NVRAM address.
864  *	value:	data.
865  *
866  * Returns:
867  *	ql local function return status code.
868  *
869  * Context:
870  *	Kernel context.
871  */
872 static int
873 ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
874 {
875 	int	rval;
876 
877 	/* Enable flash write. */
878 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
879 		WRT32_IO_REG(ha, ctrl_status,
880 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
881 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
882 	}
883 
884 	/* Disable NVRAM write-protection. */
885 	if (CFG_IST(ha, CFG_CTRL_2422)) {
886 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
887 	} else {
888 		if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
889 			EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
890 			return (rval);
891 		}
892 	}
893 
894 	/* Write to flash. */
895 	rval = ql_24xx_write_flash(ha, addr, value);
896 
897 	/* Enable NVRAM write-protection. */
898 	if (CFG_IST(ha, CFG_CTRL_2422)) {
899 		/* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
900 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
901 	} else {
902 		ql_24xx_protect_flash(ha);
903 	}
904 
905 	/* Disable flash write. */
906 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
907 		WRT32_IO_REG(ha, ctrl_status,
908 		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
909 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
910 	}
911 
912 	return (rval);
913 }
914 
915 /*
916  * ql_nv_util_load
917  *	Loads NVRAM from application.
918  *
919  * Input:
920  *	ha = adapter state pointer.
921  *	bp = user buffer address.
922  *
923  * Returns:
 *	0 = success, errno value = failure.
 *
925  * Context:
926  *	Kernel context.
927  */
928 int
929 ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode)
930 {
931 	uint8_t		cnt;
932 	void		*nv;
933 	uint16_t	*wptr;
934 	uint16_t	data;
935 	uint32_t	start_addr, *lptr, data32;
936 	nvram_t		*nptr;
937 	int		rval;
938 
939 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
940 
941 	if ((nv = kmem_zalloc(ha->nvram_cache->size, KM_SLEEP)) == NULL) {
942 		EL(ha, "failed, kmem_zalloc\n");
943 		return (ENOMEM);
944 	}
945 
946 	if (ddi_copyin(bp, nv, ha->nvram_cache->size, mode) != 0) {
947 		EL(ha, "Buffer copy failed\n");
948 		kmem_free(nv, ha->nvram_cache->size);
949 		return (EFAULT);
950 	}
951 
952 	/* See if the buffer passed to us looks sane */
953 	nptr = (nvram_t *)nv;
954 	if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' ||
955 	    nptr->id[3] != ' ') {
956 		EL(ha, "failed, buffer sanity check\n");
957 		kmem_free(nv, ha->nvram_cache->size);
958 		return (EINVAL);
959 	}
960 
961 	/* Quiesce I/O */
962 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
963 		EL(ha, "ql_stall_driver failed\n");
964 		kmem_free(nv, ha->nvram_cache->size);
965 		return (EBUSY);
966 	}
967 
968 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
969 	if (rval != QL_SUCCESS) {
970 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
971 		kmem_free(nv, ha->nvram_cache->size);
972 		ql_restart_driver(ha);
973 		return (EIO);
974 	}
975 
976 	/* Load NVRAM. */
977 	if (CFG_IST(ha, CFG_CTRL_258081)) {
978 		GLOBAL_HW_UNLOCK();
979 		start_addr &= ~ha->flash_data_addr;
980 		start_addr <<= 2;
981 		if ((rval = ql_r_m_w_flash(ha, bp, ha->nvram_cache->size,
982 		    start_addr, mode)) != QL_SUCCESS) {
983 			EL(ha, "nvram load failed, rval = %0xh\n", rval);
984 		}
985 		GLOBAL_HW_LOCK();
986 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
987 		lptr = (uint32_t *)nv;
988 		for (cnt = 0; cnt < ha->nvram_cache->size / 4; cnt++) {
989 			data32 = *lptr++;
990 			LITTLE_ENDIAN_32(&data32);
991 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
992 			    data32);
993 			if (rval != QL_SUCCESS) {
994 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
995 				break;
996 			}
997 		}
998 	} else {
999 		wptr = (uint16_t *)nv;
1000 		for (cnt = 0; cnt < ha->nvram_cache->size / 2; cnt++) {
1001 			data = *wptr++;
1002 			LITTLE_ENDIAN_16(&data);
1003 			ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data);
1004 		}
1005 	}
1006 	/* switch to the new one */
1007 	NVRAM_CACHE_LOCK(ha);
1008 
1009 	kmem_free(ha->nvram_cache->cache, ha->nvram_cache->size);
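	/*
	 * Ownership of the copied-in image (nptr/nv) transfers to the
	 * nvram cache here, which is why it is not freed below.
	 */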
1010 	ha->nvram_cache->cache = (void *)nptr;
1011 
1012 	NVRAM_CACHE_UNLOCK(ha);
1013 
1014 	ql_release_nvram(ha);
1015 	ql_restart_driver(ha);
1016 
1017 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1018 
1019 	if (rval == QL_SUCCESS) {
1020 		return (0);
1021 	}
1022 
1023 	return (EFAULT);
1024 }
1025 
1026 /*
1027  * ql_nv_util_dump
1028  *	Dumps NVRAM to application.
1029  *
1030  * Input:
1031  *	ha = adapter state pointer.
1032  *	bp = user buffer address.
1033  *
1034  * Returns:
 *	0 = success, errno value = failure.
 *
1036  * Context:
1037  *	Kernel context.
1038  */
1039 int
1040 ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode)
1041 {
1042 	uint32_t	start_addr;
1043 	int		rval2, rval = 0;
1044 
1045 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1046 
1047 	if (ha->nvram_cache == NULL ||
1048 	    ha->nvram_cache->size == 0 ||
1049 	    ha->nvram_cache->cache == NULL) {
		EL(ha, "failed, no nvram cache\n");
1051 		return (ENOMEM);
1052 	} else if (ha->nvram_cache->valid != 1) {
1053 
1054 		/* Quiesce I/O */
1055 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1056 			EL(ha, "ql_stall_driver failed\n");
1057 			return (EBUSY);
1058 		}
1059 
1060 		rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
1061 		if (rval2 != QL_SUCCESS) {
1062 			EL(ha, "failed, ql_lock_nvram=%xh\n", rval2);
1063 			ql_restart_driver(ha);
1064 			return (EIO);
1065 		}
1066 		NVRAM_CACHE_LOCK(ha);
1067 
1068 		rval2 = ql_get_nvram(ha, ha->nvram_cache->cache,
1069 		    start_addr, ha->nvram_cache->size);
1070 		if (rval2 != QL_SUCCESS) {
1071 			rval = rval2;
1072 		} else {
1073 			ha->nvram_cache->valid = 1;
			EL(ha, "nvram cache now valid\n");
1075 		}
1076 
1077 		NVRAM_CACHE_UNLOCK(ha);
1078 
1079 		ql_release_nvram(ha);
1080 		ql_restart_driver(ha);
1081 
1082 		if (rval != 0) {
1083 			EL(ha, "failed to dump nvram, rval=%x\n", rval);
1084 			return (rval);
1085 		}
1086 	}
1087 
1088 	if (ddi_copyout(ha->nvram_cache->cache, bp,
1089 	    ha->nvram_cache->size, mode) != 0) {
1090 		EL(ha, "Buffer copy failed\n");
1091 		return (EFAULT);
1092 	}
1093 
1094 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1095 
1096 	return (0);
1097 }
1098 
1099 int
1100 ql_get_nvram(ql_adapter_state_t *ha, void *dest_addr, uint32_t src_addr,
1101     uint32_t size)
1102 {
1103 	int rval = QL_SUCCESS;
1104 	int cnt;
1105 	/* Dump NVRAM. */
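	/*
	 * The LITTLE_ENDIAN_* conversions below keep the image in
	 * dest_addr in the NVRAM's native little-endian layout
	 * regardless of host byte order (on little-endian hosts they
	 * are no-ops).
	 */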
1106 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1107 		uint32_t	*lptr = (uint32_t *)dest_addr;
1108 
1109 		for (cnt = 0; cnt < size / 4; cnt++) {
1110 			rval = ql_24xx_read_flash(ha, src_addr++, lptr);
1111 			if (rval != QL_SUCCESS) {
1112 				EL(ha, "read_flash failed=%xh\n", rval);
1113 				rval = EAGAIN;
1114 				break;
1115 			}
1116 			LITTLE_ENDIAN_32(lptr);
1117 			lptr++;
1118 		}
1119 	} else {
1120 		uint16_t	data;
1121 		uint16_t	*wptr = (uint16_t *)dest_addr;
1122 
1123 		for (cnt = 0; cnt < size / 2; cnt++) {
1124 			data = (uint16_t)ql_get_nvram_word(ha, cnt +
1125 			    src_addr);
1126 			LITTLE_ENDIAN_16(&data);
1127 			*wptr++ = data;
1128 		}
1129 	}
1130 	return (rval);
1131 }
1132 
1133 /*
1134  * ql_vpd_load
1135  *	Loads VPD from application.
1136  *
1137  * Input:
1138  *	ha = adapter state pointer.
1139  *	bp = user buffer address.
1140  *
1141  * Returns:
 *	0 = success, errno value = failure.
 *
1143  * Context:
1144  *	Kernel context.
1145  */
1146 int
1147 ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode)
1148 {
1149 	uint8_t		cnt;
1150 	uint8_t		*vpd, *vpdptr, *vbuf;
1151 	uint32_t	start_addr, vpd_size, *lptr, data32;
1152 	int		rval;
1153 
1154 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1155 
1156 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1157 		EL(ha, "unsupported adapter feature\n");
1158 		return (ENOTSUP);
1159 	}
1160 
1161 	vpd_size = QL_24XX_VPD_SIZE;
1162 
1163 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1164 		EL(ha, "failed, kmem_zalloc\n");
1165 		return (ENOMEM);
1166 	}
1167 
1168 	if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) {
1169 		EL(ha, "Buffer copy failed\n");
1170 		kmem_free(vpd, vpd_size);
1171 		return (EFAULT);
1172 	}
1173 
1174 	/* Sanity check the user supplied data via checksum */
1175 	if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) {
1176 		EL(ha, "vpd RV tag missing\n");
1177 		kmem_free(vpd, vpd_size);
1178 		return (EINVAL);
1179 	}
1180 
1181 	vpdptr += 3;
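	/*
	 * The RV tag is 'R', 'V', a length byte and then the checksum
	 * byte, so vpdptr now points at the checksum; the byte-wise
	 * sum of everything from the start of the VPD image through
	 * that byte must be zero.
	 */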
1182 	cnt = 0;
1183 	vbuf = vpd;
1184 	while (vbuf <= vpdptr) {
1185 		cnt += *vbuf++;
1186 	}
1187 	if (cnt != 0) {
1188 		EL(ha, "mismatched checksum, cal=%xh, passed=%xh\n",
1189 		    (uint8_t)cnt, (uintptr_t)vpdptr);
1190 		kmem_free(vpd, vpd_size);
1191 		return (EINVAL);
1192 	}
1193 
1194 	/* Quiesce I/O */
1195 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1196 		EL(ha, "ql_stall_driver failed\n");
1197 		kmem_free(vpd, vpd_size);
1198 		return (EBUSY);
1199 	}
1200 
1201 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1202 	if (rval != QL_SUCCESS) {
1203 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1204 		kmem_free(vpd, vpd_size);
1205 		ql_restart_driver(ha);
1206 		return (EIO);
1207 	}
1208 
1209 	/* Load VPD. */
1210 	if (CFG_IST(ha, CFG_CTRL_258081)) {
1211 		GLOBAL_HW_UNLOCK();
1212 		start_addr &= ~ha->flash_data_addr;
1213 		start_addr <<= 2;
1214 		if ((rval = ql_r_m_w_flash(ha, bp, vpd_size, start_addr,
1215 		    mode)) != QL_SUCCESS) {
1216 			EL(ha, "vpd load error: %xh\n", rval);
1217 		}
1218 		GLOBAL_HW_LOCK();
1219 	} else {
1220 		lptr = (uint32_t *)vpd;
1221 		for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1222 			data32 = *lptr++;
1223 			LITTLE_ENDIAN_32(&data32);
1224 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1225 			    data32);
1226 			if (rval != QL_SUCCESS) {
1227 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1228 				break;
1229 			}
1230 		}
1231 	}
1232 
1233 	kmem_free(vpd, vpd_size);
1234 
1235 	/* Update the vcache */
1236 	CACHE_LOCK(ha);
1237 
1238 	if (rval != QL_SUCCESS) {
1239 		EL(ha, "failed, load\n");
1240 	} else if ((ha->vcache == NULL) && ((ha->vcache =
1241 	    kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) {
1242 		EL(ha, "failed, kmem_zalloc2\n");
1243 	} else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) {
1244 		EL(ha, "Buffer copy2 failed\n");
1245 		kmem_free(ha->vcache, vpd_size);
1246 		ha->vcache = NULL;
1247 	}
1248 
1249 	CACHE_UNLOCK(ha);
1250 
1251 	ql_release_nvram(ha);
1252 	ql_restart_driver(ha);
1253 
1254 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1255 
1256 	if (rval == QL_SUCCESS) {
1257 		return (0);
1258 	}
1259 
1260 	return (EFAULT);
1261 }
1262 
1263 /*
1264  * ql_vpd_dump
1265  *	Dumps VPD to application buffer.
1266  *
1267  * Input:
1268  *	ha = adapter state pointer.
1269  *	bp = user buffer address.
1270  *
1271  * Returns:
 *	0 = success, errno value = failure.
 *
1273  * Context:
1274  *	Kernel context.
1275  */
1276 int
1277 ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
1278 {
1279 	uint8_t		cnt;
1280 	void		*vpd;
1281 	uint32_t	start_addr, vpd_size, *lptr;
1282 	int		rval = 0;
1283 
1284 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1285 
1286 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1287 		EL(ha, "unsupported adapter feature\n");
1288 		return (EACCES);
1289 	}
1290 
1291 	vpd_size = QL_24XX_VPD_SIZE;
1292 
1293 	CACHE_LOCK(ha);
1294 
1295 	if (ha->vcache != NULL) {
1296 		/* copy back the vpd cache data */
1297 		if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
1298 			EL(ha, "Buffer copy failed\n");
1299 			rval = EFAULT;
1300 		}
1301 		CACHE_UNLOCK(ha);
1302 		return (rval);
1303 	}
1304 
1305 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1306 		CACHE_UNLOCK(ha);
1307 		EL(ha, "failed, kmem_zalloc\n");
1308 		return (ENOMEM);
1309 	}
1310 
1311 	/* Quiesce I/O */
1312 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1313 		CACHE_UNLOCK(ha);
1314 		EL(ha, "ql_stall_driver failed\n");
1315 		kmem_free(vpd, vpd_size);
1316 		return (EBUSY);
1317 	}
1318 
1319 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1320 	if (rval != QL_SUCCESS) {
1321 		CACHE_UNLOCK(ha);
1322 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1323 		kmem_free(vpd, vpd_size);
1324 		ql_restart_driver(ha);
1325 		return (EIO);
1326 	}
1327 
1328 	/* Dump VPD. */
1329 	lptr = (uint32_t *)vpd;
1330 
1331 	for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1332 		rval = ql_24xx_read_flash(ha, start_addr++, lptr);
1333 		if (rval != QL_SUCCESS) {
1334 			EL(ha, "read_flash failed=%xh\n", rval);
1335 			rval = EAGAIN;
1336 			break;
1337 		}
1338 		LITTLE_ENDIAN_32(lptr);
1339 		lptr++;
1340 	}
1341 
1342 	ql_release_nvram(ha);
1343 	ql_restart_driver(ha);
1344 
1345 	if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
1346 		CACHE_UNLOCK(ha);
1347 		EL(ha, "Buffer copy failed\n");
1348 		kmem_free(vpd, vpd_size);
1349 		return (EFAULT);
1350 	}
1351 
1352 	ha->vcache = vpd;
1353 
1354 	CACHE_UNLOCK(ha);
1355 
1356 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1357 
1358 	if (rval != QL_SUCCESS) {
1359 		return (EFAULT);
1360 	} else {
1361 		return (0);
1362 	}
1363 }
1364 
1365 /*
1366  * ql_vpd_findtag
1367  *	Search the passed vpd buffer for the requested VPD tag type.
1368  *
1369  * Input:
1370  *	ha	= adapter state pointer.
1371  *	vpdbuf	= Pointer to start of the buffer to search
 *	opcode	= VPD opcode to find (must be NULL terminated).
1373  *
1374  * Returns:
1375  *	Pointer to the opcode in the buffer if opcode found.
1376  *	NULL if opcode is not found.
1377  *
1378  * Context:
1379  *	Kernel context.
1380  */
1381 static uint8_t *
1382 ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode)
1383 {
1384 	uint8_t		*vpd = vpdbuf;
1385 	uint8_t		*end = vpdbuf + QL_24XX_VPD_SIZE;
1386 	uint32_t	found = 0;
1387 
1388 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1389 
1390 	if (vpdbuf == NULL || opcode == NULL) {
1391 		EL(ha, "null parameter passed!\n");
1392 		return (NULL);
1393 	}
1394 
1395 	while (vpd < end) {
1396 
1397 		/* check for end of vpd */
1398 		if (vpd[0] == VPD_TAG_END) {
1399 			if (opcode[0] == VPD_TAG_END) {
1400 				found = 1;
1401 			} else {
1402 				found = 0;
1403 			}
1404 			break;
1405 		}
1406 
1407 		/* check opcode */
1408 		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
1409 			/* found opcode requested */
1410 			found = 1;
1411 			break;
1412 		}
1413 
1414 		/*
1415 		 * Didn't find the opcode, so calculate start of
1416 		 * next tag. Depending on the current tag type,
1417 		 * the length field can be 1 or 2 bytes
1418 		 */
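		/*
		 * Concretely: the product-ID tag carries a 16-bit
		 * length in bytes 1-2 (low byte first) plus that many
		 * data bytes; LRT/LRTC tags are stepped over by their
		 * 3-byte header alone; all other (keyword) tags are a
		 * 2-byte keyword, a 1-byte length and that many data
		 * bytes.
		 */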
1419 		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
1420 			vpd += (vpd[2] << 8) + vpd[1] + 3;
1421 		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
1422 			vpd += 3;
1423 		} else {
			vpd += vpd[2] + 3;
1425 		}
1426 	}
1427 
1428 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1429 
1430 	return (found == 1 ? vpd : NULL);
1431 }
1432 
1433 /*
1434  * ql_vpd_lookup
 *	Return the VPD data for the requested VPD tag.
1436  *
1437  * Input:
1438  *	ha	= adapter state pointer.
1439  *	opcode	= VPD opcode to find (must be NULL terminated).
1440  *	bp	= Pointer to returned data buffer.
1441  *	bplen	= Length of returned data buffer.
1442  *
1443  * Returns:
1444  *	Length of data copied into returned data buffer.
1445  *		>0 = VPD data field (NULL terminated)
1446  *		 0 = no data.
1447  *		-1 = Could not find opcode in vpd buffer / error.
1448  *
1449  * Context:
1450  *	Kernel context.
1451  *
1452  * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1453  *
1454  */
1455 int32_t
1456 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp,
1457     int32_t bplen)
1458 {
1459 	uint8_t		*vpd;
1460 	uint8_t		*vpdbuf;
1461 	int32_t		len = -1;
1462 
1463 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1464 
1465 	if (opcode == NULL || bp == NULL || bplen < 1) {
1466 		EL(ha, "invalid parameter passed: opcode=%ph, "
1467 		    "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1468 		return (len);
1469 	}
1470 
1471 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1472 		return (len);
1473 	}
1474 
1475 	if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE,
1476 	    KM_SLEEP)) == NULL) {
1477 		EL(ha, "unable to allocate vpd memory\n");
1478 		return (len);
1479 	}
1480 
1481 	if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) {
1482 		kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1483 		EL(ha, "unable to retrieve VPD data\n");
1484 		return (len);
1485 	}
1486 
1487 	if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) {
1488 		/*
1489 		 * Found the tag
1490 		 */
1491 		if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1492 		    *opcode == VPD_TAG_LRTC) {
1493 			/*
1494 			 * we found it, but the tag doesn't have a data
1495 			 * field.
1496 			 */
1497 			len = 0;
1498 		} else if (!(strncmp((char *)vpd, (char *)
1499 		    VPD_TAG_PRODID, 1))) {
1500 			len = vpd[2] << 8;
1501 			len += vpd[1];
1502 		} else {
1503 			len = vpd[2];
1504 		}
1505 
1506 		/*
1507 		 * make sure that the vpd len doesn't exceed the
1508 		 * vpd end
1509 		 */
1510 		if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) {
1511 			EL(ha, "vpd tag len (%xh) exceeds vpd buffer "
1512 			    "length\n", len);
1513 			len = -1;
1514 		}
1515 	}
1516 
1517 	if (len >= 0) {
1518 		/*
		 * make sure we don't exceed the caller's buffer length
1520 		 */
1521 		if (len > bplen) {
1522 			len = bplen-1;
1523 		}
1524 
1525 		/* copy the data back */
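		/*
		 * The data field starts 3 bytes past the start of the
		 * tag (the tag/keyword bytes plus the length byte).
		 */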
1526 		(void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len);
1527 		bp[len] = 0;
1528 	} else {
1529 		/* error -- couldn't find tag */
1530 		bp[0] = 0;
1531 		if (opcode[1] != 0) {
1532 			EL(ha, "unable to find tag '%s'\n", opcode);
1533 		} else {
1534 			EL(ha, "unable to find tag '%xh'\n", opcode[0]);
1535 		}
1536 	}
1537 
1538 	kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1539 
1540 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1541 
1542 	return (len);
1543 }
1544 
1545 /*
1546  * ql_r_m_w_flash
1547  *	Read modify write from user space to flash.
1548  *
1549  * Input:
1550  *	ha:	adapter state pointer.
1551  *	dp:	source byte pointer.
1552  *	bc:	byte count.
1553  *	faddr:	flash byte address.
1554  *	mode:	flags.
1555  *
1556  * Returns:
1557  *	ql local function return status code.
1558  *
1559  * Context:
1560  *	Kernel context.
1561  */
1562 int
1563 ql_r_m_w_flash(ql_adapter_state_t *ha, caddr_t dp, uint32_t bc, uint32_t faddr,
1564     int mode)
1565 {
1566 	uint8_t		*bp;
1567 	uint32_t	xfer, bsize, saddr, ofst;
1568 	int		rval = 0;
1569 
1570 	QL_PRINT_9(CE_CONT, "(%d): started, dp=%ph, faddr=%xh, bc=%xh\n",
1571 	    ha->instance, (void *)dp, faddr, bc);
1572 
1573 	bsize = ha->xioctl->fdesc.block_size;
1574 	saddr = faddr & ~(bsize - 1);
1575 	ofst = faddr & (bsize - 1);
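	/*
	 * saddr is faddr rounded down to a flash block boundary and
	 * ofst is the byte offset within that block; e.g. with a
	 * (hypothetical) 64KB block size, faddr 0x12345 yields
	 * saddr 0x10000 and ofst 0x2345.  Partial blocks are read,
	 * patched with the caller's data and rewritten; whole blocks
	 * are written directly.
	 */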
1576 
1577 	if ((bp = kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
1578 		EL(ha, "kmem_zalloc=null\n");
1579 		return (QL_MEMORY_ALLOC_FAILED);
1580 	}
1581 
1582 	while (bc) {
1583 		xfer = bc > bsize ? bsize : bc;
1584 		if (ofst + xfer > bsize) {
1585 			xfer = bsize - ofst;
1586 		}
1587 		QL_PRINT_9(CE_CONT, "(%d): dp=%ph, saddr=%xh, bc=%xh, "
1588 		    "ofst=%xh, xfer=%xh\n", ha->instance, (void *)dp, saddr,
1589 		    bc, ofst, xfer);
1590 
1591 		if (ofst || xfer < bsize) {
1592 			/* Dump Flash sector. */
1593 			if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) !=
1594 			    QL_SUCCESS) {
1595 				EL(ha, "dump_flash status=%x\n", rval);
1596 				break;
1597 			}
1598 		}
1599 
1600 		/* Set new data. */
1601 		if ((rval = ddi_copyin(dp, (caddr_t)(bp + ofst), xfer,
1602 		    mode)) != 0) {
1603 			EL(ha, "ddi_copyin status=%xh, dp=%ph, ofst=%xh, "
1604 			    "xfer=%xh\n", rval, (void *)dp, ofst, xfer);
1605 			rval = QL_FUNCTION_FAILED;
1606 			break;
1607 		}
1608 
1609 		/* Write to flash. */
1610 		if ((rval = ql_load_fcode(ha, bp, bsize, saddr)) !=
1611 		    QL_SUCCESS) {
1612 			EL(ha, "load_flash status=%x\n", rval);
1613 			break;
1614 		}
1615 		bc -= xfer;
1616 		dp += xfer;
1617 		saddr += bsize;
1618 		ofst = 0;
1619 	}
1620 
1621 	kmem_free(bp, bsize);
1622 
1623 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1624 
1625 	return (rval);
1626 }
1627 
1628 /*
1629  * ql_adm_op
1630  *	Performs qladm utility operations
1631  *
1632  * Input:
1633  *	ha:	adapter state pointer.
1634  *	arg:	driver_op_t structure pointer.
1635  *	mode:	flags.
1636  *
1637  * Returns:
 *	0 = success, errno value = failure.
 *
1639  * Context:
1640  *	Kernel context.
1641  */
1642 static int
1643 ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode)
1644 {
1645 	ql_adm_op_t		dop;
1646 	int			rval = 0;
1647 
1648 	if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) {
1649 		EL(ha, "failed, driver_op_t ddi_copyin\n");
1650 		return (EFAULT);
1651 	}
1652 
1653 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%xh, buffer=%llx,"
1654 	    " length=%xh, option=%xh\n", ha->instance, dop.cmd, dop.buffer,
1655 	    dop.length, dop.option);
1656 
1657 	switch (dop.cmd) {
1658 	case QL_ADAPTER_INFO:
1659 		rval = ql_adm_adapter_info(ha, &dop, mode);
1660 		break;
1661 
1662 	case QL_EXTENDED_LOGGING:
1663 		rval = ql_adm_extended_logging(ha, &dop);
1664 		break;
1665 
1666 	case QL_LOOP_RESET:
1667 		rval = ql_adm_loop_reset(ha);
1668 		break;
1669 
1670 	case QL_DEVICE_LIST:
1671 		rval = ql_adm_device_list(ha, &dop, mode);
1672 		break;
1673 
1674 	case QL_PROP_UPDATE_INT:
1675 		rval = ql_adm_prop_update_int(ha, &dop, mode);
1676 		break;
1677 
1678 	case QL_UPDATE_PROPERTIES:
1679 		rval = ql_adm_update_properties(ha);
1680 		break;
1681 
1682 	case QL_FW_DUMP:
1683 		rval = ql_adm_fw_dump(ha, &dop, arg, mode);
1684 		break;
1685 
1686 	case QL_NVRAM_LOAD:
1687 		rval = ql_adm_nvram_load(ha, &dop, mode);
1688 		break;
1689 
1690 	case QL_NVRAM_DUMP:
1691 		rval = ql_adm_nvram_dump(ha, &dop, mode);
1692 		break;
1693 
1694 	case QL_FLASH_LOAD:
1695 		rval = ql_adm_flash_load(ha, &dop, mode);
1696 		break;
1697 
1698 	case QL_VPD_LOAD:
1699 		rval = ql_adm_vpd_load(ha, &dop, mode);
1700 		break;
1701 
1702 	case QL_VPD_DUMP:
1703 		rval = ql_adm_vpd_dump(ha, &dop, mode);
1704 		break;
1705 
1706 	case QL_VPD_GETTAG:
1707 		rval = ql_adm_vpd_gettag(ha, &dop, mode);
1708 		break;
1709 
1710 	case QL_UPD_FWMODULE:
1711 		rval = ql_adm_updfwmodule(ha, &dop, mode);
1712 		break;
1713 
1714 	default:
1715 		EL(ha, "unsupported driver op cmd: %x\n", dop.cmd);
1716 		return (EINVAL);
1717 	}
1718 
1719 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1720 
1721 	return (rval);
1722 }
1723 
1724 /*
1725  * ql_adm_adapter_info
1726  *	Performs qladm QL_ADAPTER_INFO command
1727  *
1728  * Input:
1729  *	ha:	adapter state pointer.
1730  *	dop:	ql_adm_op_t structure pointer.
1731  *	mode:	flags.
1732  *
1733  * Returns:
 *	0 = success, errno value = failure.
 *
1735  * Context:
1736  *	Kernel context.
1737  */
1738 static int
1739 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1740 {
1741 	ql_adapter_info_t	hba;
1742 	uint8_t			*dp;
1743 	uint32_t		length;
1744 	int			rval, i;
1745 
1746 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1747 
1748 	hba.device_id = ha->device_id;
1749 
1750 	dp = CFG_IST(ha, CFG_CTRL_24258081) ?
1751 	    &ha->init_ctrl_blk.cb24.port_name[0] :
1752 	    &ha->init_ctrl_blk.cb.port_name[0];
1753 	bcopy(dp, hba.wwpn, 8);
1754 
1755 	hba.d_id = ha->d_id.b24;
1756 
1757 	if (ha->xioctl->fdesc.flash_size == 0 &&
1758 	    !(CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id)) {
1759 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1760 			EL(ha, "ql_stall_driver failed\n");
1761 			return (EBUSY);
1762 		}
1763 
1764 		if ((rval = ql_setup_fcache(ha)) != QL_SUCCESS) {
			EL(ha, "ql_setup_fcache failed=%xh\n", rval);
1766 			if (rval == QL_FUNCTION_TIMEOUT) {
1767 				return (EBUSY);
1768 			}
1769 			return (EIO);
1770 		}
1771 
1772 		/* Resume I/O */
1773 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
1774 			ql_restart_driver(ha);
1775 		} else {
1776 			EL(ha, "isp_abort_needed for restart\n");
1777 			ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
1778 			    DRIVER_STALL);
1779 		}
1780 	}
1781 	hba.flash_size = ha->xioctl->fdesc.flash_size;
1782 
1783 	(void) strcpy(hba.driver_ver, QL_VERSION);
1784 
1785 	(void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version,
1786 	    ha->fw_minor_version, ha->fw_subminor_version);
1787 
1788 	bzero(hba.fcode_ver, sizeof (hba.fcode_ver));
1789 
1790 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
1791 	rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
1792 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i);
1793 	length = i;
1794 	if (rval != DDI_PROP_SUCCESS) {
1795 		EL(ha, "failed, ddi_getlongprop=%xh\n", rval);
1796 	} else {
1797 		if (length > (uint32_t)sizeof (hba.fcode_ver)) {
1798 			length = sizeof (hba.fcode_ver) - 1;
1799 		}
1800 		bcopy((void *)dp, (void *)hba.fcode_ver, length);
1801 		kmem_free(dp, length);
1802 	}
1803 
1804 	if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer,
1805 	    dop->length, mode) != 0) {
1806 		EL(ha, "failed, ddi_copyout\n");
1807 		return (EFAULT);
1808 	}
1809 
1810 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1811 
1812 	return (0);
1813 }
1814 
1815 /*
1816  * ql_adm_extended_logging
1817  *	Performs qladm QL_EXTENDED_LOGGING command
1818  *
1819  * Input:
1820  *	ha:	adapter state pointer.
1821  *	dop:	ql_adm_op_t structure pointer.
1822  *
1823  * Returns:
 *	0 = success, errno value = failure.
 *
1825  * Context:
1826  *	Kernel context.
1827  */
1828 static int
1829 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop)
1830 {
1831 	char	prop_name[MAX_PROP_LENGTH];
1832 	int	rval;
1833 
1834 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1835 
1836 	(void) sprintf(prop_name, "hba%d-extended-logging", ha->instance);
1837 
1838 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1839 	rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
1840 	    (int)dop->option);
1841 	if (rval != DDI_PROP_SUCCESS) {
1842 		EL(ha, "failed, prop_update = %xh\n", rval);
1843 		return (EINVAL);
1844 	} else {
1845 		dop->option ?
1846 		    (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) :
1847 		    (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING);
1848 	}
1849 
1850 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1851 
1852 	return (0);
1853 }
1854 
1855 /*
1856  * ql_adm_loop_reset
1857  *	Performs qladm QL_LOOP_RESET command
1858  *
1859  * Input:
1860  *	ha:	adapter state pointer.
1861  *
1862  * Returns:
 *	0 = success, errno value = failure.
 *
1864  * Context:
1865  *	Kernel context.
1866  */
1867 static int
1868 ql_adm_loop_reset(ql_adapter_state_t *ha)
1869 {
1870 	int	rval;
1871 
1872 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1873 
1874 	if (ha->task_daemon_flags & LOOP_DOWN) {
1875 		(void) ql_full_login_lip(ha);
1876 	} else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) {
		EL(ha, "failed, ql_full_login_lip=%xh\n", rval);
1878 		return (EIO);
1879 	}
1880 
1881 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1882 
1883 	return (0);
1884 }
1885 
1886 /*
1887  * ql_adm_device_list
1888  *	Performs qladm QL_DEVICE_LIST command
1889  *
1890  * Input:
1891  *	ha:	adapter state pointer.
1892  *	dop:	ql_adm_op_t structure pointer.
1893  *	mode:	flags.
1894  *
1895  * Returns:
 *	0 = success, errno value = failure.
 *
1897  * Context:
1898  *	Kernel context.
1899  */
1900 static int
1901 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1902 {
1903 	ql_device_info_t	dev;
1904 	ql_link_t		*link;
1905 	ql_tgt_t		*tq;
1906 	uint32_t		index, cnt;
1907 
1908 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1909 
1910 	cnt = 0;
1911 	dev.address = 0xffffffff;
1912 
1913 	/* Scan port list for requested target and fill in the values */
1914 	for (link = NULL, index = 0;
1915 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1916 		for (link = ha->dev[index].first; link != NULL;
1917 		    link = link->next) {
1918 			tq = link->base_address;
1919 
1920 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1921 				continue;
1922 			}
1923 			if (cnt != dop->option) {
1924 				cnt++;
1925 				continue;
1926 			}
1927 			/* fill in the values */
1928 			bcopy(tq->port_name, dev.wwpn, 8);
1929 			dev.address = tq->d_id.b24;
1930 			dev.loop_id = tq->loop_id;
1931 			if (tq->flags & TQF_TAPE_DEVICE) {
1932 				dev.type = FCT_TAPE;
1933 			} else if (tq->flags & TQF_INITIATOR_DEVICE) {
1934 				dev.type = FCT_INITIATOR;
1935 			} else {
1936 				dev.type = FCT_TARGET;
1937 			}
1938 			break;
1939 		}
1940 	}
1941 
1942 	if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer,
1943 	    dop->length, mode) != 0) {
1944 		EL(ha, "failed, ddi_copyout\n");
1945 		return (EFAULT);
1946 	}
1947 
1948 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1949 
1950 	return (0);
1951 }
1952 
1953 /*
1954  * ql_adm_update_properties
1955  *	Performs qladm QL_UPDATE_PROPERTIES command
1956  *
1957  * Input:
1958  *	ha:	adapter state pointer.
1959  *
1960  * Returns:
 *	0 = success.
 *
1962  * Context:
1963  *	Kernel context.
1964  */
1965 static int
1966 ql_adm_update_properties(ql_adapter_state_t *ha)
1967 {
1968 	ql_comb_init_cb_t	init_ctrl_blk;
1969 	ql_comb_ip_init_cb_t	ip_init_ctrl_blk;
1970 
1971 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1972 
1973 	/* Stall driver instance. */
1974 	(void) ql_stall_driver(ha, 0);
1975 
1976 	/* Save init control blocks. */
1977 	bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t));
1978 	bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1979 	    sizeof (ql_comb_ip_init_cb_t));
1980 
	/* Update PCI configuration. */
1982 	(void) ql_pci_sbus_config(ha);
1983 
1984 	/* Get configuration properties. */
1985 	(void) ql_nvram_config(ha);
1986 
1987 	/* Check for init firmware required. */
1988 	if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk,
1989 	    sizeof (ql_comb_init_cb_t)) != 0 ||
1990 	    bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1991 	    sizeof (ql_comb_ip_init_cb_t)) != 0) {
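		/*
		 * A change in either init control block means the
		 * firmware must be re-initialized; flagging
		 * ISP_ABORT_NEEDED has the task daemon reset the ISP
		 * and bring it back up with the new parameters.
		 */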
1992 
1993 		EL(ha, "isp_abort_needed\n");
1994 		ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1995 		TASK_DAEMON_LOCK(ha);
1996 		ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED;
1997 		TASK_DAEMON_UNLOCK(ha);
1998 	}
1999 
2000 	/* Update AEN queue. */
2001 	if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
2002 		ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
2003 	}
2004 
2005 	/* Restart driver instance. */
2006 	ql_restart_driver(ha);
2007 
2008 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2009 
2010 	return (0);
2011 }
2012 
2013 /*
2014  * ql_adm_prop_update_int
2015  *	Performs qladm QL_PROP_UPDATE_INT command
2016  *
2017  * Input:
2018  *	ha:	adapter state pointer.
2019  *	dop:	ql_adm_op_t structure pointer.
2020  *	mode:	flags.
2021  *
2022  * Returns:
 *	0 = success, errno value = failure.
 *
2024  * Context:
2025  *	Kernel context.
2026  */
2027 static int
2028 ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2029 {
2030 	char	*prop_name;
2031 	int	rval;
2032 
2033 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2034 
2035 	prop_name = kmem_zalloc(dop->length, KM_SLEEP);
2036 	if (prop_name == NULL) {
2037 		EL(ha, "failed, kmem_zalloc\n");
2038 		return (ENOMEM);
2039 	}
2040 
2041 	if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length,
2042 	    mode) != 0) {
2043 		EL(ha, "failed, prop_name ddi_copyin\n");
2044 		kmem_free(prop_name, dop->length);
2045 		return (EFAULT);
2046 	}
2047 
2048 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2049 	if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
2050 	    (int)dop->option)) != DDI_PROP_SUCCESS) {
2051 		EL(ha, "failed, prop_update=%xh\n", rval);
2052 		kmem_free(prop_name, dop->length);
2053 		return (EINVAL);
2054 	}
2055 
2056 	kmem_free(prop_name, dop->length);
2057 
2058 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2059 
2060 	return (0);
2061 }
2062 
2063 /*
2064  * ql_adm_fw_dump
2065  *	Performs qladm QL_FW_DUMP command
2066  *
2067  * Input:
2068  *	ha:	adapter state pointer.
2069  *	dop:	ql_adm_op_t structure pointer.
2070  *	udop:	user space ql_adm_op_t structure pointer.
2071  *	mode:	flags.
2072  *
2073  * Returns:
 *	0 = success, errno value = failure.
 *
2075  * Context:
2076  *	Kernel context.
2077  */
2078 static int
2079 ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode)
2080 {
2081 	caddr_t	dmp;
2082 
2083 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2084 
2085 	if (dop->length < ha->risc_dump_size) {
2086 		EL(ha, "failed, incorrect length=%xh, size=%xh\n",
2087 		    dop->length, ha->risc_dump_size);
2088 		return (EINVAL);
2089 	}
2090 
2091 	if (ha->ql_dump_state & QL_DUMP_VALID) {
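		/*
		 * The binary dump is converted to ASCII form before
		 * being copied out, and QL_DUMP_UPLOADED records that
		 * it has been retrieved.
		 */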
2092 		dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);
2093 		if (dmp == NULL) {
2094 			EL(ha, "failed, kmem_zalloc\n");
2095 			return (ENOMEM);
2096 		}
2097 
2098 		dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp);
2099 		if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer,
2100 		    dop->length, mode) != 0) {
2101 			EL(ha, "failed, ddi_copyout\n");
2102 			kmem_free(dmp, ha->risc_dump_size);
2103 			return (EFAULT);
2104 		}
2105 
2106 		kmem_free(dmp, ha->risc_dump_size);
2107 		ha->ql_dump_state |= QL_DUMP_UPLOADED;
2108 
2109 	} else {
2110 		EL(ha, "failed, no dump file\n");
2111 		dop->length = 0;
2112 	}
2113 
2114 	if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) {
2115 		EL(ha, "failed, driver_op_t ddi_copyout\n");
2116 		return (EFAULT);
2117 	}
2118 
2119 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2120 
2121 	return (0);
2122 }
2123 
2124 /*
2125  * ql_adm_nvram_dump
2126  *	Performs qladm QL_NVRAM_DUMP command
2127  *
2128  * Input:
2129  *	ha:	adapter state pointer.
2130  *	dop:	ql_adm_op_t structure pointer.
2131  *	mode:	flags.
2132  *
2133  * Returns:
 *	0 = success, errno value = failure.
 *
2135  * Context:
2136  *	Kernel context.
2137  */
2138 static int
2139 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2140 {
2141 	int		rval;
2142 
2143 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2144 
2145 	if (dop->length < ha->nvram_cache->size) {
2146 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2147 		    ha->nvram_cache->size);
2148 		return (EINVAL);
2149 	}
2150 
	if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer,
	    mode)) != 0) {
		EL(ha, "failed, ql_nv_util_dump\n");
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}

/*
 * ql_adm_nvram_load
 *	Performs qladm QL_NVRAM_LOAD command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *	0 = success, errno value = failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (dop->length < ha->nvram_cache->size) {
		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
		    ha->nvram_cache->size);
		return (EINVAL);
	}

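	/* Load the caller-supplied NVRAM image into the adapter. */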
	if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer,
	    mode)) != 0) {
		EL(ha, "failed, ql_nv_util_load\n");
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}

/*
 * ql_adm_flash_load
 *	Performs qladm QL_FLASH_LOAD command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *	0 = success, errno value = failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	uint8_t	*dp;
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((dp = kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}

	if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length,
	    mode) != 0) {
		EL(ha, "ddi_copyin failed\n");
		kmem_free(dp, dop->length);
		return (EFAULT);
	}

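	/* Quiesce driver activity while the flash is being rewritten. */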
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(dp, dop->length);
		return (EBUSY);
	}

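	/*
	 * 24xx-class and later ISPs (CFG_CTRL_24258081) use the extended
	 * flash write routine; older ISPs use the legacy routine.
	 */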
	rval = (CFG_IST(ha, CFG_CTRL_24258081) ?
	    ql_24xx_load_flash(ha, dp, dop->length, dop->option) :
	    ql_load_flash(ha, dp, dop->length));

	ql_restart_driver(ha);

	kmem_free(dp, dop->length);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed\n");
		return (EIO);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * ql_adm_vpd_dump
 *	Performs qladm QL_VPD_DUMP command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *	0 = success, errno value = failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		EL(ha, "hba does not support VPD\n");
		return (EINVAL);
	}

	if (dop->length < QL_24XX_VPD_SIZE) {
		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
		    QL_24XX_VPD_SIZE);
		return (EINVAL);
	}

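	/* Copy the adapter VPD contents out to the caller's buffer. */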
	if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode))
	    != 0) {
		EL(ha, "failed, ql_vpd_dump\n");
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}

/*
 * ql_adm_vpd_load
 *	Performs qladm QL_VPD_LOAD command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *	0 = success, errno value = failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		EL(ha, "hba does not support VPD\n");
		return (EINVAL);
	}

	if (dop->length < QL_24XX_VPD_SIZE) {
		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
		    QL_24XX_VPD_SIZE);
		return (EINVAL);
	}

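	/* Write the caller-supplied VPD image to the adapter. */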
	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode))
	    != 0) {
		EL(ha, "failed, ql_vpd_load\n");
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}

/*
 * ql_adm_vpd_gettag
 *	Performs qladm QL_VPD_GETTAG command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *	0 = success, errno value = failure.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	int		rval = 0;
	uint8_t		*lbuf;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		EL(ha, "hba does not support VPD\n");
		return (EINVAL);
	}

	if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
		EL(ha, "mem alloc failure of %xh bytes\n", dop->length);
		rval = EFAULT;
	} else {
		if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf,
		    dop->length, mode) != 0) {
			EL(ha, "ddi_copyin failed\n");
			kmem_free(lbuf, dop->length);
			return (EFAULT);
		}

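		/*
		 * The buffer is reused in place: on input it holds the VPD
		 * tag name and ql_vpd_lookup overwrites it with the tag's
		 * value, which is then copied back to the caller.
		 */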
		if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t)
		    dop->length)) < 0) {
			EL(ha, "failed vpd_lookup\n");
		} else {
			if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer,
			    strlen((int8_t *)lbuf)+1, mode) != 0) {
				EL(ha, "failed, ddi_copyout\n");
				rval = EFAULT;
			} else {
				rval = 0;
			}
		}
		kmem_free(lbuf, dop->length);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_adm_updfwmodule
 *	Performs qladm QL_UPD_FWMODULE command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *	0 = success, errno value = failure.
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
static int
ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	int			rval = DDI_SUCCESS;
	ql_link_t		*link;
	ql_adapter_state_t	*ha2 = NULL;
	uint16_t		fw_class = (uint16_t)dop->option;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* close all holds on the f/w module so its ref count drops to zero */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha2 = link->base_address;
		if (fw_class == ha2->fw_class) {
			if ((rval = ddi_modclose(ha2->fw_module)) !=
			    DDI_SUCCESS) {
				EL(ha2, "modclose rval=%xh\n", rval);
				break;
			}
			ha2->fw_module = NULL;
		}
	}

	/* reload the f/w modules */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha2 = link->base_address;

		if ((fw_class == ha2->fw_class) && (ha2->fw_module == NULL)) {
			if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) !=
			    QL_SUCCESS) {
				EL(ha2, "unable to load f/w module: '%x' "
				    "(rval=%xh)\n", ha2->fw_class, rval);
				rval = EFAULT;
			} else {
				EL(ha2, "f/w module updated: '%x'\n",
				    ha2->fw_class);
				rval = 0;
			}

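			/*
			 * Request an ISP reset so the adapter reinitializes
			 * with the newly loaded f/w module.
			 */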
			EL(ha2, "isp abort needed (%d)\n", ha->instance);

			ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0);
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}