1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 #pragma ident	"Copyright 2010 QLogic Corporation; ql_ioctl.c"
29 
30 /*
31  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32  * Fibre Channel Adapter (FCA) driver IOCTL source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_ioctl.h>
49 #include <ql_mbx.h>
50 #include <ql_xioctl.h>
51 
52 /*
53  * Local Function Prototypes.
54  */
55 static int ql_busy_notification(ql_adapter_state_t *);
56 static int ql_idle_notification(ql_adapter_state_t *);
57 static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features);
58 static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features);
59 static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha);
60 static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr,
61     uint16_t value);
62 static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t);
63 static int ql_adm_op(ql_adapter_state_t *, void *, int);
64 static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int);
65 static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *);
66 static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int);
67 static int ql_adm_update_properties(ql_adapter_state_t *);
68 static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int);
69 static int ql_adm_loop_reset(ql_adapter_state_t *);
70 static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int);
71 static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
72 static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int);
73 static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int);
74 static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
75 static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int);
76 static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int);
77 static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int);
78 static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *);
79 
80 /* ************************************************************************ */
81 /*				cb_ops functions			    */
82 /* ************************************************************************ */
83 
84 /*
85  * ql_open
86  *	opens device
87  *
88  * Input:
89  *	dev_p = device pointer
90  *	flags = open flags
 *	otyp = open type
92  *	cred_p = credentials pointer
93  *
94  * Returns:
95  *	0 = success
96  *
97  * Context:
98  *	Kernel context.
99  */
100 /* ARGSUSED */
101 int
102 ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
103 {
104 	ql_adapter_state_t	*ha;
105 	int			rval = 0;
106 
107 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
108 	if (ha == NULL) {
109 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
110 		return (ENXIO);
111 	}
112 
113 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
114 
115 	/* Allow only character opens */
116 	if (otyp != OTYP_CHR) {
117 		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
118 		    ha->instance);
119 		return (EINVAL);
120 	}
121 
122 	ADAPTER_STATE_LOCK(ha);
123 	if (flags & FEXCL && ha->flags & QL_OPENED) {
124 		ADAPTER_STATE_UNLOCK(ha);
125 		rval = EBUSY;
126 	} else {
127 		ha->flags |= QL_OPENED;
128 		ADAPTER_STATE_UNLOCK(ha);
129 	}
130 
131 	if (rval != 0) {
132 		EL(ha, "failed, rval = %xh\n", rval);
133 	} else {
134 		/*EMPTY*/
135 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
136 	}
137 	return (rval);
138 }
139 
140 /*
141  * ql_close
 *	closes device
 *
 * Input:
 *	dev = device number
 *	flags = open flags
 *	otyp = open type
148  *	cred_p = credentials pointer
149  *
150  * Returns:
151  *	0 = success
152  *
153  * Context:
154  *	Kernel context.
155  */
156 /* ARGSUSED */
157 int
158 ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
159 {
160 	ql_adapter_state_t	*ha;
161 	int			rval = 0;
162 
163 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
164 	if (ha == NULL) {
165 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
166 		return (ENXIO);
167 	}
168 
169 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
170 
171 	if (otyp != OTYP_CHR) {
172 		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
173 		    ha->instance);
174 		return (EINVAL);
175 	}
176 
177 	ADAPTER_STATE_LOCK(ha);
178 	ha->flags &= ~QL_OPENED;
179 	ADAPTER_STATE_UNLOCK(ha);
180 
181 	if (rval != 0) {
182 		EL(ha, "failed, rval = %xh\n", rval);
183 	} else {
184 		/*EMPTY*/
185 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
186 	}
187 	return (rval);
188 }
189 
190 /*
191  * ql_ioctl
192  *	control a character device
193  *
194  * Input:
195  *	dev = device number
196  *	cmd = function to perform
197  *	arg = data type varies with request
198  *	mode = flags
199  *	cred_p = credentials pointer
200  *	rval_p = pointer to result value
201  *
202  * Returns:
203  *	0 = success
204  *
205  * Context:
206  *	Kernel context.
207  */
208 /* ARGSUSED */
209 int
210 ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
211     int *rval_p)
212 {
213 	ql_adapter_state_t	*ha;
214 	int			rval = 0;
215 
216 	if (ddi_in_panic()) {
217 		QL_PRINT_2(CE_CONT, "ql_ioctl: ddi_in_panic exit\n");
218 		return (ENOPROTOOPT);
219 	}
220 
221 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
222 	if (ha == NULL)	{
223 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
224 		return (ENXIO);
225 	}
226 
227 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
228 
229 	/*
	 * Quick clean exit for qla2x00 foapi calls, which are
	 * not supported in qlc.
232 	 */
233 	if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
234 		QL_PRINT_9(CE_CONT, "failed, fo api not supported\n");
235 		return (ENOTTY);
236 	}
237 
238 	/* PWR management busy. */
239 	rval = ql_busy_notification(ha);
240 	if (rval != FC_SUCCESS)	 {
241 		EL(ha, "failed, ql_busy_notification\n");
242 		return (ENXIO);
243 	}
244 
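	/*
	 * Pass the request to the extended ioctl handler first; fall
	 * back to the locally handled commands below only if it does
	 * not claim the command.
	 */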
245 	rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
246 	if (rval == ENOPROTOOPT || rval == EINVAL) {
247 		switch (cmd) {
248 		case QL_GET_ADAPTER_FEATURE_BITS: {
249 			uint16_t bits;
250 
251 			rval = ql_get_feature_bits(ha, &bits);
252 
253 			if (!rval && ddi_copyout((void *)&bits, (void *)arg,
254 			    sizeof (bits), mode)) {
255 				rval = EFAULT;
256 			}
257 			break;
258 		}
259 
260 		case QL_SET_ADAPTER_FEATURE_BITS: {
261 			uint16_t bits;
262 
263 			if (ddi_copyin((void *)arg, (void *)&bits,
264 			    sizeof (bits), mode)) {
265 				rval = EFAULT;
266 				break;
267 			}
268 
269 			rval = ql_set_feature_bits(ha, bits);
270 			break;
271 		}
272 
273 		case QL_SET_ADAPTER_NVRAM_DEFAULTS:
274 			rval = ql_set_nvram_adapter_defaults(ha);
275 			break;
276 
277 		case QL_UTIL_LOAD:
278 			rval = ql_nv_util_load(ha, (void *)arg, mode);
279 			break;
280 
281 		case QL_UTIL_DUMP:
282 			rval = ql_nv_util_dump(ha, (void *)arg, mode);
283 			break;
284 
285 		case QL_ADM_OP:
286 			rval = ql_adm_op(ha, (void *)arg, mode);
287 			break;
288 
289 		default:
290 			EL(ha, "unknown command = %d\n", cmd);
291 			rval = ENOTTY;
292 			break;
293 		}
294 	}
295 
296 	/* PWR management idle. */
297 	(void) ql_idle_notification(ha);
298 
299 	if (rval != 0) {
300 		/*
		 * Don't show failures caused by pps polling for
		 * non-existent virtual ports.
303 		 */
304 		if (cmd != EXT_CC_VPORT_CMD) {
305 			EL(ha, "failed, cmd=%d rval=%d\n", cmd, rval);
306 		}
307 	} else {
308 		/*EMPTY*/
309 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
310 	}
311 	return (rval);
312 }
313 
314 /*
315  * ql_busy_notification
316  *	Adapter busy notification.
317  *
318  * Input:
319  *	ha = adapter state pointer.
320  *
321  * Returns:
322  *	FC_SUCCESS
323  *	FC_FAILURE
324  *
325  * Context:
326  *	Kernel context.
327  */
328 static int
329 ql_busy_notification(ql_adapter_state_t *ha)
330 {
331 	if (!ha->pm_capable) {
332 		return (FC_SUCCESS);
333 	}
334 
335 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
336 
337 	QL_PM_LOCK(ha);
338 	ha->busy++;
339 	QL_PM_UNLOCK(ha);
340 
341 	if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
342 		QL_PM_LOCK(ha);
343 		ha->busy--;
344 		QL_PM_UNLOCK(ha);
345 
346 		EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
347 		return (FC_FAILURE);
348 	}
349 
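	/* Make sure the adapter is at full power before proceeding. */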
350 	QL_PM_LOCK(ha);
351 	if (ha->power_level != PM_LEVEL_D0) {
352 		QL_PM_UNLOCK(ha);
353 		if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
354 			QL_PM_LOCK(ha);
355 			ha->busy--;
356 			QL_PM_UNLOCK(ha);
357 			return (FC_FAILURE);
358 		}
359 	} else {
360 		QL_PM_UNLOCK(ha);
361 	}
362 
363 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
364 
365 	return (FC_SUCCESS);
366 }
367 
368 /*
369  * ql_idle_notification
370  *	Adapter idle notification.
371  *
372  * Input:
373  *	ha = adapter state pointer.
374  *
375  * Returns:
376  *	FC_SUCCESS
377  *	FC_FAILURE
378  *
379  * Context:
380  *	Kernel context.
381  */
382 static int
383 ql_idle_notification(ql_adapter_state_t *ha)
384 {
385 	if (!ha->pm_capable) {
386 		return (FC_SUCCESS);
387 	}
388 
389 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
390 
391 	if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) {
392 		EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE);
393 		return (FC_FAILURE);
394 	}
395 
396 	QL_PM_LOCK(ha);
397 	ha->busy--;
398 	QL_PM_UNLOCK(ha);
399 
400 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
401 
402 	return (FC_SUCCESS);
403 }
404 
405 /*
406  * Get adapter feature bits from NVRAM
407  */
408 static int
409 ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features)
410 {
411 	int			count;
412 	volatile uint16_t	data;
413 	uint32_t		nv_cmd;
414 	uint32_t		start_addr;
415 	int			rval;
416 	uint32_t		offset = offsetof(nvram_t, adapter_features);
417 
418 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
419 
420 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
421 		EL(ha, "Not supported for 24xx\n");
422 		return (EINVAL);
423 	}
424 
425 	/*
	 * The offset can't be greater than what fits in 8 bits, and
	 * the following code breaks if the offset isn't on a
	 * 2-byte boundary.
429 	 */
430 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
431 	if (rval != QL_SUCCESS) {
432 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
433 		return (EIO);
434 	}
435 
436 	/*
	 * Build the serial command: the most significant 3 bits hold the
	 * read opcode, followed by the 8-bit offset at which the read
	 * will be performed.
440 	 */
441 	offset >>= 1;
442 	offset += start_addr;
443 	nv_cmd = (offset << 16) | NV_READ_OP;
444 	nv_cmd <<= 5;
445 
446 	/*
447 	 * Select the chip and feed the command and address
448 	 */
449 	for (count = 0; count < 11; count++) {
450 		if (nv_cmd & BIT_31) {
451 			ql_nv_write(ha, NV_DATA_OUT);
452 		} else {
453 			ql_nv_write(ha, 0);
454 		}
455 		nv_cmd <<= 1;
456 	}
457 
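	/* Clock in the 16 data bits of the feature word, MSB first. */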
458 	*features = 0;
459 	for (count = 0; count < 16; count++) {
460 		WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK);
461 		ql_nv_delay();
462 
463 		data = RD16_IO_REG(ha, nvram);
464 		*features <<= 1;
465 		if (data & NV_DATA_IN) {
466 			*features = (uint16_t)(*features | 0x1);
467 		}
468 
469 		WRT16_IO_REG(ha, nvram, NV_SELECT);
470 		ql_nv_delay();
471 	}
472 
473 	/*
474 	 * Deselect the chip
475 	 */
476 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
477 
478 	ql_release_nvram(ha);
479 
480 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
481 
482 	return (0);
483 }
484 
485 /*
486  * Set adapter feature bits in NVRAM
487  */
488 static int
489 ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features)
490 {
491 	int		rval;
492 	uint32_t	count;
493 	nvram_t		*nv;
494 	uint16_t	*wptr;
495 	uint8_t		*bptr;
496 	uint8_t		csum;
497 	uint32_t	start_addr;
498 
499 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
500 
501 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
502 		EL(ha, "Not supported for 24xx\n");
503 		return (EINVAL);
504 	}
505 
506 	nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
507 	if (nv == NULL) {
508 		EL(ha, "failed, kmem_zalloc\n");
509 		return (ENOMEM);
510 	}
511 
512 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
513 	if (rval != QL_SUCCESS) {
514 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
515 		kmem_free(nv, sizeof (*nv));
516 		return (EIO);
517 	}
518 	rval = 0;
519 
520 	/*
	 * Read the whole NVRAM and accumulate the checksum.
522 	 */
523 	wptr = (uint16_t *)nv;
524 	csum = 0;
525 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
526 		*wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
527 		csum = (uint8_t)(csum + (uint8_t)*wptr);
528 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
529 		wptr++;
530 	}
531 
532 	/*
	 * A valid image's bytes (including the stored checksum) sum to
	 * zero; if they don't, fail right here.
534 	 */
535 	if (csum) {
536 		kmem_free(nv, sizeof (*nv));
537 		ql_release_nvram(ha);
538 		return (EBADF);
539 	}
540 
541 	nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
542 	nv->adapter_features[1] = (uint8_t)(features & 0xFF);
543 
544 	/*
	 * Recompute the checksum now that the feature bytes have changed.
546 	 */
547 	bptr = (uint8_t *)nv;
548 	for (count = 0; count < sizeof (nvram_t) - 1; count++) {
549 		csum = (uint8_t)(csum + *bptr++);
550 	}
551 	csum = (uint8_t)(~csum + 1);
552 	nv->checksum = csum;
553 
554 	/*
555 	 * Now load the NVRAM
556 	 */
557 	wptr = (uint16_t *)nv;
558 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
559 		ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
560 	}
561 
562 	/*
563 	 * Read NVRAM and verify the contents
564 	 */
565 	wptr = (uint16_t *)nv;
566 	csum = 0;
567 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
568 		if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
569 			rval = EIO;
570 			break;
571 		}
572 		csum = (uint8_t)(csum + (uint8_t)*wptr);
573 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
574 		wptr++;
575 	}
576 
577 	if (csum) {
578 		rval = EINVAL;
579 	}
580 
581 	kmem_free(nv, sizeof (*nv));
582 	ql_release_nvram(ha);
583 
584 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
585 
586 	return (rval);
587 }
588 
589 /*
 * ql_set_nvram_adapter_defaults
 *	Sets the adapter NVRAM contents to driver default values.
 *	TODO: update just the feature bits and checksum in NVRAM.
591  */
592 static int
593 ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
594 {
595 	int		rval;
596 	uint32_t	count;
597 	uint32_t	start_addr;
598 
599 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
600 
601 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
602 	if (rval != QL_SUCCESS) {
603 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
604 		return (EIO);
605 	}
606 	rval = 0;
607 
608 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
609 		nvram_24xx_t	*nv;
610 		uint32_t	*longptr;
611 		uint32_t	csum = 0;
612 
613 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
614 		if (nv == NULL) {
615 			EL(ha, "failed, kmem_zalloc\n");
616 			return (ENOMEM);
617 		}
618 
619 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
620 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
621 
622 		nv->version[0] = 1;
623 		nv->max_frame_length[1] = 8;
624 		nv->execution_throttle[0] = 16;
625 		nv->login_retry_count[0] = 8;
626 
627 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
628 		nv->firmware_options_1[1] = BIT_5;
629 		nv->firmware_options_2[0] = BIT_5;
630 		nv->firmware_options_2[1] = BIT_4;
631 		nv->firmware_options_3[1] = BIT_6;
632 
633 		/*
634 		 * Set default host adapter parameters
635 		 */
636 		nv->host_p[0] = BIT_4 | BIT_1;
637 		nv->host_p[1] = BIT_3 | BIT_2;
638 		nv->reset_delay = 5;
639 		nv->max_luns_per_target[0] = 128;
640 		nv->port_down_retry_count[0] = 30;
641 		nv->link_down_timeout[0] = 30;
642 
643 		/*
		 * Compute the checksum now
645 		 */
646 		longptr = (uint32_t *)nv;
647 		csum = 0;
648 		for (count = 0; count < (sizeof (nvram_24xx_t)/4)-1; count++) {
649 			csum += *longptr;
650 			longptr++;
651 		}
652 		csum = (uint32_t)(~csum + 1);
		LITTLE_ENDIAN_32(&csum);
654 		*longptr = csum;
655 
656 		/*
657 		 * Now load the NVRAM
658 		 */
659 		longptr = (uint32_t *)nv;
660 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
661 			(void) ql_24xx_load_nvram(ha,
662 			    (uint32_t)(count + start_addr), *longptr++);
663 		}
664 
665 		/*
666 		 * Read NVRAM and verify the contents
667 		 */
668 		csum = 0;
669 		longptr = (uint32_t *)nv;
670 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
671 			rval = ql_24xx_read_flash(ha, count + start_addr,
672 			    longptr);
673 			if (rval != QL_SUCCESS) {
674 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
675 				break;
676 			}
677 			csum += *longptr;
678 		}
679 
680 		if (csum) {
681 			rval = EINVAL;
682 		}
683 		kmem_free(nv, sizeof (nvram_24xx_t));
684 	} else {
685 		nvram_t		*nv;
686 		uint16_t	*wptr;
687 		uint8_t		*bptr;
688 		uint8_t		csum;
689 
690 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
691 		if (nv == NULL) {
692 			EL(ha, "failed, kmem_zalloc\n");
693 			return (ENOMEM);
694 		}
695 		/*
696 		 * Set default initialization control block.
697 		 */
698 		nv->parameter_block_version = ICB_VERSION;
699 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
700 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
701 
702 		nv->max_frame_length[1] = 4;
703 		nv->max_iocb_allocation[1] = 1;
704 		nv->execution_throttle[0] = 16;
705 		nv->login_retry_count = 8;
706 		nv->port_name[0] = 33;
707 		nv->port_name[3] = 224;
708 		nv->port_name[4] = 139;
709 		nv->login_timeout = 4;
710 
711 		/*
712 		 * Set default host adapter parameters
713 		 */
714 		nv->host_p[0] = BIT_1;
715 		nv->host_p[1] = BIT_2;
716 		nv->reset_delay = 5;
717 		nv->port_down_retry_count = 8;
718 		nv->maximum_luns_per_target[0] = 8;
719 
720 		/*
		 * Compute the checksum now
722 		 */
723 		bptr = (uint8_t *)nv;
724 		csum = 0;
725 		for (count = 0; count < sizeof (nvram_t) - 1; count++) {
726 			csum = (uint8_t)(csum + *bptr++);
727 		}
728 		csum = (uint8_t)(~csum + 1);
729 		nv->checksum = csum;
730 
731 		/*
732 		 * Now load the NVRAM
733 		 */
734 		wptr = (uint16_t *)nv;
735 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
736 			ql_load_nvram(ha, (uint8_t)(count + start_addr),
737 			    *wptr++);
738 		}
739 
740 		/*
741 		 * Read NVRAM and verify the contents
742 		 */
743 		wptr = (uint16_t *)nv;
744 		csum = 0;
745 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
746 			if (ql_get_nvram_word(ha, count + start_addr) !=
747 			    *wptr) {
748 				rval = EIO;
749 				break;
750 			}
751 			csum = (uint8_t)(csum + (uint8_t)*wptr);
752 			csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
753 			wptr++;
754 		}
755 		if (csum) {
756 			rval = EINVAL;
757 		}
758 		kmem_free(nv, sizeof (*nv));
759 	}
760 	ql_release_nvram(ha);
761 
762 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
763 
764 	return (rval);
765 }
766 
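/*
 * ql_load_nvram
 *	Writes a 16-bit word to serial NVRAM; the target location is
 *	erased first and NVRAM writes are re-disabled when done.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = NVRAM word address.
 *	value = data to write.
 *
 * Context:
 *	Kernel context.
 */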
767 static void
768 ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
769 {
770 	int			count;
771 	volatile uint16_t	word;
772 	volatile uint32_t	nv_cmd;
773 
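	/*
	 * Enable NVRAM writes: clock out a start bit, a zero opcode and
	 * an all-ones address (the serial EEPROM write-enable sequence).
	 */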
774 	ql_nv_write(ha, NV_DATA_OUT);
775 	ql_nv_write(ha, 0);
776 	ql_nv_write(ha, 0);
777 
778 	for (word = 0; word < 8; word++) {
779 		ql_nv_write(ha, NV_DATA_OUT);
780 	}
781 
782 	/*
783 	 * Deselect the chip
784 	 */
785 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
786 	ql_nv_delay();
787 
788 	/*
789 	 * Erase Location
790 	 */
791 	nv_cmd = (addr << 16) | NV_ERASE_OP;
792 	nv_cmd <<= 5;
793 	for (count = 0; count < 11; count++) {
794 		if (nv_cmd & BIT_31) {
795 			ql_nv_write(ha, NV_DATA_OUT);
796 		} else {
797 			ql_nv_write(ha, 0);
798 		}
799 		nv_cmd <<= 1;
800 	}
801 
802 	/*
803 	 * Wait for Erase to Finish
804 	 */
805 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
806 	ql_nv_delay();
807 	WRT16_IO_REG(ha, nvram, NV_SELECT);
808 	word = 0;
809 	while ((word & NV_DATA_IN) == 0) {
810 		ql_nv_delay();
811 		word = RD16_IO_REG(ha, nvram);
812 	}
813 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
814 	ql_nv_delay();
815 
816 	/*
817 	 * Write data now
818 	 */
819 	nv_cmd = (addr << 16) | NV_WRITE_OP;
820 	nv_cmd |= value;
821 	nv_cmd <<= 5;
822 	for (count = 0; count < 27; count++) {
823 		if (nv_cmd & BIT_31) {
824 			ql_nv_write(ha, NV_DATA_OUT);
825 		} else {
826 			ql_nv_write(ha, 0);
827 		}
828 		nv_cmd <<= 1;
829 	}
830 
831 	/*
832 	 * Wait for NVRAM to become ready
833 	 */
834 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
835 	ql_nv_delay();
836 	WRT16_IO_REG(ha, nvram, NV_SELECT);
837 	word = 0;
838 	while ((word & NV_DATA_IN) == 0) {
839 		ql_nv_delay();
840 		word = RD16_IO_REG(ha, nvram);
841 	}
842 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
843 	ql_nv_delay();
844 
845 	/*
846 	 * Disable writes
847 	 */
848 	ql_nv_write(ha, NV_DATA_OUT);
849 	for (count = 0; count < 10; count++) {
850 		ql_nv_write(ha, 0);
851 	}
852 
853 	/*
854 	 * Deselect the chip now
855 	 */
856 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
857 }
858 
859 /*
860  * ql_24xx_load_nvram
 *	Enables NVRAM writes and writes a 32-bit word to ISP24xx NVRAM.
862  *
863  * Input:
864  *	ha:	adapter state pointer.
865  *	addr:	NVRAM address.
866  *	value:	data.
867  *
868  * Returns:
869  *	ql local function return status code.
870  *
871  * Context:
872  *	Kernel context.
873  */
874 static int
875 ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
876 {
877 	int	rval;
878 
879 	/* Enable flash write. */
880 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
881 		WRT32_IO_REG(ha, ctrl_status,
882 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
883 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
884 	}
885 
886 	/* Disable NVRAM write-protection. */
887 	if (CFG_IST(ha, CFG_CTRL_2422)) {
888 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
889 	} else {
890 		if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
891 			EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
892 			return (rval);
893 		}
894 	}
895 
896 	/* Write to flash. */
897 	rval = ql_24xx_write_flash(ha, addr, value);
898 
899 	/* Enable NVRAM write-protection. */
900 	if (CFG_IST(ha, CFG_CTRL_2422)) {
901 		/* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
902 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
903 	} else {
904 		ql_24xx_protect_flash(ha);
905 	}
906 
907 	/* Disable flash write. */
908 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
909 		WRT32_IO_REG(ha, ctrl_status,
910 		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
911 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
912 	}
913 
914 	return (rval);
915 }
916 
917 /*
918  * ql_nv_util_load
919  *	Loads NVRAM from application.
920  *
921  * Input:
922  *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = flags.
 *
 * Returns:
 *	0 = success
 *	errno value = failure
 *
927  * Context:
928  *	Kernel context.
929  */
930 int
931 ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode)
932 {
933 	uint8_t		cnt;
934 	void		*nv;
935 	uint16_t	*wptr;
936 	uint16_t	data;
937 	uint32_t	start_addr, *lptr, data32;
938 	nvram_t		*nptr;
939 	int		rval;
940 
941 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
942 
943 	if ((nv = kmem_zalloc(ha->nvram_cache->size, KM_SLEEP)) == NULL) {
944 		EL(ha, "failed, kmem_zalloc\n");
945 		return (ENOMEM);
946 	}
947 
948 	if (ddi_copyin(bp, nv, ha->nvram_cache->size, mode) != 0) {
949 		EL(ha, "Buffer copy failed\n");
950 		kmem_free(nv, ha->nvram_cache->size);
951 		return (EFAULT);
952 	}
953 
954 	/* See if the buffer passed to us looks sane */
955 	nptr = (nvram_t *)nv;
956 	if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' ||
957 	    nptr->id[3] != ' ') {
958 		EL(ha, "failed, buffer sanity check\n");
959 		kmem_free(nv, ha->nvram_cache->size);
960 		return (EINVAL);
961 	}
962 
963 	/* Quiesce I/O */
964 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
965 		EL(ha, "ql_stall_driver failed\n");
966 		kmem_free(nv, ha->nvram_cache->size);
967 		return (EBUSY);
968 	}
969 
970 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
971 	if (rval != QL_SUCCESS) {
972 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
973 		kmem_free(nv, ha->nvram_cache->size);
974 		ql_restart_driver(ha);
975 		return (EIO);
976 	}
977 
978 	/* Load NVRAM. */
979 	if (CFG_IST(ha, CFG_CTRL_258081)) {
980 		GLOBAL_HW_UNLOCK();
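		/*
		 * Convert the flash dword address into the byte offset
		 * expected by ql_r_m_w_flash().
		 */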
981 		start_addr &= ~ha->flash_data_addr;
982 		start_addr <<= 2;
983 		if ((rval = ql_r_m_w_flash(ha, bp, ha->nvram_cache->size,
984 		    start_addr, mode)) != QL_SUCCESS) {
985 			EL(ha, "nvram load failed, rval = %0xh\n", rval);
986 		}
987 		GLOBAL_HW_LOCK();
988 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
989 		lptr = (uint32_t *)nv;
990 		for (cnt = 0; cnt < ha->nvram_cache->size / 4; cnt++) {
991 			data32 = *lptr++;
992 			LITTLE_ENDIAN_32(&data32);
993 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
994 			    data32);
995 			if (rval != QL_SUCCESS) {
996 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
997 				break;
998 			}
999 		}
1000 	} else {
1001 		wptr = (uint16_t *)nv;
1002 		for (cnt = 0; cnt < ha->nvram_cache->size / 2; cnt++) {
1003 			data = *wptr++;
1004 			LITTLE_ENDIAN_16(&data);
1005 			ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data);
1006 		}
1007 	}
1008 	/* switch to the new one */
1009 	NVRAM_CACHE_LOCK(ha);
1010 
1011 	kmem_free(ha->nvram_cache->cache, ha->nvram_cache->size);
1012 	ha->nvram_cache->cache = (void *)nptr;
1013 
1014 	NVRAM_CACHE_UNLOCK(ha);
1015 
1016 	ql_release_nvram(ha);
1017 	ql_restart_driver(ha);
1018 
1019 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1020 
1021 	if (rval == QL_SUCCESS) {
1022 		return (0);
1023 	}
1024 
1025 	return (EFAULT);
1026 }
1027 
1028 /*
1029  * ql_nv_util_dump
1030  *	Dumps NVRAM to application.
1031  *
1032  * Input:
1033  *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = flags.
 *
 * Returns:
 *	0 = success
 *	errno value = failure
 *
1038  * Context:
1039  *	Kernel context.
1040  */
1041 int
1042 ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode)
1043 {
1044 	uint32_t	start_addr;
1045 	int		rval2, rval = 0;
1046 
1047 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1048 
1049 	if (ha->nvram_cache == NULL ||
1050 	    ha->nvram_cache->size == 0 ||
1051 	    ha->nvram_cache->cache == NULL) {
		EL(ha, "failed, no nvram cache\n");
1053 		return (ENOMEM);
1054 	} else if (ha->nvram_cache->valid != 1) {
1055 
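		/*
		 * The cache is not valid yet; read the NVRAM contents
		 * into it before copying the data out to the caller.
		 */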
1056 		/* Quiesce I/O */
1057 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1058 			EL(ha, "ql_stall_driver failed\n");
1059 			return (EBUSY);
1060 		}
1061 
1062 		rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
1063 		if (rval2 != QL_SUCCESS) {
1064 			EL(ha, "failed, ql_lock_nvram=%xh\n", rval2);
1065 			ql_restart_driver(ha);
1066 			return (EIO);
1067 		}
1068 		NVRAM_CACHE_LOCK(ha);
1069 
1070 		rval2 = ql_get_nvram(ha, ha->nvram_cache->cache,
1071 		    start_addr, ha->nvram_cache->size);
1072 		if (rval2 != QL_SUCCESS) {
1073 			rval = rval2;
1074 		} else {
1075 			ha->nvram_cache->valid = 1;
			EL(ha, "nvram cache now valid\n");
1077 		}
1078 
1079 		NVRAM_CACHE_UNLOCK(ha);
1080 
1081 		ql_release_nvram(ha);
1082 		ql_restart_driver(ha);
1083 
1084 		if (rval != 0) {
1085 			EL(ha, "failed to dump nvram, rval=%x\n", rval);
1086 			return (rval);
1087 		}
1088 	}
1089 
1090 	if (ddi_copyout(ha->nvram_cache->cache, bp,
1091 	    ha->nvram_cache->size, mode) != 0) {
1092 		EL(ha, "Buffer copy failed\n");
1093 		return (EFAULT);
1094 	}
1095 
1096 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1097 
1098 	return (0);
1099 }
1100 
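/*
 * ql_get_nvram
 *	Reads NVRAM contents into the supplied buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dest_addr = destination buffer address.
 *	src_addr = NVRAM/flash source address.
 *	size = byte count to read.
 *
 * Returns:
 *	QL_SUCCESS = success
 *	EAGAIN = flash read failure
 *
 * Context:
 *	Kernel context.
 */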
1101 int
1102 ql_get_nvram(ql_adapter_state_t *ha, void *dest_addr, uint32_t src_addr,
1103     uint32_t size)
1104 {
1105 	int rval = QL_SUCCESS;
1106 	int cnt;
1107 	/* Dump NVRAM. */
1108 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1109 		uint32_t	*lptr = (uint32_t *)dest_addr;
1110 
1111 		for (cnt = 0; cnt < size / 4; cnt++) {
1112 			rval = ql_24xx_read_flash(ha, src_addr++, lptr);
1113 			if (rval != QL_SUCCESS) {
1114 				EL(ha, "read_flash failed=%xh\n", rval);
1115 				rval = EAGAIN;
1116 				break;
1117 			}
1118 			LITTLE_ENDIAN_32(lptr);
1119 			lptr++;
1120 		}
1121 	} else {
1122 		uint16_t	data;
1123 		uint16_t	*wptr = (uint16_t *)dest_addr;
1124 
1125 		for (cnt = 0; cnt < size / 2; cnt++) {
1126 			data = (uint16_t)ql_get_nvram_word(ha, cnt +
1127 			    src_addr);
1128 			LITTLE_ENDIAN_16(&data);
1129 			*wptr++ = data;
1130 		}
1131 	}
1132 	return (rval);
1133 }
1134 
1135 /*
1136  * ql_vpd_load
1137  *	Loads VPD from application.
1138  *
1139  * Input:
1140  *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = flags.
 *
 * Returns:
 *	0 = success
 *	errno value = failure
 *
1145  * Context:
1146  *	Kernel context.
1147  */
1148 int
1149 ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode)
1150 {
1151 	uint8_t		cnt;
1152 	uint8_t		*vpd, *vpdptr, *vbuf;
1153 	uint32_t	start_addr, vpd_size, *lptr, data32;
1154 	int		rval;
1155 
1156 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1157 
1158 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1159 		EL(ha, "unsupported adapter feature\n");
1160 		return (ENOTSUP);
1161 	}
1162 
1163 	vpd_size = QL_24XX_VPD_SIZE;
1164 
1165 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1166 		EL(ha, "failed, kmem_zalloc\n");
1167 		return (ENOMEM);
1168 	}
1169 
1170 	if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) {
1171 		EL(ha, "Buffer copy failed\n");
1172 		kmem_free(vpd, vpd_size);
1173 		return (EFAULT);
1174 	}
1175 
1176 	/* Sanity check the user supplied data via checksum */
1177 	if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) {
1178 		EL(ha, "vpd RV tag missing\n");
1179 		kmem_free(vpd, vpd_size);
1180 		return (EINVAL);
1181 	}
1182 
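	/*
	 * The "RV" tag's first data byte (3 bytes past the keyword) holds
	 * the checksum; all bytes from the start of the VPD image through
	 * that byte must sum to zero.
	 */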
1183 	vpdptr += 3;
1184 	cnt = 0;
1185 	vbuf = vpd;
1186 	while (vbuf <= vpdptr) {
1187 		cnt += *vbuf++;
1188 	}
1189 	if (cnt != 0) {
		EL(ha, "mismatched checksum, sum=%xh, stored=%xh\n",
		    (uint8_t)cnt, *vpdptr);
1192 		kmem_free(vpd, vpd_size);
1193 		return (EINVAL);
1194 	}
1195 
1196 	/* Quiesce I/O */
1197 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1198 		EL(ha, "ql_stall_driver failed\n");
1199 		kmem_free(vpd, vpd_size);
1200 		return (EBUSY);
1201 	}
1202 
1203 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1204 	if (rval != QL_SUCCESS) {
1205 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1206 		kmem_free(vpd, vpd_size);
1207 		ql_restart_driver(ha);
1208 		return (EIO);
1209 	}
1210 
1211 	/* Load VPD. */
1212 	if (CFG_IST(ha, CFG_CTRL_258081)) {
1213 		GLOBAL_HW_UNLOCK();
1214 		start_addr &= ~ha->flash_data_addr;
1215 		start_addr <<= 2;
1216 		if ((rval = ql_r_m_w_flash(ha, bp, vpd_size, start_addr,
1217 		    mode)) != QL_SUCCESS) {
1218 			EL(ha, "vpd load error: %xh\n", rval);
1219 		}
1220 		GLOBAL_HW_LOCK();
1221 	} else {
1222 		lptr = (uint32_t *)vpd;
1223 		for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1224 			data32 = *lptr++;
1225 			LITTLE_ENDIAN_32(&data32);
1226 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1227 			    data32);
1228 			if (rval != QL_SUCCESS) {
1229 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1230 				break;
1231 			}
1232 		}
1233 	}
1234 
1235 	kmem_free(vpd, vpd_size);
1236 
1237 	/* Update the vcache */
1238 	CACHE_LOCK(ha);
1239 
1240 	if (rval != QL_SUCCESS) {
1241 		EL(ha, "failed, load\n");
1242 	} else if ((ha->vcache == NULL) && ((ha->vcache =
1243 	    kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) {
1244 		EL(ha, "failed, kmem_zalloc2\n");
1245 	} else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) {
1246 		EL(ha, "Buffer copy2 failed\n");
1247 		kmem_free(ha->vcache, vpd_size);
1248 		ha->vcache = NULL;
1249 	}
1250 
1251 	CACHE_UNLOCK(ha);
1252 
1253 	ql_release_nvram(ha);
1254 	ql_restart_driver(ha);
1255 
1256 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1257 
1258 	if (rval == QL_SUCCESS) {
1259 		return (0);
1260 	}
1261 
1262 	return (EFAULT);
1263 }
1264 
1265 /*
1266  * ql_vpd_dump
1267  *	Dumps VPD to application buffer.
1268  *
1269  * Input:
1270  *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = flags.
 *
 * Returns:
 *	0 = success
 *	errno value = failure
 *
1275  * Context:
1276  *	Kernel context.
1277  */
1278 int
1279 ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
1280 {
1281 	uint8_t		cnt;
1282 	void		*vpd;
1283 	uint32_t	start_addr, vpd_size, *lptr;
1284 	int		rval = 0;
1285 
1286 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1287 
1288 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1289 		EL(ha, "unsupported adapter feature\n");
1290 		return (EACCES);
1291 	}
1292 
1293 	vpd_size = QL_24XX_VPD_SIZE;
1294 
1295 	CACHE_LOCK(ha);
1296 
1297 	if (ha->vcache != NULL) {
1298 		/* copy back the vpd cache data */
1299 		if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
1300 			EL(ha, "Buffer copy failed\n");
1301 			rval = EFAULT;
1302 		}
1303 		CACHE_UNLOCK(ha);
1304 		return (rval);
1305 	}
1306 
1307 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1308 		CACHE_UNLOCK(ha);
1309 		EL(ha, "failed, kmem_zalloc\n");
1310 		return (ENOMEM);
1311 	}
1312 
1313 	/* Quiesce I/O */
1314 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1315 		CACHE_UNLOCK(ha);
1316 		EL(ha, "ql_stall_driver failed\n");
1317 		kmem_free(vpd, vpd_size);
1318 		return (EBUSY);
1319 	}
1320 
1321 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1322 	if (rval != QL_SUCCESS) {
1323 		CACHE_UNLOCK(ha);
1324 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1325 		kmem_free(vpd, vpd_size);
1326 		ql_restart_driver(ha);
1327 		return (EIO);
1328 	}
1329 
1330 	/* Dump VPD. */
1331 	lptr = (uint32_t *)vpd;
1332 
1333 	for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1334 		rval = ql_24xx_read_flash(ha, start_addr++, lptr);
1335 		if (rval != QL_SUCCESS) {
1336 			EL(ha, "read_flash failed=%xh\n", rval);
1337 			rval = EAGAIN;
1338 			break;
1339 		}
1340 		LITTLE_ENDIAN_32(lptr);
1341 		lptr++;
1342 	}
1343 
1344 	ql_release_nvram(ha);
1345 	ql_restart_driver(ha);
1346 
1347 	if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
1348 		CACHE_UNLOCK(ha);
1349 		EL(ha, "Buffer copy failed\n");
1350 		kmem_free(vpd, vpd_size);
1351 		return (EFAULT);
1352 	}
1353 
1354 	ha->vcache = vpd;
1355 
1356 	CACHE_UNLOCK(ha);
1357 
1358 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1359 
1360 	if (rval != QL_SUCCESS) {
1361 		return (EFAULT);
1362 	} else {
1363 		return (0);
1364 	}
1365 }
1366 
1367 /*
1368  * ql_vpd_findtag
1369  *	Search the passed vpd buffer for the requested VPD tag type.
1370  *
1371  * Input:
1372  *	ha	= adapter state pointer.
 *	vpdbuf	= Pointer to start of the buffer to search.
 *	opcode	= VPD opcode to find (must be NULL terminated).
1375  *
1376  * Returns:
1377  *	Pointer to the opcode in the buffer if opcode found.
1378  *	NULL if opcode is not found.
1379  *
1380  * Context:
1381  *	Kernel context.
1382  */
1383 static uint8_t *
1384 ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode)
1385 {
1386 	uint8_t		*vpd = vpdbuf;
1387 	uint8_t		*end = vpdbuf + QL_24XX_VPD_SIZE;
1388 	uint32_t	found = 0;
1389 
1390 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1391 
1392 	if (vpdbuf == NULL || opcode == NULL) {
1393 		EL(ha, "null parameter passed!\n");
1394 		return (NULL);
1395 	}
1396 
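	/* Walk the VPD tag list until the requested tag or the end tag. */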
1397 	while (vpd < end) {
1398 
1399 		/* check for end of vpd */
1400 		if (vpd[0] == VPD_TAG_END) {
1401 			if (opcode[0] == VPD_TAG_END) {
1402 				found = 1;
1403 			} else {
1404 				found = 0;
1405 			}
1406 			break;
1407 		}
1408 
1409 		/* check opcode */
1410 		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
1411 			/* found opcode requested */
1412 			found = 1;
1413 			break;
1414 		}
1415 
1416 		/*
1417 		 * Didn't find the opcode, so calculate start of
1418 		 * next tag. Depending on the current tag type,
1419 		 * the length field can be 1 or 2 bytes
1420 		 */
1421 		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
1422 			vpd += (vpd[2] << 8) + vpd[1] + 3;
1423 		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
1424 			vpd += 3;
1425 		} else {
			vpd += vpd[2] + 3;
1427 		}
1428 	}
1429 
1430 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1431 
1432 	return (found == 1 ? vpd : NULL);
1433 }
1434 
1435 /*
1436  * ql_vpd_lookup
 *	Return the VPD data for the requested VPD tag.
1438  *
1439  * Input:
1440  *	ha	= adapter state pointer.
1441  *	opcode	= VPD opcode to find (must be NULL terminated).
1442  *	bp	= Pointer to returned data buffer.
1443  *	bplen	= Length of returned data buffer.
1444  *
1445  * Returns:
1446  *	Length of data copied into returned data buffer.
1447  *		>0 = VPD data field (NULL terminated)
1448  *		 0 = no data.
1449  *		-1 = Could not find opcode in vpd buffer / error.
1450  *
1451  * Context:
1452  *	Kernel context.
1453  *
1454  * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1455  *
1456  */
1457 int32_t
1458 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp,
1459     int32_t bplen)
1460 {
1461 	uint8_t		*vpd;
1462 	uint8_t		*vpdbuf;
1463 	int32_t		len = -1;
1464 
1465 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1466 
1467 	if (opcode == NULL || bp == NULL || bplen < 1) {
1468 		EL(ha, "invalid parameter passed: opcode=%ph, "
1469 		    "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1470 		return (len);
1471 	}
1472 
1473 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
1474 		return (len);
1475 	}
1476 
1477 	if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE,
1478 	    KM_SLEEP)) == NULL) {
1479 		EL(ha, "unable to allocate vpd memory\n");
1480 		return (len);
1481 	}
1482 
1483 	if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) {
1484 		kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1485 		EL(ha, "unable to retrieve VPD data\n");
1486 		return (len);
1487 	}
1488 
1489 	if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) {
1490 		/*
1491 		 * Found the tag
1492 		 */
1493 		if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1494 		    *opcode == VPD_TAG_LRTC) {
1495 			/*
1496 			 * we found it, but the tag doesn't have a data
1497 			 * field.
1498 			 */
1499 			len = 0;
1500 		} else if (!(strncmp((char *)vpd, (char *)
1501 		    VPD_TAG_PRODID, 1))) {
1502 			len = vpd[2] << 8;
1503 			len += vpd[1];
1504 		} else {
1505 			len = vpd[2];
1506 		}
1507 
1508 		/*
1509 		 * make sure that the vpd len doesn't exceed the
1510 		 * vpd end
1511 		 */
		if (vpd + len > vpdbuf + QL_24XX_VPD_SIZE) {
1513 			EL(ha, "vpd tag len (%xh) exceeds vpd buffer "
1514 			    "length\n", len);
1515 			len = -1;
1516 		}
1517 	}
1518 
1519 	if (len >= 0) {
1520 		/*
1521 		 * make sure we don't exceed callers buffer space len
1522 		 */
1523 		if (len > bplen) {
			len = bplen - 1;
1525 		}
1526 
1527 		/* copy the data back */
1528 		(void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len);
1529 		bp[len] = 0;
1530 	} else {
1531 		/* error -- couldn't find tag */
1532 		bp[0] = 0;
1533 		if (opcode[1] != 0) {
1534 			EL(ha, "unable to find tag '%s'\n", opcode);
1535 		} else {
1536 			EL(ha, "unable to find tag '%xh'\n", opcode[0]);
1537 		}
1538 	}
1539 
1540 	kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1541 
1542 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1543 
1544 	return (len);
1545 }
1546 
1547 /*
1548  * ql_r_m_w_flash
 *	Read-modify-write from user space to flash.
1550  *
1551  * Input:
1552  *	ha:	adapter state pointer.
1553  *	dp:	source byte pointer.
1554  *	bc:	byte count.
1555  *	faddr:	flash byte address.
1556  *	mode:	flags.
1557  *
1558  * Returns:
1559  *	ql local function return status code.
1560  *
1561  * Context:
1562  *	Kernel context.
1563  */
1564 int
1565 ql_r_m_w_flash(ql_adapter_state_t *ha, caddr_t dp, uint32_t bc, uint32_t faddr,
1566     int mode)
1567 {
1568 	uint8_t		*bp;
1569 	uint32_t	xfer, bsize, saddr, ofst;
1570 	int		rval = 0;
1571 
1572 	QL_PRINT_9(CE_CONT, "(%d): started, dp=%ph, faddr=%xh, bc=%xh\n",
1573 	    ha->instance, (void *)dp, faddr, bc);
1574 
1575 	bsize = ha->xioctl->fdesc.block_size;
1576 	saddr = faddr & ~(bsize - 1);
1577 	ofst = faddr & (bsize - 1);
1578 
1579 	if ((bp = kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
1580 		EL(ha, "kmem_zalloc=null\n");
1581 		return (QL_MEMORY_ALLOC_FAILED);
1582 	}
1583 
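	/*
	 * Process the request one flash block at a time: read the
	 * existing block when it will only be partially overwritten,
	 * merge in the caller's data, then write the block back.
	 */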
1584 	while (bc) {
1585 		xfer = bc > bsize ? bsize : bc;
1586 		if (ofst + xfer > bsize) {
1587 			xfer = bsize - ofst;
1588 		}
1589 		QL_PRINT_9(CE_CONT, "(%d): dp=%ph, saddr=%xh, bc=%xh, "
1590 		    "ofst=%xh, xfer=%xh\n", ha->instance, (void *)dp, saddr,
1591 		    bc, ofst, xfer);
1592 
1593 		if (ofst || xfer < bsize) {
1594 			/* Dump Flash sector. */
1595 			if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) !=
1596 			    QL_SUCCESS) {
1597 				EL(ha, "dump_flash status=%x\n", rval);
1598 				break;
1599 			}
1600 		}
1601 
1602 		/* Set new data. */
1603 		if ((rval = ddi_copyin(dp, (caddr_t)(bp + ofst), xfer,
1604 		    mode)) != 0) {
1605 			EL(ha, "ddi_copyin status=%xh, dp=%ph, ofst=%xh, "
1606 			    "xfer=%xh\n", rval, (void *)dp, ofst, xfer);
1607 			rval = QL_FUNCTION_FAILED;
1608 			break;
1609 		}
1610 
1611 		/* Write to flash. */
1612 		if ((rval = ql_load_fcode(ha, bp, bsize, saddr)) !=
1613 		    QL_SUCCESS) {
1614 			EL(ha, "load_flash status=%x\n", rval);
1615 			break;
1616 		}
1617 		bc -= xfer;
1618 		dp += xfer;
1619 		saddr += bsize;
1620 		ofst = 0;
1621 	}
1622 
1623 	kmem_free(bp, bsize);
1624 
1625 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1626 
1627 	return (rval);
1628 }
1629 
1630 /*
1631  * ql_adm_op
1632  *	Performs qladm utility operations
1633  *
1634  * Input:
1635  *	ha:	adapter state pointer.
1636  *	arg:	driver_op_t structure pointer.
1637  *	mode:	flags.
1638  *
1639  * Returns:
 *	0 = success
 *	errno value = failure
 *
1641  * Context:
1642  *	Kernel context.
1643  */
1644 static int
1645 ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode)
1646 {
1647 	ql_adm_op_t		dop;
1648 	int			rval = 0;
1649 
1650 	if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) {
1651 		EL(ha, "failed, driver_op_t ddi_copyin\n");
1652 		return (EFAULT);
1653 	}
1654 
1655 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%xh, buffer=%llx,"
1656 	    " length=%xh, option=%xh\n", ha->instance, dop.cmd, dop.buffer,
1657 	    dop.length, dop.option);
1658 
1659 	switch (dop.cmd) {
1660 	case QL_ADAPTER_INFO:
1661 		rval = ql_adm_adapter_info(ha, &dop, mode);
1662 		break;
1663 
1664 	case QL_EXTENDED_LOGGING:
1665 		rval = ql_adm_extended_logging(ha, &dop);
1666 		break;
1667 
1668 	case QL_LOOP_RESET:
1669 		rval = ql_adm_loop_reset(ha);
1670 		break;
1671 
1672 	case QL_DEVICE_LIST:
1673 		rval = ql_adm_device_list(ha, &dop, mode);
1674 		break;
1675 
1676 	case QL_PROP_UPDATE_INT:
1677 		rval = ql_adm_prop_update_int(ha, &dop, mode);
1678 		break;
1679 
1680 	case QL_UPDATE_PROPERTIES:
1681 		rval = ql_adm_update_properties(ha);
1682 		break;
1683 
1684 	case QL_FW_DUMP:
1685 		rval = ql_adm_fw_dump(ha, &dop, arg, mode);
1686 		break;
1687 
1688 	case QL_NVRAM_LOAD:
1689 		rval = ql_adm_nvram_load(ha, &dop, mode);
1690 		break;
1691 
1692 	case QL_NVRAM_DUMP:
1693 		rval = ql_adm_nvram_dump(ha, &dop, mode);
1694 		break;
1695 
1696 	case QL_FLASH_LOAD:
1697 		rval = ql_adm_flash_load(ha, &dop, mode);
1698 		break;
1699 
1700 	case QL_VPD_LOAD:
1701 		rval = ql_adm_vpd_load(ha, &dop, mode);
1702 		break;
1703 
1704 	case QL_VPD_DUMP:
1705 		rval = ql_adm_vpd_dump(ha, &dop, mode);
1706 		break;
1707 
1708 	case QL_VPD_GETTAG:
1709 		rval = ql_adm_vpd_gettag(ha, &dop, mode);
1710 		break;
1711 
1712 	case QL_UPD_FWMODULE:
1713 		rval = ql_adm_updfwmodule(ha, &dop, mode);
1714 		break;
1715 
1716 	default:
1717 		EL(ha, "unsupported driver op cmd: %x\n", dop.cmd);
1718 		return (EINVAL);
1719 	}
1720 
1721 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1722 
1723 	return (rval);
1724 }
1725 
1726 /*
1727  * ql_adm_adapter_info
1728  *	Performs qladm QL_ADAPTER_INFO command
1729  *
1730  * Input:
1731  *	ha:	adapter state pointer.
1732  *	dop:	ql_adm_op_t structure pointer.
1733  *	mode:	flags.
1734  *
1735  * Returns:
 *	0 = success
 *	errno value = failure
 *
1737  * Context:
1738  *	Kernel context.
1739  */
1740 static int
1741 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1742 {
1743 	ql_adapter_info_t	hba;
1744 	uint8_t			*dp;
1745 	uint32_t		length;
1746 	int			rval, i;
1747 
1748 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1749 
1750 	hba.device_id = ha->device_id;
1751 
1752 	dp = CFG_IST(ha, CFG_CTRL_24258081) ?
1753 	    &ha->init_ctrl_blk.cb24.port_name[0] :
1754 	    &ha->init_ctrl_blk.cb.port_name[0];
1755 	bcopy(dp, hba.wwpn, 8);
1756 
1757 	hba.d_id = ha->d_id.b24;
1758 
1759 	if (ha->xioctl->fdesc.flash_size == 0 &&
1760 	    !(CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id)) {
1761 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1762 			EL(ha, "ql_stall_driver failed\n");
1763 			return (EBUSY);
1764 		}
1765 
1766 		if ((rval = ql_setup_fcache(ha)) != QL_SUCCESS) {
			EL(ha, "ql_setup_fcache failed=%xh\n", rval);
1768 			if (rval == QL_FUNCTION_TIMEOUT) {
1769 				return (EBUSY);
1770 			}
1771 			return (EIO);
1772 		}
1773 
1774 		/* Resume I/O */
1775 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
1776 			ql_restart_driver(ha);
1777 		} else {
1778 			EL(ha, "isp_abort_needed for restart\n");
1779 			ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
1780 			    DRIVER_STALL);
1781 		}
1782 	}
1783 	hba.flash_size = ha->xioctl->fdesc.flash_size;
1784 
1785 	(void) strcpy(hba.driver_ver, QL_VERSION);
1786 
1787 	(void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version,
1788 	    ha->fw_minor_version, ha->fw_subminor_version);
1789 
1790 	bzero(hba.fcode_ver, sizeof (hba.fcode_ver));
1791 
1792 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
1793 	rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
1794 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i);
1795 	length = i;
1796 	if (rval != DDI_PROP_SUCCESS) {
1797 		EL(ha, "failed, ddi_getlongprop=%xh\n", rval);
1798 	} else {
1799 		if (length > (uint32_t)sizeof (hba.fcode_ver)) {
1800 			length = sizeof (hba.fcode_ver) - 1;
1801 		}
1802 		bcopy((void *)dp, (void *)hba.fcode_ver, length);
1803 		kmem_free(dp, length);
1804 	}
1805 
1806 	if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer,
1807 	    dop->length, mode) != 0) {
1808 		EL(ha, "failed, ddi_copyout\n");
1809 		return (EFAULT);
1810 	}
1811 
1812 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1813 
1814 	return (0);
1815 }
1816 
1817 /*
1818  * ql_adm_extended_logging
1819  *	Performs qladm QL_EXTENDED_LOGGING command
1820  *
1821  * Input:
1822  *	ha:	adapter state pointer.
1823  *	dop:	ql_adm_op_t structure pointer.
1824  *
1825  * Returns:
 *	0 = success
 *	errno value = failure
 *
1827  * Context:
1828  *	Kernel context.
1829  */
1830 static int
1831 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop)
1832 {
1833 	char	prop_name[MAX_PROP_LENGTH];
1834 	int	rval;
1835 
1836 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1837 
1838 	(void) sprintf(prop_name, "hba%d-extended-logging", ha->instance);
1839 
1840 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1841 	rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
1842 	    (int)dop->option);
1843 	if (rval != DDI_PROP_SUCCESS) {
1844 		EL(ha, "failed, prop_update = %xh\n", rval);
1845 		return (EINVAL);
1846 	} else {
1847 		dop->option ?
1848 		    (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) :
1849 		    (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING);
1850 	}
1851 
1852 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1853 
1854 	return (0);
1855 }
1856 
1857 /*
1858  * ql_adm_loop_reset
1859  *	Performs qladm QL_LOOP_RESET command
1860  *
1861  * Input:
1862  *	ha:	adapter state pointer.
1863  *
1864  * Returns:
 *	0 = success
 *	errno value = failure
 *
1866  * Context:
1867  *	Kernel context.
1868  */
1869 static int
1870 ql_adm_loop_reset(ql_adapter_state_t *ha)
1871 {
1872 	int	rval;
1873 
1874 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1875 
1876 	if (ha->task_daemon_flags & LOOP_DOWN) {
1877 		(void) ql_full_login_lip(ha);
1878 	} else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) {
		EL(ha, "failed, ql_full_login_lip=%xh\n", rval);
1880 		return (EIO);
1881 	}
1882 
1883 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1884 
1885 	return (0);
1886 }
1887 
1888 /*
1889  * ql_adm_device_list
1890  *	Performs qladm QL_DEVICE_LIST command
1891  *
1892  * Input:
1893  *	ha:	adapter state pointer.
1894  *	dop:	ql_adm_op_t structure pointer.
1895  *	mode:	flags.
1896  *
1897  * Returns:
 *	0 = success
 *	errno value = failure
 *
1899  * Context:
1900  *	Kernel context.
1901  */
1902 static int
1903 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1904 {
1905 	ql_device_info_t	dev;
1906 	ql_link_t		*link;
1907 	ql_tgt_t		*tq;
1908 	uint32_t		index, cnt;
1909 
1910 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1911 
1912 	cnt = 0;
1913 	dev.address = 0xffffffff;
1914 
1915 	/* Scan port list for requested target and fill in the values */
1916 	for (link = NULL, index = 0;
1917 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1918 		for (link = ha->dev[index].first; link != NULL;
1919 		    link = link->next) {
1920 			tq = link->base_address;
1921 
1922 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1923 				continue;
1924 			}
1925 			if (cnt != dop->option) {
1926 				cnt++;
1927 				continue;
1928 			}
1929 			/* fill in the values */
1930 			bcopy(tq->port_name, dev.wwpn, 8);
1931 			dev.address = tq->d_id.b24;
1932 			dev.loop_id = tq->loop_id;
1933 			if (tq->flags & TQF_TAPE_DEVICE) {
1934 				dev.type = FCT_TAPE;
1935 			} else if (tq->flags & TQF_INITIATOR_DEVICE) {
1936 				dev.type = FCT_INITIATOR;
1937 			} else {
1938 				dev.type = FCT_TARGET;
1939 			}
1940 			break;
1941 		}
1942 	}
1943 
1944 	if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer,
1945 	    dop->length, mode) != 0) {
1946 		EL(ha, "failed, ddi_copyout\n");
1947 		return (EFAULT);
1948 	}
1949 
1950 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1951 
1952 	return (0);
1953 }
1954 
1955 /*
1956  * ql_adm_update_properties
1957  *	Performs qladm QL_UPDATE_PROPERTIES command
1958  *
1959  * Input:
1960  *	ha:	adapter state pointer.
1961  *
1962  * Returns:
 *	0 = success
 *
1964  * Context:
1965  *	Kernel context.
1966  */
1967 static int
1968 ql_adm_update_properties(ql_adapter_state_t *ha)
1969 {
1970 	ql_comb_init_cb_t	init_ctrl_blk;
1971 	ql_comb_ip_init_cb_t	ip_init_ctrl_blk;
1972 
1973 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1974 
1975 	/* Stall driver instance. */
1976 	(void) ql_stall_driver(ha, 0);
1977 
1978 	/* Save init control blocks. */
1979 	bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t));
1980 	bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1981 	    sizeof (ql_comb_ip_init_cb_t));
1982 
	/* Update PCI configuration. */
1984 	(void) ql_pci_sbus_config(ha);
1985 
1986 	/* Get configuration properties. */
1987 	(void) ql_nvram_config(ha);
1988 
1989 	/* Check for init firmware required. */
1990 	if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk,
1991 	    sizeof (ql_comb_init_cb_t)) != 0 ||
1992 	    bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1993 	    sizeof (ql_comb_ip_init_cb_t)) != 0) {
1994 
1995 		EL(ha, "isp_abort_needed\n");
1996 		ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1997 		TASK_DAEMON_LOCK(ha);
1998 		ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED;
1999 		TASK_DAEMON_UNLOCK(ha);
2000 	}
2001 
2002 	/* Update AEN queue. */
2003 	if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
2004 		ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
2005 	}
2006 
2007 	/* Restart driver instance. */
2008 	ql_restart_driver(ha);
2009 
2010 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2011 
2012 	return (0);
2013 }
2014 
2015 /*
2016  * ql_adm_prop_update_int
2017  *	Performs qladm QL_PROP_UPDATE_INT command
2018  *
2019  * Input:
2020  *	ha:	adapter state pointer.
2021  *	dop:	ql_adm_op_t structure pointer.
2022  *	mode:	flags.
2023  *
2024  * Returns:
 *	0 = success
 *	errno value = failure
 *
2026  * Context:
2027  *	Kernel context.
2028  */
2029 static int
2030 ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2031 {
2032 	char	*prop_name;
2033 	int	rval;
2034 
2035 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2036 
2037 	prop_name = kmem_zalloc(dop->length, KM_SLEEP);
2038 	if (prop_name == NULL) {
2039 		EL(ha, "failed, kmem_zalloc\n");
2040 		return (ENOMEM);
2041 	}
2042 
2043 	if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length,
2044 	    mode) != 0) {
2045 		EL(ha, "failed, prop_name ddi_copyin\n");
2046 		kmem_free(prop_name, dop->length);
2047 		return (EFAULT);
2048 	}
2049 
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
2051 	if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
2052 	    (int)dop->option)) != DDI_PROP_SUCCESS) {
2053 		EL(ha, "failed, prop_update=%xh\n", rval);
2054 		kmem_free(prop_name, dop->length);
2055 		return (EINVAL);
2056 	}
2057 
2058 	kmem_free(prop_name, dop->length);
2059 
2060 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2061 
2062 	return (0);
2063 }
2064 
2065 /*
2066  * ql_adm_fw_dump
2067  *	Performs qladm QL_FW_DUMP command
2068  *
2069  * Input:
2070  *	ha:	adapter state pointer.
2071  *	dop:	ql_adm_op_t structure pointer.
2072  *	udop:	user space ql_adm_op_t structure pointer.
2073  *	mode:	flags.
2074  *
2075  * Returns:
 *	0 = success
 *	errno value = failure
 *
2077  * Context:
2078  *	Kernel context.
2079  */
2080 static int
2081 ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode)
2082 {
2083 	caddr_t	dmp;
2084 
2085 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2086 
2087 	if (dop->length < ha->risc_dump_size) {
2088 		EL(ha, "failed, incorrect length=%xh, size=%xh\n",
2089 		    dop->length, ha->risc_dump_size);
2090 		return (EINVAL);
2091 	}
2092 
2093 	if (ha->ql_dump_state & QL_DUMP_VALID) {
2094 		dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);
2095 		if (dmp == NULL) {
2096 			EL(ha, "failed, kmem_zalloc\n");
2097 			return (ENOMEM);
2098 		}
2099 
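		/*
		 * Convert the binary dump to ASCII text and report the
		 * resulting length back to the caller.
		 */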
2100 		dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp);
2101 		if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer,
2102 		    dop->length, mode) != 0) {
2103 			EL(ha, "failed, ddi_copyout\n");
2104 			kmem_free(dmp, ha->risc_dump_size);
2105 			return (EFAULT);
2106 		}
2107 
2108 		kmem_free(dmp, ha->risc_dump_size);
2109 		ha->ql_dump_state |= QL_DUMP_UPLOADED;
2110 
2111 	} else {
2112 		EL(ha, "failed, no dump file\n");
2113 		dop->length = 0;
2114 	}
2115 
2116 	if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) {
2117 		EL(ha, "failed, driver_op_t ddi_copyout\n");
2118 		return (EFAULT);
2119 	}
2120 
2121 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2122 
2123 	return (0);
2124 }
2125 
2126 /*
2127  * ql_adm_nvram_dump
2128  *	Performs qladm QL_NVRAM_DUMP command
2129  *
2130  * Input:
2131  *	ha:	adapter state pointer.
2132  *	dop:	ql_adm_op_t structure pointer.
2133  *	mode:	flags.
2134  *
2135  * Returns:
 *	0 = success
 *	errno value = failure
 *
2137  * Context:
2138  *	Kernel context.
2139  */
2140 static int
2141 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2142 {
2143 	int		rval;
2144 
2145 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2146 
2147 	if (dop->length < ha->nvram_cache->size) {
2148 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2149 		    ha->nvram_cache->size);
2150 		return (EINVAL);
2151 	}
2152 
2153 	if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer,
2154 	    mode)) != 0) {
2155 		EL(ha, "failed, ql_nv_util_dump\n");
2156 	} else {
2157 		/*EMPTY*/
2158 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2159 	}
2160 
2161 	return (rval);
2162 }
2163 
2164 /*
2165  * ql_adm_nvram_load
2166  *	Performs qladm QL_NVRAM_LOAD command
2167  *
2168  * Input:
2169  *	ha:	adapter state pointer.
2170  *	dop:	ql_adm_op_t structure pointer.
2171  *	mode:	flags.
2172  *
2173  * Returns:
2174  *	0 = success, errno value = failure.
2175  * Context:
2176  *	Kernel context.
2177  */
2178 static int
2179 ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2180 {
2181 	int		rval;
2182 
2183 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2184 
2185 	if (dop->length < ha->nvram_cache->size) {
2186 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2187 		    ha->nvram_cache->size);
2188 		return (EINVAL);
2189 	}
2190 
2191 	if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer,
2192 	    mode)) != 0) {
2193 		EL(ha, "failed, ql_nv_util_load\n");
2194 	} else {
2195 		/*EMPTY*/
2196 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2197 	}
2198 
2199 	return (rval);
2200 }
2201 
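/*
 * Usage note (illustrative): QL_NVRAM_LOAD is the inverse operation;
 * the caller passes an NVRAM image of at least ha->nvram_cache->size
 * bytes in dop->buffer for ql_nv_util_load() to copy in and apply.  A
 * typical sequence is dump, patch the desired fields, then load the
 * edited image back:
 *
 *	op.buffer = (uint64_t)(uintptr_t)edited_image;
 *	op.length = nvram_size;
 *	rc = ioctl(fd, QLA_ADM_OP, &op);	QL_NVRAM_LOAD selector assumed
 */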
2202 /*
2203  * ql_adm_flash_load
2204  *	Performs qladm QL_FLASH_LOAD command
2205  *
2206  * Input:
2207  *	ha:	adapter state pointer.
2208  *	dop:	ql_adm_op_t structure pointer.
2209  *	mode:	flags.
2210  *
2211  * Returns:
2212  *	0 = success, errno value = failure.
2213  * Context:
2214  *	Kernel context.
2215  */
2216 static int
2217 ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2218 {
2219 	uint8_t	*dp;
2220 	int	rval;
2221 
2222 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2223 
2224 	if ((dp = kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2225 		EL(ha, "failed, kmem_zalloc\n");
2226 		return (ENOMEM);
2227 	}
2228 
2229 	if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length,
2230 	    mode) != 0) {
2231 		EL(ha, "ddi_copyin failed\n");
2232 		kmem_free(dp, dop->length);
2233 		return (EFAULT);
2234 	}
2235 
2236 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
2237 		EL(ha, "ql_stall_driver failed\n");
2238 		kmem_free(dp, dop->length);
2239 		return (EBUSY);
2240 	}
2241 
2242 	rval = (CFG_IST(ha, CFG_CTRL_24258081) ?
2243 	    ql_24xx_load_flash(ha, dp, dop->length, dop->option) :
2244 	    ql_load_flash(ha, dp, dop->length));
2245 
2246 	ql_restart_driver(ha);
2247 
2248 	kmem_free(dp, dop->length);
2249 
2250 	if (rval != QL_SUCCESS) {
2251 		EL(ha, "failed\n");
2252 		return (EIO);
2253 	}
2254 
2255 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2256 
2257 	return (0);
2258 }
2259 
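/*
 * Usage sketch (user context, illustrative; same assumed ioctl plumbing
 * as the QL_FW_DUMP sketch above).  The handler copies in exactly
 * dop->length bytes and programs them with ql_load_flash() or, on
 * CFG_CTRL_24258081 adapters, ql_24xx_load_flash() with dop->option
 * forwarded unchanged, so the caller passes the flash image size rather
 * than a maximum.  I/O is stalled for the duration via ql_stall_driver()
 * and resumed with ql_restart_driver().
 *
 *	struct stat	st;
 *
 *	(void) fstat(img_fd, &st);
 *	void *img = malloc(st.st_size);
 *	(void) read(img_fd, img, st.st_size);
 *	op.buffer = (uint64_t)(uintptr_t)img;
 *	op.length = (uint32_t)st.st_size;
 *	op.option = 0;			adapter-specific option value
 *	rc = ioctl(fd, QLA_ADM_OP, &op);	QL_FLASH_LOAD selector assumed
 */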
2260 /*
2261  * ql_adm_vpd_dump
2262  *	Performs qladm QL_VPD_DUMP command
2263  *
2264  * Input:
2265  *	ha:	adapter state pointer.
2266  *	dop:	ql_adm_op_t structure pointer.
2267  *	mode:	flags.
2268  *
2269  * Returns:
2270  *	0 = success, errno value = failure.
2271  * Context:
2272  *	Kernel context.
2273  */
2274 static int
2275 ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2276 {
2277 	int		rval;
2278 
2279 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2280 
2281 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2282 		EL(ha, "hba does not support VPD\n");
2283 		return (EINVAL);
2284 	}
2285 
2286 	if (dop->length < QL_24XX_VPD_SIZE) {
2287 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2288 		    QL_24XX_VPD_SIZE);
2289 		return (EINVAL);
2290 	}
2291 
2292 	if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode))
2293 	    != 0) {
2294 		EL(ha, "failed, ql_vpd_dump\n");
2295 	} else {
2296 		/*EMPTY*/
2297 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2298 	}
2299 
2300 	return (rval);
2301 }
2302 
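/*
 * Usage note (illustrative): QL_VPD_DUMP is only accepted on adapters
 * matching CFG_CTRL_24258081 and requires a buffer of at least
 * QL_24XX_VPD_SIZE bytes:
 *
 *	uint8_t	vpd[QL_24XX_VPD_SIZE];
 *
 *	op.buffer = (uint64_t)(uintptr_t)vpd;
 *	op.length = sizeof (vpd);
 *	rc = ioctl(fd, QLA_ADM_OP, &op);	QL_VPD_DUMP selector assumed
 */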
2303 /*
2304  * ql_adm_vpd_load
2305  *	Performs qladm QL_VPD_LOAD command
2306  *
2307  * Input:
2308  *	ha:	adapter state pointer.
2309  *	dop:	ql_adm_op_t structure pointer.
2310  *	mode:	flags.
2311  *
2312  * Returns:
2313  *	0 = success, errno value = failure.
2314  * Context:
2315  *	Kernel context.
2316  */
2317 static int
2318 ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2319 {
2320 	int		rval;
2321 
2322 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2323 
2324 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2325 		EL(ha, "hba does not support VPD\n");
2326 		return (EINVAL);
2327 	}
2328 
2329 	if (dop->length < QL_24XX_VPD_SIZE) {
2330 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2331 		    QL_24XX_VPD_SIZE);
2332 		return (EINVAL);
2333 	}
2334 
2335 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode))
2336 	    != 0) {
2337 		EL(ha, "failed, ql_vpd_load\n");
2338 	} else {
2339 		/*EMPTY*/
2340 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2341 	}
2342 
2343 	return (rval);
2344 }
2345 
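/*
 * Usage note (illustrative): QL_VPD_LOAD mirrors QL_VPD_DUMP; the
 * caller supplies a complete VPD image of at least QL_24XX_VPD_SIZE
 * bytes in dop->buffer for ql_vpd_load() to copy in and apply.  A
 * dump/edit/load round trip parallels the NVRAM sequence sketched
 * earlier.
 *
 *	op.buffer = (uint64_t)(uintptr_t)vpd_image;
 *	op.length = QL_24XX_VPD_SIZE;
 */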
2346 /*
2347  * ql_adm_vpd_gettag
2348  *	Performs qladm QL_VPD_GETTAG command
2349  *
2350  * Input:
2351  *	ha:	adapter state pointer.
2352  *	dop:	ql_adm_op_t structure pointer.
2353  *	mode:	flags.
2354  *
2355  * Returns:
2356  *	0 = success, errno value = failure.
2357  * Context:
2358  *	Kernel context.
2359  */
2360 static int
2361 ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2362 {
2363 	int		rval = 0;
2364 	uint8_t		*lbuf;
2365 
2366 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2367 
2368 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2369 		EL(ha, "hba does not support VPD\n");
2370 		return (EINVAL);
2371 	}
2372 
2373 	if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2374 		EL(ha, "mem alloc failure of %xh bytes\n", dop->length);
2375 		rval = ENOMEM;
2376 	} else {
2377 		if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf,
2378 		    dop->length, mode) != 0) {
2379 			EL(ha, "ddi_copyin failed\n");
2380 			kmem_free(lbuf, dop->length);
2381 			return (EFAULT);
2382 		}
2383 
2384 		if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t)
2385 		    dop->length)) < 0) {
2386 			EL(ha, "failed vpd_lookup\n");
2387 		} else {
2388 			if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer,
2389 			    strlen((int8_t *)lbuf)+1, mode) != 0) {
2390 				EL(ha, "failed, ddi_copyout\n");
2391 				rval = EFAULT;
2392 			} else {
2393 				rval = 0;
2394 			}
2395 		}
2396 		kmem_free(lbuf, dop->length);
2397 	}
2398 
2399 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2400 
2401 	return (rval);
2402 }
2403 
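/*
 * Usage sketch (illustrative): for QL_VPD_GETTAG the buffer is used in
 * both directions.  The caller copies in a NUL-terminated tag keyword;
 * on success ql_vpd_lookup() writes the tag's value back into the same
 * buffer and only strlen() + 1 bytes are copied out.
 *
 *	char	tag[64];			size is an arbitrary example
 *
 *	(void) strlcpy(tag, "SN", sizeof (tag));	serial number keyword
 *	op.buffer = (uint64_t)(uintptr_t)tag;
 *	op.length = sizeof (tag);
 *	if (ioctl(fd, QLA_ADM_OP, &op) == 0)	QL_VPD_GETTAG selector assumed
 *		(void) printf("%s\n", tag);
 */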
2404 /*
2405  * ql_adm_updfwmodule
2406  *	Performs qladm QL_UPD_FWMODULE command
2407  *
2408  * Input:
2409  *	ha:	adapter state pointer.
2410  *	dop:	ql_adm_op_t structure pointer.
2411  *	mode:	flags.
2412  *
2413  * Returns:
2414  *	0 = success, errno value = failure.
2415  * Context:
2416  *	Kernel context.
2417  */
2418 /* ARGSUSED */
2419 static int
2420 ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2421 {
2422 	int			rval = DDI_SUCCESS;
2423 	ql_link_t		*link;
2424 	ql_adapter_state_t	*ha2 = NULL;
2425 	uint16_t		fw_class = (uint16_t)dop->option;
2426 
2427 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2428 
2429 	/* close the loaded f/w module on all adapters of this f/w class */
2430 	for (link = ql_hba.first; link != NULL; link = link->next) {
2431 		ha2 = link->base_address;
2432 		if (fw_class == ha2->fw_class) {
2433 			if ((rval = ddi_modclose(ha2->fw_module)) !=
2434 			    DDI_SUCCESS) {
2435 				EL(ha2, "modclose rval=%xh\n", rval);
2436 				break;
2437 			}
2438 			ha2->fw_module = NULL;
2439 		}
2440 	}
2441 
2442 	/* reload the f/w modules */
2443 	for (link = ql_hba.first; link != NULL; link = link->next) {
2444 		ha2 = link->base_address;
2445 
2446 		if ((fw_class == ha2->fw_class) && (ha2->fw_module == NULL)) {
2447 			if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) !=
2448 			    QL_SUCCESS) {
2449 				EL(ha2, "unable to load f/w module: '%x' "
2450 				    "(rval=%xh)\n", ha2->fw_class, rval);
2451 				rval = EFAULT;
2452 			} else {
2453 				EL(ha2, "f/w module updated: '%x'\n",
2454 				    ha2->fw_class);
2455 			}
2456 
2457 			EL(ha2, "isp abort needed (%d)\n", ha2->instance);
2458 
2459 			ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0);
2460 
2461 			rval = 0;
2462 		}
2463 	}
2464 
2465 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2466 
2467 	return (rval);
2468 }
2469
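/*
 * Usage note (illustrative): for QL_UPD_FWMODULE only dop->option is
 * consulted by the handler above; it carries the firmware class to
 * refresh (truncated to 16 bits).  The handler closes and re-resolves
 * the firmware module for every adapter of that class and schedules an
 * ISP abort so the new firmware image is loaded.
 *
 *	op.option = fw_class;			16-bit firmware class value
 *	op.length = 0;
 *	op.buffer = 0;
 *	rc = ioctl(fd, QLA_ADM_OP, &op);	QL_UPD_FWMODULE selector assumed
 */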