1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* Copyright 2009 QLogic Corporation */ 23 24 /* 25 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 26 * Use is subject to license terms. 27 */ 28 29 #pragma ident "Copyright 2009 QLogic Corporation; ql_ioctl.c" 30 31 /* 32 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file. 33 * Fibre Channel Adapter (FCA) driver IOCTL source file. 34 * 35 * *********************************************************************** 36 * * ** 37 * * NOTICE ** 38 * * COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION ** 39 * * ALL RIGHTS RESERVED ** 40 * * ** 41 * *********************************************************************** 42 * 43 */ 44 45 #include <ql_apps.h> 46 #include <ql_api.h> 47 #include <ql_debug.h> 48 #include <ql_init.h> 49 #include <ql_ioctl.h> 50 #include <ql_mbx.h> 51 #include <ql_xioctl.h> 52 53 /* 54 * Local Function Prototypes. 
 */
static int ql_busy_notification(ql_adapter_state_t *);
static int ql_idle_notification(ql_adapter_state_t *);
static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features);
static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features);
static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha);
static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr,
    uint16_t value);
static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t);
static int ql_adm_op(ql_adapter_state_t *, void *, int);
static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *);
static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_update_properties(ql_adapter_state_t *);
static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_loop_reset(ql_adapter_state_t *);
static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int);
static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int);
static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int);
static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *);
static int ql_25xx_load_nv_vpd(ql_adapter_state_t *, uint8_t *, uint32_t,
    uint32_t);

/* ************************************************************************ */
/*				cb_ops functions			    */
/* ************************************************************************ */

/*
 * ql_open
 *	opens device
 *
 * Input:
 *	dev_p = device pointer
 *	flags = open flags
 *	otyp = open type
 *	cred_p = credentials pointer
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	/* The minor number selects the adapter instance. */
	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Allow only character opens */
	if (otyp != OTYP_CHR) {
		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
		    ha->instance);
		return (EINVAL);
	}

	/*
	 * An exclusive open (FEXCL) fails if the device is already
	 * open; otherwise mark the adapter opened.
	 */
	ADAPTER_STATE_LOCK(ha);
	if (flags & FEXCL && ha->flags & QL_OPENED) {
		ADAPTER_STATE_UNLOCK(ha);
		rval = EBUSY;
	} else {
		ha->flags |= QL_OPENED;
		ADAPTER_STATE_UNLOCK(ha);
	}

	if (rval != 0) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_close
 *	closes device
 *
 * Input:
 *	dev = device number
 *	flags = open flags
 *	otyp = open type
 *	cred_p = credentials pointer
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Only character opens are ever granted, so only those close. */
	if (otyp != OTYP_CHR) {
		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
		    ha->instance);
		return (EINVAL);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~QL_OPENED;
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != 0) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_ioctl
 *	control a character device
 *
 * Input:
 *	dev = device number
 *	cmd = function to perform
 *	arg = data type varies with request
 *	mode = flags
 *	cred_p = credentials pointer
 *	rval_p = pointer to result value
 *
 * Returns:
 *	0 = success
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
int
ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
	ql_adapter_state_t	*ha;
	int			rval = 0;

	/* No ioctl service while the system is panicking. */
	if (ddi_in_panic()) {
		QL_PRINT_2(CE_CONT, "qla_ioctl: ddi_in_panic exit\n");
		return (ENOPROTOOPT);
	}

	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
		return (ENXIO);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Quick clean exit for qla2x00 foapi calls which are
	 * not supported in qlc.
	 */
	if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
		QL_PRINT_9(CE_CONT, "failed, fo api not supported\n");
		return (ENOTTY);
	}

	/* PWR management busy: keep the adapter powered up for the op. */
	rval = ql_busy_notification(ha);
	if (rval != FC_SUCCESS) {
		EL(ha, "failed, ql_busy_notification\n");
		return (ENXIO);
	}

	/*
	 * Hand the command to the external ioctl module first; fall back
	 * to the locally implemented commands only if it declines.
	 */
	rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
	if (rval == ENOPROTOOPT || rval == EINVAL) {
		switch (cmd) {
		case QL_GET_ADAPTER_FEATURE_BITS: {
			uint16_t bits;

			rval = ql_get_feature_bits(ha, &bits);

			if (!rval && ddi_copyout((void *)&bits, (void *)arg,
			    sizeof (bits), mode)) {
				rval = EFAULT;
			}
			break;
		}

		case QL_SET_ADAPTER_FEATURE_BITS: {
			uint16_t bits;

			if (ddi_copyin((void *)arg, (void *)&bits,
			    sizeof (bits), mode)) {
				rval = EFAULT;
				break;
			}

			rval = ql_set_feature_bits(ha, bits);
			break;
		}

		case QL_SET_ADAPTER_NVRAM_DEFAULTS:
			rval = ql_set_nvram_adapter_defaults(ha);
			break;

		case QL_UTIL_LOAD:
			rval = ql_nv_util_load(ha, (void *)arg, mode);
			break;

		case QL_UTIL_DUMP:
			rval = ql_nv_util_dump(ha, (void *)arg, mode);
			break;

		case QL_ADM_OP:
			rval = ql_adm_op(ha, (void *)arg, mode);
			break;

		default:
			EL(ha, "unknown command = %d\n", cmd);
			rval = ENOTTY;
			break;
		}
	}

	/* PWR management idle: release the busy hold taken above. */
	(void) ql_idle_notification(ha);

	if (rval != 0) {
		EL(ha, "failed, rval = %d\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_busy_notification
 *	Adapter busy notification.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	FC_SUCCESS
 *	FC_FAILURE
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_notification(ql_adapter_state_t *ha)
{
	/* Nothing to do when power management is not enabled. */
	if (!ha->pm_capable) {
		return (FC_SUCCESS);
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Bump the busy count first so a concurrent power-down sees the
	 * adapter as busy; undo it on any failure below.
	 */
	QL_PM_LOCK(ha);
	ha->busy++;
	QL_PM_UNLOCK(ha);

	if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
		QL_PM_LOCK(ha);
		ha->busy--;
		QL_PM_UNLOCK(ha);

		EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
		return (FC_FAILURE);
	}

	/* If the adapter is powered down, raise it to full power (D0). */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		ASSERT(ha->power_level == PM_LEVEL_D3);

		QL_PM_UNLOCK(ha);
		if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
			QL_PM_LOCK(ha);
			ha->busy--;
			QL_PM_UNLOCK(ha);
			return (FC_FAILURE);
		}
	} else {
		QL_PM_UNLOCK(ha);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}

/*
 * ql_idle_notification
 *	Adapter idle notification.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	FC_SUCCESS
 *	FC_FAILURE
 *
 * Context:
 *	Kernel context.
380 */ 381 static int 382 ql_idle_notification(ql_adapter_state_t *ha) 383 { 384 if (!ha->pm_capable) { 385 return (FC_SUCCESS); 386 } 387 388 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 389 390 if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) { 391 EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE); 392 return (FC_FAILURE); 393 } 394 395 QL_PM_LOCK(ha); 396 ASSERT(ha->busy > 0); 397 ha->busy--; 398 QL_PM_UNLOCK(ha); 399 400 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 401 402 return (FC_SUCCESS); 403 } 404 405 /* 406 * Get adapter feature bits from NVRAM 407 */ 408 static int 409 ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features) 410 { 411 int count; 412 volatile uint16_t data; 413 uint32_t nv_cmd; 414 uint32_t start_addr; 415 int rval; 416 uint32_t offset = offsetof(nvram_t, adapter_features); 417 418 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 419 420 if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) { 421 EL(ha, "Not supported for 24xx\n"); 422 return (EINVAL); 423 } 424 425 /* 426 * The offset can't be greater than max of 8 bits and 427 * the following code breaks if the offset isn't at 428 * 2 byte boundary. 
429 */ 430 ASSERT(offset <= 0xFF && (offset & 0x1) == 0); 431 432 rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA); 433 if (rval != QL_SUCCESS) { 434 EL(ha, "failed, ql_lock_nvram=%xh\n", rval); 435 return (EIO); 436 } 437 438 /* 439 * Have the most significant 3 bits represent the read operation 440 * followed by the 8 bits representing the offset at which we 441 * are going to perform the read operation 442 */ 443 offset >>= 1; 444 offset += start_addr; 445 nv_cmd = (offset << 16) | NV_READ_OP; 446 nv_cmd <<= 5; 447 448 /* 449 * Select the chip and feed the command and address 450 */ 451 for (count = 0; count < 11; count++) { 452 if (nv_cmd & BIT_31) { 453 ql_nv_write(ha, NV_DATA_OUT); 454 } else { 455 ql_nv_write(ha, 0); 456 } 457 nv_cmd <<= 1; 458 } 459 460 *features = 0; 461 for (count = 0; count < 16; count++) { 462 WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK); 463 ql_nv_delay(); 464 465 data = RD16_IO_REG(ha, nvram); 466 *features <<= 1; 467 if (data & NV_DATA_IN) { 468 *features = (uint16_t)(*features | 0x1); 469 } 470 471 WRT16_IO_REG(ha, nvram, NV_SELECT); 472 ql_nv_delay(); 473 } 474 475 /* 476 * Deselect the chip 477 */ 478 WRT16_IO_REG(ha, nvram, NV_DESELECT); 479 480 ql_release_nvram(ha); 481 482 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 483 484 return (0); 485 } 486 487 /* 488 * Set adapter feature bits in NVRAM 489 */ 490 static int 491 ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features) 492 { 493 int rval; 494 uint32_t count; 495 nvram_t *nv; 496 uint16_t *wptr; 497 uint8_t *bptr; 498 uint8_t csum; 499 uint32_t start_addr; 500 501 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 502 503 if (CFG_IST(ha, CFG_CTRL_2425)) { 504 EL(ha, "Not supported for 24xx\n"); 505 return (EINVAL); 506 } 507 508 nv = kmem_zalloc(sizeof (*nv), KM_SLEEP); 509 if (nv == NULL) { 510 EL(ha, "failed, kmem_zalloc\n"); 511 return (ENOMEM); 512 } 513 514 rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA); 515 if (rval != QL_SUCCESS) { 516 
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(nv, sizeof (*nv));
		return (EIO);
	}
	rval = 0;

	/*
	 * Read off the whole NVRAM, accumulating the byte checksum as
	 * we go (a valid image sums to zero).
	 */
	wptr = (uint16_t *)nv;
	csum = 0;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		*wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
		csum = (uint8_t)(csum + (uint8_t)*wptr);
		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
		wptr++;
	}

	/*
	 * If the checksum is BAD then fail it right here.
	 */
	if (csum) {
		kmem_free(nv, sizeof (*nv));
		ql_release_nvram(ha);
		return (EBADF);
	}

	/* Patch in the new feature bits, big-endian within the field. */
	nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
	nv->adapter_features[1] = (uint8_t)(features & 0xFF);

	/*
	 * Recompute the checksum now (csum is known to be zero here,
	 * so this re-sums the modified image from scratch).
	 */
	bptr = (uint8_t *)nv;
	for (count = 0; count < sizeof (nvram_t) - 1; count++) {
		csum = (uint8_t)(csum + *bptr++);
	}
	csum = (uint8_t)(~csum + 1);
	nv->checksum = csum;

	/*
	 * Now load the NVRAM
	 */
	wptr = (uint16_t *)nv;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
	}

	/*
	 * Read NVRAM and verify the contents
	 */
	wptr = (uint16_t *)nv;
	csum = 0;
	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
		if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
			rval = EIO;
			break;
		}
		csum = (uint8_t)(csum + (uint8_t)*wptr);
		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
		wptr++;
	}

	if (csum) {
		rval = EINVAL;
	}

	kmem_free(nv, sizeof (*nv));
	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * Fix this function to update just feature bits and checksum in NVRAM
 */
static int
ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	count;
	uint32_t	start_addr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		return (EIO);
	}
	rval = 0;

	if (CFG_IST(ha, CFG_CTRL_2425)) {
		/* ISP24xx/25xx: 32-bit NVRAM image written via flash. */
		nvram_24xx_t	*nv;
		uint32_t	*longptr;
		uint32_t	csum = 0;

		/*
		 * NOTE(review): KM_SLEEP never returns NULL, so this
		 * branch is dead; if it could trigger it would return
		 * with the NVRAM still locked (no ql_release_nvram).
		 */
		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
		if (nv == NULL) {
			EL(ha, "failed, kmem_zalloc\n");
			return (ENOMEM);
		}

		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);

		nv->version[0] = 1;
		nv->max_frame_length[1] = 8;
		nv->execution_throttle[0] = 16;
		nv->login_retry_count[0] = 8;

		nv->firmware_options_1[0] = BIT_2 | BIT_1;
		nv->firmware_options_1[1] = BIT_5;
		nv->firmware_options_2[0] = BIT_5;
		nv->firmware_options_2[1] = BIT_4;
		nv->firmware_options_3[1] = BIT_6;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[0] = BIT_4 | BIT_1;
		nv->host_p[1] = BIT_3 | BIT_2;
		nv->reset_delay = 5;
		nv->max_luns_per_target[0] = 128;
		nv->port_down_retry_count[0] = 30;
		nv->link_down_timeout[0] = 30;

		/*
		 * compute the checksum now (two's complement so the
		 * whole image sums to zero; stored in the last dword)
		 */
		longptr = (uint32_t *)nv;
		csum = 0;
		for (count = 0; count < (sizeof (nvram_24xx_t)/4)-1; count++) {
			csum += *longptr;
			longptr++;
		}
		csum = (uint32_t)(~csum + 1);
		/*
		 * NOTE(review): other call sites pass a pointer to
		 * LITTLE_ENDIAN_32(); passing "(long)csum" by value looks
		 * suspect -- should probably be &csum.  Verify against
		 * the macro definition before changing.
		 */
		LITTLE_ENDIAN_32((long)csum);
		*longptr = csum;

		/*
		 * Now load the NVRAM
		 */
		longptr = (uint32_t *)nv;
		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
			(void) ql_24xx_load_nvram(ha,
			    (uint32_t)(count + start_addr), *longptr++);
		}

		/*
		 * Read NVRAM and verify the contents (each dword is read
		 * into the same slot and folded into the running sum;
		 * a valid image sums to zero)
		 */
		csum = 0;
		longptr = (uint32_t *)nv;
		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
			rval = ql_24xx_read_flash(ha, count + start_addr,
			    longptr);
			if (rval != QL_SUCCESS) {
				EL(ha, "24xx_read_flash failed=%xh\n", rval);
				break;
			}
			csum += *longptr;
		}

		if (csum) {
			rval = EINVAL;
		}
		kmem_free(nv, sizeof (nvram_24xx_t));
	} else {
		/* Pre-24xx: byte-checksummed image via serial NVRAM. */
		nvram_t		*nv;
		uint16_t	*wptr;
		uint8_t		*bptr;
		uint8_t		csum;

		/* NOTE(review): same dead ENOMEM/lock-leak path as above. */
		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
		if (nv == NULL) {
			EL(ha, "failed, kmem_zalloc\n");
			return (ENOMEM);
		}
		/*
		 * Set default initialization control block.
		 */
		nv->parameter_block_version = ICB_VERSION;
		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;

		nv->max_frame_length[1] = 4;
		nv->max_iocb_allocation[1] = 1;
		nv->execution_throttle[0] = 16;
		nv->login_retry_count = 8;
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;
		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[0] = BIT_1;
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->maximum_luns_per_target[0] = 8;

		/*
		 * compute the checksum now
		 */
		bptr = (uint8_t *)nv;
		csum = 0;
		for (count = 0; count < sizeof (nvram_t) - 1; count++) {
			csum = (uint8_t)(csum + *bptr++);
		}
		csum = (uint8_t)(~csum + 1);
		nv->checksum = csum;

		/*
		 * Now load the NVRAM
		 */
		wptr = (uint16_t *)nv;
		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
			ql_load_nvram(ha, (uint8_t)(count + start_addr),
			    *wptr++);
		}

		/*
		 * Read NVRAM and verify the contents
		 */
		wptr = (uint16_t *)nv;
		csum = 0;
		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
			if (ql_get_nvram_word(ha, count + start_addr) !=
			    *wptr) {
				rval = EIO;
				break;
			}
			csum = (uint8_t)(csum + (uint8_t)*wptr);
			csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
			wptr++;
		}
		if (csum) {
			rval = EINVAL;
		}
		kmem_free(nv, sizeof (*nv));
	}

	ql_release_nvram(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_load_nvram
 *	Erases and rewrites one 16-bit word of the pre-24xx serial NVRAM
 *	by bit-banging the Microwire-style opcode/address/data stream.
 */
static void
ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
{
	int			count;
	volatile uint16_t	word;
	volatile uint32_t	nv_cmd;

	/* Enable writes: clock out the write-enable preamble. */
	ql_nv_write(ha, NV_DATA_OUT);
	ql_nv_write(ha, 0);
	ql_nv_write(ha, 0);

	for (word = 0; word < 8; word++) {
		ql_nv_write(ha, NV_DATA_OUT);
	}

	/*
	 * Deselect the chip
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Erase Location (3 opcode bits + 8 address bits, MSB first)
	 */
	nv_cmd = (addr << 16) | NV_ERASE_OP;
	nv_cmd <<= 5;
	for (count = 0; count < 11; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/*
	 * Wait for Erase to Finish (device raises DO when ready)
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();
	WRT16_IO_REG(ha, nvram, NV_SELECT);
	word = 0;
	while ((word & NV_DATA_IN) == 0) {
		ql_nv_delay();
		word = RD16_IO_REG(ha, nvram);
	}
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Write data now (3 opcode + 8 address + 16 data bits)
	 */
	nv_cmd = (addr << 16) | NV_WRITE_OP;
	nv_cmd |= value;
	nv_cmd <<= 5;
	for (count = 0; count < 27; count++) {
		if (nv_cmd & BIT_31) {
			ql_nv_write(ha, NV_DATA_OUT);
		} else {
			ql_nv_write(ha, 0);
		}
		nv_cmd <<= 1;
	}

	/*
	 * Wait for NVRAM to become ready
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();
	WRT16_IO_REG(ha, nvram, NV_SELECT);
	word = 0;
	while ((word & NV_DATA_IN) == 0) {
		ql_nv_delay();
		word = RD16_IO_REG(ha, nvram);
	}
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
	ql_nv_delay();

	/*
	 * Disable writes
	 */
	ql_nv_write(ha, NV_DATA_OUT);
	for (count = 0; count < 10; count++) {
		ql_nv_write(ha, 0);
	}

	/*
	 * Deselect the chip now
	 */
	WRT16_IO_REG(ha, nvram, NV_DESELECT);
}

/*
 * ql_24xx_load_nvram
 *	Enable NVRAM and writes a 32bit word to ISP24xx NVRAM.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	NVRAM address.
 *	value:	data.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
{
	int	rval;

	/* Enable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	/* Disable NVRAM write-protection. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
	} else {
		ql_24xx_unprotect_flash(ha);
	}

	/* Write to flash. */
	rval = ql_24xx_write_flash(ha, addr, value);

	/* Enable NVRAM write-protection. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		/* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
	} else {
		ql_24xx_protect_flash(ha);
	}

	/* Disable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	return (rval);
}

/*
 * ql_nv_util_load
 *	Loads NVRAM from application.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = ioctl mode (for ddi_copyin).
 *
 * Returns:
 *	0 = success, else errno value.
 *
 * Context:
 *	Kernel context.
 */
int
ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode)
{
	uint8_t		cnt;
	void		*nv;
	uint16_t	*wptr;
	uint16_t	data;
	uint32_t	start_addr, nv_size, *lptr, data32;
	nvram_t		*nptr;
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Image size depends on the adapter generation. */
	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
	    sizeof (nvram_24xx_t) : sizeof (nvram_t));

	if ((nv = kmem_zalloc(nv_size, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}

	if (ddi_copyin(bp, nv, nv_size, mode) != 0) {
		EL(ha, "Buffer copy failed\n");
		kmem_free(nv, nv_size);
		return (EFAULT);
	}

	/* See if the buffer passed to us looks sane ("ISP " signature) */
	nptr = (nvram_t *)nv;
	if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' ||
	    nptr->id[3] != ' ') {
		EL(ha, "failed, buffer sanity check\n");
		kmem_free(nv, nv_size);
		return (EINVAL);
	}

	/* Quiesce I/O */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(nv, nv_size);
		return (EBUSY);
	}

	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(nv, nv_size);
		ql_restart_driver(ha);
		return (EIO);
	}

	/* Load NVRAM, using the write method for this adapter family. */
	if (CFG_IST(ha, CFG_CTRL_25XX)) {
		GLOBAL_HW_UNLOCK();
		if ((rval = ql_25xx_load_nv_vpd(ha, (uint8_t *)nv, start_addr,
		    nv_size)) != QL_SUCCESS) {
			EL(ha, "nvram load failed, rval = %0xh\n", rval);
		}
		GLOBAL_HW_LOCK();
	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		lptr = (uint32_t *)nv;
		for (cnt = 0; cnt < nv_size / 4; cnt++) {
			data32 = *lptr++;
			LITTLE_ENDIAN_32(&data32);
			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
			    data32);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
				break;
			}
		}
	} else {
		wptr = (uint16_t *)nv;
		for (cnt = 0; cnt < nv_size / 2; cnt++) {
			data = *wptr++;
			LITTLE_ENDIAN_16(&data);
			ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data);
		}
	}

	kmem_free(nv, nv_size);
	ql_release_nvram(ha);
	ql_restart_driver(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	if (rval == QL_SUCCESS) {
		return (0);
	}

	return (EFAULT);
}

/*
 * ql_nv_util_dump
 *	Dumps NVRAM to application.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *	mode = ioctl mode (for ddi_copyout).
 *
 * Returns:
 *	0 = success, else errno value.
 *
 * Context:
 *	Kernel context.
 */
int
ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode)
{
	uint32_t	cnt, nv_size;
	void		*nv;
	uint32_t	start_addr;
	int		rval2, rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Image size depends on the adapter generation. */
	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ?
	    sizeof (nvram_24xx_t) : sizeof (nvram_t));

	if ((nv = kmem_zalloc(nv_size, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}

	/* Quiesce I/O */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(nv, nv_size);
		return (EBUSY);
	}

	if ((rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) !=
	    QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval2);
		kmem_free(nv, nv_size);
		ql_restart_driver(ha);
		return (EIO);
	}

	/* Dump NVRAM, using the read method for this adapter family. */
	if (CFG_IST(ha, CFG_CTRL_2425)) {

		uint32_t	*lptr = (uint32_t *)nv;

		for (cnt = 0; cnt < nv_size / 4; cnt++) {
			rval2 = ql_24xx_read_flash(ha, start_addr++, lptr);
			if (rval2 != QL_SUCCESS) {
				EL(ha, "read_flash failed=%xh\n", rval2);
				rval = EAGAIN;
				break;
			}

			LITTLE_ENDIAN_32(lptr);
			lptr++;
		}
	} else {
		uint16_t	data;
		uint16_t	*wptr = (uint16_t *)nv;

		for (cnt = 0; cnt < nv_size / 2; cnt++) {
			data = (uint16_t)ql_get_nvram_word(ha, cnt +
			    start_addr);
			LITTLE_ENDIAN_16(&data);
			*wptr++ = data;
		}
	}

	ql_release_nvram(ha);
	ql_restart_driver(ha);

	if (rval != 0) {
		EL(ha, "failed to dump nvram\n");
		kmem_free(nv, nv_size);
		return (rval);
	}

	if (ddi_copyout(nv, bp, nv_size, mode) != 0) {
		EL(ha, "Buffer copy failed\n");
		kmem_free(nv, nv_size);
		return (EFAULT);
	}

	kmem_free(nv, nv_size);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * ql_vpd_load
 *	Loads VPD from application.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
int
ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode)
{
	uint8_t		cnt;
	uint8_t		*vpd, *vpdptr, *vbuf;
	uint32_t	start_addr, vpd_size, *lptr, data32;
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* VPD only exists on ISP24xx/25xx adapters. */
	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		EL(ha, "unsupported adapter feature\n");
		return (ENOTSUP);
	}

	vpd_size = QL_24XX_VPD_SIZE;

	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}

	if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) {
		EL(ha, "Buffer copy failed\n");
		kmem_free(vpd, vpd_size);
		return (EFAULT);
	}

	/* Sanity check the user supplied data via checksum */
	if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) {
		EL(ha, "vpd RV tag missing\n");
		kmem_free(vpd, vpd_size);
		return (EINVAL);
	}

	/*
	 * Sum every byte from the start of the image through the RV
	 * checksum byte (3 bytes past the tag); a valid image sums to 0.
	 */
	vpdptr += 3;
	cnt = 0;
	vbuf = vpd;
	while (vbuf <= vpdptr) {
		cnt += *vbuf++;
	}
	if (cnt != 0) {
		/*
		 * NOTE(review): "passed=%xh" is given the pointer
		 * (uintptr_t)vpdptr, not the stored checksum byte
		 * (*vpdptr) -- looks like a diagnostic bug; verify.
		 */
		EL(ha, "mismatched checksum, cal=%xh, passed=%xh\n",
		    (uint8_t)cnt, (uintptr_t)vpdptr);
		kmem_free(vpd, vpd_size);
		return (EINVAL);
	}

	/* Quiesce I/O */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(vpd, vpd_size);
		return (EBUSY);
	}

	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(vpd, vpd_size);
		ql_restart_driver(ha);
		return (EIO);
	}

	/* Load VPD, using the write method for this adapter family. */
	if (CFG_IST(ha, CFG_CTRL_25XX)) {
		GLOBAL_HW_UNLOCK();
		if ((rval = ql_25xx_load_nv_vpd(ha, vpd, start_addr,
		    vpd_size)) != QL_SUCCESS) {
			EL(ha, "vpd load error: %xh\n", rval);
		}
		GLOBAL_HW_LOCK();
	} else {
		lptr = (uint32_t *)vpd;
		for (cnt = 0; cnt < vpd_size / 4; cnt++) {
			data32 = *lptr++;
			LITTLE_ENDIAN_32(&data32);
			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
			    data32);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
				break;
			}
		}
	}

	kmem_free(vpd, vpd_size);

	/* Update the vcache */
	CACHE_LOCK(ha);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, load\n");
	} else if ((ha->vcache == NULL) && ((ha->vcache =
	    kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) {
		EL(ha, "failed, kmem_zalloc2\n");
	} else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) {
		EL(ha, "Buffer copy2 failed\n");
		kmem_free(ha->vcache, vpd_size);
		ha->vcache = NULL;
	}

	CACHE_UNLOCK(ha);

	ql_release_nvram(ha);
	ql_restart_driver(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	if (rval == QL_SUCCESS) {
		return (0);
	}

	return (EFAULT);
}

/*
 * ql_vpd_dump
 *	Dumps VPD to application buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	bp = user buffer address.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
int
ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
{
	uint8_t		cnt;
	void		*vpd;
	uint32_t	start_addr, vpd_size, *lptr;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* VPD only exists on ISP24xx/25xx adapters. */
	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		EL(ha, "unsupported adapter feature\n");
		return (EACCES);
	}

	vpd_size = QL_24XX_VPD_SIZE;

	CACHE_LOCK(ha);

	/* Serve from the cached copy when one exists. */
	if (ha->vcache != NULL) {
		/* copy back the vpd cache data */
		if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
			EL(ha, "Buffer copy failed\n");
			rval = EFAULT;
		}
		CACHE_UNLOCK(ha);
		return (rval);
	}

	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
		CACHE_UNLOCK(ha);
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}

	/* Quiesce I/O */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(vpd, vpd_size);
		return (EBUSY);
	}

	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
	if (rval != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
		kmem_free(vpd, vpd_size);
		ql_restart_driver(ha);
		return (EIO);
	}

	/* Dump VPD, one dword at a time. */
	lptr = (uint32_t *)vpd;

	for (cnt = 0; cnt < vpd_size / 4; cnt++) {
		rval = ql_24xx_read_flash(ha, start_addr++, lptr);
		if (rval != QL_SUCCESS) {
			EL(ha, "read_flash failed=%xh\n", rval);
			rval = EAGAIN;
			break;
		}
		LITTLE_ENDIAN_32(lptr);
		lptr++;
	}

	ql_release_nvram(ha);
	ql_restart_driver(ha);

	if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
		CACHE_UNLOCK(ha);
		EL(ha, "Buffer copy failed\n");
		kmem_free(vpd, vpd_size);
		return (EFAULT);
	}

	/* Buffer ownership transfers to the cache; freed on detach. */
	ha->vcache = vpd;

	CACHE_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	if (rval != QL_SUCCESS) {
		return (EFAULT);
	} else {
		return (0);
	}
}

/*
 * ql_vpd_findtag
 *	Search the passed vpd buffer for the requested VPD tag type.
 *
 * Input:
 *	ha = adapter state pointer.
 *	vpdbuf = Pointer to start of the buffer to search
 *	opcode = VPD opcode to find (must be NULL terminated).
 *
 * Returns:
 *	Pointer to the opcode in the buffer if opcode found.
 *	NULL if opcode is not found.
 *
 * Context:
 *	Kernel context.
 */
static uint8_t *
ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode)
{
	uint8_t		*vpd = vpdbuf;
	uint8_t		*end = vpdbuf + QL_24XX_VPD_SIZE;
	uint32_t	found = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (vpdbuf == NULL || opcode == NULL) {
		EL(ha, "null parameter passed!\n");
		return (NULL);
	}

	while (vpd < end) {

		/* check for end of vpd */
		if (vpd[0] == VPD_TAG_END) {
			if (opcode[0] == VPD_TAG_END) {
				found = 1;
			} else {
				found = 0;
			}
			break;
		}

		/* check opcode */
		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
			/* found opcode requested */
			found = 1;
			break;
		}

		/*
		 * Didn't find the opcode, so calculate start of
		 * next tag. Depending on the current tag type,
		 * the length field can be 1 or 2 bytes
		 *
		 * NOTE(review): the length bytes vpd[1]/vpd[2] are read
		 * without checking they lie below 'end'; a truncated or
		 * corrupt tag near the buffer tail would read 1-2 bytes
		 * past the search window -- confirm the buffer layout
		 * guarantees a VPD_TAG_END before that can happen.
		 */
		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
			/* product-id tag: 16-bit length, low byte first */
			vpd += (vpd[2] << 8) + vpd[1] + 3;
		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
			/* resource tags carry no data field */
			vpd += 3;
		} else {
			/* all other tags: 8-bit length at vpd[2] */
			vpd += vpd[2] +3;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (found == 1 ? vpd : NULL);
}

/*
 * ql_vpd_lookup
 *	Return the VPD data for the request VPD tag
 *
 * Input:
 *	ha	= adapter state pointer.
 *	opcode	= VPD opcode to find (must be NULL terminated).
 *	bp	= Pointer to returned data buffer.
 *	bplen	= Length of returned data buffer.
 *
 * Returns:
 *	Length of data copied into returned data buffer.
 *	>0	= VPD data field (NULL terminated)
 *	 0	= no data.
 *	-1	= Could not find opcode in vpd buffer / error.
 *
 * Context:
 *	Kernel context.
 *
 * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1432 * 1433 */ 1434 int32_t 1435 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp, 1436 int32_t bplen) 1437 { 1438 uint8_t *vpd; 1439 uint8_t *vpdbuf; 1440 int32_t len = -1; 1441 1442 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1443 1444 if (opcode == NULL || bp == NULL || bplen < 1) { 1445 EL(ha, "invalid parameter passed: opcode=%ph, " 1446 "bp=%ph, bplen=%xh\n", opcode, bp, bplen); 1447 return (len); 1448 } 1449 1450 if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) { 1451 return (len); 1452 } 1453 1454 if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE, 1455 KM_SLEEP)) == NULL) { 1456 EL(ha, "unable to allocate vpd memory\n"); 1457 return (len); 1458 } 1459 1460 if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) { 1461 kmem_free(vpdbuf, QL_24XX_VPD_SIZE); 1462 EL(ha, "unable to retrieve VPD data\n"); 1463 return (len); 1464 } 1465 1466 if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) { 1467 /* 1468 * Found the tag 1469 */ 1470 if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT || 1471 *opcode == VPD_TAG_LRTC) { 1472 /* 1473 * we found it, but the tag doesn't have a data 1474 * field. 
1475 */ 1476 len = 0; 1477 } else if (!(strncmp((char *)vpd, (char *) 1478 VPD_TAG_PRODID, 1))) { 1479 len = vpd[2] << 8; 1480 len += vpd[1]; 1481 } else { 1482 len = vpd[2]; 1483 } 1484 1485 /* 1486 * make sure that the vpd len doesn't exceed the 1487 * vpd end 1488 */ 1489 if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) { 1490 EL(ha, "vpd tag len (%xh) exceeds vpd buffer " 1491 "length\n", len); 1492 len = -1; 1493 } 1494 } 1495 1496 if (len >= 0) { 1497 /* 1498 * make sure we don't exceed callers buffer space len 1499 */ 1500 if (len > bplen) { 1501 len = bplen-1; 1502 } 1503 1504 /* copy the data back */ 1505 (void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len); 1506 bp[len] = NULL; 1507 } else { 1508 /* error -- couldn't find tag */ 1509 bp[0] = NULL; 1510 if (opcode[1] != NULL) { 1511 EL(ha, "unable to find tag '%s'\n", opcode); 1512 } else { 1513 EL(ha, "unable to find tag '%xh'\n", opcode[0]); 1514 } 1515 } 1516 1517 kmem_free(vpdbuf, QL_24XX_VPD_SIZE); 1518 1519 QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance); 1520 1521 return (len); 1522 } 1523 1524 static int 1525 ql_25xx_load_nv_vpd(ql_adapter_state_t *ha, uint8_t *buf, uint32_t faddr, 1526 uint32_t bufsize) 1527 { 1528 uint8_t *bp; 1529 int rval; 1530 uint32_t bsize = 0x10000; 1531 uint32_t saddr, ofst; 1532 1533 if ((bp = kmem_zalloc(bsize, KM_SLEEP)) == NULL) { 1534 EL(ha, "failed kmem_zalloc\n"); 1535 return (ENOMEM); 1536 } 1537 1538 saddr = ((faddr & 0xff000) << 2) & 0xffff0000; 1539 ofst = (faddr & 0xfff) << 2; 1540 1541 /* Dump Flash sector. */ 1542 if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) == QL_SUCCESS) { 1543 /* Set new data. */ 1544 bcopy(buf, bp + ofst, bufsize); 1545 1546 /* Write to flash. 
*/ 1547 (void) ql_24xx_load_flash(ha, bp, bsize, saddr); 1548 } else { 1549 EL(ha, "failed dump_fcode=%x\n", rval); 1550 } 1551 1552 kmem_free(bp, bsize); 1553 1554 return (rval); 1555 } 1556 1557 /* 1558 * ql_adm_op 1559 * Performs qladm utility operations 1560 * 1561 * Input: 1562 * ha: adapter state pointer. 1563 * arg: driver_op_t structure pointer. 1564 * mode: flags. 1565 * 1566 * Returns: 1567 * 1568 * Context: 1569 * Kernel context. 1570 */ 1571 static int 1572 ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode) 1573 { 1574 ql_adm_op_t dop; 1575 int rval = 0; 1576 1577 if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) { 1578 EL(ha, "failed, driver_op_t ddi_copyin\n"); 1579 return (EFAULT); 1580 } 1581 1582 QL_PRINT_9(CE_CONT, "(%d): entered, cmd=%xh, buffer=%llx," 1583 " length=%xh, option=%xh\n", ha->instance, dop.cmd, dop.buffer, 1584 dop.length, dop.option); 1585 1586 switch (dop.cmd) { 1587 case QL_ADAPTER_INFO: 1588 rval = ql_adm_adapter_info(ha, &dop, mode); 1589 break; 1590 1591 case QL_EXTENDED_LOGGING: 1592 rval = ql_adm_extended_logging(ha, &dop); 1593 break; 1594 1595 case QL_LOOP_RESET: 1596 rval = ql_adm_loop_reset(ha); 1597 break; 1598 1599 case QL_DEVICE_LIST: 1600 rval = ql_adm_device_list(ha, &dop, mode); 1601 break; 1602 1603 case QL_PROP_UPDATE_INT: 1604 rval = ql_adm_prop_update_int(ha, &dop, mode); 1605 break; 1606 1607 case QL_UPDATE_PROPERTIES: 1608 rval = ql_adm_update_properties(ha); 1609 break; 1610 1611 case QL_FW_DUMP: 1612 rval = ql_adm_fw_dump(ha, &dop, arg, mode); 1613 break; 1614 1615 case QL_NVRAM_LOAD: 1616 rval = ql_adm_nvram_load(ha, &dop, mode); 1617 break; 1618 1619 case QL_NVRAM_DUMP: 1620 rval = ql_adm_nvram_dump(ha, &dop, mode); 1621 break; 1622 1623 case QL_FLASH_LOAD: 1624 rval = ql_adm_flash_load(ha, &dop, mode); 1625 break; 1626 1627 case QL_VPD_LOAD: 1628 rval = ql_adm_vpd_load(ha, &dop, mode); 1629 break; 1630 1631 case QL_VPD_DUMP: 1632 rval = ql_adm_vpd_dump(ha, &dop, mode); 1633 break; 1634 1635 
case QL_VPD_GETTAG: 1636 rval = ql_adm_vpd_gettag(ha, &dop, mode); 1637 break; 1638 1639 case QL_UPD_FWMODULE: 1640 rval = ql_adm_updfwmodule(ha, &dop, mode); 1641 break; 1642 1643 default: 1644 EL(ha, "unsupported driver op cmd: %x\n", dop.cmd); 1645 return (EINVAL); 1646 } 1647 1648 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1649 1650 return (rval); 1651 } 1652 1653 /* 1654 * ql_adm_adapter_info 1655 * Performs qladm QL_ADAPTER_INFO command 1656 * 1657 * Input: 1658 * ha: adapter state pointer. 1659 * dop: ql_adm_op_t structure pointer. 1660 * mode: flags. 1661 * 1662 * Returns: 1663 * 1664 * Context: 1665 * Kernel context. 1666 */ 1667 static int 1668 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 1669 { 1670 ql_adapter_info_t hba; 1671 uint8_t *dp; 1672 uint32_t length; 1673 int rval, i; 1674 1675 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1676 1677 hba.device_id = ha->device_id; 1678 1679 dp = CFG_IST(ha, CFG_CTRL_2425) ? 1680 &ha->init_ctrl_blk.cb24.port_name[0] : 1681 &ha->init_ctrl_blk.cb.port_name[0]; 1682 bcopy(dp, hba.wwpn, 8); 1683 1684 hba.d_id = ha->d_id.b24; 1685 1686 if (ha->xioctl->fdesc.flash_size == 0 && 1687 !(CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id)) { 1688 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 1689 EL(ha, "ql_stall_driver failed\n"); 1690 return (EBUSY); 1691 } 1692 1693 if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) { 1694 EL(ha, "ql_setup_flash failed=%xh\n", rval); 1695 if (rval == QL_FUNCTION_TIMEOUT) { 1696 return (EBUSY); 1697 } 1698 return (EIO); 1699 } 1700 1701 /* Resume I/O */ 1702 if (CFG_IST(ha, CFG_CTRL_2425)) { 1703 ql_restart_driver(ha); 1704 } else { 1705 EL(ha, "isp_abort_needed for restart\n"); 1706 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 1707 DRIVER_STALL); 1708 } 1709 } 1710 hba.flash_size = ha->xioctl->fdesc.flash_size; 1711 1712 (void) strcpy(hba.driver_ver, QL_VERSION); 1713 1714 (void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version, 1715 
ha->fw_minor_version, ha->fw_subminor_version); 1716 1717 bzero(hba.fcode_ver, sizeof (hba.fcode_ver)); 1718 1719 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/ 1720 rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, 1721 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i); 1722 length = i; 1723 if (rval != DDI_PROP_SUCCESS) { 1724 EL(ha, "failed, ddi_getlongprop=%xh\n", rval); 1725 } else { 1726 if (length > (uint32_t)sizeof (hba.fcode_ver)) { 1727 length = sizeof (hba.fcode_ver) - 1; 1728 } 1729 bcopy((void *)dp, (void *)hba.fcode_ver, length); 1730 kmem_free(dp, length); 1731 } 1732 1733 if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer, 1734 dop->length, mode) != 0) { 1735 EL(ha, "failed, ddi_copyout\n"); 1736 return (EFAULT); 1737 } 1738 1739 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1740 1741 return (0); 1742 } 1743 1744 /* 1745 * ql_adm_extended_logging 1746 * Performs qladm QL_EXTENDED_LOGGING command 1747 * 1748 * Input: 1749 * ha: adapter state pointer. 1750 * dop: ql_adm_op_t structure pointer. 1751 * 1752 * Returns: 1753 * 1754 * Context: 1755 * Kernel context. 1756 */ 1757 static int 1758 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop) 1759 { 1760 char prop_name[MAX_PROP_LENGTH]; 1761 int rval; 1762 1763 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1764 1765 (void) sprintf(prop_name, "hba%d-extended-logging", ha->instance); 1766 1767 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/ 1768 rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name, 1769 (int)dop->option); 1770 if (rval != DDI_PROP_SUCCESS) { 1771 EL(ha, "failed, prop_update = %xh\n", rval); 1772 return (EINVAL); 1773 } else { 1774 dop->option ? 
1775 (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) : 1776 (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING); 1777 } 1778 1779 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1780 1781 return (0); 1782 } 1783 1784 /* 1785 * ql_adm_loop_reset 1786 * Performs qladm QL_LOOP_RESET command 1787 * 1788 * Input: 1789 * ha: adapter state pointer. 1790 * 1791 * Returns: 1792 * 1793 * Context: 1794 * Kernel context. 1795 */ 1796 static int 1797 ql_adm_loop_reset(ql_adapter_state_t *ha) 1798 { 1799 int rval; 1800 1801 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1802 1803 if (ha->task_daemon_flags & LOOP_DOWN) { 1804 (void) ql_full_login_lip(ha); 1805 } else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) { 1806 EL(ha, "failed, ql_initiate_lip=%xh\n", rval); 1807 return (EIO); 1808 } 1809 1810 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1811 1812 return (0); 1813 } 1814 1815 /* 1816 * ql_adm_device_list 1817 * Performs qladm QL_DEVICE_LIST command 1818 * 1819 * Input: 1820 * ha: adapter state pointer. 1821 * dop: ql_adm_op_t structure pointer. 1822 * mode: flags. 1823 * 1824 * Returns: 1825 * 1826 * Context: 1827 * Kernel context. 
1828 */ 1829 static int 1830 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 1831 { 1832 ql_device_info_t dev; 1833 ql_link_t *link; 1834 ql_tgt_t *tq; 1835 uint32_t index, cnt; 1836 1837 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1838 1839 cnt = 0; 1840 dev.address = 0xffffffff; 1841 1842 /* Scan port list for requested target and fill in the values */ 1843 for (link = NULL, index = 0; 1844 index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) { 1845 for (link = ha->dev[index].first; link != NULL; 1846 link = link->next) { 1847 tq = link->base_address; 1848 1849 if (!VALID_TARGET_ID(ha, tq->loop_id)) { 1850 continue; 1851 } 1852 if (cnt != dop->option) { 1853 cnt++; 1854 continue; 1855 } 1856 /* fill in the values */ 1857 bcopy(tq->port_name, dev.wwpn, 8); 1858 dev.address = tq->d_id.b24; 1859 dev.loop_id = tq->loop_id; 1860 if (tq->flags & TQF_TAPE_DEVICE) { 1861 dev.type = FCT_TAPE; 1862 } else if (tq->flags & TQF_INITIATOR_DEVICE) { 1863 dev.type = FCT_INITIATOR; 1864 } else { 1865 dev.type = FCT_TARGET; 1866 } 1867 break; 1868 } 1869 } 1870 1871 if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer, 1872 dop->length, mode) != 0) { 1873 EL(ha, "failed, ddi_copyout\n"); 1874 return (EFAULT); 1875 } 1876 1877 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1878 1879 return (0); 1880 } 1881 1882 /* 1883 * ql_adm_update_properties 1884 * Performs qladm QL_UPDATE_PROPERTIES command 1885 * 1886 * Input: 1887 * ha: adapter state pointer. 1888 * 1889 * Returns: 1890 * 1891 * Context: 1892 * Kernel context. 1893 */ 1894 static int 1895 ql_adm_update_properties(ql_adapter_state_t *ha) 1896 { 1897 ql_comb_init_cb_t init_ctrl_blk; 1898 ql_comb_ip_init_cb_t ip_init_ctrl_blk; 1899 1900 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1901 1902 /* Stall driver instance. */ 1903 (void) ql_stall_driver(ha, 0); 1904 1905 /* Save init control blocks. 
*/ 1906 bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t)); 1907 bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk, 1908 sizeof (ql_comb_ip_init_cb_t)); 1909 1910 /* Update PCI configration. */ 1911 (void) ql_pci_sbus_config(ha); 1912 1913 /* Get configuration properties. */ 1914 (void) ql_nvram_config(ha); 1915 1916 /* Check for init firmware required. */ 1917 if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk, 1918 sizeof (ql_comb_init_cb_t)) != 0 || 1919 bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk, 1920 sizeof (ql_comb_ip_init_cb_t)) != 0) { 1921 1922 EL(ha, "isp_abort_needed\n"); 1923 ha->loop_down_timer = LOOP_DOWN_TIMER_START; 1924 TASK_DAEMON_LOCK(ha); 1925 ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED; 1926 TASK_DAEMON_UNLOCK(ha); 1927 } 1928 1929 /* Update AEN queue. */ 1930 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) { 1931 ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL); 1932 } 1933 1934 /* Restart driver instance. */ 1935 ql_restart_driver(ha); 1936 1937 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1938 1939 return (0); 1940 } 1941 1942 /* 1943 * ql_adm_prop_update_int 1944 * Performs qladm QL_PROP_UPDATE_INT command 1945 * 1946 * Input: 1947 * ha: adapter state pointer. 1948 * dop: ql_adm_op_t structure pointer. 1949 * mode: flags. 1950 * 1951 * Returns: 1952 * 1953 * Context: 1954 * Kernel context. 
 */
static int
ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	char	*prop_name;
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * dop->length is the user-supplied size of the property name --
	 * presumably including its terminating NUL; if the copied-in name
	 * is not NUL terminated, ddi_prop_update_int would read past the
	 * buffer. TODO confirm the qladm utility guarantees termination.
	 */
	prop_name = kmem_zalloc(dop->length, KM_SLEEP);
	/* NOTE(review): KM_SLEEP allocations cannot fail; check is dead. */
	if (prop_name == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}

	if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length,
	    mode) != 0) {
		EL(ha, "failed, prop_name ddi_copyin\n");
		kmem_free(prop_name, dop->length);
		return (EFAULT);
	}

	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
	    (int)dop->option)) != DDI_PROP_SUCCESS) {
		EL(ha, "failed, prop_update=%xh\n", rval);
		kmem_free(prop_name, dop->length);
		return (EINVAL);
	}

	kmem_free(prop_name, dop->length);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * ql_adm_fw_dump
 *	Performs qladm QL_FW_DUMP command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	udop:	user space ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
2006 */ 2007 static int 2008 ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode) 2009 { 2010 caddr_t dmp; 2011 2012 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2013 2014 if (dop->length < ha->risc_dump_size) { 2015 EL(ha, "failed, incorrect length=%xh, size=%xh\n", 2016 dop->length, ha->risc_dump_size); 2017 return (EINVAL); 2018 } 2019 2020 if (ha->ql_dump_state & QL_DUMP_VALID) { 2021 dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP); 2022 if (dmp == NULL) { 2023 EL(ha, "failed, kmem_zalloc\n"); 2024 return (ENOMEM); 2025 } 2026 2027 dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp); 2028 if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer, 2029 dop->length, mode) != 0) { 2030 EL(ha, "failed, ddi_copyout\n"); 2031 kmem_free(dmp, ha->risc_dump_size); 2032 return (EFAULT); 2033 } 2034 2035 kmem_free(dmp, ha->risc_dump_size); 2036 ha->ql_dump_state |= QL_DUMP_UPLOADED; 2037 2038 } else { 2039 EL(ha, "failed, no dump file\n"); 2040 dop->length = 0; 2041 } 2042 2043 if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) { 2044 EL(ha, "failed, driver_op_t ddi_copyout\n"); 2045 return (EFAULT); 2046 } 2047 2048 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2049 2050 return (0); 2051 } 2052 2053 /* 2054 * ql_adm_nvram_dump 2055 * Performs qladm QL_NVRAM_DUMP command 2056 * 2057 * Input: 2058 * ha: adapter state pointer. 2059 * dop: ql_adm_op_t structure pointer. 2060 * mode: flags. 2061 * 2062 * Returns: 2063 * 2064 * Context: 2065 * Kernel context. 2066 */ 2067 static int 2068 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2069 { 2070 uint32_t nv_size; 2071 int rval; 2072 2073 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2074 2075 nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ? 
2076 sizeof (nvram_24xx_t) : sizeof (nvram_t)); 2077 2078 if (dop->length < nv_size) { 2079 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, nv_size); 2080 return (EINVAL); 2081 } 2082 2083 if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer, 2084 mode)) != 0) { 2085 EL(ha, "failed, ql_nv_util_dump\n"); 2086 } else { 2087 /*EMPTY*/ 2088 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2089 } 2090 2091 return (rval); 2092 } 2093 2094 /* 2095 * ql_adm_nvram_load 2096 * Performs qladm QL_NVRAM_LOAD command 2097 * 2098 * Input: 2099 * ha: adapter state pointer. 2100 * dop: ql_adm_op_t structure pointer. 2101 * mode: flags. 2102 * 2103 * Returns: 2104 * 2105 * Context: 2106 * Kernel context. 2107 */ 2108 static int 2109 ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2110 { 2111 uint32_t nv_size; 2112 int rval; 2113 2114 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2115 2116 nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_2425) ? 2117 sizeof (nvram_24xx_t) : sizeof (nvram_t)); 2118 2119 if (dop->length < nv_size) { 2120 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, nv_size); 2121 return (EINVAL); 2122 } 2123 2124 if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer, 2125 mode)) != 0) { 2126 EL(ha, "failed, ql_nv_util_dump\n"); 2127 } else { 2128 /*EMPTY*/ 2129 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2130 } 2131 2132 return (rval); 2133 } 2134 2135 /* 2136 * ql_adm_flash_load 2137 * Performs qladm QL_FLASH_LOAD command 2138 * 2139 * Input: 2140 * ha: adapter state pointer. 2141 * dop: ql_adm_op_t structure pointer. 2142 * mode: flags. 2143 * 2144 * Returns: 2145 * 2146 * Context: 2147 * Kernel context. 
 */
static int
ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	uint8_t	*dp;
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* NOTE(review): KM_SLEEP allocations cannot fail; check is dead. */
	if ((dp = kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}

	/* Pull the flash image in from user space. */
	if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length,
	    mode) != 0) {
		EL(ha, "ddi_copyin failed\n");
		kmem_free(dp, dop->length);
		return (EFAULT);
	}

	/* Quiesce I/O for the duration of the flash write. */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		kmem_free(dp, dop->length);
		return (EBUSY);
	}

	/* 24xx/25xx writers take an extra option argument; older do not. */
	rval = (CFG_IST(ha, CFG_CTRL_2425) ?
	    ql_24xx_load_flash(ha, dp, dop->length, dop->option) :
	    ql_load_flash(ha, dp, dop->length));

	ql_restart_driver(ha);

	kmem_free(dp, dop->length);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed\n");
		return (EIO);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * ql_adm_vpd_dump
 *	Performs qladm QL_VPD_DUMP command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
2206 */ 2207 static int 2208 ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2209 { 2210 int rval; 2211 2212 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2213 2214 if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) { 2215 EL(ha, "hba does not support VPD\n"); 2216 return (EINVAL); 2217 } 2218 2219 if (dop->length < QL_24XX_VPD_SIZE) { 2220 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, 2221 QL_24XX_VPD_SIZE); 2222 return (EINVAL); 2223 } 2224 2225 if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode)) 2226 != 0) { 2227 EL(ha, "failed, ql_vpd_dump\n"); 2228 } else { 2229 /*EMPTY*/ 2230 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2231 } 2232 2233 return (rval); 2234 } 2235 2236 /* 2237 * ql_adm_vpd_load 2238 * Performs qladm QL_VPD_LOAD command 2239 * 2240 * Input: 2241 * ha: adapter state pointer. 2242 * dop: ql_adm_op_t structure pointer. 2243 * mode: flags. 2244 * 2245 * Returns: 2246 * 2247 * Context: 2248 * Kernel context. 2249 */ 2250 static int 2251 ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode) 2252 { 2253 int rval; 2254 2255 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2256 2257 if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) { 2258 EL(ha, "hba does not support VPD\n"); 2259 return (EINVAL); 2260 } 2261 2262 if (dop->length < QL_24XX_VPD_SIZE) { 2263 EL(ha, "failed, length=%xh, size=%xh\n", dop->length, 2264 QL_24XX_VPD_SIZE); 2265 return (EINVAL); 2266 } 2267 2268 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode)) 2269 != 0) { 2270 EL(ha, "failed, ql_vpd_dump\n"); 2271 } else { 2272 /*EMPTY*/ 2273 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2274 } 2275 2276 return (rval); 2277 } 2278 2279 /* 2280 * ql_adm_vpd_gettag 2281 * Performs qladm QL_VPD_GETTAG command 2282 * 2283 * Input: 2284 * ha: adapter state pointer. 2285 * dop: ql_adm_op_t structure pointer. 2286 * mode: flags. 2287 * 2288 * Returns: 2289 * 2290 * Context: 2291 * Kernel context. 
 */
static int
ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	int	rval = 0;
	uint8_t	*lbuf;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		EL(ha, "hba does not support VPD\n");
		return (EINVAL);
	}

	/*
	 * NOTE(review): KM_SLEEP allocations cannot fail, so this branch
	 * is dead; were it live, ENOMEM would be the conventional errno
	 * rather than EFAULT.
	 */
	if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
		EL(ha, "mem alloc failure of %xh bytes\n", dop->length);
		rval = EFAULT;
	} else {
		/* lbuf carries the tag name in and the tag data out. */
		if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf,
		    dop->length, mode) != 0) {
			EL(ha, "ddi_copyin failed\n");
			kmem_free(lbuf, dop->length);
			return (EFAULT);
		}

		/* ql_vpd_lookup allows source and destination to alias. */
		if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t)
		    dop->length)) < 0) {
			EL(ha, "failed vpd_lookup\n");
		} else {
			/* lookup NUL-terminates lbuf; copy string + NUL. */
			if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer,
			    strlen((int8_t *)lbuf)+1, mode) != 0) {
				EL(ha, "failed, ddi_copyout\n");
				rval = EFAULT;
			} else {
				rval = 0;
			}
		}
		kmem_free(lbuf, dop->length);
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}

/*
 * ql_adm_updfwmodule
 *	Performs qladm QL_UPD_FWMODULE command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dop:	ql_adm_op_t structure pointer.
 *	mode:	flags.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
static int
ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
{
	int			rval = DDI_SUCCESS;
	ql_link_t		*link;
	ql_adapter_state_t	*ha2 = NULL;
	uint16_t		fw_class = (uint16_t)dop->option;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* zero the firmware module reference count */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha2 = link->base_address;
		if (fw_class == ha2->fw_class) {
			if ((rval = ddi_modclose(ha2->fw_module)) !=
			    DDI_SUCCESS) {
				EL(ha2, "modclose rval=%xh\n", rval);
				break;
			}
			ha2->fw_module = NULL;
		}
	}

	/* reload the f/w modules */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha2 = link->base_address;

		/*
		 * NOTE(review): 'ha2->fw_class == NULL' can only be true
		 * when fw_class itself is 0; since the loop above clears
		 * fw_module for matching adapters, this looks like it was
		 * meant to be 'ha2->fw_module == NULL' -- confirm against
		 * the qladm utility's expectations before changing.
		 */
		if ((fw_class == ha2->fw_class) && (ha2->fw_class == NULL)) {
			if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) !=
			    QL_SUCCESS) {
				EL(ha2, "unable to load f/w module: '%x' "
				    "(rval=%xh)\n", ha2->fw_class, rval);
				rval = EFAULT;
			} else {
				EL(ha2, "f/w module updated: '%x'\n",
				    ha2->fw_class);
			}

			EL(ha2, "isp abort needed (%d)\n", ha->instance);

			ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0);

			/*
			 * NOTE(review): this unconditionally overwrites the
			 * EFAULT set above, so a resolve failure is never
			 * reported to the caller -- confirm intended.
			 */
			rval = 0;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}