1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* Copyright 2010 QLogic Corporation */ 23 24 /* 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 26 */ 27 28 /* 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 30 */ 31 32 /* 33 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file. 
34 * 35 * *********************************************************************** 36 * * ** 37 * * NOTICE ** 38 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION ** 39 * * ALL RIGHTS RESERVED ** 40 * * ** 41 * *********************************************************************** 42 * 43 */ 44 45 #include <ql_apps.h> 46 #include <ql_api.h> 47 #include <ql_debug.h> 48 #include <ql_init.h> 49 #include <ql_iocb.h> 50 #include <ql_ioctl.h> 51 #include <ql_mbx.h> 52 #include <ql_xioctl.h> 53 54 /* 55 * Local data 56 */ 57 58 /* 59 * Local prototypes 60 */ 61 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int); 62 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int, 63 boolean_t (*)(EXT_IOCTL *)); 64 static boolean_t ql_validate_signature(EXT_IOCTL *); 65 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int); 66 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int); 67 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int); 68 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int); 69 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int); 70 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int); 71 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int); 72 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int); 73 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int); 74 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int); 75 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int); 76 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int); 77 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int); 78 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int); 79 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int); 80 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int); 81 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int); 82 static void 
ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int); 83 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int); 84 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int); 85 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int); 86 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int); 87 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int); 88 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int); 89 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int); 90 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int); 91 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int); 92 93 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *); 94 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *); 95 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int); 96 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *, 97 uint8_t); 98 static uint32_t ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int); 99 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int); 100 static int ql_24xx_flash_desc(ql_adapter_state_t *); 101 static int ql_setup_flash(ql_adapter_state_t *); 102 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t); 103 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int); 104 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t, 105 uint32_t, int); 106 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, 107 uint8_t); 108 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int); 109 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int); 110 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *); 111 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int); 112 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int); 113 static void 
ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int); 114 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int); 115 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int); 116 static void ql_drive_led(ql_adapter_state_t *, uint32_t); 117 static uint32_t ql_setup_led(ql_adapter_state_t *); 118 static uint32_t ql_wrapup_led(ql_adapter_state_t *); 119 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int); 120 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int); 121 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int); 122 static int ql_dump_sfp(ql_adapter_state_t *, void *, int); 123 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *); 124 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int); 125 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int); 126 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t); 127 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *); 128 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t); 129 static void ql_process_flt(ql_adapter_state_t *, uint32_t); 130 static void ql_flash_nvram_defaults(ql_adapter_state_t *); 131 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int); 132 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *); 133 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int); 134 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int); 135 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int); 136 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int); 137 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int); 138 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int); 139 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int); 140 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t); 141 static void 
ql_restart_hba(ql_adapter_state_t *); 142 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int); 143 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int); 144 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int); 145 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int); 146 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *); 147 static void ql_update_flash_caches(ql_adapter_state_t *); 148 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int); 149 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int); 150 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int); 151 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int); 152 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int); 153 static int ql_set_loop_point(ql_adapter_state_t *, uint16_t); 154 155 /* ******************************************************************** */ 156 /* External IOCTL support. */ 157 /* ******************************************************************** */ 158 159 /* 160 * ql_alloc_xioctl_resource 161 * Allocates resources needed by module code. 162 * 163 * Input: 164 * ha: adapter state pointer. 165 * 166 * Returns: 167 * SYS_ERRNO 168 * 169 * Context: 170 * Kernel context. 
171 */ 172 int 173 ql_alloc_xioctl_resource(ql_adapter_state_t *ha) 174 { 175 ql_xioctl_t *xp; 176 177 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 178 179 if (ha->xioctl != NULL) { 180 QL_PRINT_9(CE_CONT, "(%d): already allocated done\n", 181 ha->instance); 182 return (0); 183 } 184 185 xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP); 186 if (xp == NULL) { 187 EL(ha, "failed, kmem_zalloc\n"); 188 return (ENOMEM); 189 } 190 ha->xioctl = xp; 191 192 /* Allocate AEN tracking buffer */ 193 xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE * 194 sizeof (EXT_ASYNC_EVENT), KM_SLEEP); 195 if (xp->aen_tracking_queue == NULL) { 196 EL(ha, "failed, kmem_zalloc-2\n"); 197 ql_free_xioctl_resource(ha); 198 return (ENOMEM); 199 } 200 201 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 202 203 return (0); 204 } 205 206 /* 207 * ql_free_xioctl_resource 208 * Frees resources used by module code. 209 * 210 * Input: 211 * ha: adapter state pointer. 212 * 213 * Context: 214 * Kernel context. 215 */ 216 void 217 ql_free_xioctl_resource(ql_adapter_state_t *ha) 218 { 219 ql_xioctl_t *xp = ha->xioctl; 220 221 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 222 223 if (xp == NULL) { 224 QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance); 225 return; 226 } 227 228 if (xp->aen_tracking_queue != NULL) { 229 kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE * 230 sizeof (EXT_ASYNC_EVENT)); 231 xp->aen_tracking_queue = NULL; 232 } 233 234 kmem_free(xp, sizeof (ql_xioctl_t)); 235 ha->xioctl = NULL; 236 237 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 238 } 239 240 /* 241 * ql_xioctl 242 * External IOCTL processing. 243 * 244 * Input: 245 * ha: adapter state pointer. 
246 * cmd: function to perform 247 * arg: data type varies with request 248 * mode: flags 249 * cred_p: credentials pointer 250 * rval_p: pointer to result value 251 * 252 * Returns: 253 * 0: success 254 * ENXIO: No such device or address 255 * ENOPROTOOPT: Protocol not available 256 * 257 * Context: 258 * Kernel context. 259 */ 260 /* ARGSUSED */ 261 int 262 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode, 263 cred_t *cred_p, int *rval_p) 264 { 265 int rval; 266 267 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd); 268 269 if (ha->xioctl == NULL) { 270 QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance); 271 return (ENXIO); 272 } 273 274 switch (cmd) { 275 case EXT_CC_QUERY: 276 case EXT_CC_SEND_FCCT_PASSTHRU: 277 case EXT_CC_REG_AEN: 278 case EXT_CC_GET_AEN: 279 case EXT_CC_SEND_SCSI_PASSTHRU: 280 case EXT_CC_WWPN_TO_SCSIADDR: 281 case EXT_CC_SEND_ELS_RNID: 282 case EXT_CC_SET_DATA: 283 case EXT_CC_GET_DATA: 284 case EXT_CC_HOST_IDX: 285 case EXT_CC_READ_NVRAM: 286 case EXT_CC_UPDATE_NVRAM: 287 case EXT_CC_READ_OPTION_ROM: 288 case EXT_CC_READ_OPTION_ROM_EX: 289 case EXT_CC_UPDATE_OPTION_ROM: 290 case EXT_CC_UPDATE_OPTION_ROM_EX: 291 case EXT_CC_GET_VPD: 292 case EXT_CC_SET_VPD: 293 case EXT_CC_LOOPBACK: 294 case EXT_CC_GET_FCACHE: 295 case EXT_CC_GET_FCACHE_EX: 296 case EXT_CC_HOST_DRVNAME: 297 case EXT_CC_GET_SFP_DATA: 298 case EXT_CC_PORT_PARAM: 299 case EXT_CC_GET_PCI_DATA: 300 case EXT_CC_GET_FWEXTTRACE: 301 case EXT_CC_GET_FWFCETRACE: 302 case EXT_CC_GET_VP_CNT_ID: 303 case EXT_CC_VPORT_CMD: 304 case EXT_CC_ACCESS_FLASH: 305 case EXT_CC_RESET_FW: 306 case EXT_CC_MENLO_MANAGE_INFO: 307 rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode); 308 break; 309 default: 310 /* function not supported. 
*/ 311 EL(ha, "function=%d not supported\n", cmd); 312 rval = ENOPROTOOPT; 313 } 314 315 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 316 317 return (rval); 318 } 319 320 /* 321 * ql_sdm_ioctl 322 * Provides ioctl functions for SAN/Device Management functions 323 * AKA External Ioctl functions. 324 * 325 * Input: 326 * ha: adapter state pointer. 327 * ioctl_code: ioctl function to perform 328 * arg: Pointer to EXT_IOCTL cmd data in application land. 329 * mode: flags 330 * 331 * Returns: 332 * 0: success 333 * ENOMEM: Alloc of local EXT_IOCTL struct failed. 334 * EFAULT: Copyin of caller's EXT_IOCTL struct failed or 335 * copyout of EXT_IOCTL status info failed. 336 * EINVAL: Signature or version of caller's EXT_IOCTL invalid. 337 * EBUSY: Device busy 338 * 339 * Context: 340 * Kernel context. 341 */ 342 static int 343 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode) 344 { 345 EXT_IOCTL *cmd; 346 int rval; 347 ql_adapter_state_t *vha; 348 349 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 350 351 /* Copy argument structure (EXT_IOCTL) from application land. */ 352 if ((rval = ql_sdm_setup(ha, &cmd, arg, mode, 353 ql_validate_signature)) != 0) { 354 /* 355 * a non-zero value at this time means a problem getting 356 * the requested information from application land, just 357 * return the error code and hope for the best. 358 */ 359 EL(ha, "failed, sdm_setup\n"); 360 return (rval); 361 } 362 363 /* 364 * Map the physical ha ptr (which the ioctl is called with) 365 * to the virtual ha that the caller is addressing. 366 */ 367 if (ha->flags & VP_ENABLED) { 368 /* Check that it is within range. */ 369 if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ? 
370 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) { 371 EL(ha, "Invalid HbaSelect vp index: %xh\n", 372 cmd->HbaSelect); 373 cmd->Status = EXT_STATUS_INVALID_VPINDEX; 374 cmd->ResponseLen = 0; 375 return (EFAULT); 376 } 377 /* 378 * Special case: HbaSelect == 0 is physical ha 379 */ 380 if (cmd->HbaSelect != 0) { 381 vha = ha->vp_next; 382 while (vha != NULL) { 383 if (vha->vp_index == cmd->HbaSelect) { 384 ha = vha; 385 break; 386 } 387 vha = vha->vp_next; 388 } 389 /* 390 * The specified vp index may be valid(within range) 391 * but it's not in the list. Currently this is all 392 * we can say. 393 */ 394 if (vha == NULL) { 395 cmd->Status = EXT_STATUS_INVALID_VPINDEX; 396 cmd->ResponseLen = 0; 397 return (EFAULT); 398 } 399 } 400 } 401 402 /* 403 * If driver is suspended, stalled, or powered down rtn BUSY 404 */ 405 if (ha->flags & ADAPTER_SUSPENDED || 406 ha->task_daemon_flags & DRIVER_STALL || 407 ha->power_level != PM_LEVEL_D0) { 408 EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ? 409 "driver suspended" : 410 (ha->task_daemon_flags & DRIVER_STALL ? 
"driver stalled" : 411 "FCA powered down")); 412 cmd->Status = EXT_STATUS_BUSY; 413 cmd->ResponseLen = 0; 414 rval = EBUSY; 415 416 /* Return results to caller */ 417 if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) { 418 EL(ha, "failed, sdm_return\n"); 419 rval = EFAULT; 420 } 421 return (rval); 422 } 423 424 switch (ioctl_code) { 425 case EXT_CC_QUERY_OS: 426 ql_query(ha, cmd, mode); 427 break; 428 case EXT_CC_SEND_FCCT_PASSTHRU_OS: 429 ql_fcct(ha, cmd, mode); 430 break; 431 case EXT_CC_REG_AEN_OS: 432 ql_aen_reg(ha, cmd, mode); 433 break; 434 case EXT_CC_GET_AEN_OS: 435 ql_aen_get(ha, cmd, mode); 436 break; 437 case EXT_CC_GET_DATA_OS: 438 ql_get_host_data(ha, cmd, mode); 439 break; 440 case EXT_CC_SET_DATA_OS: 441 ql_set_host_data(ha, cmd, mode); 442 break; 443 case EXT_CC_SEND_ELS_RNID_OS: 444 ql_send_els_rnid(ha, cmd, mode); 445 break; 446 case EXT_CC_SCSI_PASSTHRU_OS: 447 ql_scsi_passthru(ha, cmd, mode); 448 break; 449 case EXT_CC_WWPN_TO_SCSIADDR_OS: 450 ql_wwpn_to_scsiaddr(ha, cmd, mode); 451 break; 452 case EXT_CC_HOST_IDX_OS: 453 ql_host_idx(ha, cmd, mode); 454 break; 455 case EXT_CC_HOST_DRVNAME_OS: 456 ql_host_drvname(ha, cmd, mode); 457 break; 458 case EXT_CC_READ_NVRAM_OS: 459 ql_read_nvram(ha, cmd, mode); 460 break; 461 case EXT_CC_UPDATE_NVRAM_OS: 462 ql_write_nvram(ha, cmd, mode); 463 break; 464 case EXT_CC_READ_OPTION_ROM_OS: 465 case EXT_CC_READ_OPTION_ROM_EX_OS: 466 ql_read_flash(ha, cmd, mode); 467 break; 468 case EXT_CC_UPDATE_OPTION_ROM_OS: 469 case EXT_CC_UPDATE_OPTION_ROM_EX_OS: 470 ql_write_flash(ha, cmd, mode); 471 break; 472 case EXT_CC_LOOPBACK_OS: 473 ql_diagnostic_loopback(ha, cmd, mode); 474 break; 475 case EXT_CC_GET_VPD_OS: 476 ql_read_vpd(ha, cmd, mode); 477 break; 478 case EXT_CC_SET_VPD_OS: 479 ql_write_vpd(ha, cmd, mode); 480 break; 481 case EXT_CC_GET_FCACHE_OS: 482 ql_get_fcache(ha, cmd, mode); 483 break; 484 case EXT_CC_GET_FCACHE_EX_OS: 485 ql_get_fcache_ex(ha, cmd, mode); 486 break; 487 case EXT_CC_GET_SFP_DATA_OS: 488 
ql_get_sfp(ha, cmd, mode); 489 break; 490 case EXT_CC_PORT_PARAM_OS: 491 ql_port_param(ha, cmd, mode); 492 break; 493 case EXT_CC_GET_PCI_DATA_OS: 494 ql_get_pci_data(ha, cmd, mode); 495 break; 496 case EXT_CC_GET_FWEXTTRACE_OS: 497 ql_get_fwexttrace(ha, cmd, mode); 498 break; 499 case EXT_CC_GET_FWFCETRACE_OS: 500 ql_get_fwfcetrace(ha, cmd, mode); 501 break; 502 case EXT_CC_MENLO_RESET: 503 ql_menlo_reset(ha, cmd, mode); 504 break; 505 case EXT_CC_MENLO_GET_FW_VERSION: 506 ql_menlo_get_fw_version(ha, cmd, mode); 507 break; 508 case EXT_CC_MENLO_UPDATE_FW: 509 ql_menlo_update_fw(ha, cmd, mode); 510 break; 511 case EXT_CC_MENLO_MANAGE_INFO: 512 ql_menlo_manage_info(ha, cmd, mode); 513 break; 514 case EXT_CC_GET_VP_CNT_ID_OS: 515 ql_get_vp_cnt_id(ha, cmd, mode); 516 break; 517 case EXT_CC_VPORT_CMD_OS: 518 ql_vp_ioctl(ha, cmd, mode); 519 break; 520 case EXT_CC_ACCESS_FLASH_OS: 521 ql_access_flash(ha, cmd, mode); 522 break; 523 case EXT_CC_RESET_FW_OS: 524 ql_reset_cmd(ha, cmd); 525 break; 526 default: 527 /* function not supported. */ 528 EL(ha, "failed, function not supported=%d\n", ioctl_code); 529 530 cmd->Status = EXT_STATUS_INVALID_REQUEST; 531 cmd->ResponseLen = 0; 532 break; 533 } 534 535 /* Return results to caller */ 536 if (ql_sdm_return(ha, cmd, arg, mode) == -1) { 537 EL(ha, "failed, sdm_return\n"); 538 return (EFAULT); 539 } 540 541 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 542 543 return (0); 544 } 545 546 /* 547 * ql_sdm_setup 548 * Make a local copy of the EXT_IOCTL struct and validate it. 549 * 550 * Input: 551 * ha: adapter state pointer. 552 * cmd_struct: Pointer to location to store local adrs of EXT_IOCTL. 553 * arg: Address of application EXT_IOCTL cmd data 554 * mode: flags 555 * val_sig: Pointer to a function to validate the ioctl signature. 556 * 557 * Returns: 558 * 0: success 559 * EFAULT: Copy in error of application EXT_IOCTL struct. 560 * EINVAL: Invalid version, signature. 561 * ENOMEM: Local allocation of EXT_IOCTL failed. 
562 * 563 * Context: 564 * Kernel context. 565 */ 566 static int 567 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg, 568 int mode, boolean_t (*val_sig)(EXT_IOCTL *)) 569 { 570 int rval; 571 EXT_IOCTL *cmd; 572 573 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 574 575 /* Allocate local memory for EXT_IOCTL. */ 576 *cmd_struct = NULL; 577 cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP); 578 if (cmd == NULL) { 579 EL(ha, "failed, kmem_zalloc\n"); 580 return (ENOMEM); 581 } 582 /* Get argument structure. */ 583 rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode); 584 if (rval != 0) { 585 EL(ha, "failed, ddi_copyin\n"); 586 rval = EFAULT; 587 } else { 588 /* 589 * Check signature and the version. 590 * If either are not valid then neither is the 591 * structure so don't attempt to return any error status 592 * because we can't trust what caller's arg points to. 593 * Just return the errno. 594 */ 595 if (val_sig(cmd) == 0) { 596 EL(ha, "failed, signature\n"); 597 rval = EINVAL; 598 } else if (cmd->Version > EXT_VERSION) { 599 EL(ha, "failed, version\n"); 600 rval = EINVAL; 601 } 602 } 603 604 if (rval == 0) { 605 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 606 *cmd_struct = cmd; 607 cmd->Status = EXT_STATUS_OK; 608 cmd->DetailStatus = 0; 609 } else { 610 kmem_free((void *)cmd, sizeof (EXT_IOCTL)); 611 } 612 613 return (rval); 614 } 615 616 /* 617 * ql_validate_signature 618 * Validate the signature string for an external ioctl call. 619 * 620 * Input: 621 * sg: Pointer to EXT_IOCTL signature to validate. 622 * 623 * Returns: 624 * B_TRUE: Signature is valid. 625 * B_FALSE: Signature is NOT valid. 626 * 627 * Context: 628 * Kernel context. 629 */ 630 static boolean_t 631 ql_validate_signature(EXT_IOCTL *cmd_struct) 632 { 633 /* 634 * Check signature. 
635 * 636 * If signature is not valid then neither is the rest of 637 * the structure (e.g., can't trust it), so don't attempt 638 * to return any error status other than the errno. 639 */ 640 if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) { 641 QL_PRINT_2(CE_CONT, "failed,\n"); 642 return (B_FALSE); 643 } 644 645 return (B_TRUE); 646 } 647 648 /* 649 * ql_sdm_return 650 * Copies return data/status to application land for 651 * ioctl call using the SAN/Device Management EXT_IOCTL call interface. 652 * 653 * Input: 654 * ha: adapter state pointer. 655 * cmd: Pointer to kernel copy of requestor's EXT_IOCTL struct. 656 * ioctl_code: ioctl function to perform 657 * arg: EXT_IOCTL cmd data in application land. 658 * mode: flags 659 * 660 * Returns: 661 * 0: success 662 * EFAULT: Copy out error. 663 * 664 * Context: 665 * Kernel context. 666 */ 667 /* ARGSUSED */ 668 static int 669 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode) 670 { 671 int rval = 0; 672 673 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 674 675 rval |= ddi_copyout((void *)&cmd->ResponseLen, 676 (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t), 677 mode); 678 679 rval |= ddi_copyout((void *)&cmd->Status, 680 (void *)&(((EXT_IOCTL*)arg)->Status), 681 sizeof (cmd->Status), mode); 682 rval |= ddi_copyout((void *)&cmd->DetailStatus, 683 (void *)&(((EXT_IOCTL*)arg)->DetailStatus), 684 sizeof (cmd->DetailStatus), mode); 685 686 kmem_free((void *)cmd, sizeof (EXT_IOCTL)); 687 688 if (rval != 0) { 689 /* Some copyout operation failed */ 690 EL(ha, "failed, ddi_copyout\n"); 691 return (EFAULT); 692 } 693 694 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 695 696 return (0); 697 } 698 699 /* 700 * ql_query 701 * Performs all EXT_CC_QUERY functions. 702 * 703 * Input: 704 * ha: adapter state pointer. 705 * cmd: Local EXT_IOCTL cmd struct pointer. 706 * mode: flags. 707 * 708 * Returns: 709 * None, request status indicated in cmd->Status. 
710 * 711 * Context: 712 * Kernel context. 713 */ 714 static void 715 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 716 { 717 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, 718 cmd->SubCode); 719 720 /* case off on command subcode */ 721 switch (cmd->SubCode) { 722 case EXT_SC_QUERY_HBA_NODE: 723 ql_qry_hba_node(ha, cmd, mode); 724 break; 725 case EXT_SC_QUERY_HBA_PORT: 726 ql_qry_hba_port(ha, cmd, mode); 727 break; 728 case EXT_SC_QUERY_DISC_PORT: 729 ql_qry_disc_port(ha, cmd, mode); 730 break; 731 case EXT_SC_QUERY_DISC_TGT: 732 ql_qry_disc_tgt(ha, cmd, mode); 733 break; 734 case EXT_SC_QUERY_DRIVER: 735 ql_qry_driver(ha, cmd, mode); 736 break; 737 case EXT_SC_QUERY_FW: 738 ql_qry_fw(ha, cmd, mode); 739 break; 740 case EXT_SC_QUERY_CHIP: 741 ql_qry_chip(ha, cmd, mode); 742 break; 743 case EXT_SC_QUERY_CNA_PORT: 744 ql_qry_cna_port(ha, cmd, mode); 745 break; 746 case EXT_SC_QUERY_ADAPTER_VERSIONS: 747 ql_qry_adapter_versions(ha, cmd, mode); 748 break; 749 case EXT_SC_QUERY_DISC_LUN: 750 default: 751 /* function not supported. */ 752 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 753 EL(ha, "failed, Unsupported Subcode=%xh\n", 754 cmd->SubCode); 755 break; 756 } 757 758 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 759 } 760 761 /* 762 * ql_qry_hba_node 763 * Performs EXT_SC_QUERY_HBA_NODE subfunction. 764 * 765 * Input: 766 * ha: adapter state pointer. 767 * cmd: EXT_IOCTL cmd struct pointer. 768 * mode: flags. 769 * 770 * Returns: 771 * None, request status indicated in cmd->Status. 772 * 773 * Context: 774 * Kernel context. 
775 */ 776 static void 777 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 778 { 779 EXT_HBA_NODE tmp_node = {0}; 780 uint_t len; 781 caddr_t bufp; 782 783 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 784 785 if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) { 786 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 787 cmd->DetailStatus = sizeof (EXT_HBA_NODE); 788 EL(ha, "failed, ResponseLen < EXT_HBA_NODE, " 789 "Len=%xh\n", cmd->ResponseLen); 790 cmd->ResponseLen = 0; 791 return; 792 } 793 794 /* fill in the values */ 795 796 bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN, 797 EXT_DEF_WWN_NAME_SIZE); 798 799 (void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation"); 800 801 (void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id); 802 803 bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3); 804 805 (void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION); 806 807 if (CFG_IST(ha, CFG_SBUS_CARD)) { 808 size_t verlen; 809 uint16_t w; 810 char *tmpptr; 811 812 verlen = strlen((char *)(tmp_node.DriverVersion)); 813 if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) { 814 EL(ha, "failed, No room for fpga version string\n"); 815 } else { 816 w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle, 817 (uint16_t *) 818 (ha->sbus_fpga_iobase + FPGA_REVISION)); 819 820 tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]); 821 if (tmpptr == NULL) { 822 EL(ha, "Unable to insert fpga version str\n"); 823 } else { 824 (void) sprintf(tmpptr, "%d.%d", 825 ((w & 0xf0) >> 4), (w & 0x0f)); 826 tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS; 827 } 828 } 829 } 830 831 (void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d", 832 ha->fw_major_version, ha->fw_minor_version, 833 ha->fw_subminor_version); 834 835 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 836 switch (ha->fw_attributes) { 837 case FWATTRIB_EF: 838 (void) strcat((char *)(tmp_node.FWVersion), " EF"); 839 break; 840 case FWATTRIB_TP: 841 (void) strcat((char *)(tmp_node.FWVersion), " TP"); 842 
break; 843 case FWATTRIB_IP: 844 (void) strcat((char *)(tmp_node.FWVersion), " IP"); 845 break; 846 case FWATTRIB_IPX: 847 (void) strcat((char *)(tmp_node.FWVersion), " IPX"); 848 break; 849 case FWATTRIB_FL: 850 (void) strcat((char *)(tmp_node.FWVersion), " FL"); 851 break; 852 case FWATTRIB_FPX: 853 (void) strcat((char *)(tmp_node.FWVersion), " FLX"); 854 break; 855 default: 856 break; 857 } 858 } 859 860 /* FCode version. */ 861 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/ 862 if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC | 863 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp, 864 (int *)&len) == DDI_PROP_SUCCESS) { 865 if (len < EXT_DEF_MAX_STR_SIZE) { 866 bcopy(bufp, tmp_node.OptRomVersion, len); 867 } else { 868 bcopy(bufp, tmp_node.OptRomVersion, 869 EXT_DEF_MAX_STR_SIZE - 1); 870 tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] = 871 '\0'; 872 } 873 kmem_free(bufp, len); 874 } else { 875 (void) sprintf((char *)tmp_node.OptRomVersion, "0"); 876 } 877 tmp_node.PortCount = 1; 878 tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE; 879 880 if (ddi_copyout((void *)&tmp_node, 881 (void *)(uintptr_t)(cmd->ResponseAdr), 882 sizeof (EXT_HBA_NODE), mode) != 0) { 883 cmd->Status = EXT_STATUS_COPY_ERR; 884 cmd->ResponseLen = 0; 885 EL(ha, "failed, ddi_copyout\n"); 886 } else { 887 cmd->ResponseLen = sizeof (EXT_HBA_NODE); 888 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 889 } 890 } 891 892 /* 893 * ql_qry_hba_port 894 * Performs EXT_SC_QUERY_HBA_PORT subfunction. 895 * 896 * Input: 897 * ha: adapter state pointer. 898 * cmd: EXT_IOCTL cmd struct pointer. 899 * mode: flags. 900 * 901 * Returns: 902 * None, request status indicated in cmd->Status. 903 * 904 * Context: 905 * Kernel context. 
906 */ 907 static void 908 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 909 { 910 ql_link_t *link; 911 ql_tgt_t *tq; 912 ql_mbx_data_t mr; 913 EXT_HBA_PORT tmp_port = {0}; 914 int rval; 915 uint16_t port_cnt, tgt_cnt, index; 916 917 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 918 919 if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) { 920 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 921 cmd->DetailStatus = sizeof (EXT_HBA_PORT); 922 EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n", 923 cmd->ResponseLen); 924 cmd->ResponseLen = 0; 925 return; 926 } 927 928 /* fill in the values */ 929 930 bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN, 931 EXT_DEF_WWN_NAME_SIZE); 932 tmp_port.Id[0] = 0; 933 tmp_port.Id[1] = ha->d_id.b.domain; 934 tmp_port.Id[2] = ha->d_id.b.area; 935 tmp_port.Id[3] = ha->d_id.b.al_pa; 936 937 /* For now we are initiator only driver */ 938 tmp_port.Type = EXT_DEF_INITIATOR_DEV; 939 940 if (ha->task_daemon_flags & LOOP_DOWN) { 941 tmp_port.State = EXT_DEF_HBA_LOOP_DOWN; 942 } else if (DRIVER_SUSPENDED(ha)) { 943 tmp_port.State = EXT_DEF_HBA_SUSPENDED; 944 } else { 945 tmp_port.State = EXT_DEF_HBA_OK; 946 } 947 948 if (ha->flags & POINT_TO_POINT) { 949 tmp_port.Mode = EXT_DEF_P2P_MODE; 950 } else { 951 tmp_port.Mode = EXT_DEF_LOOP_MODE; 952 } 953 /* 954 * fill in the portspeed values. 
955 * 956 * default to not yet negotiated state 957 */ 958 tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED; 959 960 if (tmp_port.State == EXT_DEF_HBA_OK) { 961 switch (ha->iidma_rate) { 962 case IIDMA_RATE_1GB: 963 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT; 964 break; 965 case IIDMA_RATE_2GB: 966 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT; 967 break; 968 case IIDMA_RATE_4GB: 969 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT; 970 break; 971 case IIDMA_RATE_8GB: 972 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT; 973 break; 974 case IIDMA_RATE_10GB: 975 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT; 976 break; 977 default: 978 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN; 979 EL(ha, "failed, data rate=%xh\n", mr.mb[1]); 980 break; 981 } 982 } 983 984 /* Report all supported port speeds */ 985 if (CFG_IST(ha, CFG_CTRL_25XX)) { 986 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT | 987 EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT | 988 EXT_DEF_PORTSPEED_1GBIT); 989 /* 990 * Correct supported speeds based on type of 991 * sfp that is present 992 */ 993 switch (ha->sfp_stat) { 994 case 1: 995 /* no sfp detected */ 996 break; 997 case 2: 998 case 4: 999 /* 4GB sfp */ 1000 tmp_port.PortSupportedSpeed &= 1001 ~EXT_DEF_PORTSPEED_8GBIT; 1002 break; 1003 case 3: 1004 case 5: 1005 /* 8GB sfp */ 1006 tmp_port.PortSupportedSpeed &= 1007 ~EXT_DEF_PORTSPEED_1GBIT; 1008 break; 1009 default: 1010 EL(ha, "sfp_stat: %xh\n", ha->sfp_stat); 1011 break; 1012 1013 } 1014 } else if (CFG_IST(ha, CFG_CTRL_8081)) { 1015 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT; 1016 } else if (CFG_IST(ha, CFG_CTRL_2422)) { 1017 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT | 1018 EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT); 1019 } else if (CFG_IST(ha, CFG_CTRL_2300)) { 1020 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT | 1021 EXT_DEF_PORTSPEED_1GBIT); 1022 } else if (CFG_IST(ha, CFG_CTRL_6322)) { 1023 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT; 
1024 } else if (CFG_IST(ha, CFG_CTRL_2200)) { 1025 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT; 1026 } else { 1027 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN; 1028 EL(ha, "unknown HBA type: %xh\n", ha->device_id); 1029 } 1030 tmp_port.LinkState2 = LSB(ha->sfp_stat); 1031 port_cnt = 0; 1032 tgt_cnt = 0; 1033 1034 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) { 1035 for (link = ha->dev[index].first; link != NULL; 1036 link = link->next) { 1037 tq = link->base_address; 1038 1039 if (!VALID_TARGET_ID(ha, tq->loop_id)) { 1040 continue; 1041 } 1042 1043 port_cnt++; 1044 if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) { 1045 tgt_cnt++; 1046 } 1047 } 1048 } 1049 1050 tmp_port.DiscPortCount = port_cnt; 1051 tmp_port.DiscTargetCount = tgt_cnt; 1052 1053 tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME; 1054 1055 rval = ddi_copyout((void *)&tmp_port, 1056 (void *)(uintptr_t)(cmd->ResponseAdr), 1057 sizeof (EXT_HBA_PORT), mode); 1058 if (rval != 0) { 1059 cmd->Status = EXT_STATUS_COPY_ERR; 1060 cmd->ResponseLen = 0; 1061 EL(ha, "failed, ddi_copyout\n"); 1062 } else { 1063 cmd->ResponseLen = sizeof (EXT_HBA_PORT); 1064 QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n", 1065 ha->instance, port_cnt, tgt_cnt); 1066 } 1067 } 1068 1069 /* 1070 * ql_qry_disc_port 1071 * Performs EXT_SC_QUERY_DISC_PORT subfunction. 1072 * 1073 * Input: 1074 * ha: adapter state pointer. 1075 * cmd: EXT_IOCTL cmd struct pointer. 1076 * mode: flags. 1077 * 1078 * cmd->Instance = Port instance in fcport chain. 1079 * 1080 * Returns: 1081 * None, request status indicated in cmd->Status. 1082 * 1083 * Context: 1084 * Kernel context. 
 */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must be able to hold a full EXT_DISC_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Walk every device queue head until the cmd->Instance'th device
	 * with a valid loop ID is reached; the outer loop stops as soon as
	 * the inner loop breaks with link != NULL (match found).
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}

	/* 24-bit FC port ID, most significant byte zero. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = tq->d_id.b.domain;
	tmp_port.Id[2] = tq->d_id.b.area;
	tmp_port.Id[3] = tq->d_id.b.al_pa;

	/*
	 * NOTE(review): when the device is neither an initiator nor already
	 * flagged as tape, ql_inq_scan() is invoked (which may update
	 * tq->flags), but EXT_DEF_TAPE_DEV is NOT set on this pass even if
	 * the scan marks the device as tape.  ql_qry_disc_tgt() re-checks
	 * TQF_TAPE_DEVICE after scanning — confirm whether this asymmetry
	 * is intentional.
	 */
	tmp_port.Type = 0;
	if (tq->flags & TQF_INITIATOR_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type |
		    EXT_DEF_INITIATOR_DEV);
	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	} else if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;	/* Hard-coded for Solaris */

	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_qry_disc_tgt
 *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 *	cmd->Instance = Port instance in fcport chain.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
	    cmd->Instance);

	/* Caller's buffer must be able to hold a full EXT_DISC_TARGET. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Scan port list for requested target and fill in the values */
	/* Unlike disc_port above, initiator devices are skipped here. */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->DetailStatus = EXT_DSTATUS_TARGET;
		EL(ha, "failed, not found target=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}
	/* 24-bit FC port ID, most significant byte zero. */
	tmp_tgt.Id[0] = 0;
	tmp_tgt.Id[1] = tq->d_id.b.domain;
	tmp_tgt.Id[2] = tq->d_id.b.area;
	tmp_tgt.Id[3] = tq->d_id.b.al_pa;

	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);

	/*
	 * ql_inq_scan() apparently refreshes tq->flags from the device —
	 * the TQF_TAPE_DEVICE re-check below depends on that; confirm
	 * against the helper's definition.
	 */
	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	}

	tmp_tgt.Type = 0;
	if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;	/* Hard-coded for Solaris. */

	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_qry_fw
 *	Performs EXT_SC_QUERY_FW subfunction.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
1302 */ 1303 static void 1304 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1305 { 1306 EXT_FW fw_info = {0}; 1307 1308 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1309 1310 if (cmd->ResponseLen < sizeof (EXT_FW)) { 1311 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 1312 cmd->DetailStatus = sizeof (EXT_FW); 1313 EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n", 1314 cmd->ResponseLen); 1315 cmd->ResponseLen = 0; 1316 return; 1317 } 1318 1319 (void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d", 1320 ha->fw_major_version, ha->fw_minor_version, 1321 ha->fw_subminor_version); 1322 1323 fw_info.Attrib = ha->fw_attributes; 1324 1325 if (ddi_copyout((void *)&fw_info, 1326 (void *)(uintptr_t)(cmd->ResponseAdr), 1327 sizeof (EXT_FW), mode) != 0) { 1328 cmd->Status = EXT_STATUS_COPY_ERR; 1329 cmd->ResponseLen = 0; 1330 EL(ha, "failed, ddi_copyout\n"); 1331 return; 1332 } else { 1333 cmd->ResponseLen = sizeof (EXT_FW); 1334 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1335 } 1336 } 1337 1338 /* 1339 * ql_qry_chip 1340 * Performs EXT_SC_QUERY_CHIP subfunction. 1341 * 1342 * Input: 1343 * ha: adapter state pointer. 1344 * cmd: EXT_IOCTL cmd struct pointer. 1345 * mode: flags. 1346 * 1347 * Returns: 1348 * None, request status indicated in cmd->Status. 1349 * 1350 * Context: 1351 * Kernel context. 
1352 */ 1353 static void 1354 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1355 { 1356 EXT_CHIP chip = {0}; 1357 1358 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1359 1360 if (cmd->ResponseLen < sizeof (EXT_CHIP)) { 1361 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 1362 cmd->DetailStatus = sizeof (EXT_CHIP); 1363 EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n", 1364 cmd->ResponseLen); 1365 cmd->ResponseLen = 0; 1366 return; 1367 } 1368 1369 chip.VendorId = ha->ven_id; 1370 chip.DeviceId = ha->device_id; 1371 chip.SubVendorId = ha->subven_id; 1372 chip.SubSystemId = ha->subsys_id; 1373 chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0); 1374 chip.IoAddrLen = 0x100; 1375 chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1); 1376 chip.MemAddrLen = 0x100; 1377 chip.ChipRevID = ha->rev_id; 1378 if (ha->flags & FUNCTION_1) { 1379 chip.FuncNo = 1; 1380 } 1381 1382 if (ddi_copyout((void *)&chip, 1383 (void *)(uintptr_t)(cmd->ResponseAdr), 1384 sizeof (EXT_CHIP), mode) != 0) { 1385 cmd->Status = EXT_STATUS_COPY_ERR; 1386 cmd->ResponseLen = 0; 1387 EL(ha, "failed, ddi_copyout\n"); 1388 } else { 1389 cmd->ResponseLen = sizeof (EXT_CHIP); 1390 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1391 } 1392 } 1393 1394 /* 1395 * ql_qry_driver 1396 * Performs EXT_SC_QUERY_DRIVER subfunction. 1397 * 1398 * Input: 1399 * ha: adapter state pointer. 1400 * cmd: EXT_IOCTL cmd struct pointer. 1401 * mode: flags. 1402 * 1403 * Returns: 1404 * None, request status indicated in cmd->Status. 1405 * 1406 * Context: 1407 * Kernel context. 
1408 */ 1409 static void 1410 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1411 { 1412 EXT_DRIVER qd = {0}; 1413 1414 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1415 1416 if (cmd->ResponseLen < sizeof (EXT_DRIVER)) { 1417 cmd->Status = EXT_STATUS_DATA_OVERRUN; 1418 cmd->DetailStatus = sizeof (EXT_DRIVER); 1419 EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n", 1420 cmd->ResponseLen); 1421 cmd->ResponseLen = 0; 1422 return; 1423 } 1424 1425 (void) strcpy((void *)&qd.Version[0], QL_VERSION); 1426 qd.NumOfBus = 1; /* Fixed for Solaris */ 1427 qd.TargetsPerBus = (uint16_t) 1428 (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ? 1429 MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES); 1430 qd.LunsPerTarget = 2030; 1431 qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE; 1432 qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH; 1433 1434 if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr, 1435 sizeof (EXT_DRIVER), mode) != 0) { 1436 cmd->Status = EXT_STATUS_COPY_ERR; 1437 cmd->ResponseLen = 0; 1438 EL(ha, "failed, ddi_copyout\n"); 1439 } else { 1440 cmd->ResponseLen = sizeof (EXT_DRIVER); 1441 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1442 } 1443 } 1444 1445 /* 1446 * ql_fcct 1447 * IOCTL management server FC-CT passthrough. 1448 * 1449 * Input: 1450 * ha: adapter state pointer. 1451 * cmd: User space CT arguments pointer. 1452 * mode: flags. 1453 * 1454 * Returns: 1455 * None, request status indicated in cmd->Status. 1456 * 1457 * Context: 1458 * Kernel context. 1459 */ 1460 static void 1461 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1462 { 1463 ql_mbx_iocb_t *pkt; 1464 ql_mbx_data_t mr; 1465 dma_mem_t *dma_mem; 1466 caddr_t pld; 1467 uint32_t pkt_size, pld_byte_cnt, *long_ptr; 1468 int rval; 1469 ql_ct_iu_preamble_t *ct; 1470 ql_xioctl_t *xp = ha->xioctl; 1471 ql_tgt_t tq; 1472 uint16_t comp_status, loop_id; 1473 1474 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1475 1476 /* Get CT argument structure. 
*/ 1477 if ((ha->topology & QL_SNS_CONNECTION) == 0) { 1478 EL(ha, "failed, No switch\n"); 1479 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 1480 cmd->ResponseLen = 0; 1481 return; 1482 } 1483 1484 if (DRIVER_SUSPENDED(ha)) { 1485 EL(ha, "failed, LOOP_NOT_READY\n"); 1486 cmd->Status = EXT_STATUS_BUSY; 1487 cmd->ResponseLen = 0; 1488 return; 1489 } 1490 1491 /* Login management server device. */ 1492 if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) { 1493 tq.d_id.b.al_pa = 0xfa; 1494 tq.d_id.b.area = 0xff; 1495 tq.d_id.b.domain = 0xff; 1496 tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ? 1497 MANAGEMENT_SERVER_24XX_LOOP_ID : 1498 MANAGEMENT_SERVER_LOOP_ID); 1499 rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr); 1500 if (rval != QL_SUCCESS) { 1501 EL(ha, "failed, server login\n"); 1502 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 1503 cmd->ResponseLen = 0; 1504 return; 1505 } else { 1506 xp->flags |= QL_MGMT_SERVER_LOGIN; 1507 } 1508 } 1509 1510 QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance); 1511 QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL)); 1512 1513 /* Allocate a DMA Memory Descriptor */ 1514 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP); 1515 if (dma_mem == NULL) { 1516 EL(ha, "failed, kmem_zalloc\n"); 1517 cmd->Status = EXT_STATUS_NO_MEMORY; 1518 cmd->ResponseLen = 0; 1519 return; 1520 } 1521 /* Determine maximum buffer size. */ 1522 if (cmd->RequestLen < cmd->ResponseLen) { 1523 pld_byte_cnt = cmd->ResponseLen; 1524 } else { 1525 pld_byte_cnt = cmd->RequestLen; 1526 } 1527 1528 /* Allocate command block. */ 1529 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt); 1530 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 1531 if (pkt == NULL) { 1532 EL(ha, "failed, kmem_zalloc\n"); 1533 cmd->Status = EXT_STATUS_NO_MEMORY; 1534 cmd->ResponseLen = 0; 1535 return; 1536 } 1537 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t); 1538 1539 /* Get command payload data. 
*/ 1540 if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld, 1541 cmd->RequestLen, mode) != cmd->RequestLen) { 1542 EL(ha, "failed, get_buffer_data\n"); 1543 kmem_free(pkt, pkt_size); 1544 cmd->Status = EXT_STATUS_COPY_ERR; 1545 cmd->ResponseLen = 0; 1546 return; 1547 } 1548 1549 /* Get DMA memory for the IOCB */ 1550 if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA, 1551 QL_DMA_RING_ALIGN) != QL_SUCCESS) { 1552 cmn_err(CE_WARN, "%s(%d): DMA memory " 1553 "alloc failed", QL_NAME, ha->instance); 1554 kmem_free(pkt, pkt_size); 1555 kmem_free(dma_mem, sizeof (dma_mem_t)); 1556 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 1557 cmd->ResponseLen = 0; 1558 return; 1559 } 1560 1561 /* Copy out going payload data to IOCB DMA buffer. */ 1562 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld, 1563 (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR); 1564 1565 /* Sync IOCB DMA buffer. */ 1566 (void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt, 1567 DDI_DMA_SYNC_FORDEV); 1568 1569 /* 1570 * Setup IOCB 1571 */ 1572 ct = (ql_ct_iu_preamble_t *)pld; 1573 if (CFG_IST(ha, CFG_CTRL_24258081)) { 1574 pkt->ms24.entry_type = CT_PASSTHRU_TYPE; 1575 pkt->ms24.entry_count = 1; 1576 1577 pkt->ms24.vp_index = ha->vp_index; 1578 1579 /* Set loop ID */ 1580 pkt->ms24.n_port_hdl = (uint16_t) 1581 (ct->gs_type == GS_TYPE_DIR_SERVER ? 1582 LE_16(SNS_24XX_HDL) : 1583 LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID)); 1584 1585 /* Set ISP command timeout. */ 1586 pkt->ms24.timeout = LE_16(120); 1587 1588 /* Set cmd/response data segment counts. */ 1589 pkt->ms24.cmd_dseg_count = LE_16(1); 1590 pkt->ms24.resp_dseg_count = LE_16(1); 1591 1592 /* Load ct cmd byte count. */ 1593 pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen); 1594 1595 /* Load ct rsp byte count. */ 1596 pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen); 1597 1598 long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address; 1599 1600 /* Load MS command entry data segments. 
*/ 1601 *long_ptr++ = (uint32_t) 1602 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1603 *long_ptr++ = (uint32_t) 1604 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1605 *long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen)); 1606 1607 /* Load MS response entry data segments. */ 1608 *long_ptr++ = (uint32_t) 1609 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1610 *long_ptr++ = (uint32_t) 1611 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1612 *long_ptr = (uint32_t)LE_32(cmd->ResponseLen); 1613 1614 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, 1615 sizeof (ql_mbx_iocb_t)); 1616 1617 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status); 1618 if (comp_status == CS_DATA_UNDERRUN) { 1619 if ((BE_16(ct->max_residual_size)) == 0) { 1620 comp_status = CS_COMPLETE; 1621 } 1622 } 1623 1624 if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) != 1625 0) { 1626 EL(ha, "failed, I/O timeout or " 1627 "es=%xh, ss_l=%xh, rval=%xh\n", 1628 pkt->sts24.entry_status, 1629 pkt->sts24.scsi_status_l, rval); 1630 kmem_free(pkt, pkt_size); 1631 ql_free_dma_resource(ha, dma_mem); 1632 kmem_free(dma_mem, sizeof (dma_mem_t)); 1633 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 1634 cmd->ResponseLen = 0; 1635 return; 1636 } 1637 } else { 1638 pkt->ms.entry_type = MS_TYPE; 1639 pkt->ms.entry_count = 1; 1640 1641 /* Set loop ID */ 1642 loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ? 1643 SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID); 1644 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 1645 pkt->ms.loop_id_l = LSB(loop_id); 1646 pkt->ms.loop_id_h = MSB(loop_id); 1647 } else { 1648 pkt->ms.loop_id_h = LSB(loop_id); 1649 } 1650 1651 /* Set ISP command timeout. */ 1652 pkt->ms.timeout = LE_16(120); 1653 1654 /* Set data segment counts. */ 1655 pkt->ms.cmd_dseg_count_l = 1; 1656 pkt->ms.total_dseg_count = LE_16(2); 1657 1658 /* Response total byte count. */ 1659 pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen); 1660 pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen); 1661 1662 /* Command total byte count. 
*/ 1663 pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen); 1664 pkt->ms.dseg_0_length = LE_32(cmd->RequestLen); 1665 1666 /* Load command/response data segments. */ 1667 pkt->ms.dseg_0_address[0] = (uint32_t) 1668 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1669 pkt->ms.dseg_0_address[1] = (uint32_t) 1670 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1671 pkt->ms.dseg_1_address[0] = (uint32_t) 1672 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 1673 pkt->ms.dseg_1_address[1] = (uint32_t) 1674 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 1675 1676 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, 1677 sizeof (ql_mbx_iocb_t)); 1678 1679 comp_status = (uint16_t)LE_16(pkt->sts.comp_status); 1680 if (comp_status == CS_DATA_UNDERRUN) { 1681 if ((BE_16(ct->max_residual_size)) == 0) { 1682 comp_status = CS_COMPLETE; 1683 } 1684 } 1685 if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) { 1686 EL(ha, "failed, I/O timeout or " 1687 "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval); 1688 kmem_free(pkt, pkt_size); 1689 ql_free_dma_resource(ha, dma_mem); 1690 kmem_free(dma_mem, sizeof (dma_mem_t)); 1691 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 1692 cmd->ResponseLen = 0; 1693 return; 1694 } 1695 } 1696 1697 /* Sync in coming DMA buffer. */ 1698 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 1699 pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL); 1700 /* Copy in coming DMA data. */ 1701 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld, 1702 (uint8_t *)dma_mem->bp, pld_byte_cnt, 1703 DDI_DEV_AUTOINCR); 1704 1705 /* Copy response payload from DMA buffer to application. */ 1706 if (cmd->ResponseLen != 0) { 1707 QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance, 1708 cmd->ResponseLen); 1709 QL_DUMP_9(pld, 8, cmd->ResponseLen); 1710 1711 /* Send response payload. 
*/ 1712 if (ql_send_buffer_data(pld, 1713 (caddr_t)(uintptr_t)cmd->ResponseAdr, 1714 cmd->ResponseLen, mode) != cmd->ResponseLen) { 1715 EL(ha, "failed, send_buffer_data\n"); 1716 cmd->Status = EXT_STATUS_COPY_ERR; 1717 cmd->ResponseLen = 0; 1718 } 1719 } 1720 1721 kmem_free(pkt, pkt_size); 1722 ql_free_dma_resource(ha, dma_mem); 1723 kmem_free(dma_mem, sizeof (dma_mem_t)); 1724 1725 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1726 } 1727 1728 /* 1729 * ql_aen_reg 1730 * IOCTL management server Asynchronous Event Tracking Enable/Disable. 1731 * 1732 * Input: 1733 * ha: adapter state pointer. 1734 * cmd: EXT_IOCTL cmd struct pointer. 1735 * mode: flags. 1736 * 1737 * Returns: 1738 * None, request status indicated in cmd->Status. 1739 * 1740 * Context: 1741 * Kernel context. 1742 */ 1743 static void 1744 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1745 { 1746 EXT_REG_AEN reg_struct; 1747 int rval = 0; 1748 ql_xioctl_t *xp = ha->xioctl; 1749 1750 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 1751 1752 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, ®_struct, 1753 cmd->RequestLen, mode); 1754 1755 if (rval == 0) { 1756 if (reg_struct.Enable) { 1757 xp->flags |= QL_AEN_TRACKING_ENABLE; 1758 } else { 1759 xp->flags &= ~QL_AEN_TRACKING_ENABLE; 1760 /* Empty the queue. */ 1761 INTR_LOCK(ha); 1762 xp->aen_q_head = 0; 1763 xp->aen_q_tail = 0; 1764 INTR_UNLOCK(ha); 1765 } 1766 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 1767 } else { 1768 cmd->Status = EXT_STATUS_COPY_ERR; 1769 EL(ha, "failed, ddi_copyin\n"); 1770 } 1771 } 1772 1773 /* 1774 * ql_aen_get 1775 * IOCTL management server Asynchronous Event Record Transfer. 1776 * 1777 * Input: 1778 * ha: adapter state pointer. 1779 * cmd: EXT_IOCTL cmd struct pointer. 1780 * mode: flags. 1781 * 1782 * Returns: 1783 * None, request status indicated in cmd->Status. 1784 * 1785 * Context: 1786 * Kernel context. 
 */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
	uint8_t		i;
	uint8_t		queue_cnt;
	uint8_t		request_cnt;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* The caller must provide room for the entire queue. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/*
	 * Drain under the interrupt lock: walk from head to tail,
	 * compacting occupied slots (AsyncEventCode != 0) into the local
	 * aen[] array and clearing each slot as it is consumed.  The
	 * index wraps at EXT_DEF_MAX_AEN_QUEUE (circular buffer).
	 */
	INTR_LOCK(ha);
	i = xp->aen_q_head;

	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
		if (tmp_q[i].AsyncEventCode != 0) {
			bcopy(&tmp_q[i], &aen[queue_cnt],
			    sizeof (EXT_ASYNC_EVENT));
			queue_cnt++;
			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
		}
		if (i == xp->aen_q_tail) {
			/* done. */
			break;
		}
		i++;
		if (i == EXT_DEF_MAX_AEN_QUEUE) {
			i = 0;
		}
	}

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		/* Nothing queued; success with an empty response. */
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_enqueue_aen
 *
 * Input:
 *	ha:		adapter state pointer.
 *	event_code:	async event code of the event to add to queue.
 *	payload:	event payload for the queue.
 *	INTR_LOCK must be already obtained.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
{
	uint8_t			new_entry;	/* index to current entry */
	uint16_t		*mbx;
	EXT_ASYNC_EVENT		*aen_queue;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
	    event_code);

	/* xioctl context may not exist yet (early attach); drop the event. */
	if (xp == NULL) {
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return;
	}
	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;

	if (aen_queue[xp->aen_q_tail].AsyncEventCode != 0) {
		/* Need to change queue pointers to make room. */

		/* Increment tail for adding new entry. */
		xp->aen_q_tail++;
		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
			xp->aen_q_tail = 0;
		}
		if (xp->aen_q_head == xp->aen_q_tail) {
			/*
			 * We're overwriting the oldest entry, so need to
			 * update the head pointer.
			 */
			xp->aen_q_head++;
			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
				xp->aen_q_head = 0;
			}
		}
	}

	new_entry = xp->aen_q_tail;
	aen_queue[new_entry].AsyncEventCode = event_code;

	/* Update payload */
	if (payload != NULL) {
		switch (event_code) {
		case MBA_LIP_OCCURRED:
		case MBA_LOOP_UP:
		case MBA_LOOP_DOWN:
		case MBA_LIP_F8:
		case MBA_LIP_RESET:
		case MBA_PORT_UPDATE:
			/* These events carry no payload in the queue. */
			break;
		case MBA_RSCN_UPDATE:
			/* Payload is the raw mailbox register array. */
			mbx = (uint16_t *)payload;
			/* al_pa */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
			    LSB(mbx[2]);
			/* area */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
			    MSB(mbx[2]);
			/* domain */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
			    LSB(mbx[1]);
			/* save in big endian */
			BIG_ENDIAN_24(&aen_queue[new_entry].
			    Payload.RSCN.RSCNInfo[0]);

			aen_queue[new_entry].Payload.RSCN.AddrFormat =
			    MSB(mbx[1]);

			break;
		default:
			/* Not supported */
			EL(ha, "failed, event code not supported=%xh\n",
			    event_code);
			aen_queue[new_entry].AsyncEventCode = 0;
			break;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_scsi_passthru
 *	IOCTL SCSI passthrough.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	User space SCSI command pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
1967 */ 1968 static void 1969 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 1970 { 1971 ql_mbx_iocb_t *pkt; 1972 ql_mbx_data_t mr; 1973 dma_mem_t *dma_mem; 1974 caddr_t pld; 1975 uint32_t pkt_size, pld_size; 1976 uint16_t qlnt, retries, cnt, cnt2; 1977 uint8_t *name; 1978 EXT_FC_SCSI_PASSTHRU *ufc_req; 1979 EXT_SCSI_PASSTHRU *usp_req; 1980 int rval; 1981 union _passthru { 1982 EXT_SCSI_PASSTHRU sp_cmd; 1983 EXT_FC_SCSI_PASSTHRU fc_cmd; 1984 } pt_req; /* Passthru request */ 1985 uint32_t status, sense_sz = 0; 1986 ql_tgt_t *tq = NULL; 1987 EXT_SCSI_PASSTHRU *sp_req = &pt_req.sp_cmd; 1988 EXT_FC_SCSI_PASSTHRU *fc_req = &pt_req.fc_cmd; 1989 1990 /* SCSI request struct for SCSI passthrough IOs. */ 1991 struct { 1992 uint16_t lun; 1993 uint16_t sense_length; /* Sense buffer size */ 1994 size_t resid; /* Residual */ 1995 uint8_t *cdbp; /* Requestor's CDB */ 1996 uint8_t *u_sense; /* Requestor's sense buffer */ 1997 uint8_t cdb_len; /* Requestor's CDB length */ 1998 uint8_t direction; 1999 } scsi_req; 2000 2001 struct { 2002 uint8_t *rsp_info; 2003 uint8_t *req_sense_data; 2004 uint32_t residual_length; 2005 uint32_t rsp_info_length; 2006 uint32_t req_sense_length; 2007 uint16_t comp_status; 2008 uint8_t state_flags_l; 2009 uint8_t state_flags_h; 2010 uint8_t scsi_status_l; 2011 uint8_t scsi_status_h; 2012 } sts; 2013 2014 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2015 2016 /* Verify Sub Code and set cnt to needed request size. 
*/ 2017 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) { 2018 pld_size = sizeof (EXT_SCSI_PASSTHRU); 2019 } else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) { 2020 pld_size = sizeof (EXT_FC_SCSI_PASSTHRU); 2021 } else { 2022 EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode); 2023 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 2024 cmd->ResponseLen = 0; 2025 return; 2026 } 2027 2028 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP); 2029 if (dma_mem == NULL) { 2030 EL(ha, "failed, kmem_zalloc\n"); 2031 cmd->Status = EXT_STATUS_NO_MEMORY; 2032 cmd->ResponseLen = 0; 2033 return; 2034 } 2035 /* Verify the size of and copy in the passthru request structure. */ 2036 if (cmd->RequestLen != pld_size) { 2037 /* Return error */ 2038 EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n", 2039 cmd->RequestLen, pld_size); 2040 cmd->Status = EXT_STATUS_INVALID_PARAM; 2041 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 2042 cmd->ResponseLen = 0; 2043 return; 2044 } 2045 2046 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req, 2047 pld_size, mode) != 0) { 2048 EL(ha, "failed, ddi_copyin\n"); 2049 cmd->Status = EXT_STATUS_COPY_ERR; 2050 cmd->ResponseLen = 0; 2051 return; 2052 } 2053 2054 /* 2055 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req 2056 * request data structure. 
2057 */ 2058 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) { 2059 scsi_req.lun = sp_req->TargetAddr.Lun; 2060 scsi_req.sense_length = sizeof (sp_req->SenseData); 2061 scsi_req.cdbp = &sp_req->Cdb[0]; 2062 scsi_req.cdb_len = sp_req->CdbLength; 2063 scsi_req.direction = sp_req->Direction; 2064 usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr; 2065 scsi_req.u_sense = &usp_req->SenseData[0]; 2066 cmd->DetailStatus = EXT_DSTATUS_TARGET; 2067 2068 qlnt = QLNT_PORT; 2069 name = (uint8_t *)&sp_req->TargetAddr.Target; 2070 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n", 2071 ha->instance, cmd->SubCode, sp_req->TargetAddr.Target); 2072 tq = ql_find_port(ha, name, qlnt); 2073 } else { 2074 /* 2075 * Must be FC PASSTHRU, verified above. 2076 */ 2077 if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) { 2078 qlnt = QLNT_PORT; 2079 name = &fc_req->FCScsiAddr.DestAddr.WWPN[0]; 2080 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, " 2081 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 2082 ha->instance, cmd->SubCode, name[0], name[1], 2083 name[2], name[3], name[4], name[5], name[6], 2084 name[7]); 2085 tq = ql_find_port(ha, name, qlnt); 2086 } else if (fc_req->FCScsiAddr.DestType == 2087 EXT_DEF_DESTTYPE_WWNN) { 2088 qlnt = QLNT_NODE; 2089 name = &fc_req->FCScsiAddr.DestAddr.WWNN[0]; 2090 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, " 2091 "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 2092 ha->instance, cmd->SubCode, name[0], name[1], 2093 name[2], name[3], name[4], name[5], name[6], 2094 name[7]); 2095 tq = ql_find_port(ha, name, qlnt); 2096 } else if (fc_req->FCScsiAddr.DestType == 2097 EXT_DEF_DESTTYPE_PORTID) { 2098 qlnt = QLNT_PID; 2099 name = &fc_req->FCScsiAddr.DestAddr.Id[0]; 2100 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID=" 2101 "%02x%02x%02x\n", ha->instance, cmd->SubCode, 2102 name[0], name[1], name[2]); 2103 tq = ql_find_port(ha, name, qlnt); 2104 } else { 2105 EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n", 2106 cmd->SubCode, fc_req->FCScsiAddr.DestType); 
2107 cmd->Status = EXT_STATUS_INVALID_PARAM; 2108 cmd->ResponseLen = 0; 2109 return; 2110 } 2111 scsi_req.lun = fc_req->FCScsiAddr.Lun; 2112 scsi_req.sense_length = sizeof (fc_req->SenseData); 2113 scsi_req.cdbp = &sp_req->Cdb[0]; 2114 scsi_req.cdb_len = sp_req->CdbLength; 2115 ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr; 2116 scsi_req.u_sense = &ufc_req->SenseData[0]; 2117 scsi_req.direction = fc_req->Direction; 2118 } 2119 2120 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) { 2121 EL(ha, "failed, fc_port not found\n"); 2122 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 2123 cmd->ResponseLen = 0; 2124 return; 2125 } 2126 2127 if (tq->flags & TQF_NEED_AUTHENTICATION) { 2128 EL(ha, "target not available; loopid=%xh\n", tq->loop_id); 2129 cmd->Status = EXT_STATUS_DEVICE_OFFLINE; 2130 cmd->ResponseLen = 0; 2131 return; 2132 } 2133 2134 /* Allocate command block. */ 2135 if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN || 2136 scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) && 2137 cmd->ResponseLen) { 2138 pld_size = cmd->ResponseLen; 2139 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size); 2140 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 2141 if (pkt == NULL) { 2142 EL(ha, "failed, kmem_zalloc\n"); 2143 cmd->Status = EXT_STATUS_NO_MEMORY; 2144 cmd->ResponseLen = 0; 2145 return; 2146 } 2147 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t); 2148 2149 /* Get DMA memory for the IOCB */ 2150 if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA, 2151 QL_DMA_DATA_ALIGN) != QL_SUCCESS) { 2152 cmn_err(CE_WARN, "%s(%d): request queue DMA memory " 2153 "alloc failed", QL_NAME, ha->instance); 2154 kmem_free(pkt, pkt_size); 2155 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 2156 cmd->ResponseLen = 0; 2157 return; 2158 } 2159 2160 if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) { 2161 scsi_req.direction = (uint8_t) 2162 (CFG_IST(ha, CFG_CTRL_24258081) ? 
2163 CF_RD : CF_DATA_IN | CF_STAG); 2164 } else { 2165 scsi_req.direction = (uint8_t) 2166 (CFG_IST(ha, CFG_CTRL_24258081) ? 2167 CF_WR : CF_DATA_OUT | CF_STAG); 2168 cmd->ResponseLen = 0; 2169 2170 /* Get command payload. */ 2171 if (ql_get_buffer_data( 2172 (caddr_t)(uintptr_t)cmd->ResponseAdr, 2173 pld, pld_size, mode) != pld_size) { 2174 EL(ha, "failed, get_buffer_data\n"); 2175 cmd->Status = EXT_STATUS_COPY_ERR; 2176 2177 kmem_free(pkt, pkt_size); 2178 ql_free_dma_resource(ha, dma_mem); 2179 kmem_free(dma_mem, sizeof (dma_mem_t)); 2180 return; 2181 } 2182 2183 /* Copy out going data to DMA buffer. */ 2184 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld, 2185 (uint8_t *)dma_mem->bp, pld_size, 2186 DDI_DEV_AUTOINCR); 2187 2188 /* Sync DMA buffer. */ 2189 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 2190 dma_mem->size, DDI_DMA_SYNC_FORDEV); 2191 } 2192 } else { 2193 scsi_req.direction = (uint8_t) 2194 (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG); 2195 cmd->ResponseLen = 0; 2196 2197 pkt_size = sizeof (ql_mbx_iocb_t); 2198 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 2199 if (pkt == NULL) { 2200 EL(ha, "failed, kmem_zalloc-2\n"); 2201 cmd->Status = EXT_STATUS_NO_MEMORY; 2202 return; 2203 } 2204 pld = NULL; 2205 pld_size = 0; 2206 } 2207 2208 /* retries = ha->port_down_retry_count; */ 2209 retries = 1; 2210 cmd->Status = EXT_STATUS_OK; 2211 cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO; 2212 2213 QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance); 2214 QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len); 2215 2216 do { 2217 if (DRIVER_SUSPENDED(ha)) { 2218 sts.comp_status = CS_LOOP_DOWN_ABORT; 2219 break; 2220 } 2221 2222 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2223 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7; 2224 pkt->cmd24.entry_count = 1; 2225 2226 /* Set LUN number */ 2227 pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun); 2228 pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun); 2229 2230 /* Set N_port handle */ 2231 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id); 2232 2233 /* Set VP 
Index */ 2234 pkt->cmd24.vp_index = ha->vp_index; 2235 2236 /* Set target ID */ 2237 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa; 2238 pkt->cmd24.target_id[1] = tq->d_id.b.area; 2239 pkt->cmd24.target_id[2] = tq->d_id.b.domain; 2240 2241 /* Set ISP command timeout. */ 2242 pkt->cmd24.timeout = (uint16_t)LE_16(15); 2243 2244 /* Load SCSI CDB */ 2245 ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp, 2246 pkt->cmd24.scsi_cdb, scsi_req.cdb_len, 2247 DDI_DEV_AUTOINCR); 2248 for (cnt = 0; cnt < MAX_CMDSZ; 2249 cnt = (uint16_t)(cnt + 4)) { 2250 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb 2251 + cnt, 4); 2252 } 2253 2254 /* Set tag queue control flags */ 2255 pkt->cmd24.task = TA_STAG; 2256 2257 if (pld_size) { 2258 /* Set transfer direction. */ 2259 pkt->cmd24.control_flags = scsi_req.direction; 2260 2261 /* Set data segment count. */ 2262 pkt->cmd24.dseg_count = LE_16(1); 2263 2264 /* Load total byte count. */ 2265 pkt->cmd24.total_byte_count = LE_32(pld_size); 2266 2267 /* Load data descriptor. 
*/ 2268 pkt->cmd24.dseg_0_address[0] = (uint32_t) 2269 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 2270 pkt->cmd24.dseg_0_address[1] = (uint32_t) 2271 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 2272 pkt->cmd24.dseg_0_length = LE_32(pld_size); 2273 } 2274 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) { 2275 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3; 2276 pkt->cmd3.entry_count = 1; 2277 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 2278 pkt->cmd3.target_l = LSB(tq->loop_id); 2279 pkt->cmd3.target_h = MSB(tq->loop_id); 2280 } else { 2281 pkt->cmd3.target_h = LSB(tq->loop_id); 2282 } 2283 pkt->cmd3.lun_l = LSB(scsi_req.lun); 2284 pkt->cmd3.lun_h = MSB(scsi_req.lun); 2285 pkt->cmd3.control_flags_l = scsi_req.direction; 2286 pkt->cmd3.timeout = LE_16(15); 2287 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) { 2288 pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt]; 2289 } 2290 if (pld_size) { 2291 pkt->cmd3.dseg_count = LE_16(1); 2292 pkt->cmd3.byte_count = LE_32(pld_size); 2293 pkt->cmd3.dseg_0_address[0] = (uint32_t) 2294 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 2295 pkt->cmd3.dseg_0_address[1] = (uint32_t) 2296 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 2297 pkt->cmd3.dseg_0_length = LE_32(pld_size); 2298 } 2299 } else { 2300 pkt->cmd.entry_type = IOCB_CMD_TYPE_2; 2301 pkt->cmd.entry_count = 1; 2302 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 2303 pkt->cmd.target_l = LSB(tq->loop_id); 2304 pkt->cmd.target_h = MSB(tq->loop_id); 2305 } else { 2306 pkt->cmd.target_h = LSB(tq->loop_id); 2307 } 2308 pkt->cmd.lun_l = LSB(scsi_req.lun); 2309 pkt->cmd.lun_h = MSB(scsi_req.lun); 2310 pkt->cmd.control_flags_l = scsi_req.direction; 2311 pkt->cmd.timeout = LE_16(15); 2312 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) { 2313 pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt]; 2314 } 2315 if (pld_size) { 2316 pkt->cmd.dseg_count = LE_16(1); 2317 pkt->cmd.byte_count = LE_32(pld_size); 2318 pkt->cmd.dseg_0_address = (uint32_t) 2319 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 2320 pkt->cmd.dseg_0_length = 
LE_32(pld_size); 2321 } 2322 } 2323 /* Go issue command and wait for completion. */ 2324 QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance); 2325 QL_DUMP_9(pkt, 8, pkt_size); 2326 2327 status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); 2328 2329 if (pld_size) { 2330 /* Sync in coming DMA buffer. */ 2331 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 2332 dma_mem->size, DDI_DMA_SYNC_FORKERNEL); 2333 /* Copy in coming DMA data. */ 2334 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld, 2335 (uint8_t *)dma_mem->bp, pld_size, 2336 DDI_DEV_AUTOINCR); 2337 } 2338 2339 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2340 pkt->sts24.entry_status = (uint8_t) 2341 (pkt->sts24.entry_status & 0x3c); 2342 } else { 2343 pkt->sts.entry_status = (uint8_t) 2344 (pkt->sts.entry_status & 0x7e); 2345 } 2346 2347 if (status == QL_SUCCESS && pkt->sts.entry_status != 0) { 2348 EL(ha, "failed, entry_status=%xh, d_id=%xh\n", 2349 pkt->sts.entry_status, tq->d_id.b24); 2350 status = QL_FUNCTION_PARAMETER_ERROR; 2351 } 2352 2353 sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ? 2354 LE_16(pkt->sts24.comp_status) : 2355 LE_16(pkt->sts.comp_status)); 2356 2357 /* 2358 * We have verified about all the request that can be so far. 2359 * Now we need to start verification of our ability to 2360 * actually issue the CDB. 
2361 */ 2362 if (DRIVER_SUSPENDED(ha)) { 2363 sts.comp_status = CS_LOOP_DOWN_ABORT; 2364 break; 2365 } else if (status == QL_SUCCESS && 2366 (sts.comp_status == CS_PORT_LOGGED_OUT || 2367 sts.comp_status == CS_PORT_UNAVAILABLE)) { 2368 EL(ha, "login retry d_id=%xh\n", tq->d_id.b24); 2369 if (tq->flags & TQF_FABRIC_DEVICE) { 2370 rval = ql_login_fport(ha, tq, tq->loop_id, 2371 LFF_NO_PLOGI, &mr); 2372 if (rval != QL_SUCCESS) { 2373 EL(ha, "failed, login_fport=%xh, " 2374 "d_id=%xh\n", rval, tq->d_id.b24); 2375 } 2376 } else { 2377 rval = ql_login_lport(ha, tq, tq->loop_id, 2378 LLF_NONE); 2379 if (rval != QL_SUCCESS) { 2380 EL(ha, "failed, login_lport=%xh, " 2381 "d_id=%xh\n", rval, tq->d_id.b24); 2382 } 2383 } 2384 } else { 2385 break; 2386 } 2387 2388 bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t)); 2389 2390 } while (retries--); 2391 2392 if (sts.comp_status == CS_LOOP_DOWN_ABORT) { 2393 /* Cannot issue command now, maybe later */ 2394 EL(ha, "failed, suspended\n"); 2395 kmem_free(pkt, pkt_size); 2396 ql_free_dma_resource(ha, dma_mem); 2397 kmem_free(dma_mem, sizeof (dma_mem_t)); 2398 cmd->Status = EXT_STATUS_SUSPENDED; 2399 cmd->ResponseLen = 0; 2400 return; 2401 } 2402 2403 if (status != QL_SUCCESS) { 2404 /* Command error */ 2405 EL(ha, "failed, I/O\n"); 2406 kmem_free(pkt, pkt_size); 2407 ql_free_dma_resource(ha, dma_mem); 2408 kmem_free(dma_mem, sizeof (dma_mem_t)); 2409 cmd->Status = EXT_STATUS_ERR; 2410 cmd->DetailStatus = status; 2411 cmd->ResponseLen = 0; 2412 return; 2413 } 2414 2415 /* Setup status. */ 2416 if (CFG_IST(ha, CFG_CTRL_24258081)) { 2417 sts.scsi_status_l = pkt->sts24.scsi_status_l; 2418 sts.scsi_status_h = pkt->sts24.scsi_status_h; 2419 2420 /* Setup residuals. */ 2421 sts.residual_length = LE_32(pkt->sts24.residual_length); 2422 2423 /* Setup state flags. 
*/ 2424 sts.state_flags_l = pkt->sts24.state_flags_l; 2425 sts.state_flags_h = pkt->sts24.state_flags_h; 2426 if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) { 2427 sts.state_flags_h = (uint8_t)(sts.state_flags_h | 2428 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD | 2429 SF_XFERRED_DATA | SF_GOT_STATUS); 2430 } else { 2431 sts.state_flags_h = (uint8_t)(sts.state_flags_h | 2432 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD | 2433 SF_GOT_STATUS); 2434 } 2435 if (scsi_req.direction & CF_WR) { 2436 sts.state_flags_l = (uint8_t)(sts.state_flags_l | 2437 SF_DATA_OUT); 2438 } else if (scsi_req.direction & CF_RD) { 2439 sts.state_flags_l = (uint8_t)(sts.state_flags_l | 2440 SF_DATA_IN); 2441 } 2442 sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q); 2443 2444 /* Setup FCP response info. */ 2445 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ? 2446 LE_32(pkt->sts24.fcp_rsp_data_length) : 0; 2447 sts.rsp_info = &pkt->sts24.rsp_sense_data[0]; 2448 for (cnt = 0; cnt < sts.rsp_info_length; 2449 cnt = (uint16_t)(cnt + 4)) { 2450 ql_chg_endian(sts.rsp_info + cnt, 4); 2451 } 2452 2453 /* Setup sense data. */ 2454 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) { 2455 sts.req_sense_length = 2456 LE_32(pkt->sts24.fcp_sense_length); 2457 sts.state_flags_h = (uint8_t)(sts.state_flags_h | 2458 SF_ARQ_DONE); 2459 } else { 2460 sts.req_sense_length = 0; 2461 } 2462 sts.req_sense_data = 2463 &pkt->sts24.rsp_sense_data[sts.rsp_info_length]; 2464 cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) - 2465 (uintptr_t)sts.req_sense_data); 2466 for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) { 2467 ql_chg_endian(sts.req_sense_data + cnt, 4); 2468 } 2469 } else { 2470 sts.scsi_status_l = pkt->sts.scsi_status_l; 2471 sts.scsi_status_h = pkt->sts.scsi_status_h; 2472 2473 /* Setup residuals. */ 2474 sts.residual_length = LE_32(pkt->sts.residual_length); 2475 2476 /* Setup state flags. 
*/ 2477 sts.state_flags_l = pkt->sts.state_flags_l; 2478 sts.state_flags_h = pkt->sts.state_flags_h; 2479 2480 /* Setup FCP response info. */ 2481 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ? 2482 LE_16(pkt->sts.rsp_info_length) : 0; 2483 sts.rsp_info = &pkt->sts.rsp_info[0]; 2484 2485 /* Setup sense data. */ 2486 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ? 2487 LE_16(pkt->sts.req_sense_length) : 0; 2488 sts.req_sense_data = &pkt->sts.req_sense_data[0]; 2489 } 2490 2491 QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance); 2492 QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t)); 2493 2494 switch (sts.comp_status) { 2495 case CS_INCOMPLETE: 2496 case CS_ABORTED: 2497 case CS_DEVICE_UNAVAILABLE: 2498 case CS_PORT_UNAVAILABLE: 2499 case CS_PORT_LOGGED_OUT: 2500 case CS_PORT_CONFIG_CHG: 2501 case CS_PORT_BUSY: 2502 case CS_LOOP_DOWN_ABORT: 2503 cmd->Status = EXT_STATUS_BUSY; 2504 break; 2505 case CS_RESET: 2506 case CS_QUEUE_FULL: 2507 cmd->Status = EXT_STATUS_ERR; 2508 break; 2509 case CS_TIMEOUT: 2510 cmd->Status = EXT_STATUS_ERR; 2511 break; 2512 case CS_DATA_OVERRUN: 2513 cmd->Status = EXT_STATUS_DATA_OVERRUN; 2514 break; 2515 case CS_DATA_UNDERRUN: 2516 cmd->Status = EXT_STATUS_DATA_UNDERRUN; 2517 break; 2518 } 2519 2520 /* 2521 * If non data transfer commands fix tranfer counts. 
2522 */ 2523 if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY || 2524 scsi_req.cdbp[0] == SCMD_REZERO_UNIT || 2525 scsi_req.cdbp[0] == SCMD_SEEK || 2526 scsi_req.cdbp[0] == SCMD_SEEK_G1 || 2527 scsi_req.cdbp[0] == SCMD_RESERVE || 2528 scsi_req.cdbp[0] == SCMD_RELEASE || 2529 scsi_req.cdbp[0] == SCMD_START_STOP || 2530 scsi_req.cdbp[0] == SCMD_DOORLOCK || 2531 scsi_req.cdbp[0] == SCMD_VERIFY || 2532 scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK || 2533 scsi_req.cdbp[0] == SCMD_VERIFY_G0 || 2534 scsi_req.cdbp[0] == SCMD_SPACE || 2535 scsi_req.cdbp[0] == SCMD_ERASE || 2536 (scsi_req.cdbp[0] == SCMD_FORMAT && 2537 (scsi_req.cdbp[1] & FPB_DATA) == 0)) { 2538 /* 2539 * Non data transfer command, clear sts_entry residual 2540 * length. 2541 */ 2542 sts.residual_length = 0; 2543 cmd->ResponseLen = 0; 2544 if (sts.comp_status == CS_DATA_UNDERRUN) { 2545 sts.comp_status = CS_COMPLETE; 2546 cmd->Status = EXT_STATUS_OK; 2547 } 2548 } else { 2549 cmd->ResponseLen = pld_size; 2550 } 2551 2552 /* Correct ISP completion status */ 2553 if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 && 2554 (sts.scsi_status_h & FCP_RSP_MASK) == 0) { 2555 QL_PRINT_9(CE_CONT, "(%d): Correct completion\n", 2556 ha->instance); 2557 scsi_req.resid = 0; 2558 } else if (sts.comp_status == CS_DATA_UNDERRUN) { 2559 QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n", 2560 ha->instance); 2561 scsi_req.resid = sts.residual_length; 2562 if (sts.scsi_status_h & FCP_RESID_UNDER) { 2563 cmd->Status = (uint32_t)EXT_STATUS_OK; 2564 2565 cmd->ResponseLen = (uint32_t) 2566 (pld_size - scsi_req.resid); 2567 } else { 2568 EL(ha, "failed, Transfer ERROR\n"); 2569 cmd->Status = EXT_STATUS_ERR; 2570 cmd->ResponseLen = 0; 2571 } 2572 } else { 2573 QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, " 2574 "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance, 2575 tq->d_id.b24, sts.comp_status, sts.scsi_status_h, 2576 sts.scsi_status_l); 2577 2578 scsi_req.resid = pld_size; 2579 /* 2580 * Handle residual count on 
SCSI check 2581 * condition. 2582 * 2583 * - If Residual Under / Over is set, use the 2584 * Residual Transfer Length field in IOCB. 2585 * - If Residual Under / Over is not set, and 2586 * Transferred Data bit is set in State Flags 2587 * field of IOCB, report residual value of 0 2588 * (you may want to do this for tape 2589 * Write-type commands only). This takes care 2590 * of logical end of tape problem and does 2591 * not break Unit Attention. 2592 * - If Residual Under / Over is not set, and 2593 * Transferred Data bit is not set in State 2594 * Flags, report residual value equal to 2595 * original data transfer length. 2596 */ 2597 if (sts.scsi_status_l & STATUS_CHECK) { 2598 cmd->Status = EXT_STATUS_SCSI_STATUS; 2599 cmd->DetailStatus = sts.scsi_status_l; 2600 if (sts.scsi_status_h & 2601 (FCP_RESID_OVER | FCP_RESID_UNDER)) { 2602 scsi_req.resid = sts.residual_length; 2603 } else if (sts.state_flags_h & 2604 STATE_XFERRED_DATA) { 2605 scsi_req.resid = 0; 2606 } 2607 } 2608 } 2609 2610 if (sts.scsi_status_l & STATUS_CHECK && 2611 sts.scsi_status_h & FCP_SNS_LEN_VALID && 2612 sts.req_sense_length) { 2613 /* 2614 * Check condition with vaild sense data flag set and sense 2615 * length != 0 2616 */ 2617 if (sts.req_sense_length > scsi_req.sense_length) { 2618 sense_sz = scsi_req.sense_length; 2619 } else { 2620 sense_sz = sts.req_sense_length; 2621 } 2622 2623 EL(ha, "failed, Check Condition Status, d_id=%xh\n", 2624 tq->d_id.b24); 2625 QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length); 2626 2627 if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense, 2628 (size_t)sense_sz, mode) != 0) { 2629 EL(ha, "failed, request sense ddi_copyout\n"); 2630 } 2631 2632 cmd->Status = EXT_STATUS_SCSI_STATUS; 2633 cmd->DetailStatus = sts.scsi_status_l; 2634 } 2635 2636 /* Copy response payload from DMA buffer to application. 
*/ 2637 if (scsi_req.direction & (CF_RD | CF_DATA_IN) && 2638 cmd->ResponseLen != 0) { 2639 QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, " 2640 "byte_count=%u, ResponseLen=%xh\n", ha->instance, 2641 scsi_req.resid, pld_size, cmd->ResponseLen); 2642 QL_DUMP_9(pld, 8, cmd->ResponseLen); 2643 2644 /* Send response payload. */ 2645 if (ql_send_buffer_data(pld, 2646 (caddr_t)(uintptr_t)cmd->ResponseAdr, 2647 cmd->ResponseLen, mode) != cmd->ResponseLen) { 2648 EL(ha, "failed, send_buffer_data\n"); 2649 cmd->Status = EXT_STATUS_COPY_ERR; 2650 cmd->ResponseLen = 0; 2651 } 2652 } 2653 2654 if (cmd->Status != EXT_STATUS_OK) { 2655 EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, " 2656 "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24); 2657 } else { 2658 /*EMPTY*/ 2659 QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n", 2660 ha->instance, cmd->ResponseLen); 2661 } 2662 2663 kmem_free(pkt, pkt_size); 2664 ql_free_dma_resource(ha, dma_mem); 2665 kmem_free(dma_mem, sizeof (dma_mem_t)); 2666 } 2667 2668 /* 2669 * ql_wwpn_to_scsiaddr 2670 * 2671 * Input: 2672 * ha: adapter state pointer. 2673 * cmd: EXT_IOCTL cmd struct pointer. 2674 * mode: flags. 2675 * 2676 * Context: 2677 * Kernel context. 
2678 */ 2679 static void 2680 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2681 { 2682 int status; 2683 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE]; 2684 EXT_SCSI_ADDR *tmp_addr; 2685 ql_tgt_t *tq; 2686 2687 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2688 2689 if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) { 2690 /* Return error */ 2691 EL(ha, "incorrect RequestLen\n"); 2692 cmd->Status = EXT_STATUS_INVALID_PARAM; 2693 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 2694 return; 2695 } 2696 2697 status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn, 2698 cmd->RequestLen, mode); 2699 2700 if (status != 0) { 2701 cmd->Status = EXT_STATUS_COPY_ERR; 2702 EL(ha, "failed, ddi_copyin\n"); 2703 return; 2704 } 2705 2706 tq = ql_find_port(ha, wwpn, QLNT_PORT); 2707 2708 if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) { 2709 /* no matching device */ 2710 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 2711 EL(ha, "failed, device not found\n"); 2712 return; 2713 } 2714 2715 /* Copy out the IDs found. For now we can only return target ID. */ 2716 tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr; 2717 2718 status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode); 2719 2720 if (status != 0) { 2721 cmd->Status = EXT_STATUS_COPY_ERR; 2722 EL(ha, "failed, ddi_copyout\n"); 2723 } else { 2724 cmd->Status = EXT_STATUS_OK; 2725 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2726 } 2727 } 2728 2729 /* 2730 * ql_host_idx 2731 * Gets host order index. 2732 * 2733 * Input: 2734 * ha: adapter state pointer. 2735 * cmd: EXT_IOCTL cmd struct pointer. 2736 * mode: flags. 2737 * 2738 * Returns: 2739 * None, request status indicated in cmd->Status. 2740 * 2741 * Context: 2742 * Kernel context. 
2743 */ 2744 static void 2745 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2746 { 2747 uint16_t idx; 2748 2749 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2750 2751 if (cmd->ResponseLen < sizeof (uint16_t)) { 2752 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2753 cmd->DetailStatus = sizeof (uint16_t); 2754 EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen); 2755 cmd->ResponseLen = 0; 2756 return; 2757 } 2758 2759 idx = (uint16_t)ha->instance; 2760 2761 if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr), 2762 sizeof (uint16_t), mode) != 0) { 2763 cmd->Status = EXT_STATUS_COPY_ERR; 2764 cmd->ResponseLen = 0; 2765 EL(ha, "failed, ddi_copyout\n"); 2766 } else { 2767 cmd->ResponseLen = sizeof (uint16_t); 2768 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2769 } 2770 } 2771 2772 /* 2773 * ql_host_drvname 2774 * Gets host driver name 2775 * 2776 * Input: 2777 * ha: adapter state pointer. 2778 * cmd: EXT_IOCTL cmd struct pointer. 2779 * mode: flags. 2780 * 2781 * Returns: 2782 * None, request status indicated in cmd->Status. 2783 * 2784 * Context: 2785 * Kernel context. 
2786 */ 2787 static void 2788 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2789 { 2790 2791 char drvname[] = QL_NAME; 2792 uint32_t qlnamelen; 2793 2794 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2795 2796 qlnamelen = (uint32_t)(strlen(QL_NAME)+1); 2797 2798 if (cmd->ResponseLen < qlnamelen) { 2799 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2800 cmd->DetailStatus = qlnamelen; 2801 EL(ha, "failed, ResponseLen: %xh, needed: %xh\n", 2802 cmd->ResponseLen, qlnamelen); 2803 cmd->ResponseLen = 0; 2804 return; 2805 } 2806 2807 if (ddi_copyout((void *)&drvname, 2808 (void *)(uintptr_t)(cmd->ResponseAdr), 2809 qlnamelen, mode) != 0) { 2810 cmd->Status = EXT_STATUS_COPY_ERR; 2811 cmd->ResponseLen = 0; 2812 EL(ha, "failed, ddi_copyout\n"); 2813 } else { 2814 cmd->ResponseLen = qlnamelen-1; 2815 } 2816 2817 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2818 } 2819 2820 /* 2821 * ql_read_nvram 2822 * Get NVRAM contents. 2823 * 2824 * Input: 2825 * ha: adapter state pointer. 2826 * cmd: EXT_IOCTL cmd struct pointer. 2827 * mode: flags. 2828 * 2829 * Returns: 2830 * None, request status indicated in cmd->Status. 2831 * 2832 * Context: 2833 * Kernel context. 2834 */ 2835 static void 2836 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2837 { 2838 2839 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2840 2841 if (cmd->ResponseLen < ha->nvram_cache->size) { 2842 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2843 cmd->DetailStatus = ha->nvram_cache->size; 2844 EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n", 2845 cmd->ResponseLen); 2846 cmd->ResponseLen = 0; 2847 return; 2848 } 2849 2850 /* Get NVRAM data. 
*/ 2851 if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr), 2852 mode) != 0) { 2853 cmd->Status = EXT_STATUS_COPY_ERR; 2854 cmd->ResponseLen = 0; 2855 EL(ha, "failed, copy error\n"); 2856 } else { 2857 cmd->ResponseLen = ha->nvram_cache->size; 2858 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2859 } 2860 } 2861 2862 /* 2863 * ql_write_nvram 2864 * Loads NVRAM contents. 2865 * 2866 * Input: 2867 * ha: adapter state pointer. 2868 * cmd: EXT_IOCTL cmd struct pointer. 2869 * mode: flags. 2870 * 2871 * Returns: 2872 * None, request status indicated in cmd->Status. 2873 * 2874 * Context: 2875 * Kernel context. 2876 */ 2877 static void 2878 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2879 { 2880 2881 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2882 2883 if (cmd->RequestLen < ha->nvram_cache->size) { 2884 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2885 cmd->DetailStatus = ha->nvram_cache->size; 2886 EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n", 2887 cmd->RequestLen); 2888 return; 2889 } 2890 2891 /* Load NVRAM data. */ 2892 if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr), 2893 mode) != 0) { 2894 cmd->Status = EXT_STATUS_COPY_ERR; 2895 EL(ha, "failed, copy error\n"); 2896 } else { 2897 /*EMPTY*/ 2898 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2899 } 2900 } 2901 2902 /* 2903 * ql_write_vpd 2904 * Loads VPD contents. 2905 * 2906 * Input: 2907 * ha: adapter state pointer. 2908 * cmd: EXT_IOCTL cmd struct pointer. 2909 * mode: flags. 2910 * 2911 * Returns: 2912 * None, request status indicated in cmd->Status. 2913 * 2914 * Context: 2915 * Kernel context. 
2916 */ 2917 static void 2918 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2919 { 2920 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2921 2922 int32_t rval = 0; 2923 2924 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 2925 cmd->Status = EXT_STATUS_INVALID_REQUEST; 2926 EL(ha, "failed, invalid request for HBA\n"); 2927 return; 2928 } 2929 2930 if (cmd->RequestLen < QL_24XX_VPD_SIZE) { 2931 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2932 cmd->DetailStatus = QL_24XX_VPD_SIZE; 2933 EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n", 2934 cmd->RequestLen); 2935 return; 2936 } 2937 2938 /* Load VPD data. */ 2939 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr), 2940 mode)) != 0) { 2941 cmd->Status = EXT_STATUS_COPY_ERR; 2942 cmd->DetailStatus = rval; 2943 EL(ha, "failed, errno=%x\n", rval); 2944 } else { 2945 /*EMPTY*/ 2946 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2947 } 2948 } 2949 2950 /* 2951 * ql_read_vpd 2952 * Dumps VPD contents. 2953 * 2954 * Input: 2955 * ha: adapter state pointer. 2956 * cmd: EXT_IOCTL cmd struct pointer. 2957 * mode: flags. 2958 * 2959 * Returns: 2960 * None, request status indicated in cmd->Status. 2961 * 2962 * Context: 2963 * Kernel context. 2964 */ 2965 static void 2966 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 2967 { 2968 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 2969 2970 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 2971 cmd->Status = EXT_STATUS_INVALID_REQUEST; 2972 EL(ha, "failed, invalid request for HBA\n"); 2973 return; 2974 } 2975 2976 if (cmd->ResponseLen < QL_24XX_VPD_SIZE) { 2977 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 2978 cmd->DetailStatus = QL_24XX_VPD_SIZE; 2979 EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n", 2980 cmd->ResponseLen); 2981 return; 2982 } 2983 2984 /* Dump VPD data. 
*/ 2985 if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr), 2986 mode)) != 0) { 2987 cmd->Status = EXT_STATUS_COPY_ERR; 2988 EL(ha, "failed,\n"); 2989 } else { 2990 /*EMPTY*/ 2991 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 2992 } 2993 } 2994 2995 /* 2996 * ql_get_fcache 2997 * Dumps flash cache contents. 2998 * 2999 * Input: 3000 * ha: adapter state pointer. 3001 * cmd: EXT_IOCTL cmd struct pointer. 3002 * mode: flags. 3003 * 3004 * Returns: 3005 * None, request status indicated in cmd->Status. 3006 * 3007 * Context: 3008 * Kernel context. 3009 */ 3010 static void 3011 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3012 { 3013 uint32_t bsize, boff, types, cpsize, hsize; 3014 ql_fcache_t *fptr; 3015 3016 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3017 3018 CACHE_LOCK(ha); 3019 3020 if (ha->fcache == NULL) { 3021 CACHE_UNLOCK(ha); 3022 cmd->Status = EXT_STATUS_ERR; 3023 EL(ha, "failed, adapter fcache not setup\n"); 3024 return; 3025 } 3026 3027 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 3028 bsize = 100; 3029 } else { 3030 bsize = 400; 3031 } 3032 3033 if (cmd->ResponseLen < bsize) { 3034 CACHE_UNLOCK(ha); 3035 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 3036 cmd->DetailStatus = bsize; 3037 EL(ha, "failed, ResponseLen < %d, len passed=%xh\n", 3038 bsize, cmd->ResponseLen); 3039 return; 3040 } 3041 3042 boff = 0; 3043 bsize = 0; 3044 fptr = ha->fcache; 3045 3046 /* 3047 * For backwards compatibility, get one of each image type 3048 */ 3049 types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI); 3050 while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) { 3051 /* Get the next image */ 3052 if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) { 3053 3054 cpsize = (fptr->buflen < 100 ? 
fptr->buflen : 100); 3055 3056 if (ddi_copyout(fptr->buf, 3057 (void *)(uintptr_t)(cmd->ResponseAdr + boff), 3058 cpsize, mode) != 0) { 3059 CACHE_UNLOCK(ha); 3060 EL(ha, "ddicopy failed, done\n"); 3061 cmd->Status = EXT_STATUS_COPY_ERR; 3062 cmd->DetailStatus = 0; 3063 return; 3064 } 3065 boff += 100; 3066 bsize += cpsize; 3067 types &= ~(fptr->type); 3068 } 3069 } 3070 3071 /* 3072 * Get the firmware image -- it needs to be last in the 3073 * buffer at offset 300 for backwards compatibility. Also for 3074 * backwards compatibility, the pci header is stripped off. 3075 */ 3076 if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) { 3077 3078 hsize = sizeof (pci_header_t) + sizeof (pci_data_t); 3079 if (hsize > fptr->buflen) { 3080 CACHE_UNLOCK(ha); 3081 EL(ha, "header size (%xh) exceeds buflen (%xh)\n", 3082 hsize, fptr->buflen); 3083 cmd->Status = EXT_STATUS_COPY_ERR; 3084 cmd->DetailStatus = 0; 3085 return; 3086 } 3087 3088 cpsize = ((fptr->buflen - hsize) < 100 ? 3089 fptr->buflen - hsize : 100); 3090 3091 if (ddi_copyout(fptr->buf+hsize, 3092 (void *)(uintptr_t)(cmd->ResponseAdr + 300), 3093 cpsize, mode) != 0) { 3094 CACHE_UNLOCK(ha); 3095 EL(ha, "fw ddicopy failed, done\n"); 3096 cmd->Status = EXT_STATUS_COPY_ERR; 3097 cmd->DetailStatus = 0; 3098 return; 3099 } 3100 bsize += 100; 3101 } 3102 3103 CACHE_UNLOCK(ha); 3104 cmd->Status = EXT_STATUS_OK; 3105 cmd->DetailStatus = bsize; 3106 3107 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3108 } 3109 3110 /* 3111 * ql_get_fcache_ex 3112 * Dumps flash cache contents. 3113 * 3114 * Input: 3115 * ha: adapter state pointer. 3116 * cmd: EXT_IOCTL cmd struct pointer. 3117 * mode: flags. 3118 * 3119 * Returns: 3120 * None, request status indicated in cmd->Status. 3121 * 3122 * Context: 3123 * Kernel context. 
3124 */ 3125 static void 3126 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3127 { 3128 uint32_t bsize = 0; 3129 uint32_t boff = 0; 3130 ql_fcache_t *fptr; 3131 3132 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3133 3134 CACHE_LOCK(ha); 3135 if (ha->fcache == NULL) { 3136 CACHE_UNLOCK(ha); 3137 cmd->Status = EXT_STATUS_ERR; 3138 EL(ha, "failed, adapter fcache not setup\n"); 3139 return; 3140 } 3141 3142 /* Make sure user passed enough buffer space */ 3143 for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) { 3144 bsize += FBUFSIZE; 3145 } 3146 3147 if (cmd->ResponseLen < bsize) { 3148 CACHE_UNLOCK(ha); 3149 if (cmd->ResponseLen != 0) { 3150 EL(ha, "failed, ResponseLen < %d, len passed=%xh\n", 3151 bsize, cmd->ResponseLen); 3152 } 3153 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 3154 cmd->DetailStatus = bsize; 3155 return; 3156 } 3157 3158 boff = 0; 3159 fptr = ha->fcache; 3160 while ((fptr != NULL) && (fptr->buf != NULL)) { 3161 /* Get the next image */ 3162 if (ddi_copyout(fptr->buf, 3163 (void *)(uintptr_t)(cmd->ResponseAdr + boff), 3164 (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE), 3165 mode) != 0) { 3166 CACHE_UNLOCK(ha); 3167 EL(ha, "failed, ddicopy at %xh, done\n", boff); 3168 cmd->Status = EXT_STATUS_COPY_ERR; 3169 cmd->DetailStatus = 0; 3170 return; 3171 } 3172 boff += FBUFSIZE; 3173 fptr = fptr->next; 3174 } 3175 3176 CACHE_UNLOCK(ha); 3177 cmd->Status = EXT_STATUS_OK; 3178 cmd->DetailStatus = bsize; 3179 3180 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3181 } 3182 3183 /* 3184 * ql_read_flash 3185 * Get flash contents. 3186 * 3187 * Input: 3188 * ha: adapter state pointer. 3189 * cmd: EXT_IOCTL cmd struct pointer. 3190 * mode: flags. 3191 * 3192 * Returns: 3193 * None, request status indicated in cmd->Status. 3194 * 3195 * Context: 3196 * Kernel context. 
3197 */ 3198 static void 3199 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3200 { 3201 ql_xioctl_t *xp = ha->xioctl; 3202 3203 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3204 3205 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 3206 EL(ha, "ql_stall_driver failed\n"); 3207 cmd->Status = EXT_STATUS_BUSY; 3208 cmd->DetailStatus = xp->fdesc.flash_size; 3209 cmd->ResponseLen = 0; 3210 return; 3211 } 3212 3213 if (ql_setup_fcache(ha) != QL_SUCCESS) { 3214 cmd->Status = EXT_STATUS_ERR; 3215 cmd->DetailStatus = xp->fdesc.flash_size; 3216 EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n", 3217 cmd->ResponseLen, xp->fdesc.flash_size); 3218 cmd->ResponseLen = 0; 3219 } else { 3220 /* adjust read size to flash size */ 3221 if (cmd->ResponseLen > xp->fdesc.flash_size) { 3222 EL(ha, "adjusting req=%xh, max=%xh\n", 3223 cmd->ResponseLen, xp->fdesc.flash_size); 3224 cmd->ResponseLen = xp->fdesc.flash_size; 3225 } 3226 3227 /* Get flash data. */ 3228 if (ql_flash_fcode_dump(ha, 3229 (void *)(uintptr_t)(cmd->ResponseAdr), 3230 (size_t)(cmd->ResponseLen), 0, mode) != 0) { 3231 cmd->Status = EXT_STATUS_COPY_ERR; 3232 cmd->ResponseLen = 0; 3233 EL(ha, "failed,\n"); 3234 } 3235 } 3236 3237 /* Resume I/O */ 3238 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3239 ql_restart_driver(ha); 3240 } else { 3241 EL(ha, "isp_abort_needed for restart\n"); 3242 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 3243 DRIVER_STALL); 3244 } 3245 3246 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3247 } 3248 3249 /* 3250 * ql_write_flash 3251 * Loads flash contents. 3252 * 3253 * Input: 3254 * ha: adapter state pointer. 3255 * cmd: EXT_IOCTL cmd struct pointer. 3256 * mode: flags. 3257 * 3258 * Returns: 3259 * None, request status indicated in cmd->Status. 3260 * 3261 * Context: 3262 * Kernel context. 
3263 */ 3264 static void 3265 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3266 { 3267 ql_xioctl_t *xp = ha->xioctl; 3268 3269 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3270 3271 if (ql_stall_driver(ha, 0) != QL_SUCCESS) { 3272 EL(ha, "ql_stall_driver failed\n"); 3273 cmd->Status = EXT_STATUS_BUSY; 3274 cmd->DetailStatus = xp->fdesc.flash_size; 3275 cmd->ResponseLen = 0; 3276 return; 3277 } 3278 3279 if (ql_setup_fcache(ha) != QL_SUCCESS) { 3280 cmd->Status = EXT_STATUS_ERR; 3281 cmd->DetailStatus = xp->fdesc.flash_size; 3282 EL(ha, "failed, RequestLen=%xh, size=%xh\n", 3283 cmd->RequestLen, xp->fdesc.flash_size); 3284 cmd->ResponseLen = 0; 3285 } else { 3286 /* Load flash data. */ 3287 if (cmd->RequestLen > xp->fdesc.flash_size) { 3288 cmd->Status = EXT_STATUS_ERR; 3289 cmd->DetailStatus = xp->fdesc.flash_size; 3290 EL(ha, "failed, RequestLen=%xh, flash size=%xh\n", 3291 cmd->RequestLen, xp->fdesc.flash_size); 3292 } else if (ql_flash_fcode_load(ha, 3293 (void *)(uintptr_t)(cmd->RequestAdr), 3294 (size_t)(cmd->RequestLen), mode) != 0) { 3295 cmd->Status = EXT_STATUS_COPY_ERR; 3296 EL(ha, "failed,\n"); 3297 } 3298 } 3299 3300 /* Resume I/O */ 3301 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3302 ql_restart_driver(ha); 3303 } else { 3304 EL(ha, "isp_abort_needed for restart\n"); 3305 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 3306 DRIVER_STALL); 3307 } 3308 3309 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3310 } 3311 3312 /* 3313 * ql_diagnostic_loopback 3314 * Performs EXT_CC_LOOPBACK Command 3315 * 3316 * Input: 3317 * ha: adapter state pointer. 3318 * cmd: Local EXT_IOCTL cmd struct pointer. 3319 * mode: flags. 3320 * 3321 * Returns: 3322 * None, request status indicated in cmd->Status. 3323 * 3324 * Context: 3325 * Kernel context. 
 */
static void
ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_LOOPBACK_REQ	plbreq;
	EXT_LOOPBACK_RSP	plbrsp;
	ql_mbx_data_t		mr;
	uint32_t		rval;
	caddr_t			bp;
	uint16_t		opt;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get loop back request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);

	/*
	 * Check transfer length fits in buffer.
	 * NOTE(review): this rejects only when the transfer exceeds the
	 * user buffer AND is below MAILBOX_BUFFER_SIZE; a logical "||"
	 * may have been intended — confirm before changing.
	 */
	if (plbreq.BufferLength < plbreq.TransferCount &&
	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Allocate command memory.  KM_SLEEP never returns NULL; the
	 * check below is defensive dead code kept for safety.
	 */
	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
	if (bp == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get loopback data from the caller's buffer. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyin-2\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* The loop must be stable and the driver stalled while testing. */
	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* determine topology so we can send the loopback or the echo */
	/* Echo is supported on 2300's only and above */

	if (CFG_IST(ha, CFG_CTRL_8081)) {
		if (!(ha->task_daemon_flags & LOOP_DOWN) && opt ==
		    MBC_LOOPBACK_POINT_EXTERNAL) {
			/* ELS echo payload is limited to 252 bytes here. */
			if (plbreq.TransferCount > 252) {
				EL(ha, "transfer count (%d) > 252\n",
				    plbreq.TransferCount);
				kmem_free(bp, plbreq.TransferCount);
				cmd->Status = EXT_STATUS_INVALID_PARAM;
				cmd->ResponseLen = 0;
				return;
			}
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    MBC_ECHO_ELS, &mr);
		} else {
			/* 81xx needs the loop point configured first. */
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, opt);
			}
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, 0);
			}
		}
	} else {
		if (!(ha->task_daemon_flags & LOOP_DOWN) &&
		    (ha->topology & QL_F_PORT) &&
		    ha->device_id >= 0x2300) {
			QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using "
			    "echo\n", ha->instance);
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    (uint16_t)(CFG_IST(ha, CFG_CTRL_8081) ?
			    MBC_ECHO_ELS : MBC_ECHO_64BIT), &mr);
		} else {
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
		}
	}

	ql_restart_driver(ha);

	/* Restart IP if it was shutdown. */
	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
		(void) ql_initialize_ip(ha);
		ql_isp_rcvbuf(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
		return;
	}

	/* Return loopback data to the caller's buffer. */
	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyout\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	kmem_free(bp, plbreq.TransferCount);

	/* Return loopback results from the mailbox registers. */
	plbrsp.BufferAddress = plbreq.BufferAddress;
	plbrsp.BufferLength = plbreq.TransferCount;
	plbrsp.CompletionStatus = mr.mb[0];

	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
		/* Echo reports no per-iteration error counters. */
		plbrsp.CrcErrorCount = 0;
		plbrsp.DisparityErrorCount = 0;
		plbrsp.FrameLengthErrorCount = 0;
		plbrsp.IterationCountLastError = 0;
	} else {
		plbrsp.CrcErrorCount = mr.mb[1];
		plbrsp.DisparityErrorCount = mr.mb[2];
		plbrsp.FrameLengthErrorCount = mr.mb[3];
		/*
		 * NOTE(review): mb[] entries are 16-bit, so mb[19] >> 16
		 * is always 0 and this is effectively mb[18] alone;
		 * (mb[18] << 16) | mb[19] looks intended — confirm.
		 */
		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
	}

	rval = ddi_copyout((void *)&plbrsp,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_LOOPBACK_RSP), mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyout-2\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_set_loop_point
 *	Setup loop point for port configuration.
 *
 * Input:
 *	ha:	adapter state structure.
 *	opt:	loop point option.
3505 * 3506 * Returns: 3507 * ql local function return status code. 3508 * 3509 * Context: 3510 * Kernel context. 3511 */ 3512 static int 3513 ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt) 3514 { 3515 ql_mbx_data_t mr; 3516 int rval; 3517 uint32_t timer; 3518 3519 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3520 3521 /* 3522 * We get the current port config, modify the loopback field and 3523 * write it back out. 3524 */ 3525 if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) { 3526 EL(ha, "get_port_config status=%xh\n", rval); 3527 return (rval); 3528 } 3529 /* 3530 * Set the loopback mode field while maintaining the others. 3531 * Currently only internal or none are supported. 3532 */ 3533 mr.mb[1] = (uint16_t)(mr.mb[1] &~LOOPBACK_MODE_FIELD_MASK); 3534 if (opt == MBC_LOOPBACK_POINT_INTERNAL) { 3535 mr.mb[1] = (uint16_t)(mr.mb[1] | 3536 LOOPBACK_MODE(LOOPBACK_MODE_INTERNAL)); 3537 } 3538 /* 3539 * Changing the port configuration will cause the port state to cycle 3540 * down and back up. The indication that this has happened is that 3541 * the point to point flag gets set. 3542 */ 3543 ADAPTER_STATE_LOCK(ha); 3544 ha->flags &= ~POINT_TO_POINT; 3545 ADAPTER_STATE_UNLOCK(ha); 3546 if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) { 3547 EL(ha, "set_port_config status=%xh\n", rval); 3548 } 3549 3550 /* wait for a while */ 3551 for (timer = opt ? 10 : 0; timer; timer--) { 3552 if (ha->flags & POINT_TO_POINT) { 3553 break; 3554 } 3555 /* Delay for 1000000 usec (1 second). */ 3556 ql_delay(ha, 1000000); 3557 } 3558 3559 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3560 3561 return (rval); 3562 } 3563 3564 /* 3565 * ql_send_els_rnid 3566 * IOCTL for extended link service RNID command. 3567 * 3568 * Input: 3569 * ha: adapter state pointer. 3570 * cmd: User space CT arguments pointer. 3571 * mode: flags. 3572 * 3573 * Returns: 3574 * None, request status indicated in cmd->Status. 3575 * 3576 * Context: 3577 * Kernel context. 
3578 */ 3579 static void 3580 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3581 { 3582 EXT_RNID_REQ tmp_rnid; 3583 port_id_t tmp_fcid; 3584 caddr_t tmp_buf, bptr; 3585 uint32_t copy_len; 3586 ql_tgt_t *tq; 3587 EXT_RNID_DATA rnid_data; 3588 uint32_t loop_ready_wait = 10 * 60 * 10; 3589 int rval = 0; 3590 uint32_t local_hba = 0; 3591 3592 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 3593 3594 if (DRIVER_SUSPENDED(ha)) { 3595 EL(ha, "failed, LOOP_NOT_READY\n"); 3596 cmd->Status = EXT_STATUS_BUSY; 3597 cmd->ResponseLen = 0; 3598 return; 3599 } 3600 3601 if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) { 3602 /* parameter error */ 3603 EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n", 3604 cmd->RequestLen); 3605 cmd->Status = EXT_STATUS_INVALID_PARAM; 3606 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 3607 cmd->ResponseLen = 0; 3608 return; 3609 } 3610 3611 if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, 3612 &tmp_rnid, cmd->RequestLen, mode) != 0) { 3613 EL(ha, "failed, ddi_copyin\n"); 3614 cmd->Status = EXT_STATUS_COPY_ERR; 3615 cmd->ResponseLen = 0; 3616 return; 3617 } 3618 3619 /* Find loop ID of the device */ 3620 if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) { 3621 bptr = CFG_IST(ha, CFG_CTRL_24258081) ? 3622 (caddr_t)&ha->init_ctrl_blk.cb24.node_name : 3623 (caddr_t)&ha->init_ctrl_blk.cb.node_name; 3624 if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN, 3625 EXT_DEF_WWN_NAME_SIZE) == 0) { 3626 local_hba = 1; 3627 } else { 3628 tq = ql_find_port(ha, 3629 (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE); 3630 } 3631 } else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) { 3632 bptr = CFG_IST(ha, CFG_CTRL_24258081) ? 
3633 (caddr_t)&ha->init_ctrl_blk.cb24.port_name : 3634 (caddr_t)&ha->init_ctrl_blk.cb.port_name; 3635 if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN, 3636 EXT_DEF_WWN_NAME_SIZE) == 0) { 3637 local_hba = 1; 3638 } else { 3639 tq = ql_find_port(ha, 3640 (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT); 3641 } 3642 } else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) { 3643 /* 3644 * Copy caller's d_id to tmp space. 3645 */ 3646 bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id, 3647 EXT_DEF_PORTID_SIZE_ACTUAL); 3648 BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]); 3649 3650 if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id, 3651 EXT_DEF_PORTID_SIZE_ACTUAL) == 0) { 3652 local_hba = 1; 3653 } else { 3654 tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id, 3655 QLNT_PID); 3656 } 3657 } 3658 3659 /* Allocate memory for command. */ 3660 tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP); 3661 if (tmp_buf == NULL) { 3662 EL(ha, "failed, kmem_zalloc\n"); 3663 cmd->Status = EXT_STATUS_NO_MEMORY; 3664 cmd->ResponseLen = 0; 3665 return; 3666 } 3667 3668 if (local_hba) { 3669 rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf); 3670 if (rval != QL_SUCCESS) { 3671 EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval); 3672 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3673 cmd->Status = EXT_STATUS_ERR; 3674 cmd->ResponseLen = 0; 3675 return; 3676 } 3677 3678 /* Save gotten RNID data. 
*/ 3679 bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA)); 3680 3681 /* Now build the Send RNID response */ 3682 tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC); 3683 tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE); 3684 tmp_buf[2] = 0; 3685 tmp_buf[3] = sizeof (EXT_RNID_DATA); 3686 3687 if (CFG_IST(ha, CFG_CTRL_24258081)) { 3688 bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4], 3689 EXT_DEF_WWN_NAME_SIZE); 3690 bcopy(ha->init_ctrl_blk.cb24.node_name, 3691 &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE], 3692 EXT_DEF_WWN_NAME_SIZE); 3693 } else { 3694 bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4], 3695 EXT_DEF_WWN_NAME_SIZE); 3696 bcopy(ha->init_ctrl_blk.cb.node_name, 3697 &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE], 3698 EXT_DEF_WWN_NAME_SIZE); 3699 } 3700 3701 bcopy((uint8_t *)&rnid_data, 3702 &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE], 3703 sizeof (EXT_RNID_DATA)); 3704 } else { 3705 if (tq == NULL) { 3706 /* no matching device */ 3707 EL(ha, "failed, device not found\n"); 3708 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3709 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 3710 cmd->DetailStatus = EXT_DSTATUS_TARGET; 3711 cmd->ResponseLen = 0; 3712 return; 3713 } 3714 3715 /* Send command */ 3716 rval = ql_send_rnid_els(ha, tq->loop_id, 3717 (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf); 3718 if (rval != QL_SUCCESS) { 3719 EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n", 3720 rval, tq->loop_id); 3721 while (LOOP_NOT_READY(ha)) { 3722 ql_delay(ha, 100000); 3723 if (loop_ready_wait-- == 0) { 3724 EL(ha, "failed, loop not ready\n"); 3725 cmd->Status = EXT_STATUS_ERR; 3726 cmd->ResponseLen = 0; 3727 } 3728 } 3729 rval = ql_send_rnid_els(ha, tq->loop_id, 3730 (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, 3731 tmp_buf); 3732 if (rval != QL_SUCCESS) { 3733 /* error */ 3734 EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n", 3735 rval, tq->loop_id); 3736 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3737 cmd->Status = EXT_STATUS_ERR; 3738 cmd->ResponseLen = 0; 3739 return; 3740 } 3741 } 3742 } 
3743 3744 /* Copy the response */ 3745 copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ? 3746 SEND_RNID_RSP_SIZE : cmd->ResponseLen; 3747 3748 if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr, 3749 copy_len, mode) != copy_len) { 3750 cmd->Status = EXT_STATUS_COPY_ERR; 3751 EL(ha, "failed, ddi_copyout\n"); 3752 } else { 3753 cmd->ResponseLen = copy_len; 3754 if (copy_len < SEND_RNID_RSP_SIZE) { 3755 cmd->Status = EXT_STATUS_DATA_OVERRUN; 3756 EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n"); 3757 3758 } else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) { 3759 cmd->Status = EXT_STATUS_DATA_UNDERRUN; 3760 EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n"); 3761 } else { 3762 cmd->Status = EXT_STATUS_OK; 3763 QL_PRINT_9(CE_CONT, "(%d): done\n", 3764 ha->instance); 3765 } 3766 } 3767 3768 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE); 3769 } 3770 3771 /* 3772 * ql_set_host_data 3773 * Process IOCTL subcommand to set host/adapter related data. 3774 * 3775 * Input: 3776 * ha: adapter state pointer. 3777 * cmd: User space CT arguments pointer. 3778 * mode: flags. 3779 * 3780 * Returns: 3781 * None, request status indicated in cmd->Status. 3782 * 3783 * Context: 3784 * Kernel context. 
3785 */ 3786 static void 3787 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3788 { 3789 QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance, 3790 cmd->SubCode); 3791 3792 /* 3793 * case off on command subcode 3794 */ 3795 switch (cmd->SubCode) { 3796 case EXT_SC_SET_RNID: 3797 ql_set_rnid_parameters(ha, cmd, mode); 3798 break; 3799 case EXT_SC_RST_STATISTICS: 3800 (void) ql_reset_statistics(ha, cmd); 3801 break; 3802 case EXT_SC_SET_BEACON_STATE: 3803 ql_set_led_state(ha, cmd, mode); 3804 break; 3805 case EXT_SC_SET_PARMS: 3806 case EXT_SC_SET_BUS_MODE: 3807 case EXT_SC_SET_DR_DUMP_BUF: 3808 case EXT_SC_SET_RISC_CODE: 3809 case EXT_SC_SET_FLASH_RAM: 3810 case EXT_SC_SET_LUN_BITMASK: 3811 case EXT_SC_SET_RETRY_CNT: 3812 case EXT_SC_SET_RTIN: 3813 case EXT_SC_SET_FC_LUN_BITMASK: 3814 case EXT_SC_ADD_TARGET_DEVICE: 3815 case EXT_SC_SWAP_TARGET_DEVICE: 3816 case EXT_SC_SET_SEL_TIMEOUT: 3817 default: 3818 /* function not supported. */ 3819 EL(ha, "failed, function not supported=%d\n", cmd->SubCode); 3820 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 3821 break; 3822 } 3823 3824 if (cmd->Status != EXT_STATUS_OK) { 3825 EL(ha, "failed, Status=%d\n", cmd->Status); 3826 } else { 3827 /*EMPTY*/ 3828 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3829 } 3830 } 3831 3832 /* 3833 * ql_get_host_data 3834 * Performs EXT_CC_GET_DATA subcommands. 3835 * 3836 * Input: 3837 * ha: adapter state pointer. 3838 * cmd: Local EXT_IOCTL cmd struct pointer. 3839 * mode: flags. 3840 * 3841 * Returns: 3842 * None, request status indicated in cmd->Status. 3843 * 3844 * Context: 3845 * Kernel context. 
3846 */ 3847 static void 3848 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 3849 { 3850 int out_size = 0; 3851 3852 QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance, 3853 cmd->SubCode); 3854 3855 /* case off on command subcode */ 3856 switch (cmd->SubCode) { 3857 case EXT_SC_GET_STATISTICS: 3858 out_size = sizeof (EXT_HBA_PORT_STAT); 3859 break; 3860 case EXT_SC_GET_FC_STATISTICS: 3861 out_size = sizeof (EXT_HBA_PORT_STAT); 3862 break; 3863 case EXT_SC_GET_PORT_SUMMARY: 3864 out_size = sizeof (EXT_DEVICEDATA); 3865 break; 3866 case EXT_SC_GET_RNID: 3867 out_size = sizeof (EXT_RNID_DATA); 3868 break; 3869 case EXT_SC_GET_TARGET_ID: 3870 out_size = sizeof (EXT_DEST_ADDR); 3871 break; 3872 case EXT_SC_GET_BEACON_STATE: 3873 out_size = sizeof (EXT_BEACON_CONTROL); 3874 break; 3875 case EXT_SC_GET_FC4_STATISTICS: 3876 out_size = sizeof (EXT_HBA_FC4STATISTICS); 3877 break; 3878 case EXT_SC_GET_DCBX_PARAM: 3879 out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE; 3880 break; 3881 case EXT_SC_GET_RESOURCE_CNTS: 3882 out_size = sizeof (EXT_RESOURCE_CNTS); 3883 break; 3884 case EXT_SC_GET_FCF_LIST: 3885 out_size = sizeof (EXT_FCF_LIST); 3886 break; 3887 case EXT_SC_GET_SCSI_ADDR: 3888 case EXT_SC_GET_ERR_DETECTIONS: 3889 case EXT_SC_GET_BUS_MODE: 3890 case EXT_SC_GET_DR_DUMP_BUF: 3891 case EXT_SC_GET_RISC_CODE: 3892 case EXT_SC_GET_FLASH_RAM: 3893 case EXT_SC_GET_LINK_STATUS: 3894 case EXT_SC_GET_LOOP_ID: 3895 case EXT_SC_GET_LUN_BITMASK: 3896 case EXT_SC_GET_PORT_DATABASE: 3897 case EXT_SC_GET_PORT_DATABASE_MEM: 3898 case EXT_SC_GET_POSITION_MAP: 3899 case EXT_SC_GET_RETRY_CNT: 3900 case EXT_SC_GET_RTIN: 3901 case EXT_SC_GET_FC_LUN_BITMASK: 3902 case EXT_SC_GET_SEL_TIMEOUT: 3903 default: 3904 /* function not supported. 
*/ 3905 EL(ha, "failed, function not supported=%d\n", cmd->SubCode); 3906 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 3907 cmd->ResponseLen = 0; 3908 return; 3909 } 3910 3911 if (cmd->ResponseLen < out_size) { 3912 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 3913 cmd->DetailStatus = out_size; 3914 EL(ha, "failed, ResponseLen=%xh, size=%xh\n", 3915 cmd->ResponseLen, out_size); 3916 cmd->ResponseLen = 0; 3917 return; 3918 } 3919 3920 switch (cmd->SubCode) { 3921 case EXT_SC_GET_RNID: 3922 ql_get_rnid_parameters(ha, cmd, mode); 3923 break; 3924 case EXT_SC_GET_STATISTICS: 3925 ql_get_statistics(ha, cmd, mode); 3926 break; 3927 case EXT_SC_GET_FC_STATISTICS: 3928 ql_get_statistics_fc(ha, cmd, mode); 3929 break; 3930 case EXT_SC_GET_FC4_STATISTICS: 3931 ql_get_statistics_fc4(ha, cmd, mode); 3932 break; 3933 case EXT_SC_GET_PORT_SUMMARY: 3934 ql_get_port_summary(ha, cmd, mode); 3935 break; 3936 case EXT_SC_GET_TARGET_ID: 3937 ql_get_target_id(ha, cmd, mode); 3938 break; 3939 case EXT_SC_GET_BEACON_STATE: 3940 ql_get_led_state(ha, cmd, mode); 3941 break; 3942 case EXT_SC_GET_DCBX_PARAM: 3943 ql_get_dcbx_parameters(ha, cmd, mode); 3944 break; 3945 case EXT_SC_GET_FCF_LIST: 3946 ql_get_fcf_list(ha, cmd, mode); 3947 break; 3948 case EXT_SC_GET_RESOURCE_CNTS: 3949 ql_get_resource_counts(ha, cmd, mode); 3950 break; 3951 } 3952 3953 if (cmd->Status != EXT_STATUS_OK) { 3954 EL(ha, "failed, Status=%d\n", cmd->Status); 3955 } else { 3956 /*EMPTY*/ 3957 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 3958 } 3959 } 3960 3961 /* ******************************************************************** */ 3962 /* Helper Functions */ 3963 /* ******************************************************************** */ 3964 3965 /* 3966 * ql_lun_count 3967 * Get numbers of LUNS on target. 3968 * 3969 * Input: 3970 * ha: adapter state pointer. 3971 * q: device queue pointer. 3972 * 3973 * Returns: 3974 * Number of LUNs. 3975 * 3976 * Context: 3977 * Kernel context. 
 */
static int
ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int	cnt;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Bypass LUNs that failed. */
	cnt = ql_report_lun(ha, tq);
	if (cnt == 0) {
		/* REPORT LUNS found nothing; fall back to inquiry scan. */
		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (cnt);
}

/*
 * ql_report_lun
 *	Get numbers of LUNS using report LUN command.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	q:	target queue pointer.
 *
 * Returns:
 *	Number of LUNs.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int			rval;
	uint8_t			retries;
	ql_mbx_iocb_t		*pkt;
	ql_rpt_lun_lst_t	*rpt;
	dma_mem_t		dma_mem;
	uint32_t		pkt_size, cnt;
	uint16_t		comp_status;
	uint8_t			scsi_status_h, scsi_status_l, *reqs;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		return (0);
	}

	/* One allocation holds the IOCB followed by the LUN list buffer. */
	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (0);
	}
	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, pkt_size);
		return (0);
	}

	/*
	 * Build and issue a REPORT LUNS command, retrying up to 4 times.
	 * The IOCB layout depends on the chip generation: type 7 for
	 * 24xx/25xx/80xx/81xx, type 3 for 64-bit addressing, else type 2.
	 */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl =
			    (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set Virtual Port ID */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB; allocation length in bytes 6-9. */
			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd24.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			/* Firmware expects the CDB 4-byte byte-swapped. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.total_byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd24.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			pkt->cmd3.entry_count = 1;
			/* Extended firmware carries a 16-bit target id. */
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd3.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else {
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			/* Type 2 IOCB carries a 32-bit segment address. */
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		}

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Decode the status IOCB per chip generation. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			/* Device underrun, treat as OK. */
			if (rval == QL_SUCCESS &&
			    comp_status == CS_DATA_UNDERRUN &&
			    scsi_status_h & FCP_RESID_UNDER) {
				break;
			}

			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			/* Port-gone and timeout conditions are not retried. */
			if (rval == QL_SUCCESS) {
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			} else if (rval == QL_ABORTED) {
				break;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			break;
		}
		/* Clear the IOCB/buffer before the next retry. */
		bzero((caddr_t)pkt, pkt_size);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
		rval = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
		/* List length (big-endian) / 8-byte entries = LUN count. */
		rval = (int)(BE_32(rpt->hdr.len) / 8);
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_inq_scan
 *	Get numbers of LUNS using inquiry command.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	count:	scan for the number of existing LUNs.
 *
 * Returns:
 *	Number of LUNs.
 *
 * Context:
 *	Kernel context.
4263 */ 4264 static int 4265 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count) 4266 { 4267 int lun, cnt, rval; 4268 ql_mbx_iocb_t *pkt; 4269 uint8_t *inq; 4270 uint32_t pkt_size; 4271 4272 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 4273 4274 pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE; 4275 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 4276 if (pkt == NULL) { 4277 EL(ha, "failed, kmem_zalloc\n"); 4278 return (0); 4279 } 4280 inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t)); 4281 4282 cnt = 0; 4283 for (lun = 0; lun < MAX_LUNS; lun++) { 4284 4285 if (DRIVER_SUSPENDED(ha)) { 4286 rval = QL_LOOP_DOWN; 4287 cnt = 0; 4288 break; 4289 } 4290 4291 rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE); 4292 if (rval == QL_SUCCESS) { 4293 switch (*inq) { 4294 case DTYPE_DIRECT: 4295 case DTYPE_PROCESSOR: /* Appliance. */ 4296 case DTYPE_WORM: 4297 case DTYPE_RODIRECT: 4298 case DTYPE_SCANNER: 4299 case DTYPE_OPTICAL: 4300 case DTYPE_CHANGER: 4301 case DTYPE_ESI: 4302 cnt++; 4303 break; 4304 case DTYPE_SEQUENTIAL: 4305 cnt++; 4306 tq->flags |= TQF_TAPE_DEVICE; 4307 break; 4308 default: 4309 QL_PRINT_9(CE_CONT, "(%d): failed, " 4310 "unsupported device id=%xh, lun=%d, " 4311 "type=%xh\n", ha->instance, tq->loop_id, 4312 lun, *inq); 4313 break; 4314 } 4315 4316 if (*inq == DTYPE_ESI || cnt >= count) { 4317 break; 4318 } 4319 } else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) { 4320 cnt = 0; 4321 break; 4322 } 4323 } 4324 4325 kmem_free(pkt, pkt_size); 4326 4327 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 4328 4329 return (cnt); 4330 } 4331 4332 /* 4333 * ql_inq 4334 * Issue inquiry command. 4335 * 4336 * Input: 4337 * ha: adapter state pointer. 4338 * tq: target queue pointer. 4339 * lun: LUN number. 4340 * pkt: command and buffer pointer. 4341 * inq_len: amount of inquiry data. 4342 * 4343 * Returns: 4344 * ql local function return status code. 4345 * 4346 * Context: 4347 * Kernel context. 
4348 */ 4349 static int 4350 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt, 4351 uint8_t inq_len) 4352 { 4353 dma_mem_t dma_mem; 4354 int rval, retries; 4355 uint32_t pkt_size, cnt; 4356 uint16_t comp_status; 4357 uint8_t scsi_status_h, scsi_status_l, *reqs; 4358 caddr_t inq_data; 4359 4360 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 4361 4362 if (DRIVER_SUSPENDED(ha)) { 4363 EL(ha, "failed, loop down\n"); 4364 return (QL_FUNCTION_TIMEOUT); 4365 } 4366 4367 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len); 4368 bzero((caddr_t)pkt, pkt_size); 4369 4370 inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t); 4371 4372 /* Get DMA memory for the IOCB */ 4373 if (ql_get_dma_mem(ha, &dma_mem, inq_len, 4374 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) { 4375 cmn_err(CE_WARN, "%s(%d): DMA memory " 4376 "alloc failed", QL_NAME, ha->instance); 4377 return (0); 4378 } 4379 4380 for (retries = 0; retries < 4; retries++) { 4381 if (CFG_IST(ha, CFG_CTRL_24258081)) { 4382 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7; 4383 pkt->cmd24.entry_count = 1; 4384 4385 /* Set LUN number */ 4386 pkt->cmd24.fcp_lun[2] = LSB(lun); 4387 pkt->cmd24.fcp_lun[3] = MSB(lun); 4388 4389 /* Set N_port handle */ 4390 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id); 4391 4392 /* Set target ID */ 4393 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa; 4394 pkt->cmd24.target_id[1] = tq->d_id.b.area; 4395 pkt->cmd24.target_id[2] = tq->d_id.b.domain; 4396 4397 /* Set Virtual Port ID */ 4398 pkt->cmd24.vp_index = ha->vp_index; 4399 4400 /* Set ISP command timeout. */ 4401 pkt->cmd24.timeout = LE_16(15); 4402 4403 /* Load SCSI CDB */ 4404 pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY; 4405 pkt->cmd24.scsi_cdb[4] = inq_len; 4406 for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) { 4407 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb 4408 + cnt, 4); 4409 } 4410 4411 /* Set tag queue control flags */ 4412 pkt->cmd24.task = TA_STAG; 4413 4414 /* Set transfer direction. 
*/ 4415 pkt->cmd24.control_flags = CF_RD; 4416 4417 /* Set data segment count. */ 4418 pkt->cmd24.dseg_count = LE_16(1); 4419 4420 /* Load total byte count. */ 4421 pkt->cmd24.total_byte_count = LE_32(inq_len); 4422 4423 /* Load data descriptor. */ 4424 pkt->cmd24.dseg_0_address[0] = (uint32_t) 4425 LE_32(LSD(dma_mem.cookie.dmac_laddress)); 4426 pkt->cmd24.dseg_0_address[1] = (uint32_t) 4427 LE_32(MSD(dma_mem.cookie.dmac_laddress)); 4428 pkt->cmd24.dseg_0_length = LE_32(inq_len); 4429 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) { 4430 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3; 4431 cnt = CMD_TYPE_3_DATA_SEGMENTS; 4432 4433 pkt->cmd3.entry_count = 1; 4434 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 4435 pkt->cmd3.target_l = LSB(tq->loop_id); 4436 pkt->cmd3.target_h = MSB(tq->loop_id); 4437 } else { 4438 pkt->cmd3.target_h = LSB(tq->loop_id); 4439 } 4440 pkt->cmd3.lun_l = LSB(lun); 4441 pkt->cmd3.lun_h = MSB(lun); 4442 pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG; 4443 pkt->cmd3.timeout = LE_16(15); 4444 pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY; 4445 pkt->cmd3.scsi_cdb[4] = inq_len; 4446 pkt->cmd3.dseg_count = LE_16(1); 4447 pkt->cmd3.byte_count = LE_32(inq_len); 4448 pkt->cmd3.dseg_0_address[0] = (uint32_t) 4449 LE_32(LSD(dma_mem.cookie.dmac_laddress)); 4450 pkt->cmd3.dseg_0_address[1] = (uint32_t) 4451 LE_32(MSD(dma_mem.cookie.dmac_laddress)); 4452 pkt->cmd3.dseg_0_length = LE_32(inq_len); 4453 } else { 4454 pkt->cmd.entry_type = IOCB_CMD_TYPE_2; 4455 cnt = CMD_TYPE_2_DATA_SEGMENTS; 4456 4457 pkt->cmd.entry_count = 1; 4458 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) { 4459 pkt->cmd.target_l = LSB(tq->loop_id); 4460 pkt->cmd.target_h = MSB(tq->loop_id); 4461 } else { 4462 pkt->cmd.target_h = LSB(tq->loop_id); 4463 } 4464 pkt->cmd.lun_l = LSB(lun); 4465 pkt->cmd.lun_h = MSB(lun); 4466 pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG; 4467 pkt->cmd.timeout = LE_16(15); 4468 pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY; 4469 pkt->cmd.scsi_cdb[4] = inq_len; 4470 pkt->cmd.dseg_count 
= LE_16(1); 4471 pkt->cmd.byte_count = LE_32(inq_len); 4472 pkt->cmd.dseg_0_address = (uint32_t) 4473 LE_32(LSD(dma_mem.cookie.dmac_laddress)); 4474 pkt->cmd.dseg_0_length = LE_32(inq_len); 4475 } 4476 4477 /* rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */ 4478 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, 4479 sizeof (ql_mbx_iocb_t)); 4480 4481 /* Sync in coming IOCB DMA buffer. */ 4482 (void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size, 4483 DDI_DMA_SYNC_FORKERNEL); 4484 /* Copy in coming DMA data. */ 4485 ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data, 4486 (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR); 4487 4488 if (CFG_IST(ha, CFG_CTRL_24258081)) { 4489 pkt->sts24.entry_status = (uint8_t) 4490 (pkt->sts24.entry_status & 0x3c); 4491 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status); 4492 scsi_status_h = pkt->sts24.scsi_status_h; 4493 scsi_status_l = pkt->sts24.scsi_status_l; 4494 cnt = scsi_status_h & FCP_RSP_LEN_VALID ? 4495 LE_32(pkt->sts24.fcp_rsp_data_length) : 0; 4496 reqs = &pkt->sts24.rsp_sense_data[cnt]; 4497 } else { 4498 pkt->sts.entry_status = (uint8_t) 4499 (pkt->sts.entry_status & 0x7e); 4500 comp_status = (uint16_t)LE_16(pkt->sts.comp_status); 4501 scsi_status_h = pkt->sts.scsi_status_h; 4502 scsi_status_l = pkt->sts.scsi_status_l; 4503 reqs = &pkt->sts.req_sense_data[0]; 4504 } 4505 if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) { 4506 EL(ha, "failed, entry_status=%xh, d_id=%xh\n", 4507 pkt->sts.entry_status, tq->d_id.b24); 4508 rval = QL_FUNCTION_PARAMETER_ERROR; 4509 } 4510 4511 if (rval != QL_SUCCESS || comp_status != CS_COMPLETE || 4512 scsi_status_l & STATUS_CHECK) { 4513 EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, " 4514 "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24, 4515 comp_status, scsi_status_h, scsi_status_l); 4516 4517 if (rval == QL_SUCCESS) { 4518 if ((comp_status == CS_TIMEOUT) || 4519 (comp_status == CS_PORT_UNAVAILABLE) || 4520 (comp_status == CS_PORT_LOGGED_OUT)) { 4521 rval = 
		    QL_FUNCTION_TIMEOUT;
				break;
			}
			rval = QL_FUNCTION_FAILED;
		}

		if (scsi_status_l & STATUS_CHECK) {
			/* Log the 18 bytes of SCSI request sense data. */
			EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
			    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
			    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
			    reqs[1], reqs[2], reqs[3], reqs[4],
			    reqs[5], reqs[6], reqs[7], reqs[8],
			    reqs[9], reqs[10], reqs[11], reqs[12],
			    reqs[13], reqs[14], reqs[15], reqs[16],
			    reqs[17]);
		}
	} else {
		/* Command completed cleanly - leave the retry loop. */
		break;
	}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_get_buffer_data
 *	Copies data from user space to kernel buffer.
 *
 * Input:
 *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags.
 *
 * Returns:
 *	Returns number of bytes transferred.
 *
 * Context:
 *	Kernel context.
 */
static uint32_t
ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
{
	uint32_t	cnt;

	/*
	 * Copy a byte at a time so that on a fault the exact number of
	 * bytes successfully transferred can be returned to the caller.
	 */
	for (cnt = 0; cnt < size; cnt++) {
		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
			break;
		}
	}

	return (cnt);
}

/*
 * ql_send_buffer_data
 *	Copies data from kernel buffer to user space.
 *
 * Input:
 *	src:	Kernel source buffer address.
 *	dst:	User destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags.
 *
 * Returns:
 *	Returns number of bytes transferred.
 *
 * Context:
 *	Kernel context.
4594 */ 4595 static uint32_t 4596 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode) 4597 { 4598 uint32_t cnt; 4599 4600 for (cnt = 0; cnt < size; cnt++) { 4601 if (ddi_copyout(src++, dst++, 1, mode) != 0) { 4602 QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n"); 4603 break; 4604 } 4605 } 4606 4607 return (cnt); 4608 } 4609 4610 /* 4611 * ql_find_port 4612 * Locates device queue. 4613 * 4614 * Input: 4615 * ha: adapter state pointer. 4616 * name: device port name. 4617 * 4618 * Returns: 4619 * Returns target queue pointer. 4620 * 4621 * Context: 4622 * Kernel context. 4623 */ 4624 static ql_tgt_t * 4625 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type) 4626 { 4627 ql_link_t *link; 4628 ql_tgt_t *tq; 4629 uint16_t index; 4630 4631 /* Scan port list for requested target */ 4632 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) { 4633 for (link = ha->dev[index].first; link != NULL; 4634 link = link->next) { 4635 tq = link->base_address; 4636 4637 switch (type) { 4638 case QLNT_LOOP_ID: 4639 if (bcmp(name, &tq->loop_id, 4640 sizeof (uint16_t)) == 0) { 4641 return (tq); 4642 } 4643 break; 4644 case QLNT_PORT: 4645 if (bcmp(name, tq->port_name, 8) == 0) { 4646 return (tq); 4647 } 4648 break; 4649 case QLNT_NODE: 4650 if (bcmp(name, tq->node_name, 8) == 0) { 4651 return (tq); 4652 } 4653 break; 4654 case QLNT_PID: 4655 if (bcmp(name, tq->d_id.r.d_id, 4656 sizeof (tq->d_id.r.d_id)) == 0) { 4657 return (tq); 4658 } 4659 break; 4660 default: 4661 EL(ha, "failed, invalid type=%d\n", type); 4662 return (NULL); 4663 } 4664 } 4665 } 4666 4667 return (NULL); 4668 } 4669 4670 /* 4671 * ql_24xx_flash_desc 4672 * Get flash descriptor table. 4673 * 4674 * Input: 4675 * ha: adapter state pointer. 4676 * 4677 * Returns: 4678 * ql local function return status code. 4679 * 4680 * Context: 4681 * Kernel context. 
 */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	flash_desc_t	*fdesc;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (ha->flash_desc_addr == 0) {
		QL_PRINT_9(CE_CONT, "(%d): desc ptr=0\n", ha->instance);
		return (QL_FUNCTION_FAILED);
	}

	/* NOTE: kmem_zalloc with KM_SLEEP cannot return NULL on illumos. */
	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
		EL(ha, "kmem_zalloc=null\n");
		return (QL_MEMORY_ALLOC_FAILED);
	}
	/* flash_desc_addr is a word address; << 2 converts to bytes. */
	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
	    ha->flash_desc_addr << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	/* Sum the table as little-endian 16-bit words; must total zero. */
	chksum = 0;
	bp = (uint16_t *)fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp++;
		LITTLE_ENDIAN_16(&data);
		chksum += data;
	}

	/* Convert all multi-byte fields to host byte order. */
	LITTLE_ENDIAN_32(&fdesc->flash_valid);
	LITTLE_ENDIAN_16(&fdesc->flash_version);
	LITTLE_ENDIAN_16(&fdesc->flash_len);
	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
	LITTLE_ENDIAN_16(&fdesc->flash_id);
	LITTLE_ENDIAN_32(&fdesc->block_size);
	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
	LITTLE_ENDIAN_32(&fdesc->flash_size);
	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
	LITTLE_ENDIAN_32(&fdesc->read_timeout);

	/* flash size in desc table is in 1024 bytes */
	fdesc->flash_size = fdesc->flash_size * 0x400;

	/* FLASH_DESC_VAILD is (sic) the macro name in the project header. */
	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
	    fdesc->flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	/* Publish the validated descriptor into per-adapter xioctl state. */
	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
	kmem_free(fdesc, sizeof (flash_desc_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}

/*
 * ql_setup_flash
 *	Gets the manufacturer and id number of the flash chip, and
 *	sets up the size parameter.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	int:	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Already probed - nothing to do. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_258081)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_fcode.
		 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ha->xioctl->fdesc.flash_size = 0x800000;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->xioctl->fdesc.flash_size = 0x200000;
		} else {
			ha->xioctl->fdesc.flash_size = 0x400000;
		}

		/* Prefer the on-flash descriptor table when present. */
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			EL(ha, "flash desc table ok, exit\n");
			return (rval);
		}
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021 parts: assume Winbond 8 MB (len 0x17). */
			xp->fdesc.flash_manuf = WINBOND_FLASH;
			xp->fdesc.flash_id = WINBOND_FLASHID;
			xp->fdesc.flash_len = 0x17;
		} else {
			(void) ql_24xx_flash_id(ha);
		}

	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/*
		 * Legacy parts: issue the JEDEC software-ID entry sequence
		 * (0xaa/0x55/0x90) to read manufacturer and device IDs.
		 */
		ql_flash_enable(ha);

		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			/* SBUS cards use different unlock addresses. */
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Exit software-ID mode (JEDEC reset command 0xf0). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0x9c;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/*
	 * Map the manufacturer/device ID pair to device geometry
	 * (size, block size, erase/protect commands).
	 */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* flash_len encodes log2 of the byte capacity. */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			xp->fdesc.flash_size = 0x100000;
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/* Try flash table later. */
	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "no default id\n");
		return (QL_SUCCESS);
	}

	/*
	 * hack for non std 2312 and 6312 boards. hardware people need to
	 * use either the 128k flash chip (original), or something larger.
	 * For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x2322 || ha->device_id == 0x6322) &&
	    (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) == 0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
		    xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
}

/*
 * ql_flash_fcode_load
 *	Loads fcode data into flash from application.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	bp:	user buffer address.
 *	size:	user buffer size.
 *	mode:	flags
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
static int
ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
    int mode)
{
	uint8_t		*bfp;
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Reject images larger than the flash part. */
	if (bsize > xp->fdesc.flash_size) {
		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
		    xp->fdesc.flash_size);
		return (ENOMEM);
	}

	/* NOTE: kmem_zalloc with KM_SLEEP cannot return NULL on illumos. */
	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		rval = ENOMEM;
	} else {
		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
			EL(ha, "failed, ddi_copyin\n");
			rval = EFAULT;
		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
			EL(ha, "failed, load_fcode\n");
			rval = EFAULT;
		} else {
			/* Reset caches on all adapter instances. */
			ql_update_flash_caches(ha);
			rval = 0;
		}
		kmem_free(bfp, bsize);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_load_fcode
 *	Loads fcode in to flash.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	dp:	data pointer.
 *	size:	data length.
 *	addr:	flash byte address.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
{
	uint32_t	cnt;
	int		rval;

	/* 24xx-class adapters use their own flash-load path. */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		return (ql_24xx_load_flash(ha, dp, size, addr));
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * sbus has an additional check to make
		 * sure they don't brick the HBA.
5106 */ 5107 if (dp[0] != 0xf1) { 5108 EL(ha, "failed, incorrect fcode for sbus\n"); 5109 return (QL_FUNCTION_PARAMETER_ERROR); 5110 } 5111 } 5112 5113 GLOBAL_HW_LOCK(); 5114 5115 /* Enable Flash Read/Write. */ 5116 ql_flash_enable(ha); 5117 5118 /* Erase flash prior to write. */ 5119 rval = ql_erase_flash(ha, 0); 5120 5121 if (rval == QL_SUCCESS) { 5122 /* Write fcode data to flash. */ 5123 for (cnt = 0; cnt < (uint32_t)size; cnt++) { 5124 /* Allow other system activity. */ 5125 if (cnt % 0x1000 == 0) { 5126 drv_usecwait(1); 5127 } 5128 rval = ql_program_flash_address(ha, addr++, *dp++); 5129 if (rval != QL_SUCCESS) 5130 break; 5131 } 5132 } 5133 5134 ql_flash_disable(ha); 5135 5136 GLOBAL_HW_UNLOCK(); 5137 5138 if (rval != QL_SUCCESS) { 5139 EL(ha, "failed, rval=%xh\n", rval); 5140 } else { 5141 /*EMPTY*/ 5142 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 5143 } 5144 return (rval); 5145 } 5146 5147 /* 5148 * ql_flash_fcode_dump 5149 * Dumps FLASH to application. 5150 * 5151 * Input: 5152 * ha: adapter state pointer. 5153 * bp: user buffer address. 5154 * bsize: user buffer size 5155 * faddr: flash byte address 5156 * mode: flags 5157 * 5158 * Returns: 5159 * 5160 * Context: 5161 * Kernel context. 5162 */ 5163 static int 5164 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize, 5165 uint32_t faddr, int mode) 5166 { 5167 uint8_t *bfp; 5168 int rval; 5169 ql_xioctl_t *xp = ha->xioctl; 5170 5171 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5172 5173 /* adjust max read size to flash size */ 5174 if (bsize > xp->fdesc.flash_size) { 5175 EL(ha, "adjusting req=%xh, max=%xh\n", bsize, 5176 xp->fdesc.flash_size); 5177 bsize = xp->fdesc.flash_size; 5178 } 5179 5180 if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) { 5181 EL(ha, "failed, kmem_zalloc\n"); 5182 rval = ENOMEM; 5183 } else { 5184 /* Dump Flash fcode. 
		 */
		rval = ql_dump_fcode(ha, bfp, bsize, faddr);

		if (rval != QL_SUCCESS) {
			EL(ha, "failed, dump_fcode = %x\n", rval);
			rval = EFAULT;
		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
			EL(ha, "failed, ddi_copyout\n");
			rval = EFAULT;
		} else {
			rval = 0;
		}
		kmem_free(bfp, bsize);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_dump_fcode
 *	Dumps fcode from flash.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	dp:		data pointer.
 *	size:		data length in bytes.
 *	startpos:	starting position in flash (byte address).
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 *
 */
int
ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
    uint32_t startpos)
{
	uint32_t	cnt, data, addr;
	uint8_t		bp[4], *src;
	int		fp_rval, rval = QL_SUCCESS;
	dma_mem_t	mem;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* make sure startpos+size doesn't exceed flash */
	if (size + startpos > ha->xioctl->fdesc.flash_size) {
		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
		    size, startpos, ha->xioctl->fdesc.flash_size);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* check start addr is 32 bit aligned for 24xx */
		if ((startpos & 0x3) != 0) {
			/* Read the containing 32-bit word... */
			rval = ql_24xx_read_flash(ha,
			    ha->flash_data_addr | startpos >> 2, &data);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed2, rval = %xh\n", rval);
				return (rval);
			}
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			/* ...and consume bytes until word-aligned. */
			while (size && startpos & 0x3) {
				*dp++ = bp[startpos & 0x3];
				startpos++;
				size--;
			}
			if (size == 0) {
				QL_PRINT_9(CE_CONT, "(%d): done2\n",
				    ha->instance);
				return (rval);
			}
		}

		/* adjust 24xx start addr for 32 bit words */
		addr = startpos / 4 | ha->flash_data_addr;
	}

	bzero(&mem, sizeof (dma_mem_t));
	/* Check for Fast page is supported */
	if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
	    (CFG_IST(ha, CFG_CTRL_2581))) {
		fp_rval = QL_SUCCESS;
		/* Setup DMA buffer. */
		rval = ql_get_dma_mem(ha, &mem, size,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, ql_get_dma_mem=%xh\n",
			    rval);
			return (ENOMEM);
		}
	} else {
		fp_rval = QL_NOT_SUPPORTED;
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_enable(ha);
	}

	/* Read fcode data from flash. */
	while (size) {
		/* Allow other system activity. */
		if (size % 0x1000 == 0) {
			ql_delay(ha, 100000);
		}
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/*
			 * Prefer the fast-page DMA read; fall back to
			 * word-at-a-time flash reads if it fails.
			 */
			if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
				cnt = (size + 3) >> 2;
				fp_rval = ql_rd_risc_ram(ha, addr,
				    mem.cookie.dmac_laddress, cnt);
				if (fp_rval == QL_SUCCESS) {
					for (src = mem.bp; size; size--) {
						*dp++ = *src++;
					}
					addr += cnt;
					continue;
				}
			}
			rval = ql_24xx_read_flash(ha, addr++,
			    &data);
			if (rval != QL_SUCCESS) {
				break;
			}
			/* Unpack the 32-bit word into bytes, LSB first. */
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			for (cnt = 0; size && cnt < 4; size--) {
				*dp++ = bp[cnt++];
			}
		} else {
			/* Legacy parts: byte-wide reads. */
			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
			size--;
		}
	}

	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_disable(ha);
	}

	GLOBAL_HW_UNLOCK();

	if (mem.dma_handle != NULL) {
		ql_free_dma_resource(ha, &mem);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n",
		    rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_program_flash_address
 *	Program flash address.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	flash byte address.
 *	data:	data to be written to flash.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
    uint8_t data)
{
	int rval;

	/* Write Program Command Sequence */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* SBUS cards need only the program command (0xa0). */
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	} else {
		/* JEDEC unlock (0xaa/0x55) then program (0xa0). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xa0);
		ql_write_flash_byte(ha, addr, data);
	}

	/* Wait for write to complete. */
	rval = ql_poll_flash(ha, addr, data);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval=%xh\n", rval);
	}
	return (rval);
}

/*
 * ql_set_rnid_parameters
 *	Set RNID parameters.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	User space CT arguments pointer.
 *	mode:	flags.
 */
static void
ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_SET_RNID_REQ	tmp_set;
	EXT_RNID_DATA		*tmp_buf;
	int			rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	cmd->ResponseLen = 0; /* NO response to caller.
			       */
	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
		/* parameter error */
		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
		    cmd->RequestLen);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
	    cmd->RequestLen, mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate memory for command. */
	/* NOTE: kmem_zalloc with KM_SLEEP cannot return NULL on illumos. */
	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
	if (tmp_buf == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Read-modify-write: fetch current RNID data first. */
	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
	    (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Now set the requested params. */
	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);

	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
	    (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
	}

	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_rnid_parameters
 *	Get RNID parameters.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	User space CT arguments pointer.
 *	mode:	flags.
 */
static void
ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_RNID_DATA	*tmp_buf;
	uint32_t	rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate memory for command. */
	/* NOTE: kmem_zalloc with KM_SLEEP cannot return NULL on illumos. */
	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
	if (tmp_buf == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Send command */
	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
	    (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy the response */
	if (ql_send_buffer_data((caddr_t)tmp_buf,
	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
	}

	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
}

/*
 * ql_reset_statistics
 *	Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *
 * Returns:
 *	ql local function return status code; request status is also
 *	indicated in cmd->Status.  (Comment previously said "None" although
 *	the function returns an int.)
 *
 * Context:
 *	Kernel context.
 */
static int
ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return (QL_FUNCTION_SUSPENDED);
	}

	/* Reset the chip's link status counters first. */
	rval = ql_reset_link_status(ha);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	}

	/* I/O counters are owned by the task daemon lock. */
	TASK_DAEMON_LOCK(ha);
	xp->IosRequested = 0;
	xp->BytesRequested = 0;
	xp->IOInputRequests = 0;
	xp->IOOutputRequests = 0;
	xp->IOControlRequests = 0;
	xp->IOInputMByteCnt = 0;
	xp->IOOutputMByteCnt = 0;
	xp->IOOutputByteCnt = 0;
	xp->IOInputByteCnt = 0;
	TASK_DAEMON_UNLOCK(ha);

	/* Error/interrupt counters are owned by the interrupt lock. */
	INTR_LOCK(ha);
	xp->ControllerErrorCount = 0;
	xp->DeviceErrorCount = 0;
	xp->TotalLipResets = 0;
	xp->TotalInterrupts = 0;
	INTR_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_get_statistics
 *	Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_PORT_STAT	ps = {0};
	ql_link_stats_t		*ls;
	int			rval;
	ql_xioctl_t		*xp = ha->xioctl;
	int			retry = 10;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for any reset/resync/stall activity to finish. */
	while (ha->task_daemon_flags &
	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
		ql_delay(ha, 10000000);	/* 10 second delay */

		retry--;

		if (retry == 0) { /* effectively 100 seconds */
			EL(ha, "failed, LOOP_NOT_READY\n");
			cmd->Status = EXT_STATUS_BUSY;
			cmd->ResponseLen = 0;
			return;
		}
	}

	/* Allocate memory for command. */
	/* NOTE: kmem_zalloc with KM_SLEEP cannot return NULL on illumos. */
	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
	if (ls == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * I think these are supposed to be port statistics
	 * the loop ID or port ID should be in cmd->Instance.
	 */
	rval = ql_get_status_counts(ha, (uint16_t)
	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
		    ha->loop_id);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else {
		/* Merge driver-kept counters with chip link statistics. */
		ps.ControllerErrorCount = xp->ControllerErrorCount;
		ps.DeviceErrorCount = xp->DeviceErrorCount;
		ps.IoCount = (uint32_t)(xp->IOInputRequests +
		    xp->IOOutputRequests + xp->IOControlRequests);
		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
		    xp->IOOutputMByteCnt);
		ps.LipResetCount = xp->TotalLipResets;
		ps.InterruptCount = xp->TotalInterrupts;
		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);

		rval = ddi_copyout((void *)&ps,
		    (void *)(uintptr_t)cmd->ResponseAdr,
		    sizeof (EXT_HBA_PORT_STAT), mode);
		if (rval != 0) {
			EL(ha, "failed, ddi_copyout\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		} else {
			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
		}
	}

	kmem_free(ls, sizeof (ql_link_stats_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_statistics_fc
 *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_PORT_STAT	ps = {0};
	ql_link_stats_t		*ls;
	int			rval;
	uint16_t		qlnt;
	EXT_DEST_ADDR		pextdestaddr;
	uint8_t			*name;
	ql_tgt_t		*tq = NULL;
	int			retry = 10;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Fetch the destination address (WWPN) from the caller. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	qlnt = QLNT_PORT;
	name = pextdestaddr.DestAddr.WWPN;

	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
	    ha->instance, name[0], name[1], name[2], name[3], name[4],
	    name[5], name[6], name[7]);

	/* Locate the target queue for this port WWN. */
	tq = ql_find_port(ha, name, qlnt);

	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		EL(ha, "failed, fc_port not found\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Wait out any ISP abort, loop resync, or driver stall; give up
	 * as BUSY after 10 tries of 10 seconds each.
	 */
	while (ha->task_daemon_flags &
	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
		ql_delay(ha, 10000000);	/* 10 second delay */

		retry--;

		if (retry == 0) { /* effectively 100 seconds */
			EL(ha, "failed, LOOP_NOT_READY\n");
			cmd->Status = EXT_STATUS_BUSY;
			cmd->ResponseLen = 0;
			return;
		}
	}

	/* Allocate memory for command. */
	/* NOTE(review): KM_SLEEP allocations cannot fail; check is vestigial. */
	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
	if (ls == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
	    (caddr_t)ls, 0);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
		    tq->d_id.b24);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else {
		/* Firmware link-error counters; convert from little-endian. */
		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);

		rval = ddi_copyout((void *)&ps,
		    (void *)(uintptr_t)cmd->ResponseAdr,
		    sizeof (EXT_HBA_PORT_STAT), mode);

		if (rval != 0) {
			EL(ha, "failed, ddi_copyout\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		} else {
			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
		}
	}

	kmem_free(ls, sizeof (ql_link_stats_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_statistics_fc4
 *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
5813 */ 5814 static void 5815 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 5816 { 5817 uint32_t rval; 5818 EXT_HBA_FC4STATISTICS fc4stats = {0}; 5819 ql_xioctl_t *xp = ha->xioctl; 5820 5821 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5822 5823 fc4stats.InputRequests = xp->IOInputRequests; 5824 fc4stats.OutputRequests = xp->IOOutputRequests; 5825 fc4stats.ControlRequests = xp->IOControlRequests; 5826 fc4stats.InputMegabytes = xp->IOInputMByteCnt; 5827 fc4stats.OutputMegabytes = xp->IOOutputMByteCnt; 5828 5829 rval = ddi_copyout((void *)&fc4stats, 5830 (void *)(uintptr_t)cmd->ResponseAdr, 5831 sizeof (EXT_HBA_FC4STATISTICS), mode); 5832 5833 if (rval != 0) { 5834 EL(ha, "failed, ddi_copyout\n"); 5835 cmd->Status = EXT_STATUS_COPY_ERR; 5836 cmd->ResponseLen = 0; 5837 } else { 5838 cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS); 5839 } 5840 5841 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 5842 } 5843 5844 /* 5845 * ql_set_led_state 5846 * Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA. 5847 * 5848 * Input: 5849 * ha: adapter state pointer. 5850 * cmd: Local EXT_IOCTL cmd struct pointer. 5851 * mode: flags. 5852 * 5853 * Returns: 5854 * None, request status indicated in cmd->Status. 5855 * 5856 * Context: 5857 * Kernel context. 
5858 */ 5859 static void 5860 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 5861 { 5862 EXT_BEACON_CONTROL bstate; 5863 uint32_t rval; 5864 ql_xioctl_t *xp = ha->xioctl; 5865 5866 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5867 5868 if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) { 5869 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 5870 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL); 5871 EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL," 5872 " Len=%xh\n", cmd->RequestLen); 5873 cmd->ResponseLen = 0; 5874 return; 5875 } 5876 5877 if (ha->device_id < 0x2300) { 5878 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 5879 cmd->DetailStatus = 0; 5880 EL(ha, "done - failed, Invalid function for HBA model\n"); 5881 cmd->ResponseLen = 0; 5882 return; 5883 } 5884 5885 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate, 5886 cmd->RequestLen, mode); 5887 5888 if (rval != 0) { 5889 cmd->Status = EXT_STATUS_COPY_ERR; 5890 EL(ha, "done - failed, ddi_copyin\n"); 5891 return; 5892 } 5893 5894 switch (bstate.State) { 5895 case EXT_DEF_GRN_BLINK_OFF: /* turn beacon off */ 5896 if (xp->ledstate.BeaconState == BEACON_OFF) { 5897 /* not quite an error -- LED state is already off */ 5898 cmd->Status = EXT_STATUS_OK; 5899 EL(ha, "LED off request -- LED is already off\n"); 5900 break; 5901 } 5902 5903 xp->ledstate.BeaconState = BEACON_OFF; 5904 xp->ledstate.LEDflags = LED_ALL_OFF; 5905 5906 if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) { 5907 cmd->Status = EXT_STATUS_MAILBOX; 5908 } else { 5909 cmd->Status = EXT_STATUS_OK; 5910 } 5911 break; 5912 5913 case EXT_DEF_GRN_BLINK_ON: /* turn beacon on */ 5914 if (xp->ledstate.BeaconState == BEACON_ON) { 5915 /* not quite an error -- LED state is already on */ 5916 cmd->Status = EXT_STATUS_OK; 5917 EL(ha, "LED on request - LED is already on\n"); 5918 break; 5919 } 5920 5921 if ((rval = ql_setup_led(ha)) != QL_SUCCESS) { 5922 cmd->Status = EXT_STATUS_MAILBOX; 5923 break; 5924 } 5925 5926 if 
(CFG_IST(ha, CFG_CTRL_24258081)) { 5927 xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24; 5928 } else { 5929 xp->ledstate.LEDflags = LED_GREEN; 5930 } 5931 xp->ledstate.BeaconState = BEACON_ON; 5932 5933 cmd->Status = EXT_STATUS_OK; 5934 break; 5935 default: 5936 cmd->Status = EXT_STATUS_ERR; 5937 EL(ha, "failed, unknown state request %xh\n", bstate.State); 5938 break; 5939 } 5940 5941 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 5942 } 5943 5944 /* 5945 * ql_get_led_state 5946 * Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA. 5947 * 5948 * Input: 5949 * ha: adapter state pointer. 5950 * cmd: Local EXT_IOCTL cmd struct pointer. 5951 * mode: flags. 5952 * 5953 * Returns: 5954 * None, request status indicated in cmd->Status. 5955 * 5956 * Context: 5957 * Kernel context. 5958 */ 5959 static void 5960 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 5961 { 5962 EXT_BEACON_CONTROL bstate = {0}; 5963 uint32_t rval; 5964 ql_xioctl_t *xp = ha->xioctl; 5965 5966 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 5967 5968 if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) { 5969 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 5970 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL); 5971 EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL," 5972 "Len=%xh\n", cmd->ResponseLen); 5973 cmd->ResponseLen = 0; 5974 return; 5975 } 5976 5977 if (ha->device_id < 0x2300) { 5978 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 5979 cmd->DetailStatus = 0; 5980 EL(ha, "done - failed, Invalid function for HBA model\n"); 5981 cmd->ResponseLen = 0; 5982 return; 5983 } 5984 5985 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) { 5986 cmd->Status = EXT_STATUS_BUSY; 5987 EL(ha, "done - failed, isp abort active\n"); 5988 cmd->ResponseLen = 0; 5989 return; 5990 } 5991 5992 /* inform the user of the current beacon state (off or on) */ 5993 bstate.State = xp->ledstate.BeaconState; 5994 5995 rval = ddi_copyout((void *)&bstate, 5996 (void 
*)(uintptr_t)cmd->ResponseAdr, 5997 sizeof (EXT_BEACON_CONTROL), mode); 5998 5999 if (rval != 0) { 6000 EL(ha, "failed, ddi_copyout\n"); 6001 cmd->Status = EXT_STATUS_COPY_ERR; 6002 cmd->ResponseLen = 0; 6003 } else { 6004 cmd->Status = EXT_STATUS_OK; 6005 cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL); 6006 } 6007 6008 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6009 } 6010 6011 /* 6012 * ql_blink_led 6013 * Determine the next state of the LED and drive it 6014 * 6015 * Input: 6016 * ha: adapter state pointer. 6017 * 6018 * Context: 6019 * Interrupt context. 6020 */ 6021 void 6022 ql_blink_led(ql_adapter_state_t *ha) 6023 { 6024 uint32_t nextstate; 6025 ql_xioctl_t *xp = ha->xioctl; 6026 6027 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 6028 6029 if (xp->ledstate.BeaconState == BEACON_ON) { 6030 /* determine the next led state */ 6031 if (CFG_IST(ha, CFG_CTRL_24258081)) { 6032 nextstate = (xp->ledstate.LEDflags) & 6033 (~(RD32_IO_REG(ha, gpiod))); 6034 } else { 6035 nextstate = (xp->ledstate.LEDflags) & 6036 (~(RD16_IO_REG(ha, gpiod))); 6037 } 6038 6039 /* turn the led on or off */ 6040 ql_drive_led(ha, nextstate); 6041 } 6042 6043 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6044 } 6045 6046 /* 6047 * ql_drive_led 6048 * drive the led's as determined by LEDflags 6049 * 6050 * Input: 6051 * ha: adapter state pointer. 6052 * LEDflags: LED flags 6053 * 6054 * Context: 6055 * Kernel/Interrupt context. 
 */
static void
ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
{

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {

		uint16_t	gpio_enable, gpio_data;

		/* setup to send new data */
		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
		WRT16_IO_REG(ha, gpioe, gpio_enable);

		/* read current data and clear out old led data */
		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);

		/* set in the new led data. */
		gpio_data = (uint16_t)(gpio_data | LEDflags);

		/* write out the new led data */
		WRT16_IO_REG(ha, gpiod, gpio_data);

	} else if (CFG_IST(ha, CFG_CTRL_24258081)) {

		uint32_t	gpio_data;

		/* setup to send new data */
		/* 24xx+ parts use a 32-bit gpiod with an update-enable mask. */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data |= LED_MASK_UPDATE_24;
		WRT32_IO_REG(ha, gpiod, gpio_data);

		/* read current data and clear out old led data */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data &= ~LED_MASK_COLORS_24;

		/* set in the new led data */
		gpio_data |= LEDflags;

		/* write out the new led data */
		WRT32_IO_REG(ha, gpiod, gpio_data);

	} else {
		EL(ha, "unsupported HBA: %xh", ha->device_id);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_setup_led
 *	Setup LED for driver control
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Kernel/Interrupt context.
 */
static uint32_t
ql_setup_led(ql_adapter_state_t *ha)
{
	uint32_t	rval;
	ql_mbx_data_t	mr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* decouple the LED control from the fw */
	/* Read-modify-write of the firmware options via mailbox. */
	rval = ql_get_firmware_option(ha, &mr);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, get_firmware_option=%xh\n", rval);
		return (rval);
	}

	/* set the appropriate options */
	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);

	/* send it back to the firmware */
	rval = ql_set_firmware_option(ha, &mr);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, set_firmware_option=%xh\n", rval);
		return (rval);
	}

	/* initally, turn the LED's off */
	ql_drive_led(ha, LED_ALL_OFF);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_wrapup_led
 *	Return LED control to the firmware
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Kernel/Interrupt context.
 */
static uint32_t
ql_wrapup_led(ql_adapter_state_t *ha)
{
	uint32_t	rval;
	ql_mbx_data_t	mr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Turn all LED's off */
	ql_drive_led(ha, LED_ALL_OFF);

	if (CFG_IST(ha, CFG_CTRL_24258081)) {

		uint32_t	gpio_data;

		/* disable the LED update mask */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data &= ~LED_MASK_UPDATE_24;

		/* write out the data */
		WRT32_IO_REG(ha, gpiod, gpio_data);
	}

	/* give LED control back to the f/w */
	/* Clear FO1_DISABLE_GPIO via mailbox read-modify-write. */
	rval = ql_get_firmware_option(ha, &mr);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, get_firmware_option=%xh\n", rval);
		return (rval);
	}

	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);

	rval = ql_set_firmware_option(ha, &mr);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, set_firmware_option=%xh\n", rval);
		return (rval);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}

/*
 * ql_get_port_summary
 *	Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
 *
 * The EXT_IOCTL->RequestAdr points to a single
 * UINT32 which identifies the device type.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DEVICEDATA		dd = {0};
	EXT_DEVICEDATA		*uddp;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint32_t		rlen, dev_type, index;
	int			rval = 0;
	EXT_DEVICEDATAENTRY	*uddep, *ddep;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* ddep is a kernel-side staging entry; copied out one at a time. */
	ddep = &dd.EntryList[0];

	/*
	 * Get the type of device the requestor is looking for.
	 *
	 * We ignore this for now.
	 */
	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&dev_type, sizeof (dev_type), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyin\n");
		return;
	}
	/*
	 * Count the number of entries to be returned. Count devices
	 * that are offlline, but have been persistently bound.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}
			dd.TotalDevices++;
		}
	}
	/*
	 * Compute the number of entries that can be returned
	 * based upon the size of caller's response buffer.
	 */
	dd.ReturnListEntryCount = 0;
	if (dd.TotalDevices == 0) {
		/* EXT_DEVICEDATA embeds one entry; back it out when empty. */
		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
	} else {
		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
	}
	if (rlen > cmd->ResponseLen) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = rlen;
		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
		    rlen, cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = 0;
	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
	uddep = &uddp->EntryList[0];
	/* Second pass: copy one entry per qualifying device to the user. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}

			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));

			bcopy(tq->node_name, ddep->NodeWWN, 8);
			bcopy(tq->port_name, ddep->PortWWN, 8);

			ddep->PortID[0] = tq->d_id.b.domain;
			ddep->PortID[1] = tq->d_id.b.area;
			ddep->PortID[2] = tq->d_id.b.al_pa;

			bcopy(tq->port_name,
			    (caddr_t)&ddep->TargetAddress.Target, 8);

			ddep->DeviceFlags = tq->flags;
			ddep->LoopID = tq->loop_id;
			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    ha->instance, ddep->TargetAddress.Target,
			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
			    ddep->NodeWWN[2], ddep->NodeWWN[3],
			    ddep->NodeWWN[4], ddep->NodeWWN[5],
			    ddep->NodeWWN[6], ddep->NodeWWN[7],
			    ddep->PortWWN[0], ddep->PortWWN[1],
			    ddep->PortWWN[2], ddep->PortWWN[3],
			    ddep->PortWWN[4], ddep->PortWWN[5],
			    ddep->PortWWN[6], ddep->PortWWN[7]);
			rval = ddi_copyout((void *)ddep, (void *)uddep,
			    sizeof (EXT_DEVICEDATAENTRY), mode);

			if (rval != 0) {
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
				break;
			}
			dd.ReturnListEntryCount++;
			uddep++;
			cmd->ResponseLen += (uint32_t)
			    sizeof (EXT_DEVICEDATAENTRY);
		}
	}
	/* Finally copy out the header (counts) ahead of the entries. */
	rval = ddi_copyout((void *)&dd, (void *)uddp,
	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);

	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout-2\n");
	} else {
		/*
		 * NOTE(review): this adds sizeof (EXT_DEVICEDATAENTRY) for a
		 * header copy of a different size -- looks intentional only
		 * if the two sizes match; confirm against EXT_DEVICEDATA.
		 */
		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_get_target_id
 *	Performs EXT_SC_GET_TARGET_ID subcommand. of EXT_CC_GET_DATA.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
6364 */ 6365 static void 6366 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 6367 { 6368 uint32_t rval; 6369 uint16_t qlnt; 6370 EXT_DEST_ADDR extdestaddr = {0}; 6371 uint8_t *name; 6372 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE]; 6373 ql_tgt_t *tq; 6374 6375 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 6376 6377 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, 6378 (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) { 6379 EL(ha, "failed, ddi_copyin\n"); 6380 cmd->Status = EXT_STATUS_COPY_ERR; 6381 cmd->ResponseLen = 0; 6382 return; 6383 } 6384 6385 qlnt = QLNT_PORT; 6386 name = wwpn; 6387 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 6388 ha->instance, name[0], name[1], name[2], name[3], name[4], 6389 name[5], name[6], name[7]); 6390 6391 tq = ql_find_port(ha, name, qlnt); 6392 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) { 6393 EL(ha, "failed, fc_port not found\n"); 6394 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 6395 cmd->ResponseLen = 0; 6396 return; 6397 } 6398 6399 bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8); 6400 6401 rval = ddi_copyout((void *)&extdestaddr, 6402 (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode); 6403 if (rval != 0) { 6404 EL(ha, "failed, ddi_copyout\n"); 6405 cmd->Status = EXT_STATUS_COPY_ERR; 6406 cmd->ResponseLen = 0; 6407 } 6408 6409 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 6410 } 6411 6412 /* 6413 * ql_setup_fcache 6414 * Populates selected flash sections into the cache 6415 * 6416 * Input: 6417 * ha = adapter state pointer. 6418 * 6419 * Returns: 6420 * ql local function return status code. 6421 * 6422 * Context: 6423 * Kernel context. 
 *
 * Note:
 *	Driver must be in stalled state prior to entering or
 *	add code to this function prior to calling ql_setup_flash()
 */
int
ql_setup_fcache(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	freadpos = 0;
	uint32_t	fw_done = 0;
	ql_fcache_t	*head = NULL;
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	CACHE_LOCK(ha);

	/* If we already have populated it, rtn */
	if (ha->fcache != NULL) {
		CACHE_UNLOCK(ha);
		EL(ha, "buffer already populated\n");
		return (QL_SUCCESS);
	}

	ql_flash_nvram_defaults(ha);

	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "unable to setup flash; rval=%xh\n", rval);
		return (rval);
	}

	/* Walk the flash images; ql_check_pci() advances/ends freadpos. */
	while (freadpos != 0xffffffff) {
		/* Allocate & populate this node */
		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx/25xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				/* flash_fw_addr is in words; byte address. */
				freadpos = ha->flash_fw_addr << 2;
			}
			fw_done = 1;
		}

		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
		    freadpos)) != QL_SUCCESS) {
			EL(ha, "failed, 24xx dump_fcode"
			    " pos=%xh rval=%xh\n", freadpos, rval);
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/* release all resources we have */
		ftmp = head;
		while (ftmp != NULL) {
			tail = ftmp->next;
			kmem_free(ftmp->buf, FBUFSIZE);
			kmem_free(ftmp, sizeof (ql_fcache_t));
			ftmp = tail;
		}

		EL(ha, "failed, done\n");
	} else {
		ha->fcache = head;
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	CACHE_UNLOCK(ha);

	return (rval);
}

/*
 * ql_update_fcache
 *	re-populates updated flash into the fcache. If
 *	fcache does not exist (e.g., flash was empty/invalid on
 *	boot), this routine will create and the populate it.
 *
 * Input:
 *	ha = adapter state pointer.
 *	*bpf = Pointer to flash buffer.
 *	bsize = Size of flash buffer.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
void
ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
{
	int		rval = QL_SUCCESS;
	uint32_t	freadpos = 0;
	uint32_t	fw_done = 0;
	ql_fcache_t	*head = NULL;
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk the in-memory flash image; ql_check_pci() advances freadpos. */
	while (freadpos != 0xffffffff) {

		/* Allocate & populate this node */

		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				freadpos = ha->flash_fw_addr << 2;
			}
			fw_done = 1;
		}

		/* read in first FBUFSIZE bytes of this flash section */
		if (freadpos+FBUFSIZE > bsize) {
			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
			    freadpos, bsize);
			rval = QL_FUNCTION_FAILED;
			break;
		}
		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/*
		 * release all resources we have
		 */
		ql_fcache_rel(head);
		EL(ha, "failed, done\n");
	} else {
		/*
		 * Release previous fcache resources and update with new
		 */
		CACHE_LOCK(ha);
		ql_fcache_rel(ha->fcache);
		ha->fcache = head;
		CACHE_UNLOCK(ha);

		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
}

/*
 * ql_setup_fnode
 *	Allocates fcache node
 *
 * Input:
 *	ha = adapter state pointer.
 *	node = point to allocated fcache node (NULL = failed)
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 *
 * Note:
 *	Driver must be in stalled state prior to entering or
 *	add code to this function prior to calling ql_setup_flash()
 */
static ql_fcache_t *
ql_setup_fnode(ql_adapter_state_t *ha)
{
	ql_fcache_t	*fnode = NULL;

	/* NOTE(review): KM_SLEEP allocations cannot fail; NULL arms are dead. */
	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
	    KM_SLEEP))) == NULL) {
		EL(ha, "fnode alloc failed\n");
		fnode = NULL;
	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
	    KM_SLEEP))) == NULL) {
		EL(ha, "buf alloc failed\n");
		kmem_free(fnode, sizeof (ql_fcache_t));
		fnode = NULL;
	} else {
		fnode->buflen = FBUFSIZE;
	}

	return (fnode);
}

/*
 * ql_fcache_rel
 *	Releases the fcache resources
 *
 * Input:
 *	ha = adapter state pointer.
 *	head = Pointer to fcache linked list
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
6658 * 6659 */ 6660 void 6661 ql_fcache_rel(ql_fcache_t *head) 6662 { 6663 ql_fcache_t *ftmp = head; 6664 ql_fcache_t *tail; 6665 6666 /* release all resources we have */ 6667 while (ftmp != NULL) { 6668 tail = ftmp->next; 6669 kmem_free(ftmp->buf, FBUFSIZE); 6670 kmem_free(ftmp, sizeof (ql_fcache_t)); 6671 ftmp = tail; 6672 } 6673 } 6674 6675 /* 6676 * ql_update_flash_caches 6677 * Updates driver flash caches 6678 * 6679 * Input: 6680 * ha: adapter state pointer. 6681 * 6682 * Context: 6683 * Kernel context. 6684 */ 6685 static void 6686 ql_update_flash_caches(ql_adapter_state_t *ha) 6687 { 6688 uint32_t len; 6689 ql_link_t *link; 6690 ql_adapter_state_t *ha2; 6691 6692 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 6693 6694 /* Get base path length. */ 6695 for (len = (uint32_t)strlen(ha->devpath); len; len--) { 6696 if (ha->devpath[len] == ',' || 6697 ha->devpath[len] == '@') { 6698 break; 6699 } 6700 } 6701 6702 /* Reset fcache on all adapter instances. */ 6703 for (link = ql_hba.first; link != NULL; link = link->next) { 6704 ha2 = link->base_address; 6705 6706 if (strncmp(ha->devpath, ha2->devpath, len) != 0) { 6707 continue; 6708 } 6709 6710 CACHE_LOCK(ha2); 6711 ql_fcache_rel(ha2->fcache); 6712 ha2->fcache = NULL; 6713 6714 if (CFG_IST(ha, CFG_CTRL_24258081)) { 6715 if (ha2->vcache != NULL) { 6716 kmem_free(ha2->vcache, QL_24XX_VPD_SIZE); 6717 ha2->vcache = NULL; 6718 } 6719 } 6720 CACHE_UNLOCK(ha2); 6721 6722 (void) ql_setup_fcache(ha2); 6723 } 6724 6725 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 6726 } 6727 6728 /* 6729 * ql_get_fbuf 6730 * Search the fcache list for the type specified 6731 * 6732 * Input: 6733 * fptr = Pointer to fcache linked list 6734 * ftype = Type of image to be returned. 6735 * 6736 * Returns: 6737 * Pointer to ql_fcache_t. 6738 * NULL means not found. 6739 * 6740 * Context: 6741 * Kernel context. 
 *
 *
 */
ql_fcache_t *
ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
{
	while (fptr != NULL) {
		/* does this image meet criteria? */
		if (ftype & fptr->type) {
			break;
		}
		fptr = fptr->next;
	}
	return (fptr);
}

/*
 * ql_check_pci
 *
 *	checks the passed buffer for a valid pci signature and
 *	expected (and in range) pci length values.
 *
 *	For firmware type, a pci header is added since the image in
 *	the flash does not have one (!!!).
 *
 *	On successful pci check, nextpos adjusted to next pci header.
 *
 * Returns:
 *	-1 --> last pci image
 *	0 --> pci header valid
 *	1 --> pci header invalid.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
{
	pci_header_t	*pcih;
	pci_data_t	*pcid;
	uint32_t	doff;
	uint8_t		*pciinfo;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (fcache != NULL) {
		pciinfo = fcache->buf;
	} else {
		EL(ha, "failed, null fcache ptr passed\n");
		return (1);
	}

	if (pciinfo == NULL) {
		EL(ha, "failed, null pciinfo ptr passed\n");
		return (1);
	}

	/* SBUS cards carry fcode, not PCI images; version from a property. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		caddr_t	bufp;
		uint_t	len;

		if (pciinfo[0] != SBUS_CODE_FCODE) {
			EL(ha, "failed, unable to detect sbus fcode\n");
			return (1);
		}
		fcache->type = FTYPE_FCODE;

		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
		    (int *)&len) == DDI_PROP_SUCCESS) {

			(void) snprintf(fcache->verstr,
			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
			kmem_free(bufp, len);
		}

		*nextpos = 0xffffffff;

		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
		    ha->instance);

		return (0);
	}

	/* Firmware image: synthesize a PCI header in front of the data. */
	if (*nextpos == ha->flash_fw_addr << 2) {

		pci_header_t	fwh = {0};
		pci_data_t	fwd = {0};
		uint8_t		*buf, *bufp;

		/*
		 * Build a pci header for the firmware module
		 */
		/* NOTE(review): KM_SLEEP cannot return NULL; check vestigial. */
		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
		    NULL) {
			EL(ha, "failed, unable to allocate buffer\n");
			return (1);
		}

		fwh.signature[0] = PCI_HEADER0;
		fwh.signature[1] = PCI_HEADER1;
		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));

		/* PCIR data-structure signature. */
		fwd.signature[0] = 'P';
		fwd.signature[1] = 'C';
		fwd.signature[2] = 'I';
		fwd.signature[3] = 'R';
		fwd.codetype = PCI_CODE_FW;
		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));

		/* Assemble header + data struct + firmware in a scratch buf. */
		bufp = buf;
		bcopy(&fwh, bufp, sizeof (pci_header_t));
		bufp += sizeof (pci_header_t);
		bcopy(&fwd, bufp, sizeof (pci_data_t));
		bufp += sizeof (pci_data_t);

		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
		    sizeof (pci_data_t)));
		bcopy(buf, fcache->buf, FBUFSIZE);

		fcache->type = FTYPE_FW;

		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
		    fcache->buf[27]);

		/* Per-chip location of the next image after the firmware. */
		if (CFG_IST(ha, CFG_CTRL_81XX)) {
			*nextpos = 0x200000;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			*nextpos = 0x80000;
		} else {
			*nextpos = 0;
		}
		kmem_free(buf, FBUFSIZE);

		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);

		return (0);
	}

	/* get to the pci header image length */
	pcih = (pci_header_t *)pciinfo;

	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);

	/* some header section sanity check */
	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
		    pcih->signature[0], pcih->signature[1], doff);
		return (1);
	}

	pcid = (pci_data_t *)(pciinfo + doff);

	/* a slight sanity data section check */
	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
		EL(ha, "failed, data sig mismatch!\n");
		return (1);
	}

	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
		QL_PRINT_9(CE_CONT, "(%d): last image\n", ha->instance);
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* Flash layout table follows the last image. */
			ql_flash_layout_table(ha, *nextpos +
			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
			    8)) * PCI_SECTOR_SIZE);
			(void) ql_24xx_flash_desc(ha);
		}
		*nextpos = 0xffffffff;
	} else {
		/* adjust the next flash read start position */
		*nextpos += (pcid->imagelength[0] |
		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
	}

	/* Classify the image by its PCI code type. */
	switch (pcid->codetype) {
	case PCI_CODE_X86PC:
		fcache->type = FTYPE_BIOS;
		break;
	case PCI_CODE_FCODE:
		fcache->type = FTYPE_FCODE;
		break;
	case PCI_CODE_EFI:
		fcache->type = FTYPE_EFI;
		break;
	case PCI_CODE_HPPA:
		fcache->type = FTYPE_HPPA;
		break;
	default:
		fcache->type = FTYPE_UNKNOWN;
		break;
	}

	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}

/*
 * ql_flash_layout_table
 *	Obtains flash addresses from table
 *
 * Input:
 *	ha:		adapter state pointer.
 *	flt_paddr:	flash layout pointer address.
 *
 * Context:
 *	Kernel context.
6959 */ 6960 static void 6961 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr) 6962 { 6963 ql_flt_ptr_t *fptr; 6964 uint8_t *bp; 6965 int rval; 6966 uint32_t len, faddr, cnt; 6967 uint16_t chksum, w16; 6968 6969 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 6970 6971 /* Process flash layout table header */ 6972 len = sizeof (ql_flt_ptr_t); 6973 if ((bp = kmem_zalloc(len, KM_SLEEP)) == NULL) { 6974 EL(ha, "kmem_zalloc=null\n"); 6975 return; 6976 } 6977 6978 /* Process pointer to flash layout table */ 6979 if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) { 6980 EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr, 6981 rval); 6982 kmem_free(bp, len); 6983 return; 6984 } 6985 fptr = (ql_flt_ptr_t *)bp; 6986 6987 /* Verify pointer to flash layout table. */ 6988 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) { 6989 w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]); 6990 chksum += w16; 6991 } 6992 if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' || 6993 fptr->sig[2] != 'L' || fptr->sig[3] != 'T') { 6994 EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0], 6995 fptr->sig[1], fptr->sig[2], fptr->sig[3]); 6996 kmem_free(bp, len); 6997 return; 6998 } 6999 faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2], 7000 fptr->addr[3]); 7001 7002 kmem_free(bp, len); 7003 7004 ql_process_flt(ha, faddr); 7005 7006 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7007 } 7008 7009 /* 7010 * ql_process_flt 7011 * Obtains flash addresses from flash layout table 7012 * 7013 * Input: 7014 * ha: adapter state pointer. 7015 * faddr: flash layout table byte address. 7016 * 7017 * Context: 7018 * Kernel context. 
7019 */ 7020 static void 7021 ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr) 7022 { 7023 ql_flt_hdr_t *fhdr; 7024 ql_flt_region_t *frgn; 7025 uint8_t *bp, *eaddr, nv_rg, vpd_rg; 7026 int rval; 7027 uint32_t len, cnt, fe_addr; 7028 uint16_t chksum, w16; 7029 7030 QL_PRINT_9(CE_CONT, "(%d): started faddr=%xh\n", ha->instance, faddr); 7031 7032 /* Process flash layout table header */ 7033 if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) { 7034 EL(ha, "kmem_zalloc=null\n"); 7035 return; 7036 } 7037 fhdr = (ql_flt_hdr_t *)bp; 7038 7039 /* Process flash layout table. */ 7040 if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) != 7041 QL_SUCCESS) { 7042 EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval); 7043 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE); 7044 return; 7045 } 7046 7047 /* Verify flash layout table. */ 7048 len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) + 7049 sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t)); 7050 if (len > FLASH_LAYOUT_TABLE_SIZE) { 7051 chksum = 0xffff; 7052 } else { 7053 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) { 7054 w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]); 7055 chksum += w16; 7056 } 7057 } 7058 w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]); 7059 if (chksum != 0 || w16 != 1) { 7060 EL(ha, "table chksum=%xh, version=%d\n", chksum, w16); 7061 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE); 7062 return; 7063 } 7064 eaddr = bp + len; 7065 7066 /* Process Function/Port Configuration Map. 
*/ 7067 nv_rg = vpd_rg = 0; 7068 if (CFG_IST(ha, CFG_CTRL_8021)) { 7069 uint16_t i; 7070 uint8_t *mbp = eaddr; 7071 ql_fp_cfg_map_t *cmp = (ql_fp_cfg_map_t *)mbp; 7072 7073 len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0], 7074 cmp->hdr.len[1])); 7075 if (len > FLASH_LAYOUT_TABLE_SIZE) { 7076 chksum = 0xffff; 7077 } else { 7078 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) { 7079 w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt], 7080 mbp[cnt + 1]); 7081 chksum += w16; 7082 } 7083 } 7084 w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]); 7085 if (chksum != 0 || w16 != 1 || 7086 cmp->hdr.Signature[0] != 'F' || 7087 cmp->hdr.Signature[1] != 'P' || 7088 cmp->hdr.Signature[2] != 'C' || 7089 cmp->hdr.Signature[3] != 'M') { 7090 EL(ha, "cfg_map chksum=%xh, version=%d, " 7091 "sig=%c%c%c%c\n", chksum, w16, 7092 cmp->hdr.Signature[0], cmp->hdr.Signature[1], 7093 cmp->hdr.Signature[2], cmp->hdr.Signature[3]); 7094 } else { 7095 cnt = (uint16_t) 7096 (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0], 7097 cmp->hdr.NumberEntries[1])); 7098 /* Locate entry for function. 
*/ 7099 for (i = 0; i < cnt; i++) { 7100 if (cmp->cfg[i].FunctionType == FT_FC && 7101 cmp->cfg[i].FunctionNumber[0] == 7102 ha->function_number && 7103 cmp->cfg[i].FunctionNumber[1] == 0) { 7104 nv_rg = cmp->cfg[i].ConfigRegion; 7105 vpd_rg = cmp->cfg[i].VpdRegion; 7106 break; 7107 } 7108 } 7109 7110 if (nv_rg == 0 || vpd_rg == 0) { 7111 EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg, 7112 vpd_rg); 7113 nv_rg = vpd_rg = 0; 7114 } 7115 } 7116 } 7117 7118 /* Process flash layout table regions */ 7119 for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t)); 7120 (uint8_t *)frgn < eaddr; frgn++) { 7121 faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1], 7122 frgn->beg_addr[2], frgn->beg_addr[3]); 7123 faddr >>= 2; 7124 fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1], 7125 frgn->end_addr[2], frgn->end_addr[3]); 7126 fe_addr >>= 2; 7127 7128 switch (frgn->region) { 7129 case FLASH_8021_BOOTLOADER_REGION: 7130 ha->bootloader_addr = faddr; 7131 ha->bootloader_size = (fe_addr - faddr) + 1; 7132 QL_PRINT_9(CE_CONT, "(%d): bootloader_addr=%xh, " 7133 "size=%xh\n", ha->instance, faddr, 7134 ha->bootloader_size); 7135 break; 7136 case FLASH_FW_REGION: 7137 case FLASH_8021_FW_REGION: 7138 ha->flash_fw_addr = faddr; 7139 ha->flash_fw_size = (fe_addr - faddr) + 1; 7140 QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh, " 7141 "size=%xh\n", ha->instance, faddr, 7142 ha->flash_fw_size); 7143 break; 7144 case FLASH_GOLDEN_FW_REGION: 7145 case FLASH_8021_GOLDEN_FW_REGION: 7146 ha->flash_golden_fw_addr = faddr; 7147 QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n", 7148 ha->instance, faddr); 7149 break; 7150 case FLASH_8021_VPD_REGION: 7151 if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) { 7152 ha->flash_vpd_addr = faddr; 7153 QL_PRINT_9(CE_CONT, "(%d): 8021_flash_vpd_" 7154 "addr=%xh\n", ha->instance, faddr); 7155 } 7156 break; 7157 case FLASH_VPD_0_REGION: 7158 if (vpd_rg) { 7159 if (vpd_rg == FLASH_VPD_0_REGION) { 7160 ha->flash_vpd_addr = faddr; 7161 
QL_PRINT_9(CE_CONT, "(%d): vpd_rg " 7162 "flash_vpd_addr=%xh\n", 7163 ha->instance, faddr); 7164 } 7165 } else if (!(ha->flags & FUNCTION_1) && 7166 !(CFG_IST(ha, CFG_CTRL_8021))) { 7167 ha->flash_vpd_addr = faddr; 7168 QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh" 7169 "\n", ha->instance, faddr); 7170 } 7171 break; 7172 case FLASH_NVRAM_0_REGION: 7173 if (nv_rg) { 7174 if (nv_rg == FLASH_NVRAM_0_REGION) { 7175 ADAPTER_STATE_LOCK(ha); 7176 ha->flags &= ~FUNCTION_1; 7177 ADAPTER_STATE_UNLOCK(ha); 7178 ha->flash_nvram_addr = faddr; 7179 QL_PRINT_9(CE_CONT, "(%d): nv_rg " 7180 "flash_nvram_addr=%xh\n", 7181 ha->instance, faddr); 7182 } 7183 } else if (!(ha->flags & FUNCTION_1)) { 7184 ha->flash_nvram_addr = faddr; 7185 QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr=" 7186 "%xh\n", ha->instance, faddr); 7187 } 7188 break; 7189 case FLASH_VPD_1_REGION: 7190 if (vpd_rg) { 7191 if (vpd_rg == FLASH_VPD_1_REGION) { 7192 ha->flash_vpd_addr = faddr; 7193 QL_PRINT_9(CE_CONT, "(%d): vpd_rg " 7194 "flash_vpd_addr=%xh\n", 7195 ha->instance, faddr); 7196 } 7197 } else if (ha->flags & FUNCTION_1 && 7198 !(CFG_IST(ha, CFG_CTRL_8021))) { 7199 ha->flash_vpd_addr = faddr; 7200 QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh" 7201 "\n", ha->instance, faddr); 7202 } 7203 break; 7204 case FLASH_NVRAM_1_REGION: 7205 if (nv_rg) { 7206 if (nv_rg == FLASH_NVRAM_1_REGION) { 7207 ADAPTER_STATE_LOCK(ha); 7208 ha->flags |= FUNCTION_1; 7209 ADAPTER_STATE_UNLOCK(ha); 7210 ha->flash_nvram_addr = faddr; 7211 QL_PRINT_9(CE_CONT, "(%d): nv_rg " 7212 "flash_nvram_addr=%xh\n", 7213 ha->instance, faddr); 7214 } 7215 } else if (ha->flags & FUNCTION_1) { 7216 ha->flash_nvram_addr = faddr; 7217 QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr=" 7218 "%xh\n", ha->instance, faddr); 7219 } 7220 break; 7221 case FLASH_DESC_TABLE_REGION: 7222 if (!(CFG_IST(ha, CFG_CTRL_8021))) { 7223 ha->flash_desc_addr = faddr; 7224 QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr=" 7225 "%xh\n", ha->instance, faddr); 7226 } 7227 break; 7228 
case FLASH_ERROR_LOG_0_REGION: 7229 if (!(ha->flags & FUNCTION_1)) { 7230 ha->flash_errlog_start = faddr; 7231 QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr=" 7232 "%xh\n", ha->instance, faddr); 7233 } 7234 break; 7235 case FLASH_ERROR_LOG_1_REGION: 7236 if (ha->flags & FUNCTION_1) { 7237 ha->flash_errlog_start = faddr; 7238 QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr=" 7239 "%xh\n", ha->instance, faddr); 7240 } 7241 break; 7242 default: 7243 break; 7244 } 7245 } 7246 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE); 7247 7248 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7249 } 7250 7251 /* 7252 * ql_flash_nvram_defaults 7253 * Flash default addresses. 7254 * 7255 * Input: 7256 * ha: adapter state pointer. 7257 * 7258 * Returns: 7259 * ql local function return status code. 7260 * 7261 * Context: 7262 * Kernel context. 7263 */ 7264 static void 7265 ql_flash_nvram_defaults(ql_adapter_state_t *ha) 7266 { 7267 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7268 7269 if (ha->flags & FUNCTION_1) { 7270 if (CFG_IST(ha, CFG_CTRL_2300)) { 7271 ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR; 7272 ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR; 7273 } else if (CFG_IST(ha, CFG_CTRL_2422)) { 7274 ha->flash_data_addr = FLASH_24_25_DATA_ADDR; 7275 ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR; 7276 ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR; 7277 ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1; 7278 ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE; 7279 ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR; 7280 } else if (CFG_IST(ha, CFG_CTRL_25XX)) { 7281 ha->flash_data_addr = FLASH_24_25_DATA_ADDR; 7282 ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR; 7283 ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR; 7284 ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1; 7285 ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE; 7286 ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR; 7287 } else if (CFG_IST(ha, CFG_CTRL_81XX)) { 7288 ha->flash_data_addr = FLASH_8100_DATA_ADDR; 7289 ha->flash_nvram_addr = 
NVRAM_8100_FUNC1_ADDR; 7290 ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR; 7291 ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1; 7292 ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE; 7293 ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR; 7294 } else if (CFG_IST(ha, CFG_CTRL_8021)) { 7295 ha->flash_data_addr = 0; 7296 ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR; 7297 ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR; 7298 ha->flash_errlog_start = 0; 7299 ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE; 7300 ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR; 7301 ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE; 7302 ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR; 7303 ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE; 7304 } 7305 } else { 7306 if (CFG_IST(ha, CFG_CTRL_2200)) { 7307 ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR; 7308 ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR; 7309 } else if (CFG_IST(ha, CFG_CTRL_2300) || 7310 (CFG_IST(ha, CFG_CTRL_6322))) { 7311 ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR; 7312 ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR; 7313 } else if (CFG_IST(ha, CFG_CTRL_2422)) { 7314 ha->flash_data_addr = FLASH_24_25_DATA_ADDR; 7315 ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR; 7316 ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR; 7317 ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0; 7318 ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE; 7319 ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR; 7320 } else if (CFG_IST(ha, CFG_CTRL_25XX)) { 7321 ha->flash_data_addr = FLASH_24_25_DATA_ADDR; 7322 ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR; 7323 ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR; 7324 ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0; 7325 ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE; 7326 ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR; 7327 } else if (CFG_IST(ha, CFG_CTRL_81XX)) { 7328 ha->flash_data_addr = FLASH_8100_DATA_ADDR; 7329 ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR; 7330 ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR; 7331 
ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0; 7332 ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE; 7333 ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR; 7334 } else if (CFG_IST(ha, CFG_CTRL_8021)) { 7335 ha->flash_data_addr = 0; 7336 ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR; 7337 ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR; 7338 ha->flash_errlog_start = 0; 7339 ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE; 7340 ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR; 7341 ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE; 7342 ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR; 7343 ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE; 7344 } else { 7345 EL(ha, "unassigned flash fn0 addr: %x\n", 7346 ha->device_id); 7347 } 7348 } 7349 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7350 } 7351 7352 /* 7353 * ql_get_sfp 7354 * Returns sfp data to sdmapi caller 7355 * 7356 * Input: 7357 * ha: adapter state pointer. 7358 * cmd: Local EXT_IOCTL cmd struct pointer. 7359 * mode: flags. 7360 * 7361 * Returns: 7362 * None, request status indicated in cmd->Status. 7363 * 7364 * Context: 7365 * Kernel context. 
7366 */ 7367 static void 7368 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7369 { 7370 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7371 7372 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 7373 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7374 EL(ha, "failed, invalid request for HBA\n"); 7375 return; 7376 } 7377 7378 if (cmd->ResponseLen < QL_24XX_SFP_SIZE) { 7379 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 7380 cmd->DetailStatus = QL_24XX_SFP_SIZE; 7381 EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n", 7382 cmd->ResponseLen); 7383 return; 7384 } 7385 7386 /* Dump SFP data in user buffer */ 7387 if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr), 7388 mode)) != 0) { 7389 cmd->Status = EXT_STATUS_COPY_ERR; 7390 EL(ha, "failed, copy error\n"); 7391 } else { 7392 cmd->Status = EXT_STATUS_OK; 7393 } 7394 7395 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7396 } 7397 7398 /* 7399 * ql_dump_sfp 7400 * Dumps SFP. 7401 * 7402 * Input: 7403 * ha: adapter state pointer. 7404 * bp: buffer address. 7405 * mode: flags 7406 * 7407 * Returns: 7408 * 7409 * Context: 7410 * Kernel context. 7411 */ 7412 static int 7413 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode) 7414 { 7415 dma_mem_t mem; 7416 uint32_t cnt; 7417 int rval2, rval = 0; 7418 uint32_t dxfer; 7419 7420 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7421 7422 /* Get memory for SFP. */ 7423 7424 if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA, 7425 QL_DMA_DATA_ALIGN)) != QL_SUCCESS) { 7426 EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2); 7427 return (ENOMEM); 7428 } 7429 7430 for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) { 7431 rval2 = ql_read_sfp(ha, &mem, 7432 (uint16_t)(cnt < 256 ? 
0xA0 : 0xA2), 7433 (uint16_t)(cnt & 0xff)); 7434 if (rval2 != QL_SUCCESS) { 7435 EL(ha, "failed, read_sfp=%xh\n", rval2); 7436 rval = EFAULT; 7437 break; 7438 } 7439 7440 /* copy the data back */ 7441 if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size, 7442 mode)) != mem.size) { 7443 /* ddi copy error */ 7444 EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer); 7445 rval = EFAULT; 7446 break; 7447 } 7448 7449 /* adjust the buffer pointer */ 7450 bp = (caddr_t)bp + mem.size; 7451 } 7452 7453 ql_free_phys(ha, &mem); 7454 7455 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7456 7457 return (rval); 7458 } 7459 7460 /* 7461 * ql_port_param 7462 * Retrieves or sets the firmware port speed settings 7463 * 7464 * Input: 7465 * ha: adapter state pointer. 7466 * cmd: Local EXT_IOCTL cmd struct pointer. 7467 * mode: flags. 7468 * 7469 * Returns: 7470 * None, request status indicated in cmd->Status. 7471 * 7472 * Context: 7473 * Kernel context. 7474 * 7475 */ 7476 static void 7477 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7478 { 7479 uint8_t *name; 7480 ql_tgt_t *tq; 7481 EXT_PORT_PARAM port_param = {0}; 7482 uint32_t rval = QL_SUCCESS; 7483 uint32_t idma_rate; 7484 7485 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7486 7487 if (CFG_IST(ha, CFG_CTRL_242581) == 0) { 7488 EL(ha, "invalid request for this HBA\n"); 7489 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7490 cmd->ResponseLen = 0; 7491 return; 7492 } 7493 7494 if (LOOP_NOT_READY(ha)) { 7495 EL(ha, "failed, loop not ready\n"); 7496 cmd->Status = EXT_STATUS_DEVICE_OFFLINE; 7497 cmd->ResponseLen = 0; 7498 return; 7499 } 7500 7501 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, 7502 (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) { 7503 EL(ha, "failed, ddi_copyin\n"); 7504 cmd->Status = EXT_STATUS_COPY_ERR; 7505 cmd->ResponseLen = 0; 7506 return; 7507 } 7508 7509 if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) { 7510 EL(ha, "Unsupported dest lookup type: %xh\n", 7511 
port_param.FCScsiAddr.DestType); 7512 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 7513 cmd->ResponseLen = 0; 7514 return; 7515 } 7516 7517 name = port_param.FCScsiAddr.DestAddr.WWPN; 7518 7519 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n", 7520 ha->instance, name[0], name[1], name[2], name[3], name[4], 7521 name[5], name[6], name[7]); 7522 7523 tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT); 7524 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) { 7525 EL(ha, "failed, fc_port not found\n"); 7526 cmd->Status = EXT_STATUS_DEV_NOT_FOUND; 7527 cmd->ResponseLen = 0; 7528 return; 7529 } 7530 7531 cmd->Status = EXT_STATUS_OK; 7532 cmd->DetailStatus = EXT_STATUS_OK; 7533 7534 switch (port_param.Mode) { 7535 case EXT_IIDMA_MODE_GET: 7536 /* 7537 * Report the firmware's port rate for the wwpn 7538 */ 7539 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate, 7540 port_param.Mode); 7541 7542 if (rval != QL_SUCCESS) { 7543 EL(ha, "iidma get failed: %xh\n", rval); 7544 cmd->Status = EXT_STATUS_MAILBOX; 7545 cmd->DetailStatus = rval; 7546 cmd->ResponseLen = 0; 7547 } else { 7548 switch (idma_rate) { 7549 case IIDMA_RATE_1GB: 7550 port_param.Speed = 7551 EXT_DEF_PORTSPEED_1GBIT; 7552 break; 7553 case IIDMA_RATE_2GB: 7554 port_param.Speed = 7555 EXT_DEF_PORTSPEED_2GBIT; 7556 break; 7557 case IIDMA_RATE_4GB: 7558 port_param.Speed = 7559 EXT_DEF_PORTSPEED_4GBIT; 7560 break; 7561 case IIDMA_RATE_8GB: 7562 port_param.Speed = 7563 EXT_DEF_PORTSPEED_8GBIT; 7564 break; 7565 case IIDMA_RATE_10GB: 7566 port_param.Speed = 7567 EXT_DEF_PORTSPEED_10GBIT; 7568 break; 7569 default: 7570 port_param.Speed = 7571 EXT_DEF_PORTSPEED_UNKNOWN; 7572 EL(ha, "failed, Port speed rate=%xh\n", 7573 idma_rate); 7574 break; 7575 } 7576 7577 /* Copy back the data */ 7578 rval = ddi_copyout((void *)&port_param, 7579 (void *)(uintptr_t)cmd->ResponseAdr, 7580 sizeof (EXT_PORT_PARAM), mode); 7581 7582 if (rval != 0) { 7583 cmd->Status = EXT_STATUS_COPY_ERR; 7584 cmd->ResponseLen = 0; 7585 EL(ha, 
"failed, ddi_copyout\n"); 7586 } else { 7587 cmd->ResponseLen = (uint32_t) 7588 sizeof (EXT_PORT_PARAM); 7589 } 7590 } 7591 break; 7592 7593 case EXT_IIDMA_MODE_SET: 7594 /* 7595 * Set the firmware's port rate for the wwpn 7596 */ 7597 switch (port_param.Speed) { 7598 case EXT_DEF_PORTSPEED_1GBIT: 7599 idma_rate = IIDMA_RATE_1GB; 7600 break; 7601 case EXT_DEF_PORTSPEED_2GBIT: 7602 idma_rate = IIDMA_RATE_2GB; 7603 break; 7604 case EXT_DEF_PORTSPEED_4GBIT: 7605 idma_rate = IIDMA_RATE_4GB; 7606 break; 7607 case EXT_DEF_PORTSPEED_8GBIT: 7608 idma_rate = IIDMA_RATE_8GB; 7609 break; 7610 case EXT_DEF_PORTSPEED_10GBIT: 7611 port_param.Speed = IIDMA_RATE_10GB; 7612 break; 7613 default: 7614 EL(ha, "invalid set iidma rate: %x\n", 7615 port_param.Speed); 7616 cmd->Status = EXT_STATUS_INVALID_PARAM; 7617 cmd->ResponseLen = 0; 7618 rval = QL_PARAMETER_ERROR; 7619 break; 7620 } 7621 7622 if (rval == QL_SUCCESS) { 7623 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate, 7624 port_param.Mode); 7625 if (rval != QL_SUCCESS) { 7626 EL(ha, "iidma set failed: %xh\n", rval); 7627 cmd->Status = EXT_STATUS_MAILBOX; 7628 cmd->DetailStatus = rval; 7629 cmd->ResponseLen = 0; 7630 } 7631 } 7632 break; 7633 default: 7634 EL(ha, "invalid mode specified: %x\n", port_param.Mode); 7635 cmd->Status = EXT_STATUS_INVALID_PARAM; 7636 cmd->ResponseLen = 0; 7637 cmd->DetailStatus = 0; 7638 break; 7639 } 7640 7641 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7642 } 7643 7644 /* 7645 * ql_get_fwexttrace 7646 * Dumps f/w extended trace buffer 7647 * 7648 * Input: 7649 * ha: adapter state pointer. 7650 * bp: buffer address. 7651 * mode: flags 7652 * 7653 * Returns: 7654 * 7655 * Context: 7656 * Kernel context. 
7657 */ 7658 /* ARGSUSED */ 7659 static void 7660 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7661 { 7662 int rval; 7663 caddr_t payload; 7664 7665 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7666 7667 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) { 7668 EL(ha, "invalid request for this HBA\n"); 7669 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7670 cmd->ResponseLen = 0; 7671 return; 7672 } 7673 7674 if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) || 7675 (ha->fwexttracebuf.bp == NULL)) { 7676 EL(ha, "f/w extended trace is not enabled\n"); 7677 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7678 cmd->ResponseLen = 0; 7679 return; 7680 } 7681 7682 if (cmd->ResponseLen < FWEXTSIZE) { 7683 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 7684 cmd->DetailStatus = FWEXTSIZE; 7685 EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n", 7686 cmd->ResponseLen, FWEXTSIZE); 7687 cmd->ResponseLen = 0; 7688 return; 7689 } 7690 7691 /* Time Stamp */ 7692 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP); 7693 if (rval != QL_SUCCESS) { 7694 EL(ha, "f/w extended trace insert" 7695 "time stamp failed: %xh\n", rval); 7696 cmd->Status = EXT_STATUS_ERR; 7697 cmd->ResponseLen = 0; 7698 return; 7699 } 7700 7701 /* Disable Tracing */ 7702 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE); 7703 if (rval != QL_SUCCESS) { 7704 EL(ha, "f/w extended trace disable failed: %xh\n", rval); 7705 cmd->Status = EXT_STATUS_ERR; 7706 cmd->ResponseLen = 0; 7707 return; 7708 } 7709 7710 /* Allocate payload buffer */ 7711 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP); 7712 if (payload == NULL) { 7713 EL(ha, "failed, kmem_zalloc\n"); 7714 cmd->Status = EXT_STATUS_NO_MEMORY; 7715 cmd->ResponseLen = 0; 7716 return; 7717 } 7718 7719 /* Sync DMA buffer. */ 7720 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0, 7721 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL); 7722 7723 /* Copy trace buffer data. 
*/ 7724 ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload, 7725 (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE, 7726 DDI_DEV_AUTOINCR); 7727 7728 /* Send payload to application. */ 7729 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr, 7730 cmd->ResponseLen, mode) != cmd->ResponseLen) { 7731 EL(ha, "failed, send_buffer_data\n"); 7732 cmd->Status = EXT_STATUS_COPY_ERR; 7733 cmd->ResponseLen = 0; 7734 } else { 7735 cmd->Status = EXT_STATUS_OK; 7736 } 7737 7738 kmem_free(payload, FWEXTSIZE); 7739 7740 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7741 } 7742 7743 /* 7744 * ql_get_fwfcetrace 7745 * Dumps f/w fibre channel event trace buffer 7746 * 7747 * Input: 7748 * ha: adapter state pointer. 7749 * bp: buffer address. 7750 * mode: flags 7751 * 7752 * Returns: 7753 * 7754 * Context: 7755 * Kernel context. 7756 */ 7757 /* ARGSUSED */ 7758 static void 7759 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7760 { 7761 int rval; 7762 caddr_t payload; 7763 7764 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7765 7766 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) { 7767 EL(ha, "invalid request for this HBA\n"); 7768 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7769 cmd->ResponseLen = 0; 7770 return; 7771 } 7772 7773 if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) || 7774 (ha->fwfcetracebuf.bp == NULL)) { 7775 EL(ha, "f/w FCE trace is not enabled\n"); 7776 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7777 cmd->ResponseLen = 0; 7778 return; 7779 } 7780 7781 if (cmd->ResponseLen < FWFCESIZE) { 7782 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 7783 cmd->DetailStatus = FWFCESIZE; 7784 EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n", 7785 cmd->ResponseLen, FWFCESIZE); 7786 cmd->ResponseLen = 0; 7787 return; 7788 } 7789 7790 /* Disable Tracing */ 7791 rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE); 7792 if (rval != QL_SUCCESS) { 7793 EL(ha, "f/w FCE trace disable failed: %xh\n", rval); 7794 cmd->Status = 
EXT_STATUS_ERR; 7795 cmd->ResponseLen = 0; 7796 return; 7797 } 7798 7799 /* Allocate payload buffer */ 7800 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP); 7801 if (payload == NULL) { 7802 EL(ha, "failed, kmem_zalloc\n"); 7803 cmd->Status = EXT_STATUS_NO_MEMORY; 7804 cmd->ResponseLen = 0; 7805 return; 7806 } 7807 7808 /* Sync DMA buffer. */ 7809 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0, 7810 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL); 7811 7812 /* Copy trace buffer data. */ 7813 ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload, 7814 (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE, 7815 DDI_DEV_AUTOINCR); 7816 7817 /* Send payload to application. */ 7818 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr, 7819 cmd->ResponseLen, mode) != cmd->ResponseLen) { 7820 EL(ha, "failed, send_buffer_data\n"); 7821 cmd->Status = EXT_STATUS_COPY_ERR; 7822 cmd->ResponseLen = 0; 7823 } else { 7824 cmd->Status = EXT_STATUS_OK; 7825 } 7826 7827 kmem_free(payload, FWFCESIZE); 7828 7829 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7830 } 7831 7832 /* 7833 * ql_get_pci_data 7834 * Retrieves pci config space data 7835 * 7836 * Input: 7837 * ha: adapter state pointer. 7838 * cmd: Local EXT_IOCTL cmd struct pointer. 7839 * mode: flags. 7840 * 7841 * Returns: 7842 * None, request status indicated in cmd->Status. 7843 * 7844 * Context: 7845 * Kernel context. 7846 * 7847 */ 7848 static void 7849 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7850 { 7851 uint8_t cap_ptr; 7852 uint8_t cap_id; 7853 uint32_t buf_size = 256; 7854 7855 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7856 7857 /* 7858 * First check the "Capabilities List" bit of the status register. 
7859 */ 7860 if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) { 7861 /* 7862 * Now get the capability pointer 7863 */ 7864 cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR); 7865 while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) { 7866 /* 7867 * Check for the pcie capability. 7868 */ 7869 cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr); 7870 if (cap_id == PCI_CAP_ID_PCI_E) { 7871 buf_size = 4096; 7872 break; 7873 } 7874 cap_ptr = (uint8_t)ql_pci_config_get8(ha, 7875 (cap_ptr + PCI_CAP_NEXT_PTR)); 7876 } 7877 } 7878 7879 if (cmd->ResponseLen < buf_size) { 7880 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 7881 cmd->DetailStatus = buf_size; 7882 EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n", 7883 cmd->ResponseLen); 7884 return; 7885 } 7886 7887 /* Dump PCI config data. */ 7888 if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr), 7889 buf_size, mode)) != 0) { 7890 cmd->Status = EXT_STATUS_COPY_ERR; 7891 cmd->DetailStatus = 0; 7892 EL(ha, "failed, copy err pci_dump\n"); 7893 } else { 7894 cmd->Status = EXT_STATUS_OK; 7895 cmd->DetailStatus = buf_size; 7896 } 7897 7898 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7899 } 7900 7901 /* 7902 * ql_pci_dump 7903 * Dumps PCI config data to application buffer. 7904 * 7905 * Input: 7906 * ha = adapter state pointer. 7907 * bp = user buffer address. 7908 * 7909 * Returns: 7910 * 7911 * Context: 7912 * Kernel context. 
7913 */ 7914 int 7915 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode) 7916 { 7917 uint32_t pci_os; 7918 uint32_t *ptr32, *org_ptr32; 7919 7920 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7921 7922 ptr32 = kmem_zalloc(pci_size, KM_SLEEP); 7923 if (ptr32 == NULL) { 7924 EL(ha, "failed kmem_zalloc\n"); 7925 return (ENOMEM); 7926 } 7927 7928 /* store the initial value of ptr32 */ 7929 org_ptr32 = ptr32; 7930 for (pci_os = 0; pci_os < pci_size; pci_os += 4) { 7931 *ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os); 7932 LITTLE_ENDIAN_32(ptr32); 7933 ptr32++; 7934 } 7935 7936 if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) != 7937 0) { 7938 EL(ha, "failed ddi_copyout\n"); 7939 kmem_free(org_ptr32, pci_size); 7940 return (EFAULT); 7941 } 7942 7943 QL_DUMP_9(org_ptr32, 8, pci_size); 7944 7945 kmem_free(org_ptr32, pci_size); 7946 7947 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 7948 7949 return (0); 7950 } 7951 7952 /* 7953 * ql_menlo_reset 7954 * Reset Menlo 7955 * 7956 * Input: 7957 * ha: adapter state pointer. 7958 * bp: buffer address. 7959 * mode: flags 7960 * 7961 * Returns: 7962 * 7963 * Context: 7964 * Kernel context. 7965 */ 7966 static void 7967 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 7968 { 7969 EXT_MENLO_RESET rst; 7970 ql_mbx_data_t mr; 7971 int rval; 7972 7973 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 7974 7975 if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) { 7976 EL(ha, "failed, invalid request for HBA\n"); 7977 cmd->Status = EXT_STATUS_INVALID_REQUEST; 7978 cmd->ResponseLen = 0; 7979 return; 7980 } 7981 7982 /* 7983 * TODO: only vp_index 0 can do this (?) 7984 */ 7985 7986 /* Verify the size of request structure. 
*/ 7987 if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) { 7988 /* Return error */ 7989 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen, 7990 sizeof (EXT_MENLO_RESET)); 7991 cmd->Status = EXT_STATUS_INVALID_PARAM; 7992 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 7993 cmd->ResponseLen = 0; 7994 return; 7995 } 7996 7997 /* Get reset request. */ 7998 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, 7999 (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) { 8000 EL(ha, "failed, ddi_copyin\n"); 8001 cmd->Status = EXT_STATUS_COPY_ERR; 8002 cmd->ResponseLen = 0; 8003 return; 8004 } 8005 8006 /* Wait for I/O to stop and daemon to stall. */ 8007 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) { 8008 EL(ha, "ql_stall_driver failed\n"); 8009 ql_restart_hba(ha); 8010 cmd->Status = EXT_STATUS_BUSY; 8011 cmd->ResponseLen = 0; 8012 return; 8013 } 8014 8015 rval = ql_reset_menlo(ha, &mr, rst.Flags); 8016 if (rval != QL_SUCCESS) { 8017 EL(ha, "failed, status=%xh\n", rval); 8018 cmd->Status = EXT_STATUS_MAILBOX; 8019 cmd->DetailStatus = rval; 8020 cmd->ResponseLen = 0; 8021 } else if (mr.mb[1] != 0) { 8022 EL(ha, "failed, substatus=%d\n", mr.mb[1]); 8023 cmd->Status = EXT_STATUS_ERR; 8024 cmd->DetailStatus = mr.mb[1]; 8025 cmd->ResponseLen = 0; 8026 } 8027 8028 ql_restart_hba(ha); 8029 8030 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8031 } 8032 8033 /* 8034 * ql_menlo_get_fw_version 8035 * Get Menlo firmware version. 8036 * 8037 * Input: 8038 * ha: adapter state pointer. 8039 * bp: buffer address. 8040 * mode: flags 8041 * 8042 * Returns: 8043 * 8044 * Context: 8045 * Kernel context. 
8046 */ 8047 static void 8048 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8049 { 8050 int rval; 8051 ql_mbx_iocb_t *pkt; 8052 EXT_MENLO_GET_FW_VERSION ver = {0}; 8053 8054 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8055 8056 if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) { 8057 EL(ha, "failed, invalid request for HBA\n"); 8058 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8059 cmd->ResponseLen = 0; 8060 return; 8061 } 8062 8063 if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) { 8064 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8065 cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION); 8066 EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen, 8067 sizeof (EXT_MENLO_GET_FW_VERSION)); 8068 cmd->ResponseLen = 0; 8069 return; 8070 } 8071 8072 /* Allocate packet. */ 8073 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP); 8074 if (pkt == NULL) { 8075 EL(ha, "failed, kmem_zalloc\n"); 8076 cmd->Status = EXT_STATUS_NO_MEMORY; 8077 cmd->ResponseLen = 0; 8078 return; 8079 } 8080 8081 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE; 8082 pkt->mvfy.entry_count = 1; 8083 pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW); 8084 8085 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t)); 8086 LITTLE_ENDIAN_16(&pkt->mvfy.options_status); 8087 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code); 8088 ver.FwVersion = LE_32(pkt->mvfy.fw_version); 8089 8090 if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 || 8091 pkt->mvfy.options_status != CS_COMPLETE) { 8092 /* Command error */ 8093 EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval, 8094 pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status, 8095 pkt->mvfy.failure_code); 8096 cmd->Status = EXT_STATUS_ERR; 8097 cmd->DetailStatus = rval != QL_SUCCESS ? 
rval : 8098 QL_FUNCTION_FAILED; 8099 cmd->ResponseLen = 0; 8100 } else if (ddi_copyout((void *)&ver, 8101 (void *)(uintptr_t)cmd->ResponseAdr, 8102 sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) { 8103 EL(ha, "failed, ddi_copyout\n"); 8104 cmd->Status = EXT_STATUS_COPY_ERR; 8105 cmd->ResponseLen = 0; 8106 } else { 8107 cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION); 8108 } 8109 8110 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8111 8112 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8113 } 8114 8115 /* 8116 * ql_menlo_update_fw 8117 * Get Menlo update firmware. 8118 * 8119 * Input: 8120 * ha: adapter state pointer. 8121 * bp: buffer address. 8122 * mode: flags 8123 * 8124 * Returns: 8125 * 8126 * Context: 8127 * Kernel context. 8128 */ 8129 static void 8130 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8131 { 8132 ql_mbx_iocb_t *pkt; 8133 dma_mem_t *dma_mem; 8134 EXT_MENLO_UPDATE_FW fw; 8135 uint32_t *ptr32; 8136 int rval; 8137 8138 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8139 8140 if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) { 8141 EL(ha, "failed, invalid request for HBA\n"); 8142 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8143 cmd->ResponseLen = 0; 8144 return; 8145 } 8146 8147 /* 8148 * TODO: only vp_index 0 can do this (?) 8149 */ 8150 8151 /* Verify the size of request structure. */ 8152 if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) { 8153 /* Return error */ 8154 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen, 8155 sizeof (EXT_MENLO_UPDATE_FW)); 8156 cmd->Status = EXT_STATUS_INVALID_PARAM; 8157 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 8158 cmd->ResponseLen = 0; 8159 return; 8160 } 8161 8162 /* Get update fw request. */ 8163 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw, 8164 sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) { 8165 EL(ha, "failed, ddi_copyin\n"); 8166 cmd->Status = EXT_STATUS_COPY_ERR; 8167 cmd->ResponseLen = 0; 8168 return; 8169 } 8170 8171 /* Wait for I/O to stop and daemon to stall. 
*/ 8172 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) { 8173 EL(ha, "ql_stall_driver failed\n"); 8174 ql_restart_hba(ha); 8175 cmd->Status = EXT_STATUS_BUSY; 8176 cmd->ResponseLen = 0; 8177 return; 8178 } 8179 8180 /* Allocate packet. */ 8181 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP); 8182 if (dma_mem == NULL) { 8183 EL(ha, "failed, kmem_zalloc\n"); 8184 cmd->Status = EXT_STATUS_NO_MEMORY; 8185 cmd->ResponseLen = 0; 8186 return; 8187 } 8188 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP); 8189 if (pkt == NULL) { 8190 EL(ha, "failed, kmem_zalloc\n"); 8191 kmem_free(dma_mem, sizeof (dma_mem_t)); 8192 ql_restart_hba(ha); 8193 cmd->Status = EXT_STATUS_NO_MEMORY; 8194 cmd->ResponseLen = 0; 8195 return; 8196 } 8197 8198 /* Get DMA memory for the IOCB */ 8199 if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA, 8200 QL_DMA_DATA_ALIGN) != QL_SUCCESS) { 8201 cmn_err(CE_WARN, "%s(%d): request queue DMA memory " 8202 "alloc failed", QL_NAME, ha->instance); 8203 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8204 kmem_free(dma_mem, sizeof (dma_mem_t)); 8205 ql_restart_hba(ha); 8206 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 8207 cmd->ResponseLen = 0; 8208 return; 8209 } 8210 8211 /* Get firmware data. */ 8212 if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp, 8213 fw.TotalByteCount, mode) != fw.TotalByteCount) { 8214 EL(ha, "failed, get_buffer_data\n"); 8215 ql_free_dma_resource(ha, dma_mem); 8216 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8217 kmem_free(dma_mem, sizeof (dma_mem_t)); 8218 ql_restart_hba(ha); 8219 cmd->Status = EXT_STATUS_COPY_ERR; 8220 cmd->ResponseLen = 0; 8221 return; 8222 } 8223 8224 /* Sync DMA buffer. 
*/ 8225 (void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size, 8226 DDI_DMA_SYNC_FORDEV); 8227 8228 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE; 8229 pkt->mvfy.entry_count = 1; 8230 pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags); 8231 ptr32 = dma_mem->bp; 8232 pkt->mvfy.fw_version = LE_32(ptr32[2]); 8233 pkt->mvfy.fw_size = LE_32(fw.TotalByteCount); 8234 pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount); 8235 pkt->mvfy.dseg_count = LE_16(1); 8236 pkt->mvfy.dseg_0_address[0] = (uint32_t) 8237 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 8238 pkt->mvfy.dseg_0_address[1] = (uint32_t) 8239 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 8240 pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount); 8241 8242 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t)); 8243 LITTLE_ENDIAN_16(&pkt->mvfy.options_status); 8244 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code); 8245 8246 if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 || 8247 pkt->mvfy.options_status != CS_COMPLETE) { 8248 /* Command error */ 8249 EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval, 8250 pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status, 8251 pkt->mvfy.failure_code); 8252 cmd->Status = EXT_STATUS_ERR; 8253 cmd->DetailStatus = rval != QL_SUCCESS ? rval : 8254 QL_FUNCTION_FAILED; 8255 cmd->ResponseLen = 0; 8256 } 8257 8258 ql_free_dma_resource(ha, dma_mem); 8259 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8260 kmem_free(dma_mem, sizeof (dma_mem_t)); 8261 ql_restart_hba(ha); 8262 8263 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8264 } 8265 8266 /* 8267 * ql_menlo_manage_info 8268 * Get Menlo manage info. 8269 * 8270 * Input: 8271 * ha: adapter state pointer. 8272 * bp: buffer address. 8273 * mode: flags 8274 * 8275 * Returns: 8276 * 8277 * Context: 8278 * Kernel context. 
8279 */ 8280 static void 8281 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8282 { 8283 ql_mbx_iocb_t *pkt; 8284 dma_mem_t *dma_mem = NULL; 8285 EXT_MENLO_MANAGE_INFO info; 8286 int rval; 8287 8288 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8289 8290 8291 /* The call is only supported for Schultz right now */ 8292 if (CFG_IST(ha, CFG_CTRL_8081)) { 8293 ql_get_xgmac_statistics(ha, cmd, mode); 8294 QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n", 8295 ha->instance); 8296 return; 8297 } 8298 8299 if (!CFG_IST(ha, CFG_CTRL_8081) || !CFG_IST(ha, CFG_CTRL_MENLO)) { 8300 EL(ha, "failed, invalid request for HBA\n"); 8301 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8302 cmd->ResponseLen = 0; 8303 return; 8304 } 8305 8306 /* Verify the size of request structure. */ 8307 if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) { 8308 /* Return error */ 8309 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen, 8310 sizeof (EXT_MENLO_MANAGE_INFO)); 8311 cmd->Status = EXT_STATUS_INVALID_PARAM; 8312 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 8313 cmd->ResponseLen = 0; 8314 return; 8315 } 8316 8317 /* Get manage info request. */ 8318 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, 8319 (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) { 8320 EL(ha, "failed, ddi_copyin\n"); 8321 cmd->Status = EXT_STATUS_COPY_ERR; 8322 cmd->ResponseLen = 0; 8323 return; 8324 } 8325 8326 /* Allocate packet. 
*/ 8327 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP); 8328 if (pkt == NULL) { 8329 EL(ha, "failed, kmem_zalloc\n"); 8330 ql_restart_driver(ha); 8331 cmd->Status = EXT_STATUS_NO_MEMORY; 8332 cmd->ResponseLen = 0; 8333 return; 8334 } 8335 8336 pkt->mdata.entry_type = MENLO_DATA_TYPE; 8337 pkt->mdata.entry_count = 1; 8338 pkt->mdata.options_status = (uint16_t)LE_16(info.Operation); 8339 8340 /* Get DMA memory for the IOCB */ 8341 if (info.Operation == MENLO_OP_READ_MEM || 8342 info.Operation == MENLO_OP_WRITE_MEM) { 8343 pkt->mdata.total_byte_count = LE_32(info.TotalByteCount); 8344 pkt->mdata.parameter_1 = 8345 LE_32(info.Parameters.ap.MenloMemory.StartingAddr); 8346 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), 8347 KM_SLEEP); 8348 if (dma_mem == NULL) { 8349 EL(ha, "failed, kmem_zalloc\n"); 8350 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8351 cmd->Status = EXT_STATUS_NO_MEMORY; 8352 cmd->ResponseLen = 0; 8353 return; 8354 } 8355 if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount, 8356 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) { 8357 cmn_err(CE_WARN, "%s(%d): request queue DMA memory " 8358 "alloc failed", QL_NAME, ha->instance); 8359 kmem_free(dma_mem, sizeof (dma_mem_t)); 8360 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8361 cmd->Status = EXT_STATUS_MS_NO_RESPONSE; 8362 cmd->ResponseLen = 0; 8363 return; 8364 } 8365 if (info.Operation == MENLO_OP_WRITE_MEM) { 8366 /* Get data. 
*/ 8367 if (ql_get_buffer_data( 8368 (caddr_t)(uintptr_t)info.pDataBytes, 8369 dma_mem->bp, info.TotalByteCount, mode) != 8370 info.TotalByteCount) { 8371 EL(ha, "failed, get_buffer_data\n"); 8372 ql_free_dma_resource(ha, dma_mem); 8373 kmem_free(dma_mem, sizeof (dma_mem_t)); 8374 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8375 cmd->Status = EXT_STATUS_COPY_ERR; 8376 cmd->ResponseLen = 0; 8377 return; 8378 } 8379 (void) ddi_dma_sync(dma_mem->dma_handle, 0, 8380 dma_mem->size, DDI_DMA_SYNC_FORDEV); 8381 } 8382 pkt->mdata.dseg_count = LE_16(1); 8383 pkt->mdata.dseg_0_address[0] = (uint32_t) 8384 LE_32(LSD(dma_mem->cookie.dmac_laddress)); 8385 pkt->mdata.dseg_0_address[1] = (uint32_t) 8386 LE_32(MSD(dma_mem->cookie.dmac_laddress)); 8387 pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount); 8388 } else if (info.Operation & MENLO_OP_CHANGE_CONFIG) { 8389 pkt->mdata.parameter_1 = 8390 LE_32(info.Parameters.ap.MenloConfig.ConfigParamID); 8391 pkt->mdata.parameter_2 = 8392 LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0); 8393 pkt->mdata.parameter_3 = 8394 LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1); 8395 } else if (info.Operation & MENLO_OP_GET_INFO) { 8396 pkt->mdata.parameter_1 = 8397 LE_32(info.Parameters.ap.MenloInfo.InfoDataType); 8398 pkt->mdata.parameter_2 = 8399 LE_32(info.Parameters.ap.MenloInfo.InfoContext); 8400 } 8401 8402 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t)); 8403 LITTLE_ENDIAN_16(&pkt->mdata.options_status); 8404 LITTLE_ENDIAN_16(&pkt->mdata.failure_code); 8405 8406 if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 || 8407 pkt->mdata.options_status != CS_COMPLETE) { 8408 /* Command error */ 8409 EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval, 8410 pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status, 8411 pkt->mdata.failure_code); 8412 cmd->Status = EXT_STATUS_ERR; 8413 cmd->DetailStatus = rval != QL_SUCCESS ? 
rval : 8414 QL_FUNCTION_FAILED; 8415 cmd->ResponseLen = 0; 8416 } else if (info.Operation == MENLO_OP_READ_MEM) { 8417 (void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size, 8418 DDI_DMA_SYNC_FORKERNEL); 8419 if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes, 8420 dma_mem->bp, info.TotalByteCount, mode) != 8421 info.TotalByteCount) { 8422 cmd->Status = EXT_STATUS_COPY_ERR; 8423 cmd->ResponseLen = 0; 8424 } 8425 } 8426 8427 ql_free_dma_resource(ha, dma_mem); 8428 kmem_free(dma_mem, sizeof (dma_mem_t)); 8429 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 8430 8431 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8432 } 8433 8434 /* 8435 * ql_suspend_hba 8436 * Suspends all adapter ports. 8437 * 8438 * Input: 8439 * ha: adapter state pointer. 8440 * options: BIT_0 --> leave driver stalled on exit if 8441 * failed. 8442 * 8443 * Returns: 8444 * ql local function return status code. 8445 * 8446 * Context: 8447 * Kernel context. 8448 */ 8449 static int 8450 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt) 8451 { 8452 ql_adapter_state_t *ha2; 8453 ql_link_t *link; 8454 int rval = QL_SUCCESS; 8455 8456 /* Quiesce I/O on all adapter ports */ 8457 for (link = ql_hba.first; link != NULL; link = link->next) { 8458 ha2 = link->base_address; 8459 8460 if (ha2->fru_hba_index != ha->fru_hba_index) { 8461 continue; 8462 } 8463 8464 if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) { 8465 EL(ha, "ql_stall_driver status=%xh\n", rval); 8466 break; 8467 } 8468 } 8469 8470 return (rval); 8471 } 8472 8473 /* 8474 * ql_restart_hba 8475 * Restarts adapter. 8476 * 8477 * Input: 8478 * ha: adapter state pointer. 8479 * 8480 * Context: 8481 * Kernel context. 
8482 */ 8483 static void 8484 ql_restart_hba(ql_adapter_state_t *ha) 8485 { 8486 ql_adapter_state_t *ha2; 8487 ql_link_t *link; 8488 8489 /* Resume I/O on all adapter ports */ 8490 for (link = ql_hba.first; link != NULL; link = link->next) { 8491 ha2 = link->base_address; 8492 8493 if (ha2->fru_hba_index != ha->fru_hba_index) { 8494 continue; 8495 } 8496 8497 ql_restart_driver(ha2); 8498 } 8499 } 8500 8501 /* 8502 * ql_get_vp_cnt_id 8503 * Retrieves pci config space data 8504 * 8505 * Input: 8506 * ha: adapter state pointer. 8507 * cmd: Local EXT_IOCTL cmd struct pointer. 8508 * mode: flags. 8509 * 8510 * Returns: 8511 * None, request status indicated in cmd->Status. 8512 * 8513 * Context: 8514 * Kernel context. 8515 * 8516 */ 8517 static void 8518 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8519 { 8520 ql_adapter_state_t *vha; 8521 PEXT_VPORT_ID_CNT ptmp_vp; 8522 int id = 0; 8523 int rval; 8524 char name[MAXPATHLEN]; 8525 8526 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8527 8528 /* 8529 * To be backward compatible with older API 8530 * check for the size of old EXT_VPORT_ID_CNT 8531 */ 8532 if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) && 8533 (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) { 8534 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8535 cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT); 8536 EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n", 8537 cmd->ResponseLen); 8538 cmd->ResponseLen = 0; 8539 return; 8540 } 8541 8542 ptmp_vp = (EXT_VPORT_ID_CNT *) 8543 kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP); 8544 if (ptmp_vp == NULL) { 8545 EL(ha, "failed, kmem_zalloc\n"); 8546 cmd->ResponseLen = 0; 8547 return; 8548 } 8549 vha = ha->vp_next; 8550 while (vha != NULL) { 8551 ptmp_vp->VpCnt++; 8552 ptmp_vp->VpId[id] = vha->vp_index; 8553 (void) ddi_pathname(vha->dip, name); 8554 (void) strcpy((char *)ptmp_vp->vp_path[id], name); 8555 ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance; 8556 id++; 8557 vha = vha->vp_next; 
8558 } 8559 rval = ddi_copyout((void *)ptmp_vp, 8560 (void *)(uintptr_t)(cmd->ResponseAdr), 8561 cmd->ResponseLen, mode); 8562 if (rval != 0) { 8563 cmd->Status = EXT_STATUS_COPY_ERR; 8564 cmd->ResponseLen = 0; 8565 EL(ha, "failed, ddi_copyout\n"); 8566 } else { 8567 cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT); 8568 QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n", 8569 ha->instance, ptmp_vp->VpCnt); 8570 } 8571 8572 } 8573 8574 /* 8575 * ql_vp_ioctl 8576 * Performs all EXT_CC_VPORT_CMD functions. 8577 * 8578 * Input: 8579 * ha: adapter state pointer. 8580 * cmd: Local EXT_IOCTL cmd struct pointer. 8581 * mode: flags. 8582 * 8583 * Returns: 8584 * None, request status indicated in cmd->Status. 8585 * 8586 * Context: 8587 * Kernel context. 8588 */ 8589 static void 8590 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8591 { 8592 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, 8593 cmd->SubCode); 8594 8595 /* case off on command subcode */ 8596 switch (cmd->SubCode) { 8597 case EXT_VF_SC_VPORT_GETINFO: 8598 ql_qry_vport(ha, cmd, mode); 8599 break; 8600 default: 8601 /* function not supported. */ 8602 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE; 8603 EL(ha, "failed, Unsupported Subcode=%xh\n", 8604 cmd->SubCode); 8605 break; 8606 } 8607 8608 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8609 } 8610 8611 /* 8612 * ql_qry_vport 8613 * Performs EXT_VF_SC_VPORT_GETINFO subfunction. 8614 * 8615 * Input: 8616 * ha: adapter state pointer. 8617 * cmd: EXT_IOCTL cmd struct pointer. 8618 * mode: flags. 8619 * 8620 * Returns: 8621 * None, request status indicated in cmd->Status. 8622 * 8623 * Context: 8624 * Kernel context. 
8625 */ 8626 static void 8627 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode) 8628 { 8629 ql_adapter_state_t *tmp_vha; 8630 EXT_VPORT_INFO tmp_vport = {0}; 8631 int max_vport; 8632 8633 QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance); 8634 8635 if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) { 8636 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8637 cmd->DetailStatus = sizeof (EXT_VPORT_INFO); 8638 EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n", 8639 cmd->ResponseLen); 8640 cmd->ResponseLen = 0; 8641 return; 8642 } 8643 8644 /* Fill in the vport information. */ 8645 bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn, 8646 EXT_DEF_WWN_NAME_SIZE); 8647 bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn, 8648 EXT_DEF_WWN_NAME_SIZE); 8649 tmp_vport.state = vha->state; 8650 tmp_vport.id = vha->vp_index; 8651 8652 tmp_vha = vha->pha->vp_next; 8653 while (tmp_vha != NULL) { 8654 tmp_vport.used++; 8655 tmp_vha = tmp_vha->vp_next; 8656 } 8657 8658 max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS : 8659 MAX_25_VIRTUAL_PORTS); 8660 if (max_vport > tmp_vport.used) { 8661 tmp_vport.free = max_vport - tmp_vport.used; 8662 } 8663 8664 if (ddi_copyout((void *)&tmp_vport, 8665 (void *)(uintptr_t)(cmd->ResponseAdr), 8666 sizeof (EXT_VPORT_INFO), mode) != 0) { 8667 cmd->Status = EXT_STATUS_COPY_ERR; 8668 cmd->ResponseLen = 0; 8669 EL(vha, "failed, ddi_copyout\n"); 8670 } else { 8671 cmd->ResponseLen = sizeof (EXT_VPORT_INFO); 8672 QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance); 8673 } 8674 } 8675 8676 /* 8677 * ql_access_flash 8678 * Performs all EXT_CC_ACCESS_FLASH_OS functions. 8679 * 8680 * Input: 8681 * pi: port info pointer. 8682 * cmd: Local EXT_IOCTL cmd struct pointer. 8683 * mode: flags. 8684 * 8685 * Returns: 8686 * None, request status indicated in cmd->Status. 8687 * 8688 * Context: 8689 * Kernel context. 
8690 */ 8691 static void 8692 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8693 { 8694 int rval; 8695 8696 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8697 8698 switch (cmd->SubCode) { 8699 case EXT_SC_FLASH_READ: 8700 if ((rval = ql_flash_fcode_dump(ha, 8701 (void *)(uintptr_t)(cmd->ResponseAdr), 8702 (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) { 8703 cmd->Status = EXT_STATUS_COPY_ERR; 8704 cmd->ResponseLen = 0; 8705 EL(ha, "flash_fcode_dump status=%xh\n", rval); 8706 } 8707 break; 8708 case EXT_SC_FLASH_WRITE: 8709 if ((rval = ql_r_m_w_flash(ha, 8710 (void *)(uintptr_t)(cmd->RequestAdr), 8711 (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) != 8712 QL_SUCCESS) { 8713 cmd->Status = EXT_STATUS_COPY_ERR; 8714 cmd->ResponseLen = 0; 8715 EL(ha, "r_m_w_flash status=%xh\n", rval); 8716 } else { 8717 /* Reset caches on all adapter instances. */ 8718 ql_update_flash_caches(ha); 8719 } 8720 break; 8721 default: 8722 EL(ha, "unknown subcode=%xh\n", cmd->SubCode); 8723 cmd->Status = EXT_STATUS_ERR; 8724 cmd->ResponseLen = 0; 8725 break; 8726 } 8727 8728 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8729 } 8730 8731 /* 8732 * ql_reset_cmd 8733 * Performs all EXT_CC_RESET_FW_OS functions. 8734 * 8735 * Input: 8736 * ha: adapter state pointer. 8737 * cmd: Local EXT_IOCTL cmd struct pointer. 8738 * 8739 * Returns: 8740 * None, request status indicated in cmd->Status. 8741 * 8742 * Context: 8743 * Kernel context. 
8744 */ 8745 static void 8746 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd) 8747 { 8748 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8749 8750 switch (cmd->SubCode) { 8751 case EXT_SC_RESET_FC_FW: 8752 EL(ha, "isp_abort_needed\n"); 8753 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0); 8754 break; 8755 case EXT_SC_RESET_MPI_FW: 8756 if (!(CFG_IST(ha, CFG_CTRL_81XX))) { 8757 EL(ha, "invalid request for HBA\n"); 8758 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8759 cmd->ResponseLen = 0; 8760 } else { 8761 /* Wait for I/O to stop and daemon to stall. */ 8762 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) { 8763 EL(ha, "ql_suspend_hba failed\n"); 8764 cmd->Status = EXT_STATUS_BUSY; 8765 cmd->ResponseLen = 0; 8766 } else if (ql_restart_mpi(ha) != QL_SUCCESS) { 8767 cmd->Status = EXT_STATUS_ERR; 8768 cmd->ResponseLen = 0; 8769 } else { 8770 uint8_t timer; 8771 /* 8772 * While the restart_mpi mailbox cmd may be 8773 * done the MPI is not. Wait at least 6 sec. or 8774 * exit if the loop comes up. 8775 */ 8776 for (timer = 6; timer; timer--) { 8777 if (!(ha->task_daemon_flags & 8778 LOOP_DOWN)) { 8779 break; 8780 } 8781 /* Delay for 1 second. */ 8782 ql_delay(ha, 1000000); 8783 } 8784 } 8785 ql_restart_hba(ha); 8786 } 8787 break; 8788 default: 8789 EL(ha, "unknown subcode=%xh\n", cmd->SubCode); 8790 cmd->Status = EXT_STATUS_ERR; 8791 cmd->ResponseLen = 0; 8792 break; 8793 } 8794 8795 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8796 } 8797 8798 /* 8799 * ql_get_dcbx_parameters 8800 * Get DCBX parameters. 8801 * 8802 * Input: 8803 * ha: adapter state pointer. 8804 * cmd: User space CT arguments pointer. 8805 * mode: flags. 
8806 */ 8807 static void 8808 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8809 { 8810 uint8_t *tmp_buf; 8811 int rval; 8812 8813 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8814 8815 if (!(CFG_IST(ha, CFG_CTRL_8081))) { 8816 EL(ha, "invalid request for HBA\n"); 8817 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8818 cmd->ResponseLen = 0; 8819 return; 8820 } 8821 8822 /* Allocate memory for command. */ 8823 tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP); 8824 if (tmp_buf == NULL) { 8825 EL(ha, "failed, kmem_zalloc\n"); 8826 cmd->Status = EXT_STATUS_NO_MEMORY; 8827 cmd->ResponseLen = 0; 8828 return; 8829 } 8830 /* Send command */ 8831 rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE, 8832 (caddr_t)tmp_buf); 8833 if (rval != QL_SUCCESS) { 8834 /* error */ 8835 EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval); 8836 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE); 8837 cmd->Status = EXT_STATUS_ERR; 8838 cmd->ResponseLen = 0; 8839 return; 8840 } 8841 8842 /* Copy the response */ 8843 if (ql_send_buffer_data((caddr_t)tmp_buf, 8844 (caddr_t)(uintptr_t)cmd->ResponseAdr, 8845 EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) { 8846 EL(ha, "failed, ddi_copyout\n"); 8847 cmd->Status = EXT_STATUS_COPY_ERR; 8848 cmd->ResponseLen = 0; 8849 } else { 8850 cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE; 8851 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8852 } 8853 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE); 8854 8855 } 8856 8857 /* 8858 * ql_qry_cna_port 8859 * Performs EXT_SC_QUERY_CNA_PORT subfunction. 8860 * 8861 * Input: 8862 * ha: adapter state pointer. 8863 * cmd: EXT_IOCTL cmd struct pointer. 8864 * mode: flags. 8865 * 8866 * Returns: 8867 * None, request status indicated in cmd->Status. 8868 * 8869 * Context: 8870 * Kernel context. 
8871 */ 8872 static void 8873 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 8874 { 8875 EXT_CNA_PORT cna_port = {0}; 8876 8877 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8878 8879 if (!(CFG_IST(ha, CFG_CTRL_8081))) { 8880 EL(ha, "invalid request for HBA\n"); 8881 cmd->Status = EXT_STATUS_INVALID_REQUEST; 8882 cmd->ResponseLen = 0; 8883 return; 8884 } 8885 8886 if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) { 8887 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8888 cmd->DetailStatus = sizeof (EXT_CNA_PORT); 8889 EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n", 8890 cmd->ResponseLen); 8891 cmd->ResponseLen = 0; 8892 return; 8893 } 8894 8895 cna_port.VLanId = ha->fcoe_vlan_id; 8896 cna_port.FabricParam = ha->fabric_params; 8897 bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress, 8898 EXT_DEF_MAC_ADDRESS_SIZE); 8899 8900 if (ddi_copyout((void *)&cna_port, 8901 (void *)(uintptr_t)(cmd->ResponseAdr), 8902 sizeof (EXT_CNA_PORT), mode) != 0) { 8903 cmd->Status = EXT_STATUS_COPY_ERR; 8904 cmd->ResponseLen = 0; 8905 EL(ha, "failed, ddi_copyout\n"); 8906 } else { 8907 cmd->ResponseLen = sizeof (EXT_CNA_PORT); 8908 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 8909 } 8910 } 8911 8912 /* 8913 * ql_qry_adapter_versions 8914 * Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction. 8915 * 8916 * Input: 8917 * ha: adapter state pointer. 8918 * cmd: EXT_IOCTL cmd struct pointer. 8919 * mode: flags. 8920 * 8921 * Returns: 8922 * None, request status indicated in cmd->Status. 8923 * 8924 * Context: 8925 * Kernel context. 8926 */ 8927 static void 8928 ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd, 8929 int mode) 8930 { 8931 uint8_t is_8142, mpi_cap; 8932 uint32_t ver_len, transfer_size; 8933 PEXT_ADAPTERREGIONVERSION padapter_ver = NULL; 8934 8935 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 8936 8937 /* 8142s do not have a EDC PHY firmware. 
*/ 8938 mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8); 8939 8940 is_8142 = 0; 8941 /* Sizeof (Length + Reserved) = 8 Bytes */ 8942 if (mpi_cap == 0x02 || mpi_cap == 0x04) { 8943 ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1)) 8944 + 8; 8945 is_8142 = 1; 8946 } else { 8947 ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8; 8948 } 8949 8950 /* Allocate local memory for EXT_ADAPTERREGIONVERSION */ 8951 padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len, 8952 KM_SLEEP); 8953 8954 if (padapter_ver == NULL) { 8955 EL(ha, "failed, kmem_zalloc\n"); 8956 cmd->Status = EXT_STATUS_NO_MEMORY; 8957 cmd->ResponseLen = 0; 8958 return; 8959 } 8960 8961 padapter_ver->Length = 1; 8962 /* Copy MPI version */ 8963 padapter_ver->RegionVersion[0].Region = 8964 EXT_OPT_ROM_REGION_MPI_RISC_FW; 8965 padapter_ver->RegionVersion[0].Version[0] = 8966 ha->mpi_fw_major_version; 8967 padapter_ver->RegionVersion[0].Version[1] = 8968 ha->mpi_fw_minor_version; 8969 padapter_ver->RegionVersion[0].Version[2] = 8970 ha->mpi_fw_subminor_version; 8971 padapter_ver->RegionVersion[0].VersionLength = 3; 8972 padapter_ver->RegionVersion[0].Location = RUNNING_VERSION; 8973 8974 if (!is_8142) { 8975 padapter_ver->RegionVersion[1].Region = 8976 EXT_OPT_ROM_REGION_EDC_PHY_FW; 8977 padapter_ver->RegionVersion[1].Version[0] = 8978 ha->phy_fw_major_version; 8979 padapter_ver->RegionVersion[1].Version[1] = 8980 ha->phy_fw_minor_version; 8981 padapter_ver->RegionVersion[1].Version[2] = 8982 ha->phy_fw_subminor_version; 8983 padapter_ver->RegionVersion[1].VersionLength = 3; 8984 padapter_ver->RegionVersion[1].Location = RUNNING_VERSION; 8985 padapter_ver->Length = NO_OF_VERSIONS; 8986 } 8987 8988 if (cmd->ResponseLen < ver_len) { 8989 EL(ha, "failed, ResponseLen < ver_len, ", 8990 "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len); 8991 /* Calculate the No. of valid versions being returned. 
*/ 8992 padapter_ver->Length = (uint32_t) 8993 ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION)); 8994 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 8995 cmd->DetailStatus = ver_len; 8996 transfer_size = cmd->ResponseLen; 8997 } else { 8998 transfer_size = ver_len; 8999 } 9000 9001 if (ddi_copyout((void *)padapter_ver, 9002 (void *)(uintptr_t)(cmd->ResponseAdr), 9003 transfer_size, mode) != 0) { 9004 cmd->Status = EXT_STATUS_COPY_ERR; 9005 cmd->ResponseLen = 0; 9006 EL(ha, "failed, ddi_copyout\n"); 9007 } else { 9008 cmd->ResponseLen = ver_len; 9009 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 9010 } 9011 9012 kmem_free(padapter_ver, ver_len); 9013 } 9014 9015 /* 9016 * ql_get_xgmac_statistics 9017 * Get XgMac information 9018 * 9019 * Input: 9020 * ha: adapter state pointer. 9021 * cmd: EXT_IOCTL cmd struct pointer. 9022 * mode: flags. 9023 * 9024 * Returns: 9025 * None, request status indicated in cmd->Status. 9026 * 9027 * Context: 9028 * Kernel context. 9029 */ 9030 static void 9031 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 9032 { 9033 int rval; 9034 uint32_t size; 9035 int8_t *tmp_buf; 9036 EXT_MENLO_MANAGE_INFO info; 9037 9038 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 9039 9040 /* Verify the size of request structure. */ 9041 if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) { 9042 /* Return error */ 9043 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen, 9044 sizeof (EXT_MENLO_MANAGE_INFO)); 9045 cmd->Status = EXT_STATUS_INVALID_PARAM; 9046 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN; 9047 cmd->ResponseLen = 0; 9048 return; 9049 } 9050 9051 /* Get manage info request. 
*/ 9052 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, 9053 (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) { 9054 EL(ha, "failed, ddi_copyin\n"); 9055 cmd->Status = EXT_STATUS_COPY_ERR; 9056 cmd->ResponseLen = 0; 9057 return; 9058 } 9059 9060 size = info.TotalByteCount; 9061 if (!size) { 9062 /* parameter error */ 9063 cmd->Status = EXT_STATUS_INVALID_PARAM; 9064 cmd->DetailStatus = 0; 9065 EL(ha, "failed, size=%xh\n", size); 9066 cmd->ResponseLen = 0; 9067 return; 9068 } 9069 9070 /* Allocate memory for command. */ 9071 tmp_buf = kmem_zalloc(size, KM_SLEEP); 9072 if (tmp_buf == NULL) { 9073 EL(ha, "failed, kmem_zalloc\n"); 9074 cmd->Status = EXT_STATUS_NO_MEMORY; 9075 cmd->ResponseLen = 0; 9076 return; 9077 } 9078 9079 if (!(info.Operation & MENLO_OP_GET_INFO)) { 9080 EL(ha, "Invalid request for 81XX\n"); 9081 kmem_free(tmp_buf, size); 9082 cmd->Status = EXT_STATUS_ERR; 9083 cmd->ResponseLen = 0; 9084 return; 9085 } 9086 9087 rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf); 9088 9089 if (rval != QL_SUCCESS) { 9090 /* error */ 9091 EL(ha, "failed, get_xgmac_stats =%xh\n", rval); 9092 kmem_free(tmp_buf, size); 9093 cmd->Status = EXT_STATUS_ERR; 9094 cmd->ResponseLen = 0; 9095 return; 9096 } 9097 9098 if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes, 9099 size, mode) != size) { 9100 EL(ha, "failed, ddi_copyout\n"); 9101 cmd->Status = EXT_STATUS_COPY_ERR; 9102 cmd->ResponseLen = 0; 9103 } else { 9104 cmd->ResponseLen = info.TotalByteCount; 9105 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 9106 } 9107 kmem_free(tmp_buf, size); 9108 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 9109 } 9110 9111 /* 9112 * ql_get_fcf_list 9113 * Get FCF list. 9114 * 9115 * Input: 9116 * ha: adapter state pointer. 9117 * cmd: User space CT arguments pointer. 9118 * mode: flags. 
9119 */ 9120 static void 9121 ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 9122 { 9123 uint8_t *tmp_buf; 9124 int rval; 9125 EXT_FCF_LIST fcf_list = {0}; 9126 ql_fcf_list_desc_t mb_fcf_list = {0}; 9127 9128 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 9129 9130 if (!(CFG_IST(ha, CFG_CTRL_81XX))) { 9131 EL(ha, "invalid request for HBA\n"); 9132 cmd->Status = EXT_STATUS_INVALID_REQUEST; 9133 cmd->ResponseLen = 0; 9134 return; 9135 } 9136 /* Get manage info request. */ 9137 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, 9138 (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) { 9139 EL(ha, "failed, ddi_copyin\n"); 9140 cmd->Status = EXT_STATUS_COPY_ERR; 9141 cmd->ResponseLen = 0; 9142 return; 9143 } 9144 9145 if (!(fcf_list.BufSize)) { 9146 /* Return error */ 9147 EL(ha, "failed, fcf_list BufSize is=%xh\n", 9148 fcf_list.BufSize); 9149 cmd->Status = EXT_STATUS_INVALID_PARAM; 9150 cmd->ResponseLen = 0; 9151 return; 9152 } 9153 /* Allocate memory for command. 
*/ 9154 tmp_buf = kmem_zalloc(fcf_list.BufSize, KM_SLEEP); 9155 if (tmp_buf == NULL) { 9156 EL(ha, "failed, kmem_zalloc\n"); 9157 cmd->Status = EXT_STATUS_NO_MEMORY; 9158 cmd->ResponseLen = 0; 9159 return; 9160 } 9161 /* build the descriptor */ 9162 if (fcf_list.Options) { 9163 mb_fcf_list.options = FCF_LIST_RETURN_ONE; 9164 } else { 9165 mb_fcf_list.options = FCF_LIST_RETURN_ALL; 9166 } 9167 mb_fcf_list.fcf_index = (uint16_t)fcf_list.FcfIndex; 9168 mb_fcf_list.buffer_size = fcf_list.BufSize; 9169 9170 /* Send command */ 9171 rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf); 9172 if (rval != QL_SUCCESS) { 9173 /* error */ 9174 EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval); 9175 kmem_free(tmp_buf, fcf_list.BufSize); 9176 cmd->Status = EXT_STATUS_ERR; 9177 cmd->ResponseLen = 0; 9178 return; 9179 } 9180 9181 /* Copy the response */ 9182 if (ql_send_buffer_data((caddr_t)tmp_buf, 9183 (caddr_t)(uintptr_t)cmd->ResponseAdr, 9184 fcf_list.BufSize, mode) != fcf_list.BufSize) { 9185 EL(ha, "failed, ddi_copyout\n"); 9186 cmd->Status = EXT_STATUS_COPY_ERR; 9187 cmd->ResponseLen = 0; 9188 } else { 9189 cmd->ResponseLen = mb_fcf_list.buffer_size; 9190 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 9191 } 9192 9193 kmem_free(tmp_buf, fcf_list.BufSize); 9194 } 9195 9196 /* 9197 * ql_get_resource_counts 9198 * Get Resource counts: 9199 * 9200 * Input: 9201 * ha: adapter state pointer. 9202 * cmd: User space CT arguments pointer. 9203 * mode: flags. 
9204 */ 9205 static void 9206 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode) 9207 { 9208 int rval; 9209 ql_mbx_data_t mr; 9210 EXT_RESOURCE_CNTS tmp_rc_cnt = {0}; 9211 9212 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance); 9213 9214 if (!(CFG_IST(ha, CFG_CTRL_242581))) { 9215 EL(ha, "invalid request for HBA\n"); 9216 cmd->Status = EXT_STATUS_INVALID_REQUEST; 9217 cmd->ResponseLen = 0; 9218 return; 9219 } 9220 9221 if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) { 9222 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL; 9223 cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS); 9224 EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, " 9225 "Len=%xh\n", cmd->ResponseLen); 9226 cmd->ResponseLen = 0; 9227 return; 9228 } 9229 9230 rval = ql_get_resource_cnts(ha, &mr); 9231 if (rval != QL_SUCCESS) { 9232 EL(ha, "resource cnt mbx failed\n"); 9233 cmd->Status = EXT_STATUS_ERR; 9234 cmd->ResponseLen = 0; 9235 return; 9236 } 9237 9238 tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1]; 9239 tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2]; 9240 tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3]; 9241 tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6]; 9242 tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7]; 9243 tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10]; 9244 tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11]; 9245 tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12]; 9246 9247 rval = ddi_copyout((void *)&tmp_rc_cnt, 9248 (void *)(uintptr_t)(cmd->ResponseAdr), 9249 sizeof (EXT_RESOURCE_CNTS), mode); 9250 if (rval != 0) { 9251 cmd->Status = EXT_STATUS_COPY_ERR; 9252 cmd->ResponseLen = 0; 9253 EL(ha, "failed, ddi_copyout\n"); 9254 } else { 9255 cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS); 9256 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance); 9257 } 9258 } 9259