1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Emulex. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 28 #define DEF_ICFG 1 29 30 #include <emlxs.h> 31 #include <emlxs_version.h> 32 33 34 char emlxs_revision[] = EMLXS_REVISION; 35 char emlxs_version[] = EMLXS_VERSION; 36 char emlxs_name[] = EMLXS_NAME; 37 char emlxs_label[] = EMLXS_LABEL; 38 39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C); 41 42 #ifdef MENLO_SUPPORT 43 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp); 44 #endif /* MENLO_SUPPORT */ 45 46 static void emlxs_fca_attach(emlxs_hba_t *hba); 47 static void emlxs_fca_detach(emlxs_hba_t *hba); 48 static void emlxs_drv_banner(emlxs_hba_t *hba); 49 50 static int32_t emlxs_get_props(emlxs_hba_t *hba); 51 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp); 52 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp); 53 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp); 54 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp); 55 static int32_t emlxs_send_els(emlxs_port_t *port, 
emlxs_buf_t *sbp); 56 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 57 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp); 58 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 59 static uint32_t emlxs_add_instance(int32_t ddiinst); 60 static void emlxs_iodone(emlxs_buf_t *sbp); 61 static int emlxs_pm_lower_power(dev_info_t *dip); 62 static int emlxs_pm_raise_power(dev_info_t *dip); 63 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, 64 uint32_t failed); 65 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3); 66 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba); 67 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code, 68 uint32_t args, uint32_t *arg); 69 70 static void emlxs_read_vport_prop(emlxs_hba_t *hba); 71 72 73 74 /* 75 * Driver Entry Routines. 76 */ 77 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t); 78 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t); 79 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *); 80 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *); 81 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t, 82 cred_t *, int32_t *); 83 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **); 84 85 86 /* 87 * FC_AL Transport Functions. 
 */
static opaque_t emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *,
	fc_fca_bind_info_t *);
static void emlxs_unbind_port(opaque_t);
static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
static int32_t emlxs_get_cap(opaque_t, char *, void *);
static int32_t emlxs_set_cap(opaque_t, char *, void *);
static int32_t emlxs_get_map(opaque_t, fc_lilpmap_t *);
static int32_t emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t,
	uint32_t *, uint32_t);
static int32_t emlxs_ub_free(opaque_t, uint32_t, uint64_t *);

static opaque_t emlxs_get_device(opaque_t, fc_portid_t);
static int32_t emlxs_notify(opaque_t, uint32_t);
static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);

/*
 * Driver Internal Functions.
 */

static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
static int32_t emlxs_power(dev_info_t *, int32_t, int32_t);
#ifdef EMLXS_I386
#ifdef S11
static int32_t emlxs_quiesce(dev_info_t *);
#endif
#endif
static int32_t emlxs_hba_resume(dev_info_t *);
static int32_t emlxs_hba_suspend(dev_info_t *);
static int32_t emlxs_hba_detach(dev_info_t *);
static int32_t emlxs_hba_attach(dev_info_t *);
static void emlxs_lock_destroy(emlxs_hba_t *);
static void emlxs_lock_init(emlxs_hba_t *);
static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *,
	uint32_t, uint8_t);

/*
 * Power management component names, consumed by emlxs_power().
 * Index 0 is the low-power (D3) state, index 1 full power (D0).
 */
char *emlxs_pm_components[] = {
	"NAME=emlxx000",
	"0=Device D3 State",
	"1=Device D0 State"
};


/*
 * Default emlx dma limits
 */
ddi_dma_lim_t emlxs_dma_lim = {
	(uint32_t)0,				/* dlim_addr_lo */
	(uint32_t)0xffffffff,			/* dlim_addr_hi */
	(uint_t)0x00ffffff,			/* dlim_cntr_max */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
	1,					/* dlim_minxfer */
	0x00ffffff				/* dlim_dmaspeed */
};

/*
 * Be careful when using these attributes; the defaults listed below are
 * (almost) the most general case, permitting allocation in almost any
 * way supported by the LightPulse family. The sole exception is the
 * alignment specified as requiring memory allocation on a 4-byte boundary;
 * the Lightpulse can DMA memory on any byte boundary.
 *
 * The LightPulse family currently is limited to 16M transfers;
 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
 */
ddi_dma_attr_t emlxs_dma_attr = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

/*
 * Same as emlxs_dma_attr, but with DDI_DMA_RELAXED_ORDERING set;
 * used for FCP data buffers (see the fca_tran initializers below).
 */
ddi_dma_attr_t emlxs_dma_attr_ro = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
};

/*
 * Variant restricted to a single scatter/gather entry
 * (dma_attr_sgllen == 1); used for cmd/rsp buffers that must be
 * physically contiguous.
 */
ddi_dma_attr_t emlxs_dma_attr_1sg = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	1,					/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

#if (EMLXS_MODREV >= EMLXS_MODREV3)
/* FCIP response buffer attributes (fields match emlxs_dma_attr). */
ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	EMLXS_SGLLEN,				/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};
#endif	/* >= EMLXS_MODREV3 */

/*
 * DDI access attributes for device
 */
ddi_device_acc_attr_t emlxs_dev_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * DDI access attributes for data
 */
ddi_device_acc_attr_t emlxs_data_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,	/* don't swap for Data */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * Fill in the FC Transport structure,
 * as defined in the Fibre Channel Transport Programming Guide.
 */
/*
 * One fc_fca_tran_t initializer is compiled in, selected by EMLXS_MODREV.
 * The initializers are positional; field order must match the fctl
 * (Leadville) fc_fca_tran_t layout for the chosen MODREV.
 */
#if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_5,		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	0,				/* fca_num_npivports */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV5 */


#if (EMLXS_MODREV == EMLXS_MODREV4)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV4 */


#if (EMLXS_MODREV == EMLXS_MODREV3)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV3 */


#if (EMLXS_MODREV == EMLXS_MODREV2)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_2,		/* fca_version */
	MAX_VPORTS,			/* number of ports */
	sizeof (emlxs_buf_t),		/* pkt size */
	2048,				/* max cmds */
	&emlxs_dma_lim,			/* DMA limits */
	0,				/* iblock, to be filled in later */
	&emlxs_dma_attr,		/* dma attributes */
	&emlxs_data_acc_attr,		/* access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV2 */

/*
 * This is needed when the module gets loaded by the kernel
 * so ddi library calls get resolved.
 */
#ifndef MODSYM_SUPPORT
char _depends_on[] = "misc/fctl";
#endif /* MODSYM_SUPPORT */

/*
 * state pointer which the implementation uses as a place to
 * hang a set of per-driver structures;
 */
void *emlxs_soft_state = NULL;

/*
 * Driver Global variables.
 */
int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */

emlxs_device_t emlxs_device;

uint32_t emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
uint32_t emlxs_instance_count = 0;	/* uses emlxs_device.lock */


/*
 * Single private "global" lock used to gain access to
 * the hba_list and/or any other case where we want/need to be
 * single-threaded.
 */
uint32_t emlxs_diag_state;

/*
 * CB ops vector. Used for administration only.
 */
static struct cb_ops emlxs_cb_ops = {
	emlxs_open,	/* cb_open */
	emlxs_close,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	emlxs_ioctl,	/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	0,		/* cb_stream */
#ifdef _LP64
	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#else
	D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#endif
	CB_REV,		/* rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

/*
 * Device operations vector registered with the DDI framework.
 * NOTE(review): the devo_quiesce slot is only initialized when both
 * EMLXS_I386 and S11 are defined; on other builds the initializer is
 * one field short and relies on implicit zero-initialization of the
 * remaining member -- confirm against the target dev_ops(9S) layout.
 */
static struct dev_ops emlxs_ops = {
	DEVO_REV,	/* rev */
	0,		/* refcnt */
	emlxs_info,	/* getinfo */
	nulldev,	/* identify */
	nulldev,	/* probe */
	emlxs_attach,	/* attach */
	emlxs_detach,	/* detach */
	nodev,		/* reset */
	&emlxs_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	emlxs_power,	/* power ops */
#ifdef EMLXS_I386
#ifdef S11
	emlxs_quiesce,	/* quiesce */
#endif
#endif
};

#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

#ifdef SAN_DIAG_SUPPORT
extern kmutex_t sd_bucket_mutex;
extern sd_bucket_info_t sd_bucket;
#endif /* SAN_DIAG_SUPPORT */

/*
 * Module linkage information for the kernel.
 */
static struct modldrv emlxs_modldrv = {
	&mod_driverops,	/* module type - driver */
	emlxs_name,	/* module name */
	&emlxs_ops,	/* driver ops */
};


/*
 * Driver module linkage structure
 */
static struct modlinkage emlxs_modlinkage = {
	MODREV_1,	/* ml_rev - must be MODREV_1 */
	&emlxs_modldrv,	/* ml_linkage */
	NULL		/* end of driver linkage */
};


/* We only need to add entries for non-default return codes. */
/* Entries do not need to be in order. */
/* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
/* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */

/*
 * Translation table: firmware I/O completion status (IOSTAT_*) to the
 * Leadville fc_packet pkt_state/pkt_reason/pkt_expln/pkt_action codes
 * reported back to the transport.
 */
emlxs_xlat_err_t emlxs_iostat_tbl[] = {
/*	{f/w code, pkt_state, pkt_reason, */
/*	    pkt_expln, pkt_action} */

	/* 0x00 - Do not remove */
	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x01 - Do not remove */
	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x02 */
	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
	    FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},

	/*
	 * This is a default entry.
	 * The real codes are written dynamically in emlxs_els.c
	 */
	/* 0x09 */
	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x10 */
	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x11 */
	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x04 */
	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x05 */
	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x06 */
	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
	    FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},

	/* CLASS 2 only */
	/* 0x07 */
	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
	    FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
};

#define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))


/* We only need to add entries for non-default return codes. */
/* Entries do not need to be in order.
 */
/* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
/* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */

/*
 * Translation table: local-reject error codes (IOERR_*) reported by the
 * adapter to the fc_packet state/reason codes returned to the transport.
 */
emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
/*	{f/w code, pkt_state, pkt_reason, */
/*	    pkt_expln, pkt_action} */

	/* 0x01 */
	{IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x02 */
	{IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x04 */
	{IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x05 */
	{IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x06 */
	{IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x07 */
	{IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x08 */
	{IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0B */
	{IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0D */
	{IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0E */
	{IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x0F */
	{IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x11 */
	{IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x13 */
	{IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x14 */
	{IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x15 */
	{IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x16 */
	{IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x17 */
	{IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x18 */
	{IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x1A */
	{IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x21 */
	{IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Occurs at link down */
	/* 0x28 */
	{IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0xF0 */
	{IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
};

#define	IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))



/*
 * IOERR_* code to printable-string map, used for log/trace messages.
 * (String values are part of the driver's log output; do not edit.)
 */
emlxs_table_t emlxs_error_table[] = {
	{IOERR_SUCCESS, "No error."},
	{IOERR_MISSING_CONTINUE, "Missing continue."},
	{IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
	{IOERR_INTERNAL_ERROR, "Internal error."},
	{IOERR_INVALID_RPI, "Invalid RPI."},
	{IOERR_NO_XRI, "No XRI."},
	{IOERR_ILLEGAL_COMMAND, "Illegal command."},
	{IOERR_XCHG_DROPPED, "Exchange dropped."},
	{IOERR_ILLEGAL_FIELD, "Illegal field."},
	{IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
	{IOERR_TX_DMA_FAILED, "TX DMA failed."},
	{IOERR_RX_DMA_FAILED, "RX DMA failed."},
	{IOERR_ILLEGAL_FRAME, "Illegal frame."},
	{IOERR_NO_RESOURCES, "No resources."},
	{IOERR_ILLEGAL_LENGTH, "Illegal length."},
	{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
	{IOERR_ABORT_IN_PROGRESS, "Abort in progess."},
	{IOERR_ABORT_REQUESTED, "Abort requested."},
	{IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
	{IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
	{IOERR_RING_RESET, "Ring reset."},
	{IOERR_LINK_DOWN, "Link down."},
	{IOERR_CORRUPTED_DATA, "Corrupted data."},
	{IOERR_CORRUPTED_RPI, "Corrupted RPI."},
	{IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
	{IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
	{IOERR_DUP_FRAME, "Duplicate frame."},
	{IOERR_LINK_CONTROL_FRAME, "Link control frame."},
	{IOERR_BAD_HOST_ADDRESS, "Bad host address."},
	{IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
	{IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
	{IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
	{IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
	{IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
	{IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
	{IOERR_XRIBUF_MISSING, "XRI buffer missing"},
	{IOERR_ROFFSET_INVAL, "Relative offset invalid."},
	{IOERR_ROFFSET_MISSING, "Relative offset missing."},
	{IOERR_INSUF_BUFFER, "Buffer too small."},
	{IOERR_MISSING_SI, "ELS frame missing SI"},
	{IOERR_MISSING_ES, "Exhausted burst without ES"},
	{IOERR_INCOMP_XFER, "Transfer incomplete."},
	{IOERR_ABORT_TIMEOUT, "Abort timeout."}

};	/* emlxs_error_table */


/* IOSTAT_* completion-status code to printable-string map. */
emlxs_table_t emlxs_state_table[] = {
	{IOSTAT_SUCCESS, "Success."},
	{IOSTAT_FCP_RSP_ERROR, "FCP response error."},
	{IOSTAT_REMOTE_STOP, "Remote stop."},
	{IOSTAT_LOCAL_REJECT, "Local reject."},
	{IOSTAT_NPORT_RJT, "NPort reject."},
	{IOSTAT_FABRIC_RJT, "Fabric reject."},
	{IOSTAT_NPORT_BSY, "Nport busy."},
	{IOSTAT_FABRIC_BSY, "Fabric busy."},
	{IOSTAT_INTERMED_RSP, "Intermediate response."},
	{IOSTAT_LS_RJT, "LS reject."},
	{IOSTAT_CMD_REJECT, "Cmd reject."},
	{IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
	{IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
	{IOSTAT_DATA_UNDERRUN, "Data underrun."},
	{IOSTAT_DATA_OVERRUN, "Data overrun."},

};	/* emlxs_state_table */


#ifdef MENLO_SUPPORT
/* Menlo (FCoE converter) command opcode to name map. */
emlxs_table_t emlxs_menlo_cmd_table[] = {
	{MENLO_CMD_INITIALIZE, "MENLO_INIT"},
	{MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
	{MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
	{MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
	{MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
	{MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},

	{MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
	{MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
	{MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
	{MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
	{MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
	{MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
	{MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
	{MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
	{MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},

	{MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
	{MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
	{MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},

	{MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
	{MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},

	{MENLO_CMD_RESET, "MENLO_RESET"},
	{MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}

};	/* emlxs_menlo_cmd_table */

/* Menlo response/error code to name map. */
emlxs_table_t emlxs_menlo_rsp_table[] = {
	{MENLO_RSP_SUCCESS, "SUCCESS"},
	{MENLO_ERR_FAILED, "FAILED"},
	{MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
	{MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
	{MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
	{MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
	{MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
	{MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
	{MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
	{MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
	{MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
	{MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
	{MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
	{MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
	{MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
	{MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
	{MENLO_ERR_BUSY, "BUSY"},

};	/* emlxs_menlo_rsp_table */

#endif /* MENLO_SUPPORT */


/* FC Management Service (MS) command code to name map. */
emlxs_table_t emlxs_mscmd_table[] = {
	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
	{MS_GTIN, "MS_GTIN"},
	{MS_GIEL, "MS_GIEL"},
	{MS_GIET, "MS_GIET"},
	{MS_GDID, "MS_GDID"},
	{MS_GMID, "MS_GMID"},
	{MS_GFN, "MS_GFN"},
	{MS_GIELN, "MS_GIELN"},
	{MS_GMAL, "MS_GMAL"},
	{MS_GIEIL, "MS_GIEIL"},
	{MS_GPL, "MS_GPL"},
	{MS_GPT, "MS_GPT"},
	{MS_GPPN, "MS_GPPN"},
	{MS_GAPNL, "MS_GAPNL"},
	{MS_GPS, "MS_GPS"},
	{MS_GPSC, "MS_GPSC"},
	{MS_GATIN, "MS_GATIN"},
	{MS_GSES, "MS_GSES"},
	{MS_GPLNL, "MS_GPLNL"},
	{MS_GPLT, "MS_GPLT"},
	{MS_GPLML, "MS_GPLML"},
	{MS_GPAB, "MS_GPAB"},
	{MS_GNPL, "MS_GNPL"},
	{MS_GPNL, "MS_GPNL"},
	{MS_GPFCP, "MS_GPFCP"},
	{MS_GPLI, "MS_GPLI"},
	{MS_GNID, "MS_GNID"},
	{MS_RIELN, "MS_RIELN"},
	{MS_RPL, "MS_RPL"},
	{MS_RPLN, "MS_RPLN"},
	{MS_RPLT, "MS_RPLT"},
	{MS_RPLM, "MS_RPLM"},
	{MS_RPAB, "MS_RPAB"},
	{MS_RPFCP, "MS_RPFCP"},
	{MS_RPLI, "MS_RPLI"},
	{MS_DPL, "MS_DPL"},
	{MS_DPLN, "MS_DPLN"},
	{MS_DPLM, "MS_DPLM"},
	{MS_DPLML, "MS_DPLML"},
	{MS_DPLI, "MS_DPLI"},
	{MS_DPAB, "MS_DPAB"},
	{MS_DPALL, "MS_DPALL"}

};	/* emlxs_mscmd_table */


/* CT Name Server (SLI_CTNS_*) command code to name map. */
emlxs_table_t emlxs_ctcmd_table[] = {
	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
	{SLI_CTNS_GA_NXT, "GA_NXT"},
	{SLI_CTNS_GPN_ID, "GPN_ID"},
	{SLI_CTNS_GNN_ID, "GNN_ID"},
	{SLI_CTNS_GCS_ID, "GCS_ID"},
	{SLI_CTNS_GFT_ID, "GFT_ID"},
	{SLI_CTNS_GSPN_ID, "GSPN_ID"},
	{SLI_CTNS_GPT_ID, "GPT_ID"},
	{SLI_CTNS_GID_PN, "GID_PN"},
	{SLI_CTNS_GID_NN, "GID_NN"},
	{SLI_CTNS_GIP_NN, "GIP_NN"},
	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
	{SLI_CTNS_GNN_IP, "GNN_IP"},
	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
	{SLI_CTNS_GID_FT, "GID_FT"},
	{SLI_CTNS_GID_PT, "GID_PT"},
	{SLI_CTNS_RPN_ID, "RPN_ID"},
	{SLI_CTNS_RNN_ID, "RNN_ID"},
	{SLI_CTNS_RCS_ID, "RCS_ID"},
	{SLI_CTNS_RFT_ID, "RFT_ID"},
	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
	{SLI_CTNS_RPT_ID, "RPT_ID"},
	{SLI_CTNS_RIP_NN, "RIP_NN"},
	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
	{SLI_CTNS_DA_ID, "DA_ID"},
	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */

};	/* emlxs_ctcmd_table */



/* Remote-management (CT_OP_*) command code to name map. */
emlxs_table_t emlxs_rmcmd_table[] = {
	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
	{CT_OP_GSAT, "RM_GSAT"},
	{CT_OP_GHAT, "RM_GHAT"},
	{CT_OP_GPAT, "RM_GPAT"},
	{CT_OP_GDAT, "RM_GDAT"},
	{CT_OP_GPST, "RM_GPST"},
	{CT_OP_GDP, "RM_GDP"},
	{CT_OP_GDPG, "RM_GDPG"},
	{CT_OP_GEPS, "RM_GEPS"},
	{CT_OP_GLAT, "RM_GLAT"},
	{CT_OP_SSAT, "RM_SSAT"},
	{CT_OP_SHAT, "RM_SHAT"},
	{CT_OP_SPAT, "RM_SPAT"},
	{CT_OP_SDAT, "RM_SDAT"},
	{CT_OP_SDP, "RM_SDP"},
	{CT_OP_SBBS, "RM_SBBS"},
	{CT_OP_RPST, "RM_RPST"},
	{CT_OP_VFW, "RM_VFW"},
	{CT_OP_DFW, "RM_DFW"},
	{CT_OP_RES, "RM_RES"},
	{CT_OP_RHD, "RM_RHD"},
	{CT_OP_UFW, "RM_UFW"},
	{CT_OP_RDP, "RM_RDP"},
	{CT_OP_GHDR, "RM_GHDR"},
	{CT_OP_CHD, "RM_CHD"},
	{CT_OP_SSR, "RM_SSR"},
	{CT_OP_RSAT, "RM_RSAT"},
	{CT_OP_WSAT, "RM_WSAT"},
	{CT_OP_RSAH, "RM_RSAH"},
	{CT_OP_WSAH, "RM_WSAH"},
	{CT_OP_RACT, "RM_RACT"},
	{CT_OP_WACT, "RM_WACT"},
	{CT_OP_RKT, "RM_RKT"},
	{CT_OP_WKT, "RM_WKT"},
	{CT_OP_SSC, "RM_SSC"},
	{CT_OP_QHBA, "RM_QHBA"},
	{CT_OP_GST, "RM_GST"},
	{CT_OP_GFTM, "RM_GFTM"},
	{CT_OP_SRL, "RM_SRL"},
	{CT_OP_SI, "RM_SI"},
	{CT_OP_SRC, "RM_SRC"},
	{CT_OP_GPB, "RM_GPB"},
	{CT_OP_SPB, "RM_SPB"},
	{CT_OP_RPB, "RM_RPB"},
	{CT_OP_RAPB, "RM_RAPB"},
	{CT_OP_GBC, "RM_GBC"},
	{CT_OP_GBS, "RM_GBS"},
	{CT_OP_SBS,
"RM_SBS"}, 915 {CT_OP_GANI, "RM_GANI"}, 916 {CT_OP_GRV, "RM_GRV"}, 917 {CT_OP_GAPBS, "RM_GAPBS"}, 918 {CT_OP_APBC, "RM_APBC"}, 919 {CT_OP_GDT, "RM_GDT"}, 920 {CT_OP_GDLMI, "RM_GDLMI"}, 921 {CT_OP_GANA, "RM_GANA"}, 922 {CT_OP_GDLV, "RM_GDLV"}, 923 {CT_OP_GWUP, "RM_GWUP"}, 924 {CT_OP_GLM, "RM_GLM"}, 925 {CT_OP_GABS, "RM_GABS"}, 926 {CT_OP_SABS, "RM_SABS"}, 927 {CT_OP_RPR, "RM_RPR"}, 928 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 929 930 }; /* emlxs_rmcmd_table */ 931 932 933 emlxs_table_t emlxs_elscmd_table[] = { 934 {ELS_CMD_ACC, "ACC"}, 935 {ELS_CMD_LS_RJT, "LS_RJT"}, 936 {ELS_CMD_PLOGI, "PLOGI"}, 937 {ELS_CMD_FLOGI, "FLOGI"}, 938 {ELS_CMD_LOGO, "LOGO"}, 939 {ELS_CMD_ABTX, "ABTX"}, 940 {ELS_CMD_RCS, "RCS"}, 941 {ELS_CMD_RES, "RES"}, 942 {ELS_CMD_RSS, "RSS"}, 943 {ELS_CMD_RSI, "RSI"}, 944 {ELS_CMD_ESTS, "ESTS"}, 945 {ELS_CMD_ESTC, "ESTC"}, 946 {ELS_CMD_ADVC, "ADVC"}, 947 {ELS_CMD_RTV, "RTV"}, 948 {ELS_CMD_RLS, "RLS"}, 949 {ELS_CMD_ECHO, "ECHO"}, 950 {ELS_CMD_TEST, "TEST"}, 951 {ELS_CMD_RRQ, "RRQ"}, 952 {ELS_CMD_PRLI, "PRLI"}, 953 {ELS_CMD_PRLO, "PRLO"}, 954 {ELS_CMD_SCN, "SCN"}, 955 {ELS_CMD_TPLS, "TPLS"}, 956 {ELS_CMD_GPRLO, "GPRLO"}, 957 {ELS_CMD_GAID, "GAID"}, 958 {ELS_CMD_FACT, "FACT"}, 959 {ELS_CMD_FDACT, "FDACT"}, 960 {ELS_CMD_NACT, "NACT"}, 961 {ELS_CMD_NDACT, "NDACT"}, 962 {ELS_CMD_QoSR, "QoSR"}, 963 {ELS_CMD_RVCS, "RVCS"}, 964 {ELS_CMD_PDISC, "PDISC"}, 965 {ELS_CMD_FDISC, "FDISC"}, 966 {ELS_CMD_ADISC, "ADISC"}, 967 {ELS_CMD_FARP, "FARP"}, 968 {ELS_CMD_FARPR, "FARPR"}, 969 {ELS_CMD_FAN, "FAN"}, 970 {ELS_CMD_RSCN, "RSCN"}, 971 {ELS_CMD_SCR, "SCR"}, 972 {ELS_CMD_LINIT, "LINIT"}, 973 {ELS_CMD_RNID, "RNID"}, 974 {ELS_CMD_AUTH, "AUTH"} 975 976 }; /* emlxs_elscmd_table */ 977 978 979 /* 980 * 981 * Device Driver Entry Routines 982 * 983 */ 984 985 #ifdef MODSYM_SUPPORT 986 static void emlxs_fca_modclose(); 987 static int emlxs_fca_modopen(); 988 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */ 989 990 static int 991 emlxs_fca_modopen() 992 { 993 
int err; 994 995 if (emlxs_modsym.mod_fctl) { 996 return (0); 997 } 998 999 /* Leadville (fctl) */ 1000 err = 0; 1001 emlxs_modsym.mod_fctl = 1002 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err); 1003 if (!emlxs_modsym.mod_fctl) { 1004 cmn_err(CE_WARN, 1005 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d", 1006 DRIVER_NAME, err); 1007 1008 goto failed; 1009 } 1010 1011 err = 0; 1012 /* Check if the fctl fc_fca_attach is present */ 1013 emlxs_modsym.fc_fca_attach = 1014 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", 1015 &err); 1016 if ((void *)emlxs_modsym.fc_fca_attach == NULL) { 1017 cmn_err(CE_WARN, 1018 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME); 1019 goto failed; 1020 } 1021 1022 err = 0; 1023 /* Check if the fctl fc_fca_detach is present */ 1024 emlxs_modsym.fc_fca_detach = 1025 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", 1026 &err); 1027 if ((void *)emlxs_modsym.fc_fca_detach == NULL) { 1028 cmn_err(CE_WARN, 1029 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME); 1030 goto failed; 1031 } 1032 1033 err = 0; 1034 /* Check if the fctl fc_fca_init is present */ 1035 emlxs_modsym.fc_fca_init = 1036 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err); 1037 if ((void *)emlxs_modsym.fc_fca_init == NULL) { 1038 cmn_err(CE_WARN, 1039 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME); 1040 goto failed; 1041 } 1042 1043 return (0); 1044 1045 failed: 1046 1047 emlxs_fca_modclose(); 1048 1049 return (1); 1050 1051 1052 } /* emlxs_fca_modopen() */ 1053 1054 1055 static void 1056 emlxs_fca_modclose() 1057 { 1058 if (emlxs_modsym.mod_fctl) { 1059 (void) ddi_modclose(emlxs_modsym.mod_fctl); 1060 emlxs_modsym.mod_fctl = 0; 1061 } 1062 1063 emlxs_modsym.fc_fca_attach = NULL; 1064 emlxs_modsym.fc_fca_detach = NULL; 1065 emlxs_modsym.fc_fca_init = NULL; 1066 1067 return; 1068 1069 } /* emlxs_fca_modclose() */ 1070 1071 #endif /* MODSYM_SUPPORT */ 1072 1073 1074 1075 /* 1076 * Global driver 
initialization, called once when driver is loaded
 */
int
_init(void)
{
	int ret;
	char buf[64];

	/*
	 * First init call for this driver,
	 * so initialize the emlxs_dev_ctl structure.
	 */
	bzero(&emlxs_device, sizeof (emlxs_device));

#ifdef MODSYM_SUPPORT
	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
#endif /* MODSYM_SUPPORT */

	/* Global device lock; protects emlxs_device and emlxs_modsym */
	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);

	/* Record load time (lbolt for logging, wall clock for the driver) */
	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
	emlxs_device.drv_timestamp = ddi_get_time();

	/* Mark every board instance slot as unused ((uint32_t)-1) */
	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
		emlxs_instance[ret] = (uint32_t)-1;
	}

	/*
	 * Provide for one ddiinst of the emlxs_dev_ctl structure
	 * for each possible board in the system.
	 */
	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
		cmn_err(CE_WARN,
		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
		    DRIVER_NAME, ret);

		return (ret);
	}

#ifdef MODSYM_SUPPORT
	/* Open SFS */
	(void) emlxs_fca_modopen();
#endif /* MODSYM_SUPPORT */

	/* Setup devops for SFS */
	MODSYM(fc_fca_init)(&emlxs_ops);

	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
		/* Unwind everything set up above on failure */
		(void) ddi_soft_state_fini(&emlxs_soft_state);
#ifdef MODSYM_SUPPORT
		/* Close SFS */
		emlxs_fca_modclose();
#endif /* MODSYM_SUPPORT */

		return (ret);
	}

#ifdef SAN_DIAG_SUPPORT
	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
#endif /* SAN_DIAG_SUPPORT */

	/* ret is 0 (success from mod_install) here */
	return (ret);

} /* _init() */


/*
 * Called when driver is unloaded.
 */
int
_fini(void)
{
	int ret;

	/* mod_remove() fails (e.g. EBUSY) if the module is still in use */
	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
		return (ret);
	}
#ifdef MODSYM_SUPPORT
	/* Close SFS */
	emlxs_fca_modclose();
#endif /* MODSYM_SUPPORT */

	/*
	 * Destroy the soft state structure
	 */
	(void) ddi_soft_state_fini(&emlxs_soft_state);

	/* Destroy the global device lock */
	mutex_destroy(&emlxs_device.lock);

#ifdef SAN_DIAG_SUPPORT
	mutex_destroy(&sd_bucket_mutex);
#endif /* SAN_DIAG_SUPPORT */

	/* ret is 0 (success from mod_remove) here */
	return (ret);

} /* _fini() */


/* Return module information via mod_info() (see _info(9E)) */
int
_info(struct modinfo *modinfop)
{

	return (mod_info(&emlxs_modlinkage, modinfop));

} /* _info() */


/*
 * Attach an ddiinst of an emlx host adapter.
 * Allocate data structures, initialize the adapter and we're ready to fly.
 */
static int
emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	int ddiinst;
	int emlxinst;
	int rval;

	switch (cmd) {
	case DDI_ATTACH:
		/* If successful this will set EMLXS_PM_IN_ATTACH */
		rval = emlxs_hba_attach(dip);
		break;

	case DDI_PM_RESUME:
		/* This will resume the driver */
		rval = emlxs_pm_raise_power(dip);
		break;

	case DDI_RESUME:
		/* This will resume the driver */
		rval = emlxs_hba_resume(dip);
		break;

	default:
		rval = DDI_FAILURE;
	}

	if (rval == DDI_SUCCESS) {
		ddiinst = ddi_get_instance(dip);
		emlxinst = emlxs_get_instance(ddiinst);
		hba = emlxs_device.hba[emlxinst];

		/* hba may be NULL or -1 if attach did not fully register */
		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {

			/* Enable driver dump feature */
			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DUMP_SAFE;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	return (rval);

} /* emlxs_attach() */


/*
 * Detach/prepare driver to unload
(see detach(9E)). 1241 */ 1242 static int 1243 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1244 { 1245 emlxs_hba_t *hba; 1246 emlxs_port_t *port; 1247 int ddiinst; 1248 int emlxinst; 1249 int rval; 1250 1251 ddiinst = ddi_get_instance(dip); 1252 emlxinst = emlxs_get_instance(ddiinst); 1253 hba = emlxs_device.hba[emlxinst]; 1254 1255 if (hba == NULL) { 1256 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME); 1257 1258 return (DDI_FAILURE); 1259 } 1260 1261 if (hba == (emlxs_hba_t *)-1) { 1262 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.", 1263 DRIVER_NAME); 1264 1265 return (DDI_FAILURE); 1266 } 1267 1268 port = &PPORT; 1269 rval = DDI_SUCCESS; 1270 1271 /* Check driver dump */ 1272 mutex_enter(&EMLXS_PORT_LOCK); 1273 1274 if (hba->flag & FC_DUMP_ACTIVE) { 1275 mutex_exit(&EMLXS_PORT_LOCK); 1276 1277 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1278 "emlxs_detach: Driver busy. Driver dump active."); 1279 1280 return (DDI_FAILURE); 1281 } 1282 1283 #ifdef SFCT_SUPPORT 1284 if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) || 1285 (port->fct_flags & FCT_STATE_NOT_ACKED))) { 1286 mutex_exit(&EMLXS_PORT_LOCK); 1287 1288 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1289 "emlxs_detach: Driver busy. Target mode active."); 1290 1291 return (DDI_FAILURE); 1292 } 1293 #endif /* SFCT_SUPPORT */ 1294 1295 if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) { 1296 mutex_exit(&EMLXS_PORT_LOCK); 1297 1298 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1299 "emlxs_detach: Driver busy. 
Initiator mode active."); 1300 1301 return (DDI_FAILURE); 1302 } 1303 1304 hba->flag &= ~FC_DUMP_SAFE; 1305 1306 mutex_exit(&EMLXS_PORT_LOCK); 1307 1308 switch (cmd) { 1309 case DDI_DETACH: 1310 1311 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1312 "DDI_DETACH"); 1313 1314 rval = emlxs_hba_detach(dip); 1315 1316 if (rval != DDI_SUCCESS) { 1317 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1318 "Unable to detach."); 1319 } 1320 break; 1321 1322 1323 case DDI_PM_SUSPEND: 1324 1325 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1326 "DDI_PM_SUSPEND"); 1327 1328 /* This will suspend the driver */ 1329 rval = emlxs_pm_lower_power(dip); 1330 1331 if (rval != DDI_SUCCESS) { 1332 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1333 "Unable to lower power."); 1334 } 1335 1336 break; 1337 1338 1339 case DDI_SUSPEND: 1340 1341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1342 "DDI_SUSPEND"); 1343 1344 /* Suspend the driver */ 1345 rval = emlxs_hba_suspend(dip); 1346 1347 if (rval != DDI_SUCCESS) { 1348 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1349 "Unable to suspend driver."); 1350 } 1351 break; 1352 1353 1354 default: 1355 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. 
cmd=%x", 1356 DRIVER_NAME, cmd); 1357 rval = DDI_FAILURE; 1358 } 1359 1360 if (rval == DDI_FAILURE) { 1361 /* Re-Enable driver dump feature */ 1362 mutex_enter(&EMLXS_PORT_LOCK); 1363 hba->flag |= FC_DUMP_SAFE; 1364 mutex_exit(&EMLXS_PORT_LOCK); 1365 } 1366 1367 return (rval); 1368 1369 } /* emlxs_detach() */ 1370 1371 1372 /* EMLXS_PORT_LOCK must be held when calling this */ 1373 extern void 1374 emlxs_port_init(emlxs_port_t *port) 1375 { 1376 emlxs_hba_t *hba = HBA; 1377 1378 /* Initialize the base node */ 1379 bzero((caddr_t)&port->node_base, sizeof (NODELIST)); 1380 port->node_base.nlp_Rpi = 0; 1381 port->node_base.nlp_DID = 0xffffff; 1382 port->node_base.nlp_list_next = NULL; 1383 port->node_base.nlp_list_prev = NULL; 1384 port->node_base.nlp_active = 1; 1385 port->node_base.nlp_base = 1; 1386 port->node_count = 0; 1387 1388 if (!(port->flag & EMLXS_PORT_ENABLE)) { 1389 uint8_t dummy_wwn[8] = 1390 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 1391 1392 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn, 1393 sizeof (NAME_TYPE)); 1394 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn, 1395 sizeof (NAME_TYPE)); 1396 } 1397 1398 if (!(port->flag & EMLXS_PORT_CONFIG)) { 1399 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256); 1400 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256); 1401 } 1402 1403 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam, 1404 sizeof (SERV_PARM)); 1405 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName, 1406 sizeof (NAME_TYPE)); 1407 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName, 1408 sizeof (NAME_TYPE)); 1409 1410 return; 1411 1412 } /* emlxs_port_init() */ 1413 1414 1415 void 1416 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba) 1417 { 1418 #define NXT_PTR_OFF PCI_BYTE 1419 #define PCIE_DEVCTL_OFF 0x8 1420 #define PCIE_CAP_ID 0x10 1421 1422 uint8_t cap_ptr; 1423 uint8_t cap_id; 1424 uint16_t tmp16; 1425 1426 cap_ptr = ddi_get8(hba->pci_acc_handle, 1427 (uint8_t *)(hba->pci_addr + PCI_CAP_POINTER)); 
1428 1429 while (cap_ptr) { 1430 cap_id = ddi_get8(hba->pci_acc_handle, 1431 (uint8_t *)(hba->pci_addr + cap_ptr)); 1432 1433 if (cap_id == PCIE_CAP_ID) { 1434 break; 1435 } 1436 cap_ptr = ddi_get8(hba->pci_acc_handle, 1437 (uint8_t *)(hba->pci_addr + cap_ptr + NXT_PTR_OFF)); 1438 } 1439 1440 /* PCI Express Capability Register Set */ 1441 /* Turn off the Correctable Error Reporting */ 1442 /* (the Device Control Register, bit 0). */ 1443 1444 if (cap_id == PCIE_CAP_ID) { 1445 tmp16 = ddi_get16(hba->pci_acc_handle, 1446 (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF)); 1447 tmp16 &= ~1; 1448 (void) ddi_put16(hba->pci_acc_handle, 1449 (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF), 1450 tmp16); 1451 } 1452 } 1453 1454 /* 1455 * emlxs_bind_port 1456 * 1457 * Arguments: 1458 * 1459 * dip: the dev_info pointer for the ddiinst 1460 * port_info: pointer to info handed back to the transport 1461 * bind_info: pointer to info from the transport 1462 * 1463 * Return values: a port handle for this port, NULL for failure 1464 * 1465 */ 1466 static opaque_t 1467 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info, 1468 fc_fca_bind_info_t *bind_info) 1469 { 1470 emlxs_hba_t *hba; 1471 emlxs_port_t *port; 1472 emlxs_port_t *vport; 1473 int ddiinst; 1474 emlxs_vpd_t *vpd; 1475 emlxs_config_t *cfg; 1476 char *dptr; 1477 char buffer[16]; 1478 uint32_t length; 1479 uint32_t len; 1480 char topology[32]; 1481 char linkspeed[32]; 1482 1483 ddiinst = ddi_get_instance(dip); 1484 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 1485 port = &PPORT; 1486 1487 ddiinst = hba->ddiinst; 1488 vpd = &VPD; 1489 cfg = &CFG; 1490 1491 mutex_enter(&EMLXS_PORT_LOCK); 1492 1493 if (bind_info->port_num > 0) { 1494 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1495 if (!(hba->flag & FC_NPIV_ENABLED) || 1496 !(bind_info->port_npiv) || 1497 (bind_info->port_num > hba->vpi_max)) 1498 #elif (EMLXS_MODREV >= EMLXS_MODREV3) 1499 if (!(hba->flag & FC_NPIV_ENABLED) || 1500 
(bind_info->port_num > hba->vpi_high)) 1501 #endif 1502 { 1503 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1504 "emlxs_port_bind: Port %d not supported.", 1505 bind_info->port_num); 1506 1507 mutex_exit(&EMLXS_PORT_LOCK); 1508 1509 port_info->pi_error = FC_OUTOFBOUNDS; 1510 return (NULL); 1511 } 1512 } 1513 1514 /* Get true port pointer */ 1515 port = &VPORT(bind_info->port_num); 1516 1517 if (port->tgt_mode) { 1518 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1519 "emlxs_port_bind: Port %d is in target mode.", 1520 bind_info->port_num); 1521 1522 mutex_exit(&EMLXS_PORT_LOCK); 1523 1524 port_info->pi_error = FC_OUTOFBOUNDS; 1525 return (NULL); 1526 } 1527 1528 if (!port->ini_mode) { 1529 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1530 "emlxs_port_bind: Port %d is not in initiator mode.", 1531 bind_info->port_num); 1532 1533 mutex_exit(&EMLXS_PORT_LOCK); 1534 1535 port_info->pi_error = FC_OUTOFBOUNDS; 1536 return (NULL); 1537 } 1538 1539 /* Make sure the port is not already bound to the transport */ 1540 if (port->flag & EMLXS_PORT_BOUND) { 1541 1542 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1543 "emlxs_port_bind: Port %d already bound. 
flag=%x", 1544 bind_info->port_num, port->flag); 1545 1546 mutex_exit(&EMLXS_PORT_LOCK); 1547 1548 port_info->pi_error = FC_ALREADY; 1549 return (NULL); 1550 } 1551 1552 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1553 "fca_bind_port: Port %d: port_info=%p bind_info=%p", 1554 bind_info->port_num, port_info, bind_info); 1555 1556 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1557 if (bind_info->port_npiv) { 1558 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn, 1559 sizeof (NAME_TYPE)); 1560 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn, 1561 sizeof (NAME_TYPE)); 1562 if (port->snn[0] == 0) { 1563 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 1564 256); 1565 } 1566 1567 if (port->spn[0] == 0) { 1568 (void) sprintf((caddr_t)port->spn, "%s VPort-%d", 1569 (caddr_t)hba->spn, port->vpi); 1570 } 1571 port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 1572 } 1573 #endif /* >= EMLXS_MODREV5 */ 1574 1575 /* 1576 * Restricted login should apply both physical and 1577 * virtual ports. 
1578 */ 1579 if (cfg[CFG_VPORT_RESTRICTED].current) { 1580 port->flag |= EMLXS_PORT_RESTRICTED; 1581 } 1582 1583 /* Perform generic port initialization */ 1584 emlxs_port_init(port); 1585 1586 /* Perform SFS specific initialization */ 1587 port->ulp_handle = bind_info->port_handle; 1588 port->ulp_statec_cb = bind_info->port_statec_cb; 1589 port->ulp_unsol_cb = bind_info->port_unsol_cb; 1590 port->ub_count = EMLXS_UB_TOKEN_OFFSET; 1591 port->ub_pool = NULL; 1592 1593 /* Update the port info structure */ 1594 1595 /* Set the topology and state */ 1596 if ((hba->state < FC_LINK_UP) || 1597 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) || 1598 !(hba->flag & FC_NPIV_SUPPORTED)))) { 1599 port_info->pi_port_state = FC_STATE_OFFLINE; 1600 port_info->pi_topology = FC_TOP_UNKNOWN; 1601 } 1602 #ifdef MENLO_SUPPORT 1603 else if (hba->flag & FC_MENLO_MODE) { 1604 port_info->pi_port_state = FC_STATE_OFFLINE; 1605 port_info->pi_topology = FC_TOP_UNKNOWN; 1606 } 1607 #endif /* MENLO_SUPPORT */ 1608 else { 1609 /* Check for loop topology */ 1610 if (hba->topology == TOPOLOGY_LOOP) { 1611 port_info->pi_port_state = FC_STATE_LOOP; 1612 (void) strcpy(topology, ", loop"); 1613 1614 if (hba->flag & FC_FABRIC_ATTACHED) { 1615 port_info->pi_topology = FC_TOP_PUBLIC_LOOP; 1616 } else { 1617 port_info->pi_topology = FC_TOP_PRIVATE_LOOP; 1618 } 1619 } else { 1620 port_info->pi_topology = FC_TOP_FABRIC; 1621 port_info->pi_port_state = FC_STATE_ONLINE; 1622 (void) strcpy(topology, ", fabric"); 1623 } 1624 1625 /* Set the link speed */ 1626 switch (hba->linkspeed) { 1627 case 0: 1628 (void) strcpy(linkspeed, "Gb"); 1629 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1630 break; 1631 1632 case LA_1GHZ_LINK: 1633 (void) strcpy(linkspeed, "1Gb"); 1634 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1635 break; 1636 case LA_2GHZ_LINK: 1637 (void) strcpy(linkspeed, "2Gb"); 1638 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED; 1639 break; 1640 case LA_4GHZ_LINK: 1641 (void) 
strcpy(linkspeed, "4Gb"); 1642 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED; 1643 break; 1644 case LA_8GHZ_LINK: 1645 (void) strcpy(linkspeed, "8Gb"); 1646 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED; 1647 break; 1648 case LA_10GHZ_LINK: 1649 (void) strcpy(linkspeed, "10Gb"); 1650 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED; 1651 break; 1652 default: 1653 (void) sprintf(linkspeed, "unknown(0x%x)", 1654 hba->linkspeed); 1655 break; 1656 } 1657 1658 /* Adjusting port context for link up messages */ 1659 vport = port; 1660 port = &PPORT; 1661 if (vport->vpi == 0) { 1662 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s", 1663 linkspeed, topology); 1664 } else if (!(hba->flag & FC_NPIV_LINKUP)) { 1665 hba->flag |= FC_NPIV_LINKUP; 1666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg, 1667 "%s%s", linkspeed, topology); 1668 } 1669 port = vport; 1670 1671 } 1672 1673 /* PCIE Correctable Error Reporting workaround */ 1674 if ((hba->model_info.chip == EMLXS_BE_CHIP) && 1675 (bind_info->port_num == 0)) { 1676 emlxs_disable_pcie_ce_err(hba); 1677 } 1678 1679 /* Save initial state */ 1680 port->ulp_statec = port_info->pi_port_state; 1681 1682 /* 1683 * The transport needs a copy of the common service parameters 1684 * for this port. The transport can get any updates through 1685 * the getcap entry point. 1686 */ 1687 bcopy((void *) &port->sparam, 1688 (void *) &port_info->pi_login_params.common_service, 1689 sizeof (SERV_PARM)); 1690 1691 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 1692 /* Swap the service parameters for ULP */ 1693 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params. 
1694 common_service); 1695 #endif /* EMLXS_MODREV2X */ 1696 1697 port_info->pi_login_params.common_service.btob_credit = 0xffff; 1698 1699 bcopy((void *) &port->wwnn, 1700 (void *) &port_info->pi_login_params.node_ww_name, 1701 sizeof (NAME_TYPE)); 1702 1703 bcopy((void *) &port->wwpn, 1704 (void *) &port_info->pi_login_params.nport_ww_name, 1705 sizeof (NAME_TYPE)); 1706 1707 /* 1708 * We need to turn off CLASS2 support. 1709 * Otherwise, FC transport will use CLASS2 as default class 1710 * and never try with CLASS3. 1711 */ 1712 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1713 #if (EMLXS_MODREVX >= EMLXS_MODREV3X) 1714 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) { 1715 port_info->pi_login_params.class_1.class_opt &= ~0x0080; 1716 } 1717 1718 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) { 1719 port_info->pi_login_params.class_2.class_opt &= ~0x0080; 1720 } 1721 #else /* EMLXS_SPARC or EMLXS_MODREV2X */ 1722 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) { 1723 port_info->pi_login_params.class_1.class_opt &= ~0x8000; 1724 } 1725 1726 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) { 1727 port_info->pi_login_params.class_2.class_opt &= ~0x8000; 1728 } 1729 #endif /* >= EMLXS_MODREV3X */ 1730 #endif /* >= EMLXS_MODREV3 */ 1731 1732 1733 #if (EMLXS_MODREV <= EMLXS_MODREV2) 1734 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) { 1735 port_info->pi_login_params.class_1.data[0] &= ~0x80; 1736 } 1737 1738 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) { 1739 port_info->pi_login_params.class_2.data[0] &= ~0x80; 1740 } 1741 #endif /* <= EMLXS_MODREV2 */ 1742 1743 /* Additional parameters */ 1744 port_info->pi_s_id.port_id = port->did; 1745 port_info->pi_s_id.priv_lilp_posit = 0; 1746 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current; 1747 1748 /* Initialize the RNID parameters */ 1749 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params)); 1750 1751 (void) sprintf((char 
*)port_info->pi_rnid_params.params.global_id, 1752 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType, 1753 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 1754 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 1755 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 1756 1757 port_info->pi_rnid_params.params.unit_type = RNID_HBA; 1758 port_info->pi_rnid_params.params.port_id = port->did; 1759 port_info->pi_rnid_params.params.ip_version = RNID_IPV4; 1760 1761 /* Initialize the port attributes */ 1762 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs)); 1763 1764 (void) strcpy(port_info->pi_attrs.manufacturer, "Emulex"); 1765 1766 port_info->pi_rnid_params.status = FC_SUCCESS; 1767 1768 (void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num); 1769 1770 (void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)", 1771 vpd->fw_version, vpd->fw_label); 1772 1773 #ifdef EMLXS_I386 1774 (void) sprintf(port_info->pi_attrs.option_rom_version, 1775 "Boot:%s", vpd->boot_version); 1776 #else /* EMLXS_SPARC */ 1777 (void) sprintf(port_info->pi_attrs.option_rom_version, 1778 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version); 1779 #endif /* EMLXS_I386 */ 1780 1781 1782 (void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)", 1783 emlxs_version, emlxs_revision); 1784 1785 (void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME); 1786 1787 port_info->pi_attrs.vendor_specific_id = 1788 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX); 1789 1790 port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3); 1791 1792 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE; 1793 1794 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1795 1796 port_info->pi_rnid_params.params.num_attached = 0; 1797 1798 /* 1799 * Copy the serial number string (right most 16 chars) into the right 1800 * justified local buffer 1801 */ 1802 bzero(buffer, sizeof (buffer)); 1803 length = strlen(vpd->serial_num); 1804 len = (length > 16) ? 
16 : length; 1805 bcopy(&vpd->serial_num[(length - len)], 1806 &buffer[(sizeof (buffer) - len)], len); 1807 1808 port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index; 1809 1810 #endif /* >= EMLXS_MODREV5 */ 1811 1812 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLX_MODREV == EMLXS_MODREV4)) 1813 1814 port_info->pi_rnid_params.params.num_attached = 0; 1815 1816 if (hba->flag & FC_NPIV_ENABLED) { 1817 uint8_t byte; 1818 uint8_t *wwpn; 1819 uint32_t i; 1820 uint32_t j; 1821 1822 /* Copy the WWPN as a string into the local buffer */ 1823 wwpn = (uint8_t *)&hba->wwpn; 1824 for (i = 0; i < 16; i++) { 1825 byte = *wwpn++; 1826 j = ((byte & 0xf0) >> 4); 1827 if (j <= 9) { 1828 buffer[i] = 1829 (char)((uint8_t)'0' + (uint8_t)j); 1830 } else { 1831 buffer[i] = 1832 (char)((uint8_t)'A' + (uint8_t)(j - 1833 10)); 1834 } 1835 1836 i++; 1837 j = (byte & 0xf); 1838 if (j <= 9) { 1839 buffer[i] = 1840 (char)((uint8_t)'0' + (uint8_t)j); 1841 } else { 1842 buffer[i] = 1843 (char)((uint8_t)'A' + (uint8_t)(j - 1844 10)); 1845 } 1846 } 1847 1848 port_info->pi_attrs.hba_fru_details.port_index = port->vpi; 1849 } else { 1850 /* Copy the serial number string (right most 16 chars) */ 1851 /* into the right justified local buffer */ 1852 bzero(buffer, sizeof (buffer)); 1853 length = strlen(vpd->serial_num); 1854 len = (length > 16) ? 
16 : length; 1855 bcopy(&vpd->serial_num[(length - len)], 1856 &buffer[(sizeof (buffer) - len)], len); 1857 1858 port_info->pi_attrs.hba_fru_details.port_index = 1859 vpd->port_index; 1860 } 1861 1862 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */ 1863 1864 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1865 1866 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high; 1867 dptr[0] = buffer[0]; 1868 dptr[1] = buffer[1]; 1869 dptr[2] = buffer[2]; 1870 dptr[3] = buffer[3]; 1871 dptr[4] = buffer[4]; 1872 dptr[5] = buffer[5]; 1873 dptr[6] = buffer[6]; 1874 dptr[7] = buffer[7]; 1875 port_info->pi_attrs.hba_fru_details.high = 1876 LE_SWAP64(port_info->pi_attrs.hba_fru_details.high); 1877 1878 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low; 1879 dptr[0] = buffer[8]; 1880 dptr[1] = buffer[9]; 1881 dptr[2] = buffer[10]; 1882 dptr[3] = buffer[11]; 1883 dptr[4] = buffer[12]; 1884 dptr[5] = buffer[13]; 1885 dptr[6] = buffer[14]; 1886 dptr[7] = buffer[15]; 1887 port_info->pi_attrs.hba_fru_details.low = 1888 LE_SWAP64(port_info->pi_attrs.hba_fru_details.low); 1889 1890 #endif /* >= EMLXS_MODREV3 */ 1891 1892 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1893 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name, 1894 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN); 1895 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name, 1896 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN); 1897 #endif /* >= EMLXS_MODREV4 */ 1898 1899 (void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev); 1900 1901 /* Set the hba speed limit */ 1902 if (vpd->link_speed & LMT_10GB_CAPABLE) { 1903 port_info->pi_attrs.supported_speed |= 1904 FC_HBA_PORTSPEED_10GBIT; 1905 } 1906 if (vpd->link_speed & LMT_8GB_CAPABLE) { 1907 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT; 1908 } 1909 if (vpd->link_speed & LMT_4GB_CAPABLE) { 1910 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT; 1911 } 1912 if (vpd->link_speed & LMT_2GB_CAPABLE) { 1913 port_info->pi_attrs.supported_speed |= 
FC_HBA_PORTSPEED_2GBIT; 1914 } 1915 if (vpd->link_speed & LMT_1GB_CAPABLE) { 1916 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT; 1917 } 1918 1919 /* Set the hba model info */ 1920 (void) strcpy(port_info->pi_attrs.model, hba->model_info.model); 1921 (void) strcpy(port_info->pi_attrs.model_description, 1922 hba->model_info.model_desc); 1923 1924 1925 /* Log information */ 1926 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1927 "Bind info: port_num = %d", bind_info->port_num); 1928 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1929 "Bind info: port_handle = %p", bind_info->port_handle); 1930 1931 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1932 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1933 "Bind info: port_npiv = %d", bind_info->port_npiv); 1934 #endif /* >= EMLXS_MODREV5 */ 1935 1936 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1937 "Port info: pi_topology = %x", port_info->pi_topology); 1938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1939 "Port info: pi_error = %x", port_info->pi_error); 1940 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1941 "Port info: pi_port_state = %x", port_info->pi_port_state); 1942 1943 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1944 "Port info: port_id = %x", port_info->pi_s_id.port_id); 1945 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1946 "Port info: priv_lilp_posit = %x", 1947 port_info->pi_s_id.priv_lilp_posit); 1948 1949 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1950 "Port info: hard_addr = %x", 1951 port_info->pi_hard_addr.hard_addr); 1952 1953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1954 "Port info: rnid.status = %x", 1955 port_info->pi_rnid_params.status); 1956 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1957 "Port info: rnid.global_id = %16s", 1958 port_info->pi_rnid_params.params.global_id); 1959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1960 "Port info: rnid.unit_type = %x", 1961 port_info->pi_rnid_params.params.unit_type); 1962 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1963 "Port info: rnid.port_id = %x", 1964 port_info->pi_rnid_params.params.port_id); 1965 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1966 "Port info: rnid.num_attached = %x", 1967 port_info->pi_rnid_params.params.num_attached); 1968 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1969 "Port info: rnid.ip_version = %x", 1970 port_info->pi_rnid_params.params.ip_version); 1971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1972 "Port info: rnid.udp_port = %x", 1973 port_info->pi_rnid_params.params.udp_port); 1974 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1975 "Port info: rnid.ip_addr = %16s", 1976 port_info->pi_rnid_params.params.ip_addr); 1977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1978 "Port info: rnid.spec_id_resv = %x", 1979 port_info->pi_rnid_params.params.specific_id_resv); 1980 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1981 "Port info: rnid.topo_flags = %x", 1982 port_info->pi_rnid_params.params.topo_flags); 1983 1984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1985 "Port info: manufacturer = %s", 1986 port_info->pi_attrs.manufacturer); 1987 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1988 "Port info: serial_num = %s", 1989 port_info->pi_attrs.serial_number); 1990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1991 "Port info: model = %s", port_info->pi_attrs.model); 1992 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1993 "Port info: model_description = %s", 1994 port_info->pi_attrs.model_description); 1995 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1996 "Port info: hardware_version = %s", 1997 port_info->pi_attrs.hardware_version); 1998 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1999 "Port info: driver_version = %s", 2000 port_info->pi_attrs.driver_version); 2001 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2002 "Port info: option_rom_version = %s", 2003 port_info->pi_attrs.option_rom_version); 2004 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2005 "Port info: firmware_version = %s", 2006 port_info->pi_attrs.firmware_version); 2007 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2008 "Port info: driver_name = %s", 2009 port_info->pi_attrs.driver_name); 2010 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2011 "Port info: vendor_specific_id = %x", 2012 port_info->pi_attrs.vendor_specific_id); 2013 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2014 "Port info: supported_cos = %x", 2015 port_info->pi_attrs.supported_cos); 2016 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2017 "Port info: supported_speed = %x", 2018 port_info->pi_attrs.supported_speed); 2019 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2020 "Port info: max_frame_size = %x", 2021 port_info->pi_attrs.max_frame_size); 2022 2023 #if (EMLXS_MODREV >= EMLXS_MODREV3) 2024 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2025 "Port info: fru_port_index = %x", 2026 port_info->pi_attrs.hba_fru_details.port_index); 2027 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2028 "Port info: fru_high = %llx", 2029 port_info->pi_attrs.hba_fru_details.high); 2030 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2031 "Port info: fru_low = %llx", 2032 port_info->pi_attrs.hba_fru_details.low); 2033 #endif /* >= EMLXS_MODREV3 */ 2034 2035 #if (EMLXS_MODREV >= EMLXS_MODREV4) 2036 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2037 "Port info: sym_node_name = %s", 2038 port_info->pi_attrs.sym_node_name); 2039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2040 "Port info: sym_port_name = %s", 2041 port_info->pi_attrs.sym_port_name); 2042 #endif /* >= EMLXS_MODREV4 */ 2043 2044 /* Set the bound flag */ 2045 port->flag |= EMLXS_PORT_BOUND; 2046 hba->num_of_ports++; 2047 2048 mutex_exit(&EMLXS_PORT_LOCK); 2049 2050 return ((opaque_t)port); 2051 2052 } /* emlxs_bind_port() */ 2053 2054 2055 static void 2056 emlxs_unbind_port(opaque_t fca_port_handle) 2057 { 2058 emlxs_port_t *port = 
(emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_unbind_port: port=%p", port);

	/* Destroy & flush all port nodes, if they exist */
	if (port->node_count) {
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			(void) emlxs_sli4_unreg_all_rpi_by_port(port);
		} else {
			/* 0xffff = unregister every RPI on this port */
			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
		}
	}

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	/* Unregister the virtual port id if NPIV is active on this port */
	if ((hba->flag & FC_NPIV_ENABLED) &&
	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
		(void) emlxs_mb_unreg_vpi(port);
	}
#endif

	mutex_enter(&EMLXS_PORT_LOCK);

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		/* Already unbound; nothing to undo */
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	port->flag &= ~EMLXS_PORT_BOUND;
	hba->num_of_ports--;

	/* Clear all ULP callback state; the port is now offline to ULP */
	port->ulp_handle = 0;
	port->ulp_statec = FC_STATE_OFFLINE;
	port->ulp_statec_cb = NULL;
	port->ulp_unsol_cb = NULL;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_unbind_port() */


/*
 * Initialize the FCA-private area (emlxs_buf_t) of a newly allocated
 * ULP fc_packet_t.  Counterpart of emlxs_pkt_uninit().
 * Returns FC_SUCCESS, or FC_FAILURE if no private area is attached.
 */
/*ARGSUSED*/
extern int
emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;

	if (!sbp) {
		return (FC_FAILURE);
	}
	bzero((void *)sbp, sizeof (emlxs_buf_t));

	/* Per-packet mutex; destroyed in emlxs_pkt_uninit() */
	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
	sbp->pkt_flags =
	    PACKET_VALID | PACKET_ULP_OWNED;
	sbp->port = port;
	sbp->pkt = pkt;
	sbp->iocbq.sbp = sbp;

	return (FC_SUCCESS);

} /* emlxs_pkt_init() */



/*
 * Reset a packet's per-IO driver state prior to (re)submission:
 * clears bookkeeping in the emlxs_buf_t, decides polled vs. interrupt
 * completion, normalizes the timeout, and pre-clears the response buffer.
 */
static void
emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fc_packet_t *pkt = PRIV2PKT(sbp);
	uint32_t *iptr;

	mutex_enter(&sbp->mtx);

	/* Reinitialize */
	sbp->pkt = pkt;
	sbp->port = port;
	sbp->bmp = NULL;
	/* Keep only the lifetime flags; drop all per-IO state flags */
	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
	sbp->iotag = 0;
	sbp->ticks = 0;
	sbp->abort_attempts = 0;
	sbp->fpkt = NULL;
	sbp->flush_count = 0;
	sbp->next = NULL;

	if (!port->tgt_mode) {
		sbp->node = NULL;
		sbp->did = 0;
		sbp->lun = 0;
		sbp->class = 0;
		/* NOTE(review): duplicate of the previous assignment */
		sbp->class = 0;
		sbp->channel = NULL;
	}

	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
	sbp->iocbq.sbp = sbp;

	/* No-intr request, no completion callback, or panic => poll */
	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
	    ddi_in_panic()) {
		sbp->pkt_flags |= PACKET_POLLED;
	}

	/* Prepare the fc packet */
	pkt->pkt_state = FC_PKT_SUCCESS;
	pkt->pkt_reason = 0;
	pkt->pkt_action = 0;
	pkt->pkt_expln = 0;
	pkt->pkt_data_resid = 0;
	pkt->pkt_resp_resid = 0;

	/* Make sure all pkt's have a proper timeout */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		/* This disables all IOCB on chip timeouts */
		pkt->pkt_timeout = 0x80000000;
	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
		pkt->pkt_timeout = 60;
	}

	/* Clear the response buffer */
	if (pkt->pkt_rsplen) {
		/* Check for FCP commands */
		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
			/* FCP rsp: clear only words 2 and 3 */
			iptr = (uint32_t *)pkt->pkt_resp;
			iptr[2] = 0;
			iptr[3] = 0;
		} else {
			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
		}
	}

	mutex_exit(&sbp->mtx);

	return;

} /* emlxs_initialize_pkt() */



/*
 * We may not need this routine
 */
/*
 * Tear down the FCA-private area of a ULP fc_packet_t.
 * Counterpart of emlxs_pkt_init().  Returns FC_SUCCESS or FC_FAILURE.
 */
/*ARGSUSED*/
extern int
emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
{
	emlxs_buf_t *sbp = PKT2PRIV(pkt);

	if (!sbp) {
		return (FC_FAILURE);
	}

	if (!(sbp->pkt_flags & PACKET_VALID)) {
		return (FC_FAILURE);
	}
	sbp->pkt_flags &= ~PACKET_VALID;
	mutex_destroy(&sbp->mtx);

	return (FC_SUCCESS);

} /* emlxs_pkt_uninit() */


/*
 * FCA fca_get_cap entry point: report adapter capabilities to ULP.
 * 'cap' is a capability name string; the result is written through 'ptr'
 * (the type written depends on the capability).  Returns FC_CAP_FOUND
 * or FC_CAP_ERROR.
 */
static int
emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	int32_t rval;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_CAP_ERROR);
	}

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_NODE_WWN");

		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_LOGIN_PARAMS");

		/*
		 * We need to turn off CLASS2 support.
		 * Otherwise, FC transport will use CLASS2 as default class
		 * and never try with CLASS3.
		 */
		hba->sparam.cls2.classValid = 0;

		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		int32_t *num_bufs;
		emlxs_config_t *cfg = &CFG;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
		    cfg[CFG_UB_BUFS].current);

		num_bufs = (int32_t *)ptr;

		/* We multiply by MAX_VPORTS because ULP uses a */
		/* formula to calculate ub bufs from this */
		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		int32_t *size;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");

		size = (int32_t *)ptr;
		/* -1 = no payload size limit imposed by this FCA */
		*size = -1;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		fc_reset_action_t *action;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");

		action = (fc_reset_action_t *)ptr;
		*action = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
		fc_dma_behavior_t *behavior;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");

		behavior = (fc_dma_behavior_t *)ptr;
		*behavior = FC_ALLOW_STREAMING;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		fc_fcp_dma_t *fcp_dma;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_FCP_DMA");

		fcp_dma = (fc_fcp_dma_t *)ptr;
		*fcp_dma = FC_DVMA_SPACE;
		rval = FC_CAP_FOUND;

	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: Unknown capability. [%s]", cap);

		rval = FC_CAP_ERROR;

	}

	return (rval);

} /* emlxs_get_cap() */



/*
 * FCA fca_set_cap entry point.  No capabilities are settable on this
 * adapter; always returns FC_CAP_ERROR.
 */
static int
emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);

	return (FC_CAP_ERROR);

} /* emlxs_set_cap() */


/*
 * FCA fca_get_device entry point.  Not supported; always returns NULL.
 */
static opaque_t
emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_get_device: did=%x", d_id.port_id);

	return (NULL);

} /* emlxs_get_device() */


/*
 * FCA fca_notify entry point.  Notifications are logged and accepted
 * without further action.
 */
static int32_t
emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
	    cmd);

	return (FC_SUCCESS);

} /*
emlxs_notify */



/*
 * FCA fca_get_map entry point: return the current LILP (loop positional)
 * map to ULP.  Only meaningful on loop topology; FC_NOMAP otherwise.
 */
static int
emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	uint32_t lilp_length;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
	    port->alpa_map[3], port->alpa_map[4]);

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_NOMAP);
	}

	if (hba->topology != TOPOLOGY_LOOP) {
		return (FC_NOMAP);
	}

	/* Check if alpa map is available */
	if (port->alpa_map[0] != 0) {
		mapbuf->lilp_magic = MAGIC_LILP;
	} else {	/* No LILP map available */

		/* Set lilp_magic to MAGIC_LISA and this will */
		/* trigger an ALPA scan in ULP */
		mapbuf->lilp_magic = MAGIC_LISA;
	}

	mapbuf->lilp_myalpa = port->did;

	/* The first byte of the alpa_map is the lilp map length */
	/* Add one to include the lilp length byte itself */
	lilp_length = (uint32_t)port->alpa_map[0] + 1;

	/* Make sure the max transfer is 128 bytes */
	if (lilp_length > 128) {
		lilp_length = 128;
	}

	/* We start copying from the lilp_length field */
	/* in order to get a word aligned address */
	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
	    lilp_length);

	return (FC_SUCCESS);

} /* emlxs_get_map() */



/*
 * FCA fca_transport entry point: validate and dispatch a ULP packet to
 * the appropriate send routine based on pkt_tran_type (and, for exchange
 * and outbound packets, the FC-4 type in the frame header).  If the
 * packet was marked polled, completion is waited for here before return.
 */
extern int
emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t rval;
	uint32_t pkt_flags;

	/* Make sure adapter is online */
	if (!(hba->flag & FC_ONLINE_MODE)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Adapter offline.");

		return (FC_OFFLINE);
	}

	/* Validate packet */
	sbp = PKT2PRIV(pkt);

	/* Make sure ULP was told that the port was online */
	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Port offline.");

		return (FC_OFFLINE);
	}

	if (sbp->port != port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}

	/*
	 * NOTE(review): this rejects only when NEITHER flag is set;
	 * presumably intentional, but confirm both-flag semantics.
	 */
	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}
#ifdef SFCT_SUPPORT
	if (port->tgt_mode && !sbp->fct_cmd &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Packet blocked. Target mode.");
		return (FC_TRANSPORT_ERROR);
	}
#endif /* SFCT_SUPPORT */

#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif /* IDLE_TIMER */

	/* Prepare the packet for transport */
	emlxs_initialize_pkt(port, sbp);

	/* Save a copy of the pkt flags. */
	/* We will check the polling flag later */
	pkt_flags = sbp->pkt_flags;

	/* Send the packet */
	switch (pkt->pkt_tran_type) {
	case FC_PKT_FCP_READ:
	case FC_PKT_FCP_WRITE:
		rval = emlxs_send_fcp_cmd(port, sbp);
		break;

	case FC_PKT_IP_WRITE:
	case FC_PKT_BROADCAST:
		rval = emlxs_send_ip(port, sbp);
		break;

	case FC_PKT_EXCHANGE:
		switch (pkt->pkt_cmd_fhdr.type) {
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fcp_cmd(port, sbp);
			break;

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct(port, sbp);
			break;

#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els(port, sbp);
		}
		break;

	case FC_PKT_OUTBOUND:
		switch (pkt->pkt_cmd_fhdr.type) {
#ifdef SFCT_SUPPORT
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fct_status(port, sbp);
			break;

		case FC_TYPE_BASIC_LS:
			rval = emlxs_send_fct_abort(port, sbp);
			break;
#endif /* SFCT_SUPPORT */

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct_rsp(port, sbp);
			break;
#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els_rsp(port, sbp);
		}
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
		rval = FC_TRANSPORT_ERROR;
		break;
	}

	/* Check if send was not successful */
	if (rval != FC_SUCCESS) {
		/* Return packet to ULP */
		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_ULP_OWNED;
		mutex_exit(&sbp->mtx);

		return (rval);
	}

	/* Check if this packet should be polled for completion before */
	/* returning. This check must be done with a saved copy of the */
	/* pkt_flags because the packet itself could already be freed from */
	/* memory if it was not polled. */
	if (pkt_flags & PACKET_POLLED) {
		emlxs_poll(port, sbp);
	}

	return (FC_SUCCESS);

} /* emlxs_transport() */



/*
 * Wait for a polled packet to complete.  In panic context the chip is
 * polled manually (no interrupts or timers run); otherwise this blocks
 * on the packet condition variable.  FCP reset packets additionally
 * flush outstanding chip IOs, escalating to link reset and HBA reset
 * if the flush does not drain.
 */
static void
emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt = PRIV2PKT(sbp);
	clock_t timeout;
	clock_t time;
	uint32_t att_bit;
	CHANNEL *cp;

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Check for panic situation */
	cp = (CHANNEL *)sbp->channel;

	if (ddi_in_panic()) {
		/*
		 * In panic situations there will be one thread with
		 * no interrupts (hard or soft) and no timers
		 */

		/*
		 * We must manually poll everything in this thread
		 * to keep the driver going.
		 */
		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			/* Select the attention bit for this ring (SLI3) */
			switch (cp->channelno) {
			case FC_FCP_RING:
				att_bit = HA_R0ATT;
				break;

			case FC_IP_RING:
				att_bit = HA_R1ATT;
				break;

			case FC_ELS_RING:
				att_bit = HA_R2ATT;
				break;

			case FC_CT_RING:
				att_bit = HA_R3ATT;
				break;
			}
		}

		/* Keep polling the chip until our IO is completed */
		/* Driver's timer will not function during panics. */
		/* Therefore, timer checks must be performed manually.
*/
		(void) drv_getparm(LBOLT, &time);
		/* Re-run driver timer checks once a second while polling */
		timeout = time + drv_usectohz(1000000);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
				EMLXS_SLI_POLL_INTR(hba, att_bit);
			} else {
				EMLXS_SLI_POLL_INTR(hba, 0);
			}
			(void) drv_getparm(LBOLT, &time);

			/* Trigger timer checks periodically */
			if (time >= timeout) {
				emlxs_timer_checks(hba);
				timeout = time + drv_usectohz(1000000);
			}
		}
	} else {
		/* Wait for IO completion */
		/* The driver's timer will detect */
		/* any timeout and abort the I/O. */
		mutex_enter(&EMLXS_PKT_LOCK);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
		}
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* Check for fcp reset pkt */
	if (sbp->pkt_flags & PACKET_FCP_RESET) {
		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->chan[hba->channel_fcp],
			    sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, hba->channel_fcp);
			goto done;
		}

		/* Set the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, hba->channel_fcp);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Waiting...", sbp,
		    sbp->flush_count);

		/* Let's try this one more time */

		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->chan[hba->channel_fcp],
			    sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		/* Reset the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, hba->channel_fcp);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting link.", sbp,
		    sbp->flush_count);

		/* Let's first try to reset the link */
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);

		if (sbp->flush_count == 0) {
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
		    sbp->flush_count);

		/* If that doesn't work, reset the adapter */
		(void) emlxs_reset(port, FC_FCA_RESET);

		if (sbp->flush_count != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
			    "sbp=%p flush_count=%d. Giving up.", sbp,
			    sbp->flush_count);
		}

	}
	/* PACKET_FCP_RESET */
done:

	/* Packet has been declared completed and is now ready to be returned */

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	mutex_enter(&sbp->mtx);
	sbp->pkt_flags |= PACKET_ULP_OWNED;
	mutex_exit(&sbp->mtx);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count--;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Make ULP completion callback if required */
	if (pkt->pkt_comp) {
		cp->ulpCmplCmd++;
		(*pkt->pkt_comp) (pkt);
	}

	return;

} /* emlxs_poll() */


/*
 * FCA fca_ub_alloc entry point: create a pool of unsolicited buffers of
 * the given size for the given FC-4 type and return their tokens to ULP.
 * Duplicate (type, size) pools are rejected.  On failure the partially
 * built pool is torn down under EMLXS_UB_LOCK.
 */
static int
emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;

	char *err = NULL;
	emlxs_unsol_buf_t *pool;
	emlxs_unsol_buf_t *new_pool;
	int32_t i;
	int result;
	uint32_t free_resv;
	uint32_t free;
	emlxs_config_t *cfg = &CFG;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	int rc;

	if (port->tgt_mode) {
		/* Target mode does not use ULP unsolicited buffers */
		if (tokens && count) {
			bzero(tokens, (sizeof (uint64_t) * (*count)));
		}
		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_alloc failed: Port not bound! size=%x count=%d "
		    "type=%x", size, *count, type);

		return (FC_FAILURE);
	}

	/*
	 * NOTE(review): *count is dereferenced here and above before the
	 * NULL checks below — presumably ULP guarantees count != NULL;
	 * confirm against the FCA contract.
	 */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);

	if (count && (*count > EMLXS_MAX_UBUFS)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "ub_alloc failed: Too many unsolicted buffers requested. "
		    "count=%x", *count);

		return (FC_FAILURE);

	}

	if (tokens == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "ub_alloc failed: Token array is NULL.");

		return (FC_FAILURE);
	}

	/* Clear the token array */
	bzero(tokens, (sizeof (uint64_t) * (*count)));

	free_resv = 0;
	free = *count;
	/* Map the FC-4 type to a name for messages; ELS splits the pool */
	switch (type) {
	case FC_TYPE_BASIC_LS:
		err = "BASIC_LS";
		break;
	case FC_TYPE_EXTENDED_LS:
		err = "EXTENDED_LS";
		free = *count / 2;	/* Hold 50% for normal use */
		free_resv = *count - free;	/* Reserve 50% for RSCN use */
		break;
	case FC_TYPE_IS8802:
		err = "IS8802";
		break;
	case FC_TYPE_IS8802_SNAP:
		err = "IS8802_SNAP";

		if (cfg[CFG_NETWORK_ON].current == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: IP support is disabled.");

			return (FC_FAILURE);
		}
		break;
	case FC_TYPE_SCSI_FCP:
		err = "SCSI_FCP";
		break;
	case FC_TYPE_SCSI_GPP:
		err = "SCSI_GPP";
		break;
	case FC_TYPE_HIPP_FP:
		err = "HIPP_FP";
		break;
	case FC_TYPE_IPI3_MASTER:
		err = "IPI3_MASTER";
		break;
	case FC_TYPE_IPI3_SLAVE:
		err = "IPI3_SLAVE";
		break;
	case FC_TYPE_IPI3_PEER:
		err = "IPI3_PEER";
		break;
	case FC_TYPE_FC_SERVICES:
		err = "FC_SERVICES";
		break;
	}

	mutex_enter(&EMLXS_UB_LOCK);

	/*
	 * Walk through the list of the unsolicited buffers
	 * for this ddiinst of emlx.
	 */

	pool = port->ub_pool;

	/*
	 * The emlxs_ub_alloc() can be called more than once with different
	 * size. We will reject the call if there are
	 * duplicate size with the same FC-4 type.
	 */
	while (pool) {
		if ((pool->pool_type == type) &&
		    (pool->pool_buf_size == size)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: Unsolicited buffer pool for %s "
			    "of size 0x%x bytes already exists.", err, size);

			result = FC_FAILURE;
			goto fail;
		}

		pool = pool->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
	    KM_SLEEP);

	new_pool->pool_next = NULL;
	new_pool->pool_type = type;
	new_pool->pool_buf_size = size;
	new_pool->pool_nentries = *count;
	new_pool->pool_available = new_pool->pool_nentries;
	new_pool->pool_free = free;
	new_pool->pool_free_resv = free_resv;
	new_pool->fc_ubufs =
	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);

	new_pool->pool_first_token = port->ub_count;
	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;

	for (i = 0; i < new_pool->pool_nentries; i++) {
		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
		ubp->ub_port_handle = port->ulp_handle;
		/* The buffer's own address doubles as its ULP token */
		ubp->ub_token = (uint64_t)((unsigned long)ubp);
		ubp->ub_bufsize = size;
		ubp->ub_class = FC_TRAN_CLASS3;
		ubp->ub_port_private = NULL;
		ubp->ub_fca_private =
		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
		    KM_SLEEP);

		/*
		 * Initialize emlxs_ub_priv_t
		 */
		ub_priv = ubp->ub_fca_private;
		ub_priv->ubp = ubp;
		ub_priv->port = port;
		ub_priv->flags = EMLXS_UB_FREE;
		ub_priv->available = 1;
		ub_priv->pool = new_pool;
		ub_priv->time = 0;
		ub_priv->timeout = 0;
		ub_priv->token = port->ub_count;
		ub_priv->cmd = 0;

		/* Allocate the actual buffer */
		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
		    ub_priv->token, ubp->ub_bufsize, type);

		tokens[i] = (uint64_t)((unsigned long)ubp);
		port->ub_count++;
	}

	mutex_enter(&EMLXS_UB_LOCK);

	/* Add the pool to the top of the pool list */
	new_pool->pool_prev = NULL;
	new_pool->pool_next = port->ub_pool;

	if (port->ub_pool) {
		port->ub_pool->pool_prev = new_pool;
	}
	port->ub_pool = new_pool;

	/* Set the post counts */
	if (type == FC_TYPE_IS8802_SNAP) {
		MAILBOXQ *mbox;

		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;

		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX, 1))) {
			emlxs_mb_config_farp(hba, mbox);
			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
			    mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				/* Mailbox not queued; return it to the pool */
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
		port->flag |= EMLXS_PORT_IP_UP;
	} else if (type == FC_TYPE_EXTENDED_LS) {
		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
	} else if (type == FC_TYPE_FC_SERVICES) {
		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
	    *count, err, size);

	return (FC_SUCCESS);

fail:

	/* Clean the pool */
	for (i = 0; tokens[i] != NULL; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);

		/* Free the actual buffer */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));

		tokens[i] = 0;
		port->ub_count--;
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)new_pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));

	mutex_exit(&EMLXS_UB_LOCK);

	return (result);

} /* emlxs_ub_alloc() */


/*
 * Send an LS_RJT in response to an unsolicited ELS request that ULP
 * dropped (released without replying).  Best-effort: silently returns
 * if the link is down or a packet cannot be allocated.
 */
static void
emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;
	fc_packet_t *pkt;
	ELS_PKT *els;
	uint32_t sid;

	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

	if (hba->state <= FC_LINK_DOWN) {
		return;
	}

	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
		return;
	}

	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
	    "%s dropped: sid=%x. 
Rejecting.",
	    emlxs_elscmd_xlate(ub_priv->cmd), sid);

	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
	pkt->pkt_timeout = (2 * hba->fc_ratov);

	/* Match the class of the original request */
	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
	}

	/* Build the fc header */
	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
	pkt->pkt_cmd_fhdr.r_ctl =
	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
	pkt->pkt_cmd_fhdr.f_ctl =
	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
	pkt->pkt_cmd_fhdr.seq_id = 0;
	pkt->pkt_cmd_fhdr.df_ctl = 0;
	pkt->pkt_cmd_fhdr.seq_cnt = 0;
	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
	pkt->pkt_cmd_fhdr.ro = 0;

	/* Build the command */
	els = (ELS_PKT *) pkt->pkt_cmd;
	els->elsCode = 0x01;	/* LS_RJT */
	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	els->un.lsRjt.un.b.vendorUnique = 0x02;

	/* Send the pkt later in another thread */
	(void) emlxs_pkt_send(pkt, 0);

	return;

} /* emlxs_ub_els_reject() */

/*
 * FCA fca_ub_release entry point: ULP returns unsolicited buffers it has
 * finished with.  Dropped ELS requests get an LS_RJT reply; freed buffers
 * are zeroed and returned to their pool, and a fully drained pool that is
 * pending destruction is destroyed here.
 */
extern int
emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;
	uint32_t time;
	emlxs_unsol_buf_t *pool;

	if (count == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release: Nothing to do. count=%d", count);

		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release failed: Port not bound. count=%d token[0]=%p",
		    count, tokens[0]);

		return (FC_UNBOUND);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release failed: No pools! count=%d token[0]=%p",
		    count, tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	for (i = 0; i < count; i++) {
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release failed: count=%d tokens[%d]=0", count,
			    i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release failed: Dead buffer found. ubp=%p",
			    ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		if (ub_priv->flags == EMLXS_UB_FREE) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release: Buffer already free! ubp=%p token=%x",
			    ubp, ub_priv->token);

			continue;
		}

		/* Check for dropped els buffer */
		/* ULP will do this sometimes without sending a reply */
		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
			emlxs_ub_els_reject(port, ubp);
		}

		/* Mark the buffer free */
		/*
		 * NOTE(review): flags is overwritten to EMLXS_UB_FREE here,
		 * so the EMLXS_UB_RESV test below can never see the old
		 * flags — pool_free_resv appears unreachable from this path.
		 * Confirm intent before changing the accounting.
		 */
		ub_priv->flags = EMLXS_UB_FREE;
		bzero(ubp->ub_buffer, ubp->ub_bufsize);

		time = hba->timer_tics - ub_priv->time;
		ub_priv->time = 0;
		ub_priv->timeout = 0;

		pool = ub_priv->pool;

		if (ub_priv->flags & EMLXS_UB_RESV) {
			pool->pool_free_resv++;
		} else {
			pool->pool_free++;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
		    ubp, ub_priv->token, time, ub_priv->available,
		    pool->pool_nentries, pool->pool_available,
		    pool->pool_free, pool->pool_free_resv);

		/* Check if pool can be destroyed now */
		if ((pool->pool_available == 0) &&
		    (pool->pool_free + pool->pool_free_resv ==
		    pool->pool_nentries)) {
			emlxs_ub_destroy(port, pool);
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_ub_release() */


/*
 * FCA fca_ub_free entry point: ULP surrenders ownership of unsolicited
 * buffers.  Buffers are marked unavailable; when the last one in a pool
 * is both unavailable and free, the pool itself is destroyed.
 */
static int
emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_unsol_buf_t *pool;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;

	if (port->tgt_mode) {
		/* Target mode has no ULP unsolicited buffer pools */
		return (FC_SUCCESS);
	}

	if (count == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free: Port not bound. count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free failed: No pools! count=%d token[0]=%p", count,
		    tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	/* Process buffer list */
	for (i = 0; i < count; i++) {
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_free failed: count=%d tokens[%d]=0", count,
			    i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		/* Mark buffer unavailable */
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_free failed: Dead buffer found. ubp=%p", ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv->available = 0;

		/* Mark one less buffer available in the parent pool */
		pool = ub_priv->pool;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
		    ub_priv->token, pool->pool_nentries,
		    pool->pool_available - 1, pool->pool_free,
		    pool->pool_free_resv);

		if (pool->pool_available) {
			pool->pool_available--;

			/* Check if pool can be destroyed */
			if ((pool->pool_available == 0) &&
			    (pool->pool_free + pool->pool_free_resv ==
			    pool->pool_nentries)) {
				emlxs_ub_destroy(port, pool);
			}
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_ub_free() */


/* EMLXS_UB_LOCK must be held when calling this routine */
/*
 * Unlink an unsolicited buffer pool from the port, adjust the per-channel
 * post counts, and free every buffer, private area, and the pool itself.
 */
extern void
emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *next;
	emlxs_unsol_buf_t *prev;
	fc_unsol_buf_t *ubp;
	uint32_t i;

	/* Remove the pool object from the pool list */
	next = pool->pool_next;
	prev = pool->pool_prev;

	if (port->ub_pool == pool) {
		port->ub_pool = next;
	}

	if (prev) {
		prev->pool_next = next;
	}

	if (next) {
		next->pool_prev = prev;
	}

	pool->pool_prev = NULL;
	pool->pool_next = NULL;

	/* Clear the post counts */
	switch (pool->pool_type) {
	case FC_TYPE_IS8802_SNAP:
		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
		break;

	case FC_TYPE_EXTENDED_LS:
		port->ub_post[hba->channel_els] -= pool->pool_nentries;
		break;

	case FC_TYPE_FC_SERVICES:
		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
		break;
	}

	/* Now free the pool memory */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);

	/* Process the array of buffer objects in the pool */
	for (i = 0; i < pool->pool_nentries; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];

		/* Free the memory the buffer object represents */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));

	return;

} /* emlxs_ub_destroy() */


/*
 * FCA fca_abort entry point: abort a previously transported packet.
 * (Body continues past this region.)
 */
/*ARGSUSED*/
extern int
emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t
*)fca_port_handle; 3377 emlxs_hba_t *hba = HBA; 3378 emlxs_config_t *cfg = &CFG; 3379 3380 emlxs_buf_t *sbp; 3381 NODELIST *nlp; 3382 NODELIST *prev_nlp; 3383 uint8_t channelno; 3384 CHANNEL *cp; 3385 clock_t timeout; 3386 clock_t time; 3387 int32_t pkt_ret; 3388 IOCBQ *iocbq; 3389 IOCBQ *next; 3390 IOCBQ *prev; 3391 uint32_t found; 3392 uint32_t att_bit; 3393 uint32_t pass = 0; 3394 3395 sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 3396 iocbq = &sbp->iocbq; 3397 nlp = (NODELIST *)sbp->node; 3398 cp = (CHANNEL *)sbp->channel; 3399 channelno = (cp) ? cp->channelno : 0; 3400 3401 if (!(port->flag & EMLXS_PORT_BOUND)) { 3402 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3403 "Port not bound."); 3404 return (FC_UNBOUND); 3405 } 3406 3407 if (!(hba->flag & FC_ONLINE_MODE)) { 3408 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3409 "Adapter offline."); 3410 return (FC_OFFLINE); 3411 } 3412 3413 /* ULP requires the aborted pkt to be completed */ 3414 /* back to ULP before returning from this call. */ 3415 /* SUN knows of problems with this call so they suggested that we */ 3416 /* always return a FC_FAILURE for this call, until it is worked out. */ 3417 3418 /* Check if pkt is no good */ 3419 if (!(sbp->pkt_flags & PACKET_VALID) || 3420 (sbp->pkt_flags & PACKET_ULP_OWNED)) { 3421 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3422 "Bad sbp. flags=%x", sbp->pkt_flags); 3423 return (FC_FAILURE); 3424 } 3425 3426 /* Tag this now */ 3427 /* This will prevent any thread except ours from completing it */ 3428 mutex_enter(&sbp->mtx); 3429 3430 /* Check again if we still own this */ 3431 if (!(sbp->pkt_flags & PACKET_VALID) || 3432 (sbp->pkt_flags & PACKET_ULP_OWNED)) { 3433 mutex_exit(&sbp->mtx); 3434 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3435 "Bad sbp. 
flags=%x", sbp->pkt_flags); 3436 return (FC_FAILURE); 3437 } 3438 3439 /* Check if pkt is a real polled command */ 3440 if (!(sbp->pkt_flags & PACKET_IN_ABORT) && 3441 (sbp->pkt_flags & PACKET_POLLED)) { 3442 mutex_exit(&sbp->mtx); 3443 3444 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3445 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp, 3446 sbp->pkt_flags); 3447 return (FC_FAILURE); 3448 } 3449 3450 sbp->pkt_flags |= PACKET_POLLED; 3451 sbp->pkt_flags |= PACKET_IN_ABORT; 3452 3453 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | 3454 PACKET_IN_TIMEOUT)) { 3455 mutex_exit(&sbp->mtx); 3456 3457 /* Do nothing, pkt already on its way out */ 3458 goto done; 3459 } 3460 3461 mutex_exit(&sbp->mtx); 3462 3463 begin: 3464 pass++; 3465 3466 mutex_enter(&EMLXS_TX_CHANNEL_LOCK); 3467 3468 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3469 /* Find it on the queue */ 3470 found = 0; 3471 if (iocbq->flag & IOCB_PRIORITY) { 3472 /* Search the priority queue */ 3473 prev = NULL; 3474 next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first; 3475 3476 while (next) { 3477 if (next == iocbq) { 3478 /* Remove it */ 3479 if (prev) { 3480 prev->next = iocbq->next; 3481 } 3482 3483 if (nlp->nlp_ptx[channelno].q_last == 3484 (void *)iocbq) { 3485 nlp->nlp_ptx[channelno].q_last = 3486 (void *)prev; 3487 } 3488 3489 if (nlp->nlp_ptx[channelno].q_first == 3490 (void *)iocbq) { 3491 nlp->nlp_ptx[channelno]. 
3492 q_first = 3493 (void *)iocbq->next; 3494 } 3495 3496 nlp->nlp_ptx[channelno].q_cnt--; 3497 iocbq->next = NULL; 3498 found = 1; 3499 break; 3500 } 3501 3502 prev = next; 3503 next = next->next; 3504 } 3505 } else { 3506 /* Search the normal queue */ 3507 prev = NULL; 3508 next = (IOCBQ *) nlp->nlp_tx[channelno].q_first; 3509 3510 while (next) { 3511 if (next == iocbq) { 3512 /* Remove it */ 3513 if (prev) { 3514 prev->next = iocbq->next; 3515 } 3516 3517 if (nlp->nlp_tx[channelno].q_last == 3518 (void *)iocbq) { 3519 nlp->nlp_tx[channelno].q_last = 3520 (void *)prev; 3521 } 3522 3523 if (nlp->nlp_tx[channelno].q_first == 3524 (void *)iocbq) { 3525 nlp->nlp_tx[channelno].q_first = 3526 (void *)iocbq->next; 3527 } 3528 3529 nlp->nlp_tx[channelno].q_cnt--; 3530 iocbq->next = NULL; 3531 found = 1; 3532 break; 3533 } 3534 3535 prev = next; 3536 next = (IOCBQ *) next->next; 3537 } 3538 } 3539 3540 if (!found) { 3541 mutex_exit(&EMLXS_TX_CHANNEL_LOCK); 3542 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3543 "I/O not found in driver. 
sbp=%p flags=%x", sbp, 3544 sbp->pkt_flags); 3545 goto done; 3546 } 3547 3548 /* Check if node still needs servicing */ 3549 if ((nlp->nlp_ptx[channelno].q_first) || 3550 (nlp->nlp_tx[channelno].q_first && 3551 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) { 3552 3553 /* 3554 * If this is the base node, 3555 * then don't shift the pointers 3556 */ 3557 /* We want to drain the base node before moving on */ 3558 if (!nlp->nlp_base) { 3559 /* Just shift channel queue */ 3560 /* pointers to next node */ 3561 cp->nodeq.q_last = (void *) nlp; 3562 cp->nodeq.q_first = nlp->nlp_next[channelno]; 3563 } 3564 } else { 3565 /* Remove node from channel queue */ 3566 3567 /* If this is the only node on list */ 3568 if (cp->nodeq.q_first == (void *)nlp && 3569 cp->nodeq.q_last == (void *)nlp) { 3570 cp->nodeq.q_last = NULL; 3571 cp->nodeq.q_first = NULL; 3572 cp->nodeq.q_cnt = 0; 3573 } else if (cp->nodeq.q_first == (void *)nlp) { 3574 cp->nodeq.q_first = nlp->nlp_next[channelno]; 3575 ((NODELIST *) cp->nodeq.q_last)-> 3576 nlp_next[channelno] = cp->nodeq.q_first; 3577 cp->nodeq.q_cnt--; 3578 } else { 3579 /* 3580 * This is a little more difficult find the 3581 * previous node in the circular channel queue 3582 */ 3583 prev_nlp = nlp; 3584 while (prev_nlp->nlp_next[channelno] != nlp) { 3585 prev_nlp = prev_nlp-> 3586 nlp_next[channelno]; 3587 } 3588 3589 prev_nlp->nlp_next[channelno] = 3590 nlp->nlp_next[channelno]; 3591 3592 if (cp->nodeq.q_last == (void *)nlp) { 3593 cp->nodeq.q_last = (void *)prev_nlp; 3594 } 3595 cp->nodeq.q_cnt--; 3596 3597 } 3598 3599 /* Clear node */ 3600 nlp->nlp_next[channelno] = NULL; 3601 } 3602 3603 /* Free the ULPIOTAG and the bmp */ 3604 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 3605 hba->fc_table[sbp->iotag] = NULL; 3606 emlxs_sli4_free_xri(hba, sbp, sbp->xp); 3607 } else { 3608 (void) emlxs_unregister_pkt(cp, sbp->iotag, 1); 3609 } 3610 3611 3612 mutex_exit(&EMLXS_TX_CHANNEL_LOCK); 3613 3614 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 3615 
IOERR_ABORT_REQUESTED, 1); 3616 3617 goto done; 3618 } 3619 3620 mutex_exit(&EMLXS_TX_CHANNEL_LOCK); 3621 3622 3623 /* Check the chip queue */ 3624 mutex_enter(&EMLXS_FCTAB_LOCK); 3625 3626 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) && 3627 !(sbp->pkt_flags & PACKET_XRI_CLOSED) && 3628 (sbp == hba->fc_table[sbp->iotag])) { 3629 3630 /* Create the abort IOCB */ 3631 if (hba->state >= FC_LINK_UP) { 3632 iocbq = 3633 emlxs_create_abort_xri_cn(port, sbp->node, 3634 sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS); 3635 3636 mutex_enter(&sbp->mtx); 3637 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3638 sbp->ticks = 3639 hba->timer_tics + (4 * hba->fc_ratov) + 10; 3640 sbp->abort_attempts++; 3641 mutex_exit(&sbp->mtx); 3642 } else { 3643 iocbq = 3644 emlxs_create_close_xri_cn(port, sbp->node, 3645 sbp->iotag, cp); 3646 3647 mutex_enter(&sbp->mtx); 3648 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3649 sbp->ticks = hba->timer_tics + 30; 3650 sbp->abort_attempts++; 3651 mutex_exit(&sbp->mtx); 3652 } 3653 3654 mutex_exit(&EMLXS_FCTAB_LOCK); 3655 3656 /* Send this iocbq */ 3657 if (iocbq) { 3658 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 3659 iocbq = NULL; 3660 } 3661 3662 goto done; 3663 } 3664 3665 mutex_exit(&EMLXS_FCTAB_LOCK); 3666 3667 /* Pkt was not on any queues */ 3668 3669 /* Check again if we still own this */ 3670 if (!(sbp->pkt_flags & PACKET_VALID) || 3671 (sbp->pkt_flags & 3672 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION | 3673 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3674 goto done; 3675 } 3676 3677 if (!sleep) { 3678 return (FC_FAILURE); 3679 } 3680 3681 /* Apparently the pkt was not found. 
Let's delay and try again */ 3682 if (pass < 5) { 3683 delay(drv_usectohz(5000000)); /* 5 seconds */ 3684 3685 /* Check again if we still own this */ 3686 if (!(sbp->pkt_flags & PACKET_VALID) || 3687 (sbp->pkt_flags & 3688 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION | 3689 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3690 goto done; 3691 } 3692 3693 goto begin; 3694 } 3695 3696 force_it: 3697 3698 /* Force the completion now */ 3699 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3700 "Abort: Completing an IO thats not outstanding: %x", sbp->iotag); 3701 3702 /* Now complete it */ 3703 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 3704 1); 3705 3706 done: 3707 3708 /* Now wait for the pkt to complete */ 3709 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3710 /* Set thread timeout */ 3711 timeout = emlxs_timeout(hba, 30); 3712 3713 /* Check for panic situation */ 3714 if (ddi_in_panic()) { 3715 3716 /* 3717 * In panic situations there will be one thread with no 3718 * interrrupts (hard or soft) and no timers 3719 */ 3720 3721 /* 3722 * We must manually poll everything in this thread 3723 * to keep the driver going. 
3724 */ 3725 3726 cp = (CHANNEL *)sbp->channel; 3727 switch (cp->channelno) { 3728 case FC_FCP_RING: 3729 att_bit = HA_R0ATT; 3730 break; 3731 3732 case FC_IP_RING: 3733 att_bit = HA_R1ATT; 3734 break; 3735 3736 case FC_ELS_RING: 3737 att_bit = HA_R2ATT; 3738 break; 3739 3740 case FC_CT_RING: 3741 att_bit = HA_R3ATT; 3742 break; 3743 } 3744 3745 /* Keep polling the chip until our IO is completed */ 3746 (void) drv_getparm(LBOLT, &time); 3747 while ((time < timeout) && 3748 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3749 EMLXS_SLI_POLL_INTR(hba, att_bit); 3750 (void) drv_getparm(LBOLT, &time); 3751 } 3752 } else { 3753 /* Wait for IO completion or timeout */ 3754 mutex_enter(&EMLXS_PKT_LOCK); 3755 pkt_ret = 0; 3756 while ((pkt_ret != -1) && 3757 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3758 pkt_ret = 3759 cv_timedwait(&EMLXS_PKT_CV, 3760 &EMLXS_PKT_LOCK, timeout); 3761 } 3762 mutex_exit(&EMLXS_PKT_LOCK); 3763 } 3764 3765 /* Check if timeout occured. This is not good. */ 3766 /* Something happened to our IO. 
*/ 3767 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3768 /* Force the completion now */ 3769 goto force_it; 3770 } 3771 } 3772 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 3773 emlxs_unswap_pkt(sbp); 3774 #endif /* EMLXS_MODREV2X */ 3775 3776 /* Check again if we still own this */ 3777 if ((sbp->pkt_flags & PACKET_VALID) && 3778 !(sbp->pkt_flags & PACKET_ULP_OWNED)) { 3779 mutex_enter(&sbp->mtx); 3780 if ((sbp->pkt_flags & PACKET_VALID) && 3781 !(sbp->pkt_flags & PACKET_ULP_OWNED)) { 3782 sbp->pkt_flags |= PACKET_ULP_OWNED; 3783 } 3784 mutex_exit(&sbp->mtx); 3785 } 3786 3787 #ifdef ULP_PATCH5 3788 if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) { 3789 return (FC_FAILURE); 3790 } 3791 #endif /* ULP_PATCH5 */ 3792 3793 return (FC_SUCCESS); 3794 3795 } /* emlxs_pkt_abort() */ 3796 3797 3798 static void 3799 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip) 3800 { 3801 emlxs_port_t *port = &PPORT; 3802 fc_packet_t *pkt; 3803 emlxs_buf_t *sbp; 3804 uint32_t i; 3805 uint32_t flg; 3806 uint32_t rc; 3807 uint32_t txcnt; 3808 uint32_t chipcnt; 3809 3810 txcnt = 0; 3811 chipcnt = 0; 3812 3813 mutex_enter(&EMLXS_FCTAB_LOCK); 3814 for (i = 0; i < hba->max_iotag; i++) { 3815 sbp = hba->fc_table[i]; 3816 if (sbp == NULL || sbp == STALE_PACKET) { 3817 continue; 3818 } 3819 flg = (sbp->pkt_flags & PACKET_IN_CHIPQ); 3820 pkt = PRIV2PKT(sbp); 3821 mutex_exit(&EMLXS_FCTAB_LOCK); 3822 rc = emlxs_pkt_abort(port, pkt, 0); 3823 if (rc == FC_SUCCESS) { 3824 if (flg) { 3825 chipcnt++; 3826 } else { 3827 txcnt++; 3828 } 3829 } 3830 mutex_enter(&EMLXS_FCTAB_LOCK); 3831 } 3832 mutex_exit(&EMLXS_FCTAB_LOCK); 3833 *tx = txcnt; 3834 *chip = chipcnt; 3835 } /* emlxs_abort_all() */ 3836 3837 3838 extern int32_t 3839 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd) 3840 { 3841 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3842 emlxs_hba_t *hba = HBA; 3843 int rval; 3844 int ret; 3845 clock_t timeout; 3846 3847 if (!(port->flag & EMLXS_PORT_BOUND)) { 3848 EMLXS_MSGF(EMLXS_CONTEXT, 
		    &emlxs_sfs_debug_msg,
		    "fca_reset failed. Port not bound.");

		return (FC_UNBOUND);
	}

	switch (cmd) {
	case FC_FCA_LINK_RESET:

		/* Nothing to do if offline or the link is already down */
		if (!(hba->flag & FC_ONLINE_MODE) ||
		    (hba->state <= FC_LINK_DOWN)) {
			return (FC_SUCCESS);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Resetting Link.");

		/* Arm the link-up wait flag before triggering the reset */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		hba->linkup_wait_flag = TRUE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		if (emlxs_reset_link(hba, 1, 1)) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			hba->linkup_wait_flag = FALSE;
			mutex_exit(&EMLXS_LINKUP_LOCK);

			return (FC_FAILURE);
		}

		/* Wait up to 60 seconds for the link-up event */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		timeout = emlxs_timeout(hba, 60);
		ret = 0;
		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
			ret =
			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
			    timeout);
		}

		hba->linkup_wait_flag = FALSE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		/* cv_timedwait returns -1 on timeout */
		if (ret == -1) {
			return (FC_FAILURE);
		}

		return (FC_SUCCESS);

	case FC_FCA_CORE:
#ifdef DUMP_SUPPORT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Core dump.");

		/* Schedule a USER dump */
		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);

		/* Wait for dump to complete */
		emlxs_dump_wait(hba);

		return (FC_SUCCESS);
#endif /* DUMP_SUPPORT */
		/*
		 * NOTE(review): without DUMP_SUPPORT, FC_FCA_CORE falls
		 * through to the adapter-reset cases below — presumably
		 * intentional (reset in lieu of dump); confirm.
		 */

	case FC_FCA_RESET:
	case FC_FCA_RESET_CORE:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Resetting Adapter.");

		rval = FC_SUCCESS;

		/* Take the adapter offline, then bring it back online */
		if (emlxs_offline(hba) == 0) {
			(void) emlxs_online(hba);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_reset: Adapter reset failed. Device busy.");

			rval = FC_DEVICE_BUSY;
		}

		return (rval);

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Unknown command. cmd=%x", cmd);

		break;
	}

	return (FC_FAILURE);

} /* emlxs_reset() */


/*
 * emlxs_port_manage
 *
 * FCA entry point: dispatch an fc_fca_pm_t management request
 * (firmware queries/downloads, dumps, diagnostics, parameter
 * get/set, etc.) based on pm->pm_cmd_code.
 */
extern int
emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	int32_t ret;
	emlxs_vpd_t *vpd = &VPD;


	ret = FC_SUCCESS;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_UNBOUND);
	}


#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif	/* IDLE_TIMER */

	switch (pm->pm_cmd_code) {

	case FC_PORT_GET_FW_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FW_REV");

		(void) sprintf(buffer, "%s %s", hba->model_info.model,
		    vpd->fw_version);
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;

			break;
		}

		(void) strcpy(pm->pm_data_buf, buffer);
		break;
	}

	case FC_PORT_GET_FCODE_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FCODE_REV");

		/* Force update here just to be sure */
		emlxs_get_fcode_version(hba);

		(void) sprintf(buffer, "%s %s", hba->model_info.model,
		    vpd->fcode_version);
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;
			break;
		}

		(void) strcpy(pm->pm_data_buf, buffer);
		break;
	}

	case FC_PORT_GET_DUMP_SIZE:
	{
#ifdef DUMP_SUPPORT
		uint32_t dump_size = 0;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");

		if (pm->pm_data_len < sizeof (uint32_t)) {
			ret = FC_NOMEM;
			break;
		}

		(void) emlxs_get_dump(hba, NULL, &dump_size);

		*((uint32_t
*)pm->pm_data_buf) = dump_size; 4022 4023 #else 4024 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4025 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported."); 4026 4027 #endif /* DUMP_SUPPORT */ 4028 4029 break; 4030 } 4031 4032 case FC_PORT_GET_DUMP: 4033 { 4034 #ifdef DUMP_SUPPORT 4035 uint32_t dump_size = 0; 4036 4037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4038 "fca_port_manage: FC_PORT_GET_DUMP"); 4039 4040 (void) emlxs_get_dump(hba, NULL, &dump_size); 4041 4042 if (pm->pm_data_len < dump_size) { 4043 ret = FC_NOMEM; 4044 break; 4045 } 4046 4047 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf, 4048 (uint32_t *)&dump_size); 4049 #else 4050 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4051 "fca_port_manage: FC_PORT_GET_DUMP unsupported."); 4052 4053 #endif /* DUMP_SUPPORT */ 4054 4055 break; 4056 } 4057 4058 case FC_PORT_FORCE_DUMP: 4059 { 4060 #ifdef DUMP_SUPPORT 4061 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4062 "fca_port_manage: FC_PORT_FORCE_DUMP"); 4063 4064 /* Schedule a USER dump */ 4065 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 4066 4067 /* Wait for dump to complete */ 4068 emlxs_dump_wait(hba); 4069 #else 4070 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4071 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported."); 4072 4073 #endif /* DUMP_SUPPORT */ 4074 break; 4075 } 4076 4077 case FC_PORT_LINK_STATE: 4078 { 4079 uint32_t *link_state; 4080 4081 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4082 "fca_port_manage: FC_PORT_LINK_STATE"); 4083 4084 if (pm->pm_stat_len != sizeof (*link_state)) { 4085 ret = FC_NOMEM; 4086 break; 4087 } 4088 4089 if (pm->pm_cmd_buf != NULL) { 4090 /* 4091 * Can't look beyond the FCA port. 
4092 */ 4093 ret = FC_INVALID_REQUEST; 4094 break; 4095 } 4096 4097 link_state = (uint32_t *)pm->pm_stat_buf; 4098 4099 /* Set the state */ 4100 if (hba->state >= FC_LINK_UP) { 4101 /* Check for loop topology */ 4102 if (hba->topology == TOPOLOGY_LOOP) { 4103 *link_state = FC_STATE_LOOP; 4104 } else { 4105 *link_state = FC_STATE_ONLINE; 4106 } 4107 4108 /* Set the link speed */ 4109 switch (hba->linkspeed) { 4110 case LA_2GHZ_LINK: 4111 *link_state |= FC_STATE_2GBIT_SPEED; 4112 break; 4113 case LA_4GHZ_LINK: 4114 *link_state |= FC_STATE_4GBIT_SPEED; 4115 break; 4116 case LA_8GHZ_LINK: 4117 *link_state |= FC_STATE_8GBIT_SPEED; 4118 break; 4119 case LA_10GHZ_LINK: 4120 *link_state |= FC_STATE_10GBIT_SPEED; 4121 break; 4122 case LA_1GHZ_LINK: 4123 default: 4124 *link_state |= FC_STATE_1GBIT_SPEED; 4125 break; 4126 } 4127 } else { 4128 *link_state = FC_STATE_OFFLINE; 4129 } 4130 4131 break; 4132 } 4133 4134 4135 case FC_PORT_ERR_STATS: 4136 case FC_PORT_RLS: 4137 { 4138 MAILBOXQ *mbq; 4139 MAILBOX *mb; 4140 fc_rls_acc_t *bp; 4141 4142 if (!(hba->flag & FC_ONLINE_MODE)) { 4143 return (FC_OFFLINE); 4144 } 4145 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4146 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS"); 4147 4148 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) { 4149 ret = FC_NOMEM; 4150 break; 4151 } 4152 4153 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, 4154 MEM_MBOX, 1)) == 0) { 4155 ret = FC_NOMEM; 4156 break; 4157 } 4158 mb = (MAILBOX *)mbq; 4159 4160 emlxs_mb_read_lnk_stat(hba, mbq); 4161 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) 4162 != MBX_SUCCESS) { 4163 ret = FC_PBUSY; 4164 } else { 4165 bp = (fc_rls_acc_t *)pm->pm_data_buf; 4166 4167 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt; 4168 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt; 4169 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt; 4170 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt; 4171 bp->rls_invalid_word = 4172 mb->un.varRdLnk.invalidXmitWord; 4173 bp->rls_invalid_crc = 
mb->un.varRdLnk.crcCnt; 4174 } 4175 4176 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq); 4177 break; 4178 } 4179 4180 case FC_PORT_DOWNLOAD_FW: 4181 if (!(hba->flag & FC_ONLINE_MODE)) { 4182 return (FC_OFFLINE); 4183 } 4184 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4185 "fca_port_manage: FC_PORT_DOWNLOAD_FW"); 4186 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4187 pm->pm_data_len, 1); 4188 break; 4189 4190 case FC_PORT_DOWNLOAD_FCODE: 4191 if (!(hba->flag & FC_ONLINE_MODE)) { 4192 return (FC_OFFLINE); 4193 } 4194 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4195 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE"); 4196 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4197 pm->pm_data_len, 1); 4198 break; 4199 4200 case FC_PORT_DIAG: 4201 { 4202 uint32_t errno = 0; 4203 uint32_t did = 0; 4204 uint32_t pattern = 0; 4205 4206 switch (pm->pm_cmd_flags) { 4207 case EMLXS_DIAG_BIU: 4208 4209 if (!(hba->flag & FC_ONLINE_MODE)) { 4210 return (FC_OFFLINE); 4211 } 4212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4213 "fca_port_manage: EMLXS_DIAG_BIU"); 4214 4215 if (pm->pm_data_len) { 4216 pattern = *((uint32_t *)pm->pm_data_buf); 4217 } 4218 4219 errno = emlxs_diag_biu_run(hba, pattern); 4220 4221 if (pm->pm_stat_len == sizeof (errno)) { 4222 *(int *)pm->pm_stat_buf = errno; 4223 } 4224 4225 break; 4226 4227 4228 case EMLXS_DIAG_POST: 4229 4230 if (!(hba->flag & FC_ONLINE_MODE)) { 4231 return (FC_OFFLINE); 4232 } 4233 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4234 "fca_port_manage: EMLXS_DIAG_POST"); 4235 4236 errno = emlxs_diag_post_run(hba); 4237 4238 if (pm->pm_stat_len == sizeof (errno)) { 4239 *(int *)pm->pm_stat_buf = errno; 4240 } 4241 4242 break; 4243 4244 4245 case EMLXS_DIAG_ECHO: 4246 4247 if (!(hba->flag & FC_ONLINE_MODE)) { 4248 return (FC_OFFLINE); 4249 } 4250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4251 "fca_port_manage: EMLXS_DIAG_ECHO"); 4252 4253 if (pm->pm_cmd_len != sizeof (uint32_t)) { 4254 ret = FC_INVALID_REQUEST; 4255 break; 4256 
} 4257 4258 did = *((uint32_t *)pm->pm_cmd_buf); 4259 4260 if (pm->pm_data_len) { 4261 pattern = *((uint32_t *)pm->pm_data_buf); 4262 } 4263 4264 errno = emlxs_diag_echo_run(port, did, pattern); 4265 4266 if (pm->pm_stat_len == sizeof (errno)) { 4267 *(int *)pm->pm_stat_buf = errno; 4268 } 4269 4270 break; 4271 4272 4273 case EMLXS_PARM_GET_NUM: 4274 { 4275 uint32_t *num; 4276 emlxs_config_t *cfg; 4277 uint32_t i; 4278 uint32_t count; 4279 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4280 "fca_port_manage: EMLXS_PARM_GET_NUM"); 4281 4282 if (pm->pm_stat_len < sizeof (uint32_t)) { 4283 ret = FC_NOMEM; 4284 break; 4285 } 4286 4287 num = (uint32_t *)pm->pm_stat_buf; 4288 count = 0; 4289 cfg = &CFG; 4290 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4291 if (!(cfg->flags & PARM_HIDDEN)) { 4292 count++; 4293 } 4294 4295 } 4296 4297 *num = count; 4298 4299 break; 4300 } 4301 4302 case EMLXS_PARM_GET_LIST: 4303 { 4304 emlxs_parm_t *parm; 4305 emlxs_config_t *cfg; 4306 uint32_t i; 4307 uint32_t max_count; 4308 4309 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4310 "fca_port_manage: EMLXS_PARM_GET_LIST"); 4311 4312 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4313 ret = FC_NOMEM; 4314 break; 4315 } 4316 4317 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t); 4318 4319 parm = (emlxs_parm_t *)pm->pm_stat_buf; 4320 cfg = &CFG; 4321 for (i = 0; i < NUM_CFG_PARAM && max_count; i++, 4322 cfg++) { 4323 if (!(cfg->flags & PARM_HIDDEN)) { 4324 (void) strcpy(parm->label, cfg->string); 4325 parm->min = cfg->low; 4326 parm->max = cfg->hi; 4327 parm->def = cfg->def; 4328 parm->current = cfg->current; 4329 parm->flags = cfg->flags; 4330 (void) strcpy(parm->help, cfg->help); 4331 parm++; 4332 max_count--; 4333 } 4334 } 4335 4336 break; 4337 } 4338 4339 case EMLXS_PARM_GET: 4340 { 4341 emlxs_parm_t *parm_in; 4342 emlxs_parm_t *parm_out; 4343 emlxs_config_t *cfg; 4344 uint32_t i; 4345 uint32_t len; 4346 4347 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4348 
EMLXS_MSGF(EMLXS_CONTEXT, 4349 &emlxs_sfs_debug_msg, 4350 "fca_port_manage: EMLXS_PARM_GET. " 4351 "inbuf too small."); 4352 4353 ret = FC_BADCMD; 4354 break; 4355 } 4356 4357 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4358 EMLXS_MSGF(EMLXS_CONTEXT, 4359 &emlxs_sfs_debug_msg, 4360 "fca_port_manage: EMLXS_PARM_GET. " 4361 "outbuf too small"); 4362 4363 ret = FC_BADCMD; 4364 break; 4365 } 4366 4367 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4368 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4369 len = strlen(parm_in->label); 4370 cfg = &CFG; 4371 ret = FC_BADOBJECT; 4372 4373 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4374 "fca_port_manage: EMLXS_PARM_GET: %s", 4375 parm_in->label); 4376 4377 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4378 if (len == strlen(cfg->string) && 4379 (strcmp(parm_in->label, 4380 cfg->string) == 0)) { 4381 (void) strcpy(parm_out->label, 4382 cfg->string); 4383 parm_out->min = cfg->low; 4384 parm_out->max = cfg->hi; 4385 parm_out->def = cfg->def; 4386 parm_out->current = cfg->current; 4387 parm_out->flags = cfg->flags; 4388 (void) strcpy(parm_out->help, 4389 cfg->help); 4390 4391 ret = FC_SUCCESS; 4392 break; 4393 } 4394 } 4395 4396 break; 4397 } 4398 4399 case EMLXS_PARM_SET: 4400 { 4401 emlxs_parm_t *parm_in; 4402 emlxs_parm_t *parm_out; 4403 emlxs_config_t *cfg; 4404 uint32_t i; 4405 uint32_t len; 4406 4407 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4408 EMLXS_MSGF(EMLXS_CONTEXT, 4409 &emlxs_sfs_debug_msg, 4410 "fca_port_manage: EMLXS_PARM_GET. " 4411 "inbuf too small."); 4412 4413 ret = FC_BADCMD; 4414 break; 4415 } 4416 4417 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4418 EMLXS_MSGF(EMLXS_CONTEXT, 4419 &emlxs_sfs_debug_msg, 4420 "fca_port_manage: EMLXS_PARM_GET. 
" 4421 "outbuf too small"); 4422 ret = FC_BADCMD; 4423 break; 4424 } 4425 4426 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4427 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4428 len = strlen(parm_in->label); 4429 cfg = &CFG; 4430 ret = FC_BADOBJECT; 4431 4432 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4433 "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d", 4434 parm_in->label, parm_in->current, 4435 parm_in->current); 4436 4437 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4438 /* Find matching parameter string */ 4439 if (len == strlen(cfg->string) && 4440 (strcmp(parm_in->label, 4441 cfg->string) == 0)) { 4442 /* Attempt to update parameter */ 4443 if (emlxs_set_parm(hba, i, 4444 parm_in->current) == FC_SUCCESS) { 4445 (void) strcpy(parm_out->label, 4446 cfg->string); 4447 parm_out->min = cfg->low; 4448 parm_out->max = cfg->hi; 4449 parm_out->def = cfg->def; 4450 parm_out->current = 4451 cfg->current; 4452 parm_out->flags = cfg->flags; 4453 (void) strcpy(parm_out->help, 4454 cfg->help); 4455 4456 ret = FC_SUCCESS; 4457 } 4458 4459 break; 4460 } 4461 } 4462 4463 break; 4464 } 4465 4466 case EMLXS_LOG_GET: 4467 { 4468 emlxs_log_req_t *req; 4469 emlxs_log_resp_t *resp; 4470 uint32_t len; 4471 4472 /* Check command size */ 4473 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) { 4474 ret = FC_BADCMD; 4475 break; 4476 } 4477 4478 /* Get the request */ 4479 req = (emlxs_log_req_t *)pm->pm_cmd_buf; 4480 4481 /* Calculate the response length from the request */ 4482 len = sizeof (emlxs_log_resp_t) + 4483 (req->count * MAX_LOG_MSG_LENGTH); 4484 4485 /* Check the response buffer length */ 4486 if (pm->pm_stat_len < len) { 4487 ret = FC_BADCMD; 4488 break; 4489 } 4490 4491 /* Get the response pointer */ 4492 resp = (emlxs_log_resp_t *)pm->pm_stat_buf; 4493 4494 /* Get the request log enties */ 4495 (void) emlxs_msg_log_get(hba, req, resp); 4496 4497 ret = FC_SUCCESS; 4498 break; 4499 } 4500 4501 case EMLXS_GET_BOOT_REV: 4502 { 4503 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 
4504 "fca_port_manage: EMLXS_GET_BOOT_REV"); 4505 4506 if (pm->pm_stat_len < strlen(vpd->boot_version)) { 4507 ret = FC_NOMEM; 4508 break; 4509 } 4510 4511 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4512 (void) sprintf(pm->pm_stat_buf, "%s %s", 4513 hba->model_info.model, vpd->boot_version); 4514 4515 break; 4516 } 4517 4518 case EMLXS_DOWNLOAD_BOOT: 4519 if (!(hba->flag & FC_ONLINE_MODE)) { 4520 return (FC_OFFLINE); 4521 } 4522 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4523 "fca_port_manage: EMLXS_DOWNLOAD_BOOT"); 4524 4525 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4526 pm->pm_data_len, 1); 4527 break; 4528 4529 case EMLXS_DOWNLOAD_CFL: 4530 { 4531 uint32_t *buffer; 4532 uint32_t region; 4533 uint32_t length; 4534 4535 if (!(hba->flag & FC_ONLINE_MODE)) { 4536 return (FC_OFFLINE); 4537 } 4538 4539 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4540 "fca_port_manage: EMLXS_DOWNLOAD_CFL"); 4541 4542 /* Extract the region number from the first word. */ 4543 buffer = (uint32_t *)pm->pm_data_buf; 4544 region = *buffer++; 4545 4546 /* Adjust the image length for the header word */ 4547 length = pm->pm_data_len - 4; 4548 4549 ret = 4550 emlxs_cfl_download(hba, region, (caddr_t)buffer, 4551 length); 4552 break; 4553 } 4554 4555 case EMLXS_VPD_GET: 4556 { 4557 emlxs_vpd_desc_t *vpd_out; 4558 4559 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4560 "fca_port_manage: EMLXS_VPD_GET"); 4561 4562 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) { 4563 ret = FC_BADCMD; 4564 break; 4565 } 4566 4567 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf; 4568 bzero(vpd_out, sizeof (emlxs_vpd_desc_t)); 4569 4570 (void) strncpy(vpd_out->id, vpd->id, 4571 sizeof (vpd_out->id)); 4572 (void) strncpy(vpd_out->part_num, vpd->part_num, 4573 sizeof (vpd_out->part_num)); 4574 (void) strncpy(vpd_out->eng_change, vpd->eng_change, 4575 sizeof (vpd_out->eng_change)); 4576 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer, 4577 sizeof (vpd_out->manufacturer)); 4578 (void) 
strncpy(vpd_out->serial_num, vpd->serial_num, 4579 sizeof (vpd_out->serial_num)); 4580 (void) strncpy(vpd_out->model, vpd->model, 4581 sizeof (vpd_out->model)); 4582 (void) strncpy(vpd_out->model_desc, vpd->model_desc, 4583 sizeof (vpd_out->model_desc)); 4584 (void) strncpy(vpd_out->port_num, vpd->port_num, 4585 sizeof (vpd_out->port_num)); 4586 (void) strncpy(vpd_out->prog_types, vpd->prog_types, 4587 sizeof (vpd_out->prog_types)); 4588 4589 ret = FC_SUCCESS; 4590 4591 break; 4592 } 4593 4594 case EMLXS_GET_FCIO_REV: 4595 { 4596 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4597 "fca_port_manage: EMLXS_GET_FCIO_REV"); 4598 4599 if (pm->pm_stat_len < sizeof (uint32_t)) { 4600 ret = FC_NOMEM; 4601 break; 4602 } 4603 4604 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4605 *(uint32_t *)pm->pm_stat_buf = FCIO_REV; 4606 4607 break; 4608 } 4609 4610 case EMLXS_GET_DFC_REV: 4611 { 4612 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4613 "fca_port_manage: EMLXS_GET_DFC_REV"); 4614 4615 if (pm->pm_stat_len < sizeof (uint32_t)) { 4616 ret = FC_NOMEM; 4617 break; 4618 } 4619 4620 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4621 *(uint32_t *)pm->pm_stat_buf = DFC_REV; 4622 4623 break; 4624 } 4625 4626 case EMLXS_SET_BOOT_STATE: 4627 case EMLXS_SET_BOOT_STATE_old: 4628 { 4629 uint32_t state; 4630 4631 if (!(hba->flag & FC_ONLINE_MODE)) { 4632 return (FC_OFFLINE); 4633 } 4634 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4635 EMLXS_MSGF(EMLXS_CONTEXT, 4636 &emlxs_sfs_debug_msg, 4637 "fca_port_manage: EMLXS_SET_BOOT_STATE"); 4638 ret = FC_BADCMD; 4639 break; 4640 } 4641 4642 state = *(uint32_t *)pm->pm_cmd_buf; 4643 4644 if (state == 0) { 4645 EMLXS_MSGF(EMLXS_CONTEXT, 4646 &emlxs_sfs_debug_msg, 4647 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4648 "Disable"); 4649 ret = emlxs_boot_code_disable(hba); 4650 } else { 4651 EMLXS_MSGF(EMLXS_CONTEXT, 4652 &emlxs_sfs_debug_msg, 4653 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4654 "Enable"); 4655 ret = emlxs_boot_code_enable(hba); 4656 } 4657 4658 
break; 4659 } 4660 4661 case EMLXS_GET_BOOT_STATE: 4662 case EMLXS_GET_BOOT_STATE_old: 4663 { 4664 if (!(hba->flag & FC_ONLINE_MODE)) { 4665 return (FC_OFFLINE); 4666 } 4667 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4668 "fca_port_manage: EMLXS_GET_BOOT_STATE"); 4669 4670 if (pm->pm_stat_len < sizeof (uint32_t)) { 4671 ret = FC_NOMEM; 4672 break; 4673 } 4674 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4675 4676 ret = emlxs_boot_code_state(hba); 4677 4678 if (ret == FC_SUCCESS) { 4679 *(uint32_t *)pm->pm_stat_buf = 1; 4680 ret = FC_SUCCESS; 4681 } else if (ret == FC_FAILURE) { 4682 ret = FC_SUCCESS; 4683 } 4684 4685 break; 4686 } 4687 4688 case EMLXS_HW_ERROR_TEST: 4689 { 4690 if (!(hba->flag & FC_ONLINE_MODE)) { 4691 return (FC_OFFLINE); 4692 } 4693 4694 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4695 "fca_port_manage: EMLXS_HW_ERROR_TEST"); 4696 4697 /* Trigger a mailbox timeout */ 4698 hba->mbox_timer = hba->timer_tics; 4699 4700 break; 4701 } 4702 4703 case EMLXS_TEST_CODE: 4704 { 4705 uint32_t *cmd; 4706 4707 if (!(hba->flag & FC_ONLINE_MODE)) { 4708 return (FC_OFFLINE); 4709 } 4710 4711 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4712 "fca_port_manage: EMLXS_TEST_CODE"); 4713 4714 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4715 EMLXS_MSGF(EMLXS_CONTEXT, 4716 &emlxs_sfs_debug_msg, 4717 "fca_port_manage: EMLXS_TEST_CODE. 
" 4718 "inbuf to small."); 4719 4720 ret = FC_BADCMD; 4721 break; 4722 } 4723 4724 cmd = (uint32_t *)pm->pm_cmd_buf; 4725 4726 ret = emlxs_test(hba, cmd[0], 4727 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]); 4728 4729 break; 4730 } 4731 4732 case EMLXS_BAR_IO: 4733 { 4734 uint32_t *cmd; 4735 uint32_t *datap; 4736 uint32_t offset; 4737 caddr_t addr; 4738 uint32_t i; 4739 uint32_t tx_cnt; 4740 uint32_t chip_cnt; 4741 4742 cmd = (uint32_t *)pm->pm_cmd_buf; 4743 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4744 "fca_port_manage: EMLXS_BAR_IO %x %x %x", 4745 cmd[0], cmd[1], cmd[2]); 4746 4747 offset = cmd[1]; 4748 4749 ret = FC_SUCCESS; 4750 4751 switch (cmd[0]) { 4752 case 2: /* bar1read */ 4753 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4754 return (FC_BADCMD); 4755 } 4756 4757 /* Registers in this range are invalid */ 4758 if ((offset >= 0x4C00) && (offset < 0x5000)) { 4759 return (FC_BADCMD); 4760 } 4761 if ((offset >= 0x5800) || (offset & 0x3)) { 4762 return (FC_BADCMD); 4763 } 4764 datap = (uint32_t *)pm->pm_stat_buf; 4765 4766 for (i = 0; i < pm->pm_stat_len; 4767 i += sizeof (uint32_t)) { 4768 if ((offset >= 0x4C00) && 4769 (offset < 0x5000)) { 4770 pm->pm_stat_len = i; 4771 break; 4772 } 4773 if (offset >= 0x5800) { 4774 pm->pm_stat_len = i; 4775 break; 4776 } 4777 addr = hba->sli.sli4.bar1_addr + offset; 4778 *datap = READ_BAR1_REG(hba, addr); 4779 datap++; 4780 offset += sizeof (uint32_t); 4781 } 4782 #ifdef FMA_SUPPORT 4783 /* Access handle validation */ 4784 EMLXS_CHK_ACC_HANDLE(hba, 4785 hba->sli.sli4.bar1_acc_handle); 4786 #endif /* FMA_SUPPORT */ 4787 break; 4788 case 3: /* bar2read */ 4789 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4790 return (FC_BADCMD); 4791 } 4792 if ((offset >= 0x1000) || (offset & 0x3)) { 4793 return (FC_BADCMD); 4794 } 4795 datap = (uint32_t *)pm->pm_stat_buf; 4796 4797 for (i = 0; i < pm->pm_stat_len; 4798 i += sizeof (uint32_t)) { 4799 *datap = READ_BAR2_REG(hba, 4800 hba->sli.sli4.bar2_addr + offset); 4801 datap++; 
4802 offset += sizeof (uint32_t); 4803 } 4804 #ifdef FMA_SUPPORT 4805 /* Access handle validation */ 4806 EMLXS_CHK_ACC_HANDLE(hba, 4807 hba->sli.sli4.bar2_acc_handle); 4808 #endif /* FMA_SUPPORT */ 4809 break; 4810 case 4: /* bar1write */ 4811 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4812 return (FC_BADCMD); 4813 } 4814 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr + 4815 offset, cmd[2]); 4816 #ifdef FMA_SUPPORT 4817 /* Access handle validation */ 4818 EMLXS_CHK_ACC_HANDLE(hba, 4819 hba->sli.sli4.bar1_acc_handle); 4820 #endif /* FMA_SUPPORT */ 4821 break; 4822 case 5: /* bar2write */ 4823 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4824 return (FC_BADCMD); 4825 } 4826 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr + 4827 offset, cmd[2]); 4828 #ifdef FMA_SUPPORT 4829 /* Access handle validation */ 4830 EMLXS_CHK_ACC_HANDLE(hba, 4831 hba->sli.sli4.bar2_acc_handle); 4832 #endif /* FMA_SUPPORT */ 4833 break; 4834 case 6: /* dumpbsmbox */ 4835 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4836 return (FC_BADCMD); 4837 } 4838 if (offset != 0) { 4839 return (FC_BADCMD); 4840 } 4841 4842 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt, 4843 (caddr_t)pm->pm_stat_buf, 256); 4844 break; 4845 case 7: /* pciread */ 4846 if ((offset >= 0x200) || (offset & 0x3)) { 4847 return (FC_BADCMD); 4848 } 4849 datap = (uint32_t *)pm->pm_stat_buf; 4850 for (i = 0; i < pm->pm_stat_len; 4851 i += sizeof (uint32_t)) { 4852 *datap = ddi_get32(hba->pci_acc_handle, 4853 (uint32_t *)(hba->pci_addr + 4854 offset)); 4855 datap++; 4856 offset += sizeof (uint32_t); 4857 } 4858 #ifdef FMA_SUPPORT 4859 /* Access handle validation */ 4860 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle); 4861 #endif /* FMA_SUPPORT */ 4862 break; 4863 case 8: /* abortall */ 4864 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4865 return (FC_BADCMD); 4866 } 4867 emlxs_abort_all(hba, &tx_cnt, &chip_cnt); 4868 datap = (uint32_t *)pm->pm_stat_buf; 4869 *datap++ = tx_cnt; 4870 *datap = chip_cnt; 4871 break; 4872 default: 4873 ret = 
FC_BADCMD; 4874 break; 4875 } 4876 break; 4877 } 4878 4879 default: 4880 4881 ret = FC_INVALID_REQUEST; 4882 break; 4883 } 4884 4885 break; 4886 4887 } 4888 4889 case FC_PORT_INITIALIZE: 4890 if (!(hba->flag & FC_ONLINE_MODE)) { 4891 return (FC_OFFLINE); 4892 } 4893 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4894 "fca_port_manage: FC_PORT_INITIALIZE"); 4895 break; 4896 4897 case FC_PORT_LOOPBACK: 4898 if (!(hba->flag & FC_ONLINE_MODE)) { 4899 return (FC_OFFLINE); 4900 } 4901 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4902 "fca_port_manage: FC_PORT_LOOPBACK"); 4903 break; 4904 4905 case FC_PORT_BYPASS: 4906 if (!(hba->flag & FC_ONLINE_MODE)) { 4907 return (FC_OFFLINE); 4908 } 4909 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4910 "fca_port_manage: FC_PORT_BYPASS"); 4911 ret = FC_INVALID_REQUEST; 4912 break; 4913 4914 case FC_PORT_UNBYPASS: 4915 if (!(hba->flag & FC_ONLINE_MODE)) { 4916 return (FC_OFFLINE); 4917 } 4918 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4919 "fca_port_manage: FC_PORT_UNBYPASS"); 4920 ret = FC_INVALID_REQUEST; 4921 break; 4922 4923 case FC_PORT_GET_NODE_ID: 4924 { 4925 fc_rnid_t *rnid; 4926 4927 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4928 "fca_port_manage: FC_PORT_GET_NODE_ID"); 4929 4930 bzero(pm->pm_data_buf, pm->pm_data_len); 4931 4932 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4933 ret = FC_NOMEM; 4934 break; 4935 } 4936 4937 rnid = (fc_rnid_t *)pm->pm_data_buf; 4938 4939 (void) sprintf((char *)rnid->global_id, 4940 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", 4941 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, 4942 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 4943 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 4944 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 4945 4946 rnid->unit_type = RNID_HBA; 4947 rnid->port_id = port->did; 4948 rnid->ip_version = RNID_IPV4; 4949 4950 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4951 "GET_NODE_ID: wwpn: %s", rnid->global_id); 4952 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 
4953 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4954 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4955 "GET_NODE_ID: port_id: 0x%x", rnid->port_id); 4956 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4957 "GET_NODE_ID: num_attach: %d", rnid->num_attached); 4958 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4959 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4960 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4961 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 4962 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4963 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr); 4964 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4965 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 4966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4967 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 4968 4969 ret = FC_SUCCESS; 4970 break; 4971 } 4972 4973 case FC_PORT_SET_NODE_ID: 4974 { 4975 fc_rnid_t *rnid; 4976 4977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4978 "fca_port_manage: FC_PORT_SET_NODE_ID"); 4979 4980 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4981 ret = FC_NOMEM; 4982 break; 4983 } 4984 4985 rnid = (fc_rnid_t *)pm->pm_data_buf; 4986 4987 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4988 "SET_NODE_ID: wwpn: %s", rnid->global_id); 4989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4990 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4991 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4992 "SET_NODE_ID: port_id: 0x%x", rnid->port_id); 4993 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4994 "SET_NODE_ID: num_attach: %d", rnid->num_attached); 4995 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4996 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4997 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4998 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 4999 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5000 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr); 5001 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5002 "SET_NODE_ID: resv: 0x%x", 
rnid->specific_id_resv); 5003 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5004 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 5005 5006 ret = FC_SUCCESS; 5007 break; 5008 } 5009 5010 #ifdef S11 5011 case FC_PORT_GET_P2P_INFO: 5012 { 5013 fc_fca_p2p_info_t *p2p_info; 5014 NODELIST *ndlp; 5015 5016 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5017 "fca_port_manage: FC_PORT_GET_P2P_INFO"); 5018 5019 bzero(pm->pm_data_buf, pm->pm_data_len); 5020 5021 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) { 5022 ret = FC_NOMEM; 5023 break; 5024 } 5025 5026 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf; 5027 5028 if (hba->state >= FC_LINK_UP) { 5029 if ((hba->topology == TOPOLOGY_PT_PT) && 5030 (hba->flag & FC_PT_TO_PT)) { 5031 p2p_info->fca_d_id = port->did; 5032 p2p_info->d_id = port->rdid; 5033 5034 ndlp = emlxs_node_find_did(port, 5035 port->rdid); 5036 5037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5038 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, " 5039 "d_id: 0x%x, ndlp: 0x%p", port->did, 5040 port->rdid, ndlp); 5041 if (ndlp) { 5042 bcopy(&ndlp->nlp_portname, 5043 (caddr_t)&p2p_info->pwwn, 5044 sizeof (la_wwn_t)); 5045 bcopy(&ndlp->nlp_nodename, 5046 (caddr_t)&p2p_info->nwwn, 5047 sizeof (la_wwn_t)); 5048 5049 ret = FC_SUCCESS; 5050 break; 5051 5052 } 5053 } 5054 } 5055 5056 ret = FC_FAILURE; 5057 break; 5058 } 5059 #endif 5060 5061 default: 5062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5063 "fca_port_manage: code=%x", pm->pm_cmd_code); 5064 ret = FC_INVALID_REQUEST; 5065 break; 5066 5067 } 5068 5069 return (ret); 5070 5071 } /* emlxs_port_manage() */ 5072 5073 5074 /*ARGSUSED*/ 5075 static uint32_t 5076 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args, 5077 uint32_t *arg) 5078 { 5079 uint32_t rval = 0; 5080 emlxs_port_t *port = &PPORT; 5081 5082 switch (test_code) { 5083 #ifdef TEST_SUPPORT 5084 case 1: /* SCSI underrun */ 5085 { 5086 hba->underrun_counter = (args)? 
arg[0]:1; 5087 break; 5088 } 5089 #endif /* TEST_SUPPORT */ 5090 5091 default: 5092 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5093 "emlxs_test: Unsupported test code. (0x%x)", test_code); 5094 rval = FC_INVALID_REQUEST; 5095 } 5096 5097 return (rval); 5098 5099 } /* emlxs_test() */ 5100 5101 5102 /* 5103 * Given the device number, return the devinfo pointer or the ddiinst number. 5104 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even 5105 * before attach. 5106 * 5107 * Translate "dev_t" to a pointer to the associated "dev_info_t". 5108 */ 5109 /*ARGSUSED*/ 5110 static int 5111 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 5112 { 5113 emlxs_hba_t *hba; 5114 int32_t ddiinst; 5115 5116 ddiinst = getminor((dev_t)arg); 5117 5118 switch (infocmd) { 5119 case DDI_INFO_DEVT2DEVINFO: 5120 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5121 if (hba) 5122 *result = hba->dip; 5123 else 5124 *result = NULL; 5125 break; 5126 5127 case DDI_INFO_DEVT2INSTANCE: 5128 *result = (void *)((unsigned long)ddiinst); 5129 break; 5130 5131 default: 5132 return (DDI_FAILURE); 5133 } 5134 5135 return (DDI_SUCCESS); 5136 5137 } /* emlxs_info() */ 5138 5139 5140 static int32_t 5141 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level) 5142 { 5143 emlxs_hba_t *hba; 5144 emlxs_port_t *port; 5145 int32_t ddiinst; 5146 int rval = DDI_SUCCESS; 5147 5148 ddiinst = ddi_get_instance(dip); 5149 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5150 port = &PPORT; 5151 5152 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5153 "fca_power: comp=%x level=%x", comp, level); 5154 5155 if (hba == NULL || comp != EMLXS_PM_ADAPTER) { 5156 return (DDI_FAILURE); 5157 } 5158 5159 mutex_enter(&hba->pm_lock); 5160 5161 /* If we are already at the proper level then return success */ 5162 if (hba->pm_level == level) { 5163 mutex_exit(&hba->pm_lock); 5164 return (DDI_SUCCESS); 5165 } 5166 5167 switch (level) { 5168 case EMLXS_PM_ADAPTER_UP: 5169 
5170 /* 5171 * If we are already in emlxs_attach, 5172 * let emlxs_hba_attach take care of things 5173 */ 5174 if (hba->pm_state & EMLXS_PM_IN_ATTACH) { 5175 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5176 break; 5177 } 5178 5179 /* Check if adapter is suspended */ 5180 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5181 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5182 5183 /* Try to resume the port */ 5184 rval = emlxs_hba_resume(dip); 5185 5186 if (rval != DDI_SUCCESS) { 5187 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5188 } 5189 break; 5190 } 5191 5192 /* Set adapter up */ 5193 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5194 break; 5195 5196 case EMLXS_PM_ADAPTER_DOWN: 5197 5198 5199 /* 5200 * If we are already in emlxs_detach, 5201 * let emlxs_hba_detach take care of things 5202 */ 5203 if (hba->pm_state & EMLXS_PM_IN_DETACH) { 5204 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5205 break; 5206 } 5207 5208 /* Check if adapter is not suspended */ 5209 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5210 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5211 5212 /* Try to suspend the port */ 5213 rval = emlxs_hba_suspend(dip); 5214 5215 if (rval != DDI_SUCCESS) { 5216 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5217 } 5218 5219 break; 5220 } 5221 5222 /* Set adapter down */ 5223 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5224 break; 5225 5226 default: 5227 rval = DDI_FAILURE; 5228 break; 5229 5230 } 5231 5232 mutex_exit(&hba->pm_lock); 5233 5234 return (rval); 5235 5236 } /* emlxs_power() */ 5237 5238 5239 #ifdef EMLXS_I386 5240 #ifdef S11 5241 /* 5242 * quiesce(9E) entry point. 5243 * 5244 * This function is called when the system is single-thread at hight PIL 5245 * with preemption disabled. Therefore, this function must not be blocked. 5246 * 5247 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 5248 * DDI_FAILURE indicates an error condition and should almost never happen. 
5249 */ 5250 static int 5251 emlxs_quiesce(dev_info_t *dip) 5252 { 5253 emlxs_hba_t *hba; 5254 emlxs_port_t *port; 5255 int32_t ddiinst; 5256 int rval = DDI_SUCCESS; 5257 5258 ddiinst = ddi_get_instance(dip); 5259 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5260 port = &PPORT; 5261 5262 if (hba == NULL || port == NULL) { 5263 return (DDI_FAILURE); 5264 } 5265 5266 /* The fourth arg 1 indicates the call is from quiesce */ 5267 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) { 5268 return (rval); 5269 } else { 5270 return (DDI_FAILURE); 5271 } 5272 5273 } /* emlxs_quiesce */ 5274 #endif 5275 #endif /* EMLXS_I386 */ 5276 5277 5278 static int 5279 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p) 5280 { 5281 emlxs_hba_t *hba; 5282 emlxs_port_t *port; 5283 int ddiinst; 5284 5285 ddiinst = getminor(*dev_p); 5286 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5287 5288 if (hba == NULL) { 5289 return (ENXIO); 5290 } 5291 5292 port = &PPORT; 5293 5294 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5295 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5296 "open failed: Driver suspended."); 5297 return (ENXIO); 5298 } 5299 5300 if (otype != OTYP_CHR) { 5301 return (EINVAL); 5302 } 5303 5304 if (drv_priv(cred_p)) { 5305 return (EPERM); 5306 } 5307 5308 mutex_enter(&EMLXS_IOCTL_LOCK); 5309 5310 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) { 5311 mutex_exit(&EMLXS_IOCTL_LOCK); 5312 return (EBUSY); 5313 } 5314 5315 if (flag & FEXCL) { 5316 if (hba->ioctl_flags & EMLXS_OPEN) { 5317 mutex_exit(&EMLXS_IOCTL_LOCK); 5318 return (EBUSY); 5319 } 5320 5321 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE; 5322 } 5323 5324 hba->ioctl_flags |= EMLXS_OPEN; 5325 5326 mutex_exit(&EMLXS_IOCTL_LOCK); 5327 5328 return (0); 5329 5330 } /* emlxs_open() */ 5331 5332 5333 /*ARGSUSED*/ 5334 static int 5335 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p) 5336 { 5337 emlxs_hba_t *hba; 5338 int ddiinst; 5339 5340 ddiinst = getminor(dev); 5341 hba = 
ddi_get_soft_state(emlxs_soft_state, ddiinst); 5342 5343 if (hba == NULL) { 5344 return (ENXIO); 5345 } 5346 5347 if (otype != OTYP_CHR) { 5348 return (EINVAL); 5349 } 5350 5351 mutex_enter(&EMLXS_IOCTL_LOCK); 5352 5353 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5354 mutex_exit(&EMLXS_IOCTL_LOCK); 5355 return (ENODEV); 5356 } 5357 5358 hba->ioctl_flags &= ~EMLXS_OPEN; 5359 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE; 5360 5361 mutex_exit(&EMLXS_IOCTL_LOCK); 5362 5363 return (0); 5364 5365 } /* emlxs_close() */ 5366 5367 5368 /*ARGSUSED*/ 5369 static int 5370 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode, 5371 cred_t *cred_p, int32_t *rval_p) 5372 { 5373 emlxs_hba_t *hba; 5374 emlxs_port_t *port; 5375 int rval = 0; /* return code */ 5376 int ddiinst; 5377 5378 ddiinst = getminor(dev); 5379 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5380 5381 if (hba == NULL) { 5382 return (ENXIO); 5383 } 5384 5385 port = &PPORT; 5386 5387 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5388 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5389 "ioctl failed: Driver suspended."); 5390 5391 return (ENXIO); 5392 } 5393 5394 mutex_enter(&EMLXS_IOCTL_LOCK); 5395 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5396 mutex_exit(&EMLXS_IOCTL_LOCK); 5397 return (ENXIO); 5398 } 5399 mutex_exit(&EMLXS_IOCTL_LOCK); 5400 5401 #ifdef IDLE_TIMER 5402 emlxs_pm_busy_component(hba); 5403 #endif /* IDLE_TIMER */ 5404 5405 switch (cmd) { 5406 case EMLXS_DFC_COMMAND: 5407 rval = emlxs_dfc_manage(hba, (void *)arg, mode); 5408 break; 5409 5410 default: 5411 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5412 "ioctl: Invalid command received. 
cmd=%x", cmd); 5413 rval = EINVAL; 5414 } 5415 5416 done: 5417 return (rval); 5418 5419 } /* emlxs_ioctl() */ 5420 5421 5422 5423 /* 5424 * 5425 * Device Driver Common Routines 5426 * 5427 */ 5428 5429 /* emlxs_pm_lock must be held for this call */ 5430 static int 5431 emlxs_hba_resume(dev_info_t *dip) 5432 { 5433 emlxs_hba_t *hba; 5434 emlxs_port_t *port; 5435 int ddiinst; 5436 5437 ddiinst = ddi_get_instance(dip); 5438 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5439 port = &PPORT; 5440 5441 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL); 5442 5443 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5444 return (DDI_SUCCESS); 5445 } 5446 5447 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5448 5449 /* Take the adapter online */ 5450 if (emlxs_power_up(hba)) { 5451 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg, 5452 "Unable to take adapter online."); 5453 5454 hba->pm_state |= EMLXS_PM_SUSPENDED; 5455 5456 return (DDI_FAILURE); 5457 } 5458 5459 return (DDI_SUCCESS); 5460 5461 } /* emlxs_hba_resume() */ 5462 5463 5464 /* emlxs_pm_lock must be held for this call */ 5465 static int 5466 emlxs_hba_suspend(dev_info_t *dip) 5467 { 5468 emlxs_hba_t *hba; 5469 emlxs_port_t *port; 5470 int ddiinst; 5471 5472 ddiinst = ddi_get_instance(dip); 5473 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5474 port = &PPORT; 5475 5476 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL); 5477 5478 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5479 return (DDI_SUCCESS); 5480 } 5481 5482 hba->pm_state |= EMLXS_PM_SUSPENDED; 5483 5484 /* Take the adapter offline */ 5485 if (emlxs_power_down(hba)) { 5486 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5487 5488 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg, 5489 "Unable to take adapter offline."); 5490 5491 return (DDI_FAILURE); 5492 } 5493 5494 return (DDI_SUCCESS); 5495 5496 } /* emlxs_hba_suspend() */ 5497 5498 5499 5500 static void 5501 emlxs_lock_init(emlxs_hba_t *hba) 5502 { 5503 emlxs_port_t *port = &PPORT; 5504 int32_t ddiinst; 5505 
char buf[64]; 5506 uint32_t i; 5507 5508 ddiinst = hba->ddiinst; 5509 5510 /* Initialize the power management */ 5511 (void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst); 5512 mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg); 5513 5514 (void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst); 5515 mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER, 5516 (void *)hba->intr_arg); 5517 5518 (void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst); 5519 cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL); 5520 5521 (void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst); 5522 mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER, 5523 (void *)hba->intr_arg); 5524 5525 (void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst); 5526 mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER, 5527 (void *)hba->intr_arg); 5528 5529 (void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst); 5530 cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL); 5531 5532 (void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst); 5533 mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER, 5534 (void *)hba->intr_arg); 5535 5536 (void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst); 5537 cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL); 5538 5539 (void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst); 5540 mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER, 5541 (void *)hba->intr_arg); 5542 5543 for (i = 0; i < MAX_RINGS; i++) { 5544 (void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME, 5545 ddiinst, i); 5546 mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER, 5547 (void *)hba->intr_arg); 5548 } 5549 5550 (void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst); 5551 mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER, 5552 (void *)hba->intr_arg); 5553 5554 (void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst); 5555 mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER, 5556 (void 
*)hba->intr_arg); 5557 5558 (void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst); 5559 mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER, 5560 (void *)hba->intr_arg); 5561 5562 (void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst); 5563 mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER, 5564 (void *)hba->intr_arg); 5565 5566 #ifdef DUMP_SUPPORT 5567 (void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst); 5568 mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER, 5569 (void *)hba->intr_arg); 5570 #endif /* DUMP_SUPPORT */ 5571 5572 /* Create per port locks */ 5573 for (i = 0; i < MAX_VPORTS; i++) { 5574 port = &VPORT(i); 5575 5576 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL); 5577 5578 if (i == 0) { 5579 (void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME, 5580 ddiinst); 5581 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5582 (void *)hba->intr_arg); 5583 5584 (void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME, 5585 ddiinst); 5586 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5587 5588 (void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME, 5589 ddiinst); 5590 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5591 (void *)hba->intr_arg); 5592 } else { 5593 (void) sprintf(buf, "%s%d.%d_pkt_lock mutex", 5594 DRIVER_NAME, ddiinst, port->vpi); 5595 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5596 (void *)hba->intr_arg); 5597 5598 (void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME, 5599 ddiinst, port->vpi); 5600 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5601 5602 (void) sprintf(buf, "%s%d.%d_ub_lock mutex", 5603 DRIVER_NAME, ddiinst, port->vpi); 5604 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5605 (void *)hba->intr_arg); 5606 } 5607 } 5608 5609 return; 5610 5611 } /* emlxs_lock_init() */ 5612 5613 5614 5615 static void 5616 emlxs_lock_destroy(emlxs_hba_t *hba) 5617 { 5618 emlxs_port_t *port = &PPORT; 5619 uint32_t i; 5620 5621 mutex_destroy(&EMLXS_TIMER_LOCK); 5622 cv_destroy(&hba->timer_lock_cv); 5623 5624 
mutex_destroy(&EMLXS_PORT_LOCK); 5625 5626 cv_destroy(&EMLXS_MBOX_CV); 5627 cv_destroy(&EMLXS_LINKUP_CV); 5628 5629 mutex_destroy(&EMLXS_LINKUP_LOCK); 5630 mutex_destroy(&EMLXS_MBOX_LOCK); 5631 5632 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK); 5633 5634 for (i = 0; i < MAX_RINGS; i++) { 5635 mutex_destroy(&EMLXS_CMD_RING_LOCK(i)); 5636 } 5637 5638 mutex_destroy(&EMLXS_FCTAB_LOCK); 5639 mutex_destroy(&EMLXS_MEMGET_LOCK); 5640 mutex_destroy(&EMLXS_MEMPUT_LOCK); 5641 mutex_destroy(&EMLXS_IOCTL_LOCK); 5642 mutex_destroy(&hba->pm_lock); 5643 5644 #ifdef DUMP_SUPPORT 5645 mutex_destroy(&EMLXS_DUMP_LOCK); 5646 #endif /* DUMP_SUPPORT */ 5647 5648 /* Destroy per port locks */ 5649 for (i = 0; i < MAX_VPORTS; i++) { 5650 port = &VPORT(i); 5651 rw_destroy(&port->node_rwlock); 5652 mutex_destroy(&EMLXS_PKT_LOCK); 5653 cv_destroy(&EMLXS_PKT_CV); 5654 mutex_destroy(&EMLXS_UB_LOCK); 5655 } 5656 5657 return; 5658 5659 } /* emlxs_lock_destroy() */ 5660 5661 5662 /* init_flag values */ 5663 #define ATTACH_SOFT_STATE 0x00000001 5664 #define ATTACH_FCA_TRAN 0x00000002 5665 #define ATTACH_HBA 0x00000004 5666 #define ATTACH_LOG 0x00000008 5667 #define ATTACH_MAP_BUS 0x00000010 5668 #define ATTACH_INTR_INIT 0x00000020 5669 #define ATTACH_PROP 0x00000040 5670 #define ATTACH_LOCK 0x00000080 5671 #define ATTACH_THREAD 0x00000100 5672 #define ATTACH_INTR_ADD 0x00000200 5673 #define ATTACH_ONLINE 0x00000400 5674 #define ATTACH_NODE 0x00000800 5675 #define ATTACH_FCT 0x00001000 5676 #define ATTACH_FCA 0x00002000 5677 #define ATTACH_KSTAT 0x00004000 5678 #define ATTACH_DHCHAP 0x00008000 5679 #define ATTACH_FM 0x00010000 5680 #define ATTACH_MAP_SLI 0x00020000 5681 #define ATTACH_SPAWN 0x00040000 5682 #define ATTACH_EVENTS 0x00080000 5683 5684 static void 5685 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed) 5686 { 5687 emlxs_hba_t *hba = NULL; 5688 int ddiinst; 5689 5690 ddiinst = ddi_get_instance(dip); 5691 5692 if (init_flag & ATTACH_HBA) { 5693 hba = 
ddi_get_soft_state(emlxs_soft_state, ddiinst); 5694 5695 if (init_flag & ATTACH_SPAWN) { 5696 emlxs_thread_spawn_destroy(hba); 5697 } 5698 5699 if (init_flag & ATTACH_ONLINE) { 5700 (void) emlxs_offline(hba); 5701 } 5702 5703 if (init_flag & ATTACH_INTR_ADD) { 5704 (void) EMLXS_INTR_REMOVE(hba); 5705 } 5706 #ifdef SFCT_SUPPORT 5707 if (init_flag & ATTACH_FCT) { 5708 emlxs_fct_detach(hba); 5709 emlxs_fct_modclose(); 5710 } 5711 #endif /* SFCT_SUPPORT */ 5712 5713 #ifdef DHCHAP_SUPPORT 5714 if (init_flag & ATTACH_DHCHAP) { 5715 emlxs_dhc_detach(hba); 5716 } 5717 #endif /* DHCHAP_SUPPORT */ 5718 5719 if (init_flag & ATTACH_KSTAT) { 5720 kstat_delete(hba->kstat); 5721 } 5722 5723 if (init_flag & ATTACH_FCA) { 5724 emlxs_fca_detach(hba); 5725 } 5726 5727 if (init_flag & ATTACH_NODE) { 5728 (void) ddi_remove_minor_node(hba->dip, "devctl"); 5729 } 5730 5731 if (init_flag & ATTACH_THREAD) { 5732 emlxs_thread_destroy(&hba->iodone_thread); 5733 } 5734 5735 if (init_flag & ATTACH_PROP) { 5736 (void) ddi_prop_remove_all(hba->dip); 5737 } 5738 5739 if (init_flag & ATTACH_LOCK) { 5740 emlxs_lock_destroy(hba); 5741 } 5742 5743 if (init_flag & ATTACH_INTR_INIT) { 5744 (void) EMLXS_INTR_UNINIT(hba); 5745 } 5746 5747 if (init_flag & ATTACH_MAP_BUS) { 5748 emlxs_unmap_bus(hba); 5749 } 5750 5751 if (init_flag & ATTACH_MAP_SLI) { 5752 EMLXS_SLI_UNMAP_HDW(hba); 5753 } 5754 5755 #ifdef FMA_SUPPORT 5756 if (init_flag & ATTACH_FM) { 5757 emlxs_fm_fini(hba); 5758 } 5759 #endif /* FMA_SUPPORT */ 5760 5761 if (init_flag & ATTACH_EVENTS) { 5762 (void) emlxs_event_queue_destroy(hba); 5763 } 5764 5765 if (init_flag & ATTACH_LOG) { 5766 (void) emlxs_msg_log_destroy(hba); 5767 } 5768 5769 if (init_flag & ATTACH_FCA_TRAN) { 5770 (void) ddi_set_driver_private(hba->dip, NULL); 5771 kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t)); 5772 hba->fca_tran = NULL; 5773 } 5774 5775 if (init_flag & ATTACH_HBA) { 5776 emlxs_device.log[hba->emlxinst] = 0; 5777 emlxs_device.hba[hba->emlxinst] = 5778 (emlxs_hba_t 
*)((unsigned long)((failed) ? -1 : 0)); 5779 #ifdef DUMP_SUPPORT 5780 emlxs_device.dump_txtfile[hba->emlxinst] = 0; 5781 emlxs_device.dump_dmpfile[hba->emlxinst] = 0; 5782 emlxs_device.dump_ceefile[hba->emlxinst] = 0; 5783 #endif /* DUMP_SUPPORT */ 5784 5785 } 5786 } 5787 5788 if (init_flag & ATTACH_SOFT_STATE) { 5789 (void) ddi_soft_state_free(emlxs_soft_state, ddiinst); 5790 } 5791 5792 return; 5793 5794 } /* emlxs_driver_remove() */ 5795 5796 5797 5798 /* This determines which ports will be initiator mode */ 5799 static void 5800 emlxs_fca_init(emlxs_hba_t *hba) 5801 { 5802 emlxs_port_t *port = &PPORT; 5803 emlxs_port_t *vport; 5804 uint32_t i; 5805 5806 if (!hba->ini_mode) { 5807 return; 5808 } 5809 /* Check if SFS present */ 5810 if (((void *)MODSYM(fc_fca_init) == NULL) || 5811 ((void *)MODSYM(fc_fca_attach) == NULL)) { 5812 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5813 "SFS not present. Initiator mode disabled."); 5814 goto failed; 5815 } 5816 5817 /* Check if our SFS driver interface matches the current SFS stack */ 5818 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) { 5819 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5820 "SFS/FCA version mismatch. FCA=0x%x", 5821 hba->fca_tran->fca_version); 5822 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5823 "SFS present. Initiator mode disabled."); 5824 5825 goto failed; 5826 } 5827 5828 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5829 "SFS present. 
Initiator mode enabled."); 5830 5831 return; 5832 5833 failed: 5834 5835 hba->ini_mode = 0; 5836 for (i = 0; i < MAX_VPORTS; i++) { 5837 vport = &VPORT(i); 5838 vport->ini_mode = 0; 5839 } 5840 5841 return; 5842 5843 } /* emlxs_fca_init() */ 5844 5845 5846 /* This determines which ports will be initiator or target mode */ 5847 static void 5848 emlxs_set_mode(emlxs_hba_t *hba) 5849 { 5850 emlxs_port_t *port = &PPORT; 5851 emlxs_port_t *vport; 5852 uint32_t i; 5853 uint32_t tgt_mode = 0; 5854 5855 #ifdef SFCT_SUPPORT 5856 emlxs_config_t *cfg; 5857 5858 cfg = &hba->config[CFG_TARGET_MODE]; 5859 tgt_mode = cfg->current; 5860 5861 if (tgt_mode) { 5862 if (emlxs_fct_modopen() != 0) { 5863 tgt_mode = 0; 5864 } 5865 } 5866 5867 port->fct_flags = 0; 5868 #endif /* SFCT_SUPPORT */ 5869 5870 /* Initialize physical port */ 5871 if (tgt_mode) { 5872 hba->tgt_mode = 1; 5873 hba->ini_mode = 0; 5874 5875 port->tgt_mode = 1; 5876 port->ini_mode = 0; 5877 } else { 5878 hba->tgt_mode = 0; 5879 hba->ini_mode = 1; 5880 5881 port->tgt_mode = 0; 5882 port->ini_mode = 1; 5883 } 5884 5885 /* Initialize virtual ports */ 5886 /* Virtual ports take on the mode of the parent physical port */ 5887 for (i = 1; i < MAX_VPORTS; i++) { 5888 vport = &VPORT(i); 5889 5890 #ifdef SFCT_SUPPORT 5891 vport->fct_flags = 0; 5892 #endif /* SFCT_SUPPORT */ 5893 5894 vport->ini_mode = port->ini_mode; 5895 vport->tgt_mode = port->tgt_mode; 5896 } 5897 5898 /* Check if initiator mode is requested */ 5899 if (hba->ini_mode) { 5900 emlxs_fca_init(hba); 5901 } else { 5902 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5903 "Initiator mode not enabled."); 5904 } 5905 5906 #ifdef SFCT_SUPPORT 5907 /* Check if target mode is requested */ 5908 if (hba->tgt_mode) { 5909 emlxs_fct_init(hba); 5910 } else { 5911 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 5912 "Target mode not enabled."); 5913 } 5914 #endif /* SFCT_SUPPORT */ 5915 5916 return; 5917 5918 } /* emlxs_set_mode() */ 5919 5920 5921 5922 static void 
5923 emlxs_fca_attach(emlxs_hba_t *hba) 5924 { 5925 /* Update our transport structure */ 5926 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg; 5927 hba->fca_tran->fca_cmd_max = hba->io_throttle; 5928 5929 #if (EMLXS_MODREV >= EMLXS_MODREV5) 5930 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn, 5931 sizeof (NAME_TYPE)); 5932 #endif /* >= EMLXS_MODREV5 */ 5933 5934 return; 5935 5936 } /* emlxs_fca_attach() */ 5937 5938 5939 static void 5940 emlxs_fca_detach(emlxs_hba_t *hba) 5941 { 5942 uint32_t i; 5943 emlxs_port_t *vport; 5944 5945 if (hba->ini_mode) { 5946 if ((void *)MODSYM(fc_fca_detach) != NULL) { 5947 MODSYM(fc_fca_detach)(hba->dip); 5948 } 5949 5950 hba->ini_mode = 0; 5951 5952 for (i = 0; i < MAX_VPORTS; i++) { 5953 vport = &VPORT(i); 5954 vport->ini_mode = 0; 5955 } 5956 } 5957 5958 return; 5959 5960 } /* emlxs_fca_detach() */ 5961 5962 5963 5964 static void 5965 emlxs_drv_banner(emlxs_hba_t *hba) 5966 { 5967 emlxs_port_t *port = &PPORT; 5968 uint32_t i; 5969 char sli_mode[16]; 5970 char msi_mode[16]; 5971 char npiv_mode[16]; 5972 emlxs_vpd_t *vpd = &VPD; 5973 emlxs_config_t *cfg = &CFG; 5974 uint8_t *wwpn; 5975 uint8_t *wwnn; 5976 5977 /* Display firmware library one time */ 5978 if (emlxs_instance_count == 1) { 5979 emlxs_fw_show(hba); 5980 } 5981 5982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label, 5983 emlxs_revision); 5984 5985 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5986 "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model, 5987 hba->model_info.device_id, hba->model_info.ssdid, 5988 hba->model_info.id); 5989 5990 #ifdef EMLXS_I386 5991 5992 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5993 "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label, 5994 vpd->boot_version); 5995 5996 #else /* EMLXS_SPARC */ 5997 5998 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 5999 "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version, 6000 vpd->fw_label, vpd->boot_version, vpd->fcode_version); 6001 6002 
#endif /* EMLXS_I386 */

	/* Build the "SLI:" string; SLI-4 also reports FIP capability */
	if (hba->sli_mode > 3) {
		(void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
	} else {
		(void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
	}

	/* Interrupt mode string; default is legacy INTx */
	(void) strcpy(msi_mode, " INTX:1");

#ifdef MSI_SUPPORT
	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
		switch (hba->intr_type) {
		case DDI_INTR_TYPE_FIXED:
			(void) strcpy(msi_mode, " MSI:0");
			break;

		case DDI_INTR_TYPE_MSI:
			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
			break;

		case DDI_INTR_TYPE_MSIX:
			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
			break;
		}
	}
#endif

	/* NPIV string: number of virtual ports supported (0 if disabled) */
	(void) strcpy(npiv_mode, "");

	if (hba->flag & FC_NPIV_ENABLED) {
		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
	} else {
		(void) strcpy(npiv_mode, " NPIV:0");
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
	    sli_mode, msi_mode, npiv_mode,
	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)?
	    " FCT":""));

	/* Log the physical port's WWPN/WWNN */
	wwpn = (uint8_t *)&hba->wwpn;
	wwnn = (uint8_t *)&hba->wwnn;
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
	    wwnn[6], wwnn[7]);

	/* Log the WWPN/WWNN of every configured virtual port */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_CONFIG)) {
			continue;
		}

		wwpn = (uint8_t *)&port->wwpn;
		wwnn = (uint8_t *)&port->wwnn;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
	}
	/* Restore port to the physical port; the loop above reused it */
	port = &PPORT;

	/*
	 * No dependency for Restricted login parameter.
	 */
	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
		port->flag |= EMLXS_PORT_RESTRICTED;
	} else {
		port->flag &= ~EMLXS_PORT_RESTRICTED;
	}

	/*
	 * Announce the device: ddi_report_dev() prints a banner at boot time,
	 * announcing the device pointed to by dip.
	 */
	(void) ddi_report_dev(hba->dip);

	return;

} /* emlxs_drv_banner() */


/* Read the "fcode-version" property into the VPD; "none" if absent */
extern void
emlxs_get_fcode_version(emlxs_hba_t *hba)
{
	emlxs_vpd_t *vpd = &VPD;
	char *prop_str;
	int status;

	/* Setup fcode version property */
	prop_str = NULL;
	status =
	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
	    "fcode-version", (char **)&prop_str);

	if (status == DDI_PROP_SUCCESS) {
		/*
		 * NOTE(review): bcopy does not copy the terminating NUL;
		 * this relies on vpd->fcode_version being pre-zeroed (hba
		 * is bzero'd at attach) and on the property fitting the
		 * buffer -- TODO confirm the property length can never
		 * exceed the fcode_version field.
		 */
		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
		(void) ddi_prop_free((void *)prop_str);
	} else {
		(void) strcpy(vpd->fcode_version, "none");
	}

	return;

} /* emlxs_get_fcode_version() */


/*
 * DDI attach handler for one adapter instance: allocates soft state,
 * maps the hardware, initializes interrupts, locks and power management,
 * brings the adapter online and binds the FCA/FCT interfaces.
 * Returns DDI_SUCCESS or DDI_FAILURE (after emlxs_driver_remove cleanup
 * of whatever steps completed, tracked in init_flag).
 */
static int
emlxs_hba_attach(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_config_t *cfg;
	char *prop_str;
	int ddiinst;
	int32_t emlxinst;
	int status;
	uint32_t rval;
	uint32_t init_flag = 0;	/* records completed steps for cleanup */
	char local_pm_components[32];
#ifdef EMLXS_I386
	uint32_t i;
#endif /* EMLXS_I386 */

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_add_instance(ddiinst);

	if (emlxinst >= MAX_FC_BRDS) {
		cmn_err(CE_WARN,
		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
		    "inst=%x", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* (emlxs_hba_t *)-1 marks an instance that previously failed */
	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
		return (DDI_FAILURE);
	}

	/* Already attached */
	if (emlxs_device.hba[emlxinst]) {
		return (DDI_SUCCESS);
	}

	/* An adapter can accidentally be plugged into a slave-only PCI slot */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
		    DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* Allocate emlxs_dev_ctl structure.
 */
	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
		    "state.", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}
	init_flag |= ATTACH_SOFT_STATE;

	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
	    ddiinst)) == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
		    DRIVER_NAME, ddiinst);
		goto failed;
	}
	bzero((char *)hba, sizeof (emlxs_hba_t));

	/* Register this instance in the global device table */
	emlxs_device.hba[emlxinst] = hba;
	emlxs_device.log[emlxinst] = &hba->log;

#ifdef DUMP_SUPPORT
	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
#endif /* DUMP_SUPPORT */

	hba->dip = dip;
	hba->emlxinst = emlxinst;
	hba->ddiinst = ddiinst;
	hba->ini_mode = 0;
	hba->tgt_mode = 0;

	init_flag |= ATTACH_HBA;

	/* Enable the physical port on this HBA */
	port = &PPORT;
	port->hba = hba;
	port->vpi = 0;
	port->flag |= EMLXS_PORT_ENABLE;

	/* Allocate a transport structure */
	hba->fca_tran =
	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
	if (hba->fca_tran == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
		    "memory.", DRIVER_NAME, ddiinst);
		goto failed;
	}
	/* Seed the per-HBA transport structure from the global template */
	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
	    sizeof (fc_fca_tran_t));

	/*
	 * Copy the global ddi_dma_attr to the local hba fields
	 */
	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));

	/* Reset the fca_tran dma_attr fields to the per-hba copies */
	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;

	/* Set the transport structure pointer in our dip */
	/* SFS may panic if we are in target only mode */
	/* We will update the transport structure later */
	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
	init_flag |= ATTACH_FCA_TRAN;

	/* Perform driver integrity check */
	rval = emlxs_integrity_check(hba);
	if (rval) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Driver integrity check "
		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
		goto failed;
	}

	cfg = &CFG;

	/* Seed per-HBA config from the global defaults table */
	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
#ifdef MSI_SUPPORT
	/* MSI is usable only if the DDI interrupt framework provides it */
	if ((void *)&ddi_intr_get_supported_types != NULL) {
		hba->intr_flags |= EMLXS_MSI_ENABLED;
	}
#endif /* MSI_SUPPORT */


	/* Create the msg log file */
	if (emlxs_msg_log_create(hba) == 0) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to create message "
		    "log", DRIVER_NAME, ddiinst);
		goto failed;

	}
	init_flag |= ATTACH_LOG;

	/* We can begin to use EMLXS_MSGF from this point on */

	/* Create the event queue */
	if (emlxs_event_queue_create(hba) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to create event queue");

		goto failed;

	}
	init_flag |= ATTACH_EVENTS;

	/*
	 * Find the I/O bus type If it is not a SBUS card,
	 * then it is a PCI card. Default is PCI_FC (0).
	 */
	prop_str = NULL;
	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);

	if (status == DDI_PROP_SUCCESS) {
		/* "lpfs" node name identifies the SBUS adapter */
		if (strncmp(prop_str, "lpfs", 4) == 0) {
			hba->bus_type = SBUS_FC;
		}

		(void) ddi_prop_free((void *)prop_str);
	}

	/*
	 * Copy DDS from the config method and update configuration parameters
	 */
	(void) emlxs_get_props(hba);

#ifdef FMA_SUPPORT
	hba->fm_caps = cfg[CFG_FM_CAPS].current;

	emlxs_fm_init(hba);

	init_flag |= ATTACH_FM;
#endif /* FMA_SUPPORT */

	if (emlxs_map_bus(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to map memory");
		goto failed;

	}
	init_flag |= ATTACH_MAP_BUS;

	/* Attempt to identify the adapter */
	rval = emlxs_init_adapter_info(hba);

	if (rval == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to get adapter info. Id:%d Device id:0x%x "
		    "Model:%s", hba->model_info.id,
		    hba->model_info.device_id, hba->model_info.model);
		goto failed;
	}

	/* Check if adapter is not supported */
	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unsupported adapter found. Id:%d Device id:0x%x "
		    "SSDID:0x%x Model:%s", hba->model_info.id,
		    hba->model_info.device_id,
		    hba->model_info.ssdid, hba->model_info.model);
		goto failed;
	}
	/* Size the SGL/BPL buffers per SLI generation */
	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
#ifdef EMLXS_I386
		/*
		 * TigerShark has 64K limit for SG element size
		 * Do this for x86 alone. For SPARC, the driver
		 * breaks up the single SGE later on.
		 */
		hba->dma_attr_ro.dma_attr_count_max = 0xffff;

		i = cfg[CFG_MAX_XFER_SIZE].current;
		/* Update SGL size based on max_xfer_size */
		if (i > 688128) {
			/* 688128 = (((2048 / 12) - 2) * 4096) */
			hba->sli.sli4.mem_sgl_size = 4096;
		} else if (i > 339968) {
			/* 339968 = (((1024 / 12) - 2) * 4096) */
			hba->sli.sli4.mem_sgl_size = 2048;
		} else {
			hba->sli.sli4.mem_sgl_size = 1024;
		}
		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
#endif /* EMLXS_I386 */
	} else {
		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
#ifdef EMLXS_I386
		i = cfg[CFG_MAX_XFER_SIZE].current;
		/* Update BPL size based on max_xfer_size */
		if (i > 688128) {
			/* 688128 = (((2048 / 12) - 2) * 4096) */
			hba->sli.sli3.mem_bpl_size = 4096;
		} else if (i > 339968) {
			/* 339968 = (((1024 / 12) - 2) * 4096) */
			hba->sli.sli3.mem_bpl_size = 2048;
		} else {
			hba->sli.sli3.mem_bpl_size = 1024;
		}
		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
#endif /* EMLXS_I386 */
	}

#ifdef EMLXS_I386
	/* Update dma_attr_sgllen based on BPL size */
	hba->dma_attr.dma_attr_sgllen = i;
	hba->dma_attr_ro.dma_attr_sgllen = i;
	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
#endif /* EMLXS_I386 */

	if (EMLXS_SLI_MAP_HDW(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to map memory");
		goto failed;

	}
	init_flag |= ATTACH_MAP_SLI;

	/* Initialize the interrupts. But don't add them yet */
	status = EMLXS_INTR_INIT(hba, 0);
	if (status != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to initalize interrupt(s).");
		goto failed;

	}
	init_flag |= ATTACH_INTR_INIT;

	/* Initialize LOCKs */
	emlxs_lock_init(hba);
	init_flag |= ATTACH_LOCK;

	/* Initialize the power management */
	mutex_enter(&hba->pm_lock);
	hba->pm_state = EMLXS_PM_IN_ATTACH;
	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
	hba->pm_busy = 0;
#ifdef IDLE_TIMER
	hba->pm_active = 1;
	hba->pm_idle_timer = 0;
#endif /* IDLE_TIMER */
	mutex_exit(&hba->pm_lock);

	/* Set the pm component name */
	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
	    ddiinst);
	emlxs_pm_components[0] = local_pm_components;

	/* Check if power management support is enabled */
	if (cfg[CFG_PM_SUPPORT].current) {
		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
		    "pm-components", emlxs_pm_components,
		    sizeof (emlxs_pm_components) /
		    sizeof (emlxs_pm_components[0])) !=
		    DDI_PROP_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
			    "Unable to create pm components.");
			goto failed;
		}
	}

	/* Needed for suspend and resume support */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
	    "needs-suspend-resume");
	init_flag |= ATTACH_PROP;

	emlxs_thread_spawn_create(hba);
	init_flag |= ATTACH_SPAWN;

	emlxs_thread_create(hba, &hba->iodone_thread);

	init_flag |= ATTACH_THREAD;

	/* Setup initiator / target ports */
	emlxs_set_mode(hba);

	/* If driver did not attach to either stack, */
	/* then driver attach failed */
	if (!hba->tgt_mode && !hba->ini_mode) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Driver interfaces not enabled.");
		goto failed;
	}

	/*
	 * Initialize HBA
	 */

	/* Set initial state */
	mutex_enter(&EMLXS_PORT_LOCK);
	emlxs_diag_state = DDI_OFFDI;
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
	mutex_exit(&EMLXS_PORT_LOCK);

	/* NOTE: intentional assignment-in-condition; nonzero means failure */
	if (status = emlxs_online(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to initialize adapter.");
		goto failed;
	}
	init_flag |= ATTACH_ONLINE;

	/* This is to ensure that the model property is properly set */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
	    hba->model_info.model);

	/* Create the device node. */
	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
	    DDI_FAILURE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to create device node.");
		goto failed;
	}
	init_flag |= ATTACH_NODE;

	/* Attach initiator now */
	/* This must come after emlxs_online() */
	emlxs_fca_attach(hba);
	init_flag |= ATTACH_FCA;

	/* Initialize kstat information */
	hba->kstat = kstat_create(DRIVER_NAME,
	    ddiinst, "statistics", "controller",
	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
	    KSTAT_FLAG_VIRTUAL);

	if (hba->kstat == NULL) {
		/* Non-fatal: continue attach without statistics */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "kstat_create failed.");
	} else {
		hba->kstat->ks_data = (void *)&hba->stats;
		kstat_install(hba->kstat);
		init_flag |= ATTACH_KSTAT;
	}

#if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
	/* Setup virtual port properties */
	emlxs_read_vport_prop(hba);
#endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */


#ifdef DHCHAP_SUPPORT
	emlxs_dhc_attach(hba);
	init_flag |= ATTACH_DHCHAP;
#endif /* DHCHAP_SUPPORT */

	/* Display the driver banner now */
	emlxs_drv_banner(hba);

	/* Raise the power level */

	/*
	 * This will not execute emlxs_hba_resume because
	 * EMLXS_PM_IN_ATTACH is set
	 */
	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
		/* Set power up anyway. This should not happen! */
		mutex_enter(&hba->pm_lock);
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&hba->pm_lock);
	} else {
		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&hba->pm_lock);
	}

#ifdef SFCT_SUPPORT
	/* Do this last */
	emlxs_fct_attach(hba);
	init_flag |= ATTACH_FCT;
#endif /* SFCT_SUPPORT */

	return (DDI_SUCCESS);

failed:

	/* Unwind exactly the steps recorded in init_flag */
	emlxs_driver_remove(dip, init_flag, 1);

	return (DDI_FAILURE);

} /* emlxs_hba_attach() */


/*
 * DDI detach handler: lowers power, takes the adapter offline, waits for
 * unsolicited buffer pools to drain, then tears down the instance.
 * Returns DDI_SUCCESS or DDI_FAILURE (instance left attached).
 */
static int
emlxs_hba_detach(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	int count;
	uint32_t init_flag = (uint32_t)-1;	/* all steps were completed */

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);

	mutex_enter(&hba->pm_lock);
	hba->pm_state |= EMLXS_PM_IN_DETACH;
	mutex_exit(&hba->pm_lock);

	/* Lower the power level */
	/*
	 * This will not suspend the driver since the
	 * EMLXS_PM_IN_DETACH has been set
	 */
	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to lower power.");

		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
		mutex_exit(&hba->pm_lock);

		return (DDI_FAILURE);
	}

	/* Take the adapter offline first, if not already */
	if (emlxs_offline(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to take adapter offline.");

		/* Back out the detach: restore pm state and power */
		mutex_enter(&hba->pm_lock);
		hba->pm_state &=
		    ~EMLXS_PM_IN_DETACH;
		mutex_exit(&hba->pm_lock);

		(void) emlxs_pm_raise_power(dip);

		return (DDI_FAILURE);
	}
	/* Check ub buffer pools */
	if (port->ub_pool) {
		mutex_enter(&EMLXS_UB_LOCK);

		/* Wait up to 10 seconds for all ub pools to be freed */
		count = 10 * 2;
		while (port->ub_pool && count) {
			mutex_exit(&EMLXS_UB_LOCK);
			delay(drv_usectohz(500000));	/* half second wait */
			count--;
			mutex_enter(&EMLXS_UB_LOCK);
		}

		if (port->ub_pool) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_unbind_port: Unsolicited buffers still "
			    "active. port=%p. Destroying...", port);

			/* Destroy all pools */
			while (port->ub_pool) {
				emlxs_ub_destroy(port, port->ub_pool);
			}
		}

		mutex_exit(&EMLXS_UB_LOCK);
	}
	init_flag &= ~ATTACH_ONLINE;

	/* Remove the driver instance */
	emlxs_driver_remove(dip, init_flag, 0);

	return (DDI_SUCCESS);

} /* emlxs_hba_detach() */


/*
 * Map the adapter's PCI (or SBUS) configuration space registers.
 * Returns 0 on success or ENOMEM after unmapping anything mapped.
 */
extern int
emlxs_map_bus(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {
		/* SBUS adapters expose two PCI config regions */
		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup PCI failed. "
				    "status=%x", status);
				goto failed;
			}
		}

		if (hba->sbus_pci_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_TITAN_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->sbus_pci_addr,
			    0, 0, &dev_attr, &hba->sbus_pci_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN PCI "
				    "failed. status=%x", status);
				goto failed;
			}
		}

	} else {	/* ****** PCI ****** */

		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup PCI failed. "
				    "status=%x", status);
				goto failed;
			}
		}
#ifdef EMLXS_I386
		/* Setting up PCI configure space */
		(void) ddi_put16(hba->pci_acc_handle,
		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
		    CMD_CFG_VALUE | CMD_IO_ENBL);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_access_handle_msg, NULL);
			goto failed;
		}
#endif /* FMA_SUPPORT */

#endif /* EMLXS_I386 */

	}
	return (0);

failed:

	emlxs_unmap_bus(hba);
	return (ENOMEM);

} /* emlxs_map_bus() */


/* Release any register mappings created by emlxs_map_bus() */
extern void
emlxs_unmap_bus(emlxs_hba_t *hba)
{
	if (hba->pci_acc_handle) {
		(void) ddi_regs_map_free(&hba->pci_acc_handle);
		hba->pci_acc_handle = 0;
	}

	if (hba->sbus_pci_handle) {
		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
		hba->sbus_pci_handle = 0;
	}

	return;

} /* emlxs_unmap_bus() */


/*
 * Load every configuration parameter: start from the compiled-in default,
 * then apply the global .conf property, then the per-instance property,
 * and finally validate via emlxs_check_parm(). Always returns 0.
 */
static int
emlxs_get_props(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg;
	uint32_t
i; 6750 char string[256]; 6751 uint32_t new_value; 6752 6753 /* Initialize each parameter */ 6754 for (i = 0; i < NUM_CFG_PARAM; i++) { 6755 cfg = &hba->config[i]; 6756 6757 /* Ensure strings are terminated */ 6758 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0; 6759 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0; 6760 6761 /* Set the current value to the default value */ 6762 new_value = cfg->def; 6763 6764 /* First check for the global setting */ 6765 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6766 (void *)hba->dip, DDI_PROP_DONTPASS, 6767 cfg->string, new_value); 6768 6769 /* Now check for the per adapter ddiinst setting */ 6770 (void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst, 6771 cfg->string); 6772 6773 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6774 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value); 6775 6776 /* Now check the parameter */ 6777 cfg->current = emlxs_check_parm(hba, i, new_value); 6778 } 6779 6780 return (0); 6781 6782 } /* emlxs_get_props() */ 6783 6784 6785 extern uint32_t 6786 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 6787 { 6788 emlxs_port_t *port = &PPORT; 6789 uint32_t i; 6790 emlxs_config_t *cfg; 6791 emlxs_vpd_t *vpd = &VPD; 6792 6793 if (index > NUM_CFG_PARAM) { 6794 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6795 "emlxs_check_parm failed. Invalid index = %d", index); 6796 6797 return (new_value); 6798 } 6799 6800 cfg = &hba->config[index]; 6801 6802 if (new_value > cfg->hi) { 6803 new_value = cfg->def; 6804 } else if (new_value < cfg->low) { 6805 new_value = cfg->def; 6806 } 6807 6808 /* Perform additional checks */ 6809 switch (index) { 6810 case CFG_NPIV_ENABLE: 6811 if (hba->tgt_mode) { 6812 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6813 "enable-npiv: Not supported in target mode. 
" 6814 "Disabling."); 6815 6816 new_value = 0; 6817 } 6818 break; 6819 6820 #ifdef DHCHAP_SUPPORT 6821 case CFG_AUTH_ENABLE: 6822 if (hba->tgt_mode) { 6823 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6824 "enable-auth: Not supported in target mode. " 6825 "Disabling."); 6826 6827 new_value = 0; 6828 } 6829 break; 6830 #endif /* DHCHAP_SUPPORT */ 6831 6832 case CFG_NUM_NODES: 6833 switch (new_value) { 6834 case 1: 6835 case 2: 6836 /* Must have at least 3 if not 0 */ 6837 return (3); 6838 6839 default: 6840 break; 6841 } 6842 break; 6843 6844 case CFG_LINK_SPEED: 6845 if (vpd->link_speed) { 6846 switch (new_value) { 6847 case 0: 6848 break; 6849 6850 case 1: 6851 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 6852 new_value = 0; 6853 6854 EMLXS_MSGF(EMLXS_CONTEXT, 6855 &emlxs_init_msg, 6856 "link-speed: 1Gb not supported " 6857 "by adapter. Switching to auto " 6858 "detect."); 6859 } 6860 break; 6861 6862 case 2: 6863 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 6864 new_value = 0; 6865 6866 EMLXS_MSGF(EMLXS_CONTEXT, 6867 &emlxs_init_msg, 6868 "link-speed: 2Gb not supported " 6869 "by adapter. Switching to auto " 6870 "detect."); 6871 } 6872 break; 6873 case 4: 6874 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 6875 new_value = 0; 6876 6877 EMLXS_MSGF(EMLXS_CONTEXT, 6878 &emlxs_init_msg, 6879 "link-speed: 4Gb not supported " 6880 "by adapter. Switching to auto " 6881 "detect."); 6882 } 6883 break; 6884 6885 case 8: 6886 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 6887 new_value = 0; 6888 6889 EMLXS_MSGF(EMLXS_CONTEXT, 6890 &emlxs_init_msg, 6891 "link-speed: 8Gb not supported " 6892 "by adapter. Switching to auto " 6893 "detect."); 6894 } 6895 break; 6896 6897 case 10: 6898 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) { 6899 new_value = 0; 6900 6901 EMLXS_MSGF(EMLXS_CONTEXT, 6902 &emlxs_init_msg, 6903 "link-speed: 10Gb not supported " 6904 "by adapter. 
Switching to auto " 6905 "detect."); 6906 } 6907 break; 6908 6909 default: 6910 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6911 "link-speed: Invalid value=%d provided. " 6912 "Switching to auto detect.", 6913 new_value); 6914 6915 new_value = 0; 6916 } 6917 } else { /* Perform basic validity check */ 6918 6919 /* Perform additional check on link speed */ 6920 switch (new_value) { 6921 case 0: 6922 case 1: 6923 case 2: 6924 case 4: 6925 case 8: 6926 case 10: 6927 /* link-speed is a valid choice */ 6928 break; 6929 6930 default: 6931 new_value = cfg->def; 6932 } 6933 } 6934 break; 6935 6936 case CFG_TOPOLOGY: 6937 /* Perform additional check on topology */ 6938 switch (new_value) { 6939 case 0: 6940 case 2: 6941 case 4: 6942 case 6: 6943 /* topology is a valid choice */ 6944 break; 6945 6946 default: 6947 return (cfg->def); 6948 } 6949 break; 6950 6951 #ifdef DHCHAP_SUPPORT 6952 case CFG_AUTH_TYPE: 6953 { 6954 uint32_t shift; 6955 uint32_t mask; 6956 6957 /* Perform additional check on auth type */ 6958 shift = 12; 6959 mask = 0xF000; 6960 for (i = 0; i < 4; i++) { 6961 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) { 6962 return (cfg->def); 6963 } 6964 6965 shift -= 4; 6966 mask >>= 4; 6967 } 6968 break; 6969 } 6970 6971 case CFG_AUTH_HASH: 6972 { 6973 uint32_t shift; 6974 uint32_t mask; 6975 6976 /* Perform additional check on auth hash */ 6977 shift = 12; 6978 mask = 0xF000; 6979 for (i = 0; i < 4; i++) { 6980 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) { 6981 return (cfg->def); 6982 } 6983 6984 shift -= 4; 6985 mask >>= 4; 6986 } 6987 break; 6988 } 6989 6990 case CFG_AUTH_GROUP: 6991 { 6992 uint32_t shift; 6993 uint32_t mask; 6994 6995 /* Perform additional check on auth group */ 6996 shift = 28; 6997 mask = 0xF0000000; 6998 for (i = 0; i < 8; i++) { 6999 if (((new_value & mask) >> shift) > 7000 DFC_AUTH_GROUP_MAX) { 7001 return (cfg->def); 7002 } 7003 7004 shift -= 4; 7005 mask >>= 4; 7006 } 7007 break; 7008 } 7009 7010 case CFG_AUTH_INTERVAL: 
7011 if (new_value < 10) { 7012 return (10); 7013 } 7014 break; 7015 7016 7017 #endif /* DHCHAP_SUPPORT */ 7018 7019 } /* switch */ 7020 7021 return (new_value); 7022 7023 } /* emlxs_check_parm() */ 7024 7025 7026 extern uint32_t 7027 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 7028 { 7029 emlxs_port_t *port = &PPORT; 7030 emlxs_port_t *vport; 7031 uint32_t vpi; 7032 emlxs_config_t *cfg; 7033 uint32_t old_value; 7034 7035 if (index > NUM_CFG_PARAM) { 7036 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7037 "emlxs_set_parm failed. Invalid index = %d", index); 7038 7039 return ((uint32_t)FC_FAILURE); 7040 } 7041 7042 cfg = &hba->config[index]; 7043 7044 if (!(cfg->flags & PARM_DYNAMIC)) { 7045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7046 "emlxs_set_parm failed. %s is not dynamic.", cfg->string); 7047 7048 return ((uint32_t)FC_FAILURE); 7049 } 7050 7051 /* Check new value */ 7052 old_value = new_value; 7053 new_value = emlxs_check_parm(hba, index, new_value); 7054 7055 if (old_value != new_value) { 7056 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7057 "emlxs_set_parm: %s invalid. 0x%x --> 0x%x", 7058 cfg->string, old_value, new_value); 7059 } 7060 7061 /* Return now if no actual change */ 7062 if (new_value == cfg->current) { 7063 return (FC_SUCCESS); 7064 } 7065 7066 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7067 "emlxs_set_parm: %s changing. 
0x%x --> 0x%x", 7068 cfg->string, cfg->current, new_value); 7069 7070 old_value = cfg->current; 7071 cfg->current = new_value; 7072 7073 /* React to change if needed */ 7074 switch (index) { 7075 7076 case CFG_PCI_MAX_READ: 7077 /* Update MXR */ 7078 emlxs_pcix_mxr_update(hba, 1); 7079 break; 7080 7081 case CFG_SLI_MODE: 7082 /* Check SLI mode */ 7083 if ((hba->sli_mode == 3) && (new_value == 2)) { 7084 /* All vports must be disabled first */ 7085 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 7086 vport = &VPORT(vpi); 7087 7088 if (vport->flag & EMLXS_PORT_ENABLE) { 7089 /* Reset current value */ 7090 cfg->current = old_value; 7091 7092 EMLXS_MSGF(EMLXS_CONTEXT, 7093 &emlxs_sfs_debug_msg, 7094 "emlxs_set_parm failed. %s: vpi=%d " 7095 "still enabled. Value restored to " 7096 "0x%x.", cfg->string, vpi, 7097 old_value); 7098 7099 return (2); 7100 } 7101 } 7102 } 7103 break; 7104 7105 case CFG_NPIV_ENABLE: 7106 /* Check if NPIV is being disabled */ 7107 if ((old_value == 1) && (new_value == 0)) { 7108 /* All vports must be disabled first */ 7109 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 7110 vport = &VPORT(vpi); 7111 7112 if (vport->flag & EMLXS_PORT_ENABLE) { 7113 /* Reset current value */ 7114 cfg->current = old_value; 7115 7116 EMLXS_MSGF(EMLXS_CONTEXT, 7117 &emlxs_sfs_debug_msg, 7118 "emlxs_set_parm failed. %s: vpi=%d " 7119 "still enabled. 
Value restored to " 7120 "0x%x.", cfg->string, vpi, 7121 old_value); 7122 7123 return (2); 7124 } 7125 } 7126 } 7127 7128 /* Trigger adapter reset */ 7129 /* (void) emlxs_reset(port, FC_FCA_RESET); */ 7130 7131 break; 7132 7133 7134 case CFG_VPORT_RESTRICTED: 7135 for (vpi = 0; vpi < MAX_VPORTS; vpi++) { 7136 vport = &VPORT(vpi); 7137 7138 if (!(vport->flag & EMLXS_PORT_CONFIG)) { 7139 continue; 7140 } 7141 7142 if (vport->options & EMLXS_OPT_RESTRICT_MASK) { 7143 continue; 7144 } 7145 7146 if (new_value) { 7147 vport->flag |= EMLXS_PORT_RESTRICTED; 7148 } else { 7149 vport->flag &= ~EMLXS_PORT_RESTRICTED; 7150 } 7151 } 7152 7153 break; 7154 7155 #ifdef DHCHAP_SUPPORT 7156 case CFG_AUTH_ENABLE: 7157 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 7158 break; 7159 7160 case CFG_AUTH_TMO: 7161 hba->auth_cfg.authentication_timeout = cfg->current; 7162 break; 7163 7164 case CFG_AUTH_MODE: 7165 hba->auth_cfg.authentication_mode = cfg->current; 7166 break; 7167 7168 case CFG_AUTH_BIDIR: 7169 hba->auth_cfg.bidirectional = cfg->current; 7170 break; 7171 7172 case CFG_AUTH_TYPE: 7173 hba->auth_cfg.authentication_type_priority[0] = 7174 (cfg->current & 0xF000) >> 12; 7175 hba->auth_cfg.authentication_type_priority[1] = 7176 (cfg->current & 0x0F00) >> 8; 7177 hba->auth_cfg.authentication_type_priority[2] = 7178 (cfg->current & 0x00F0) >> 4; 7179 hba->auth_cfg.authentication_type_priority[3] = 7180 (cfg->current & 0x000F); 7181 break; 7182 7183 case CFG_AUTH_HASH: 7184 hba->auth_cfg.hash_priority[0] = 7185 (cfg->current & 0xF000) >> 12; 7186 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8; 7187 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4; 7188 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F); 7189 break; 7190 7191 case CFG_AUTH_GROUP: 7192 hba->auth_cfg.dh_group_priority[0] = 7193 (cfg->current & 0xF0000000) >> 28; 7194 hba->auth_cfg.dh_group_priority[1] = 7195 (cfg->current & 0x0F000000) >> 24; 7196 hba->auth_cfg.dh_group_priority[2] = 7197 
		    (cfg->current & 0x00F00000) >> 20;
		hba->auth_cfg.dh_group_priority[3] =
		    (cfg->current & 0x000F0000) >> 16;
		hba->auth_cfg.dh_group_priority[4] =
		    (cfg->current & 0x0000F000) >> 12;
		hba->auth_cfg.dh_group_priority[5] =
		    (cfg->current & 0x00000F00) >> 8;
		hba->auth_cfg.dh_group_priority[6] =
		    (cfg->current & 0x000000F0) >> 4;
		hba->auth_cfg.dh_group_priority[7] =
		    (cfg->current & 0x0000000F);
		break;

	case CFG_AUTH_INTERVAL:
		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
		break;
#endif /* DHCHAP_SUPPORT */

	}

	return (FC_SUCCESS);

} /* emlxs_set_parm() */


/*
 * emlxs_mem_alloc OS specific routine for memory allocation / mapping
 *
 * The buf_info->flags field describes the memory operation requested.
 *
 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
 * Virtual address is supplied in buf_info->virt
 * DMA mapping flag is in buf_info->align
 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
 * The mapped physical address is returned buf_info->phys
 *
 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
 * if FC_MBUF_DMA is set the memory is also mapped for DMA
 * The byte alignment of the memory request is supplied in buf_info->align
 * The byte size of the memory request is supplied in buf_info->size
 * The virtual address is returned buf_info->virt
 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
 */
extern uint8_t *
emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
{
	emlxs_port_t *port = &PPORT;
	ddi_dma_attr_t dma_attr;
	ddi_device_acc_attr_t dev_attr;
	uint_t cookie_count;
	size_t dma_reallen;
	ddi_dma_cookie_t dma_cookie;
	uint_t dma_flag;
	int status;

	/* Start from the single-SG template; flags below may tighten it */
	dma_attr = hba->dma_attr_1sg;
	dev_attr = emlxs_data_acc_attr;

	if
(buf_info->flags & FC_MBUF_SNGLSG) { 7256 dma_attr.dma_attr_sgllen = 1; 7257 } 7258 7259 if (buf_info->flags & FC_MBUF_DMA32) { 7260 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff; 7261 } 7262 7263 if (buf_info->flags & FC_MBUF_PHYSONLY) { 7264 7265 if (buf_info->virt == 0) { 7266 goto done; 7267 } 7268 7269 /* 7270 * Allocate the DMA handle for this DMA object 7271 */ 7272 status = ddi_dma_alloc_handle((void *)hba->dip, 7273 &dma_attr, DDI_DMA_DONTWAIT, 7274 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle); 7275 if (status != DDI_SUCCESS) { 7276 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7277 "ddi_dma_alloc_handle failed: size=%x align=%x " 7278 "flags=%x", buf_info->size, buf_info->align, 7279 buf_info->flags); 7280 7281 buf_info->phys = 0; 7282 buf_info->dma_handle = 0; 7283 goto done; 7284 } 7285 7286 switch (buf_info->align) { 7287 case DMA_READ_WRITE: 7288 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT); 7289 break; 7290 case DMA_READ_ONLY: 7291 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT); 7292 break; 7293 case DMA_WRITE_ONLY: 7294 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT); 7295 break; 7296 } 7297 7298 /* Map this page of memory */ 7299 status = ddi_dma_addr_bind_handle( 7300 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7301 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7302 dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie, 7303 &cookie_count); 7304 7305 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7306 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7307 "ddi_dma_addr_bind_handle failed: status=%x " 7308 "count=%x flags=%x", status, cookie_count, 7309 buf_info->flags); 7310 7311 (void) ddi_dma_free_handle( 7312 (ddi_dma_handle_t *)&buf_info->dma_handle); 7313 buf_info->phys = 0; 7314 buf_info->dma_handle = 0; 7315 goto done; 7316 } 7317 7318 if (hba->bus_type == SBUS_FC) { 7319 7320 int32_t burstsizes_limit = 0xff; 7321 int32_t ret_burst; 7322 7323 ret_burst = ddi_dma_burstsizes( 7324 buf_info->dma_handle) & 
burstsizes_limit; 7325 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7326 ret_burst) == DDI_FAILURE) { 7327 EMLXS_MSGF(EMLXS_CONTEXT, 7328 &emlxs_mem_alloc_failed_msg, 7329 "ddi_dma_set_sbus64 failed."); 7330 } 7331 } 7332 7333 /* Save Physical address */ 7334 buf_info->phys = dma_cookie.dmac_laddress; 7335 7336 /* 7337 * Just to be sure, let's add this 7338 */ 7339 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle, 7340 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7341 7342 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) { 7343 7344 dma_attr.dma_attr_align = buf_info->align; 7345 7346 /* 7347 * Allocate the DMA handle for this DMA object 7348 */ 7349 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr, 7350 DDI_DMA_DONTWAIT, NULL, 7351 (ddi_dma_handle_t *)&buf_info->dma_handle); 7352 if (status != DDI_SUCCESS) { 7353 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7354 "ddi_dma_alloc_handle failed: size=%x align=%x " 7355 "flags=%x", buf_info->size, buf_info->align, 7356 buf_info->flags); 7357 7358 buf_info->virt = 0; 7359 buf_info->phys = 0; 7360 buf_info->data_handle = 0; 7361 buf_info->dma_handle = 0; 7362 goto done; 7363 } 7364 7365 status = ddi_dma_mem_alloc( 7366 (ddi_dma_handle_t)buf_info->dma_handle, 7367 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT, 7368 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt, 7369 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle); 7370 7371 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) { 7372 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7373 "ddi_dma_mem_alloc failed: size=%x align=%x " 7374 "flags=%x", buf_info->size, buf_info->align, 7375 buf_info->flags); 7376 7377 (void) ddi_dma_free_handle( 7378 (ddi_dma_handle_t *)&buf_info->dma_handle); 7379 7380 buf_info->virt = 0; 7381 buf_info->phys = 0; 7382 buf_info->data_handle = 0; 7383 buf_info->dma_handle = 0; 7384 goto done; 7385 } 7386 7387 /* Map this page of memory */ 7388 status = 
ddi_dma_addr_bind_handle( 7389 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7390 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7391 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, 7392 &dma_cookie, &cookie_count); 7393 7394 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7395 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7396 "ddi_dma_addr_bind_handle failed: status=%x " 7397 "count=%d size=%x align=%x flags=%x", status, 7398 cookie_count, buf_info->size, buf_info->align, 7399 buf_info->flags); 7400 7401 (void) ddi_dma_mem_free( 7402 (ddi_acc_handle_t *)&buf_info->data_handle); 7403 (void) ddi_dma_free_handle( 7404 (ddi_dma_handle_t *)&buf_info->dma_handle); 7405 7406 buf_info->virt = 0; 7407 buf_info->phys = 0; 7408 buf_info->dma_handle = 0; 7409 buf_info->data_handle = 0; 7410 goto done; 7411 } 7412 7413 if (hba->bus_type == SBUS_FC) { 7414 int32_t burstsizes_limit = 0xff; 7415 int32_t ret_burst; 7416 7417 ret_burst = 7418 ddi_dma_burstsizes(buf_info-> 7419 dma_handle) & burstsizes_limit; 7420 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7421 ret_burst) == DDI_FAILURE) { 7422 EMLXS_MSGF(EMLXS_CONTEXT, 7423 &emlxs_mem_alloc_failed_msg, 7424 "ddi_dma_set_sbus64 failed."); 7425 } 7426 } 7427 7428 /* Save Physical address */ 7429 buf_info->phys = dma_cookie.dmac_laddress; 7430 7431 /* Just to be sure, let's add this */ 7432 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle, 7433 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7434 7435 } else { /* allocate virtual memory */ 7436 7437 buf_info->virt = 7438 (uint32_t *)kmem_zalloc((size_t)buf_info->size, 7439 KM_NOSLEEP); 7440 buf_info->phys = 0; 7441 buf_info->data_handle = 0; 7442 buf_info->dma_handle = 0; 7443 7444 if (buf_info->virt == (uint32_t *)0) { 7445 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7446 "size=%x flags=%x", buf_info->size, 7447 buf_info->flags); 7448 } 7449 7450 } 7451 7452 done: 7453 7454 return ((uint8_t *)buf_info->virt); 7455 7456 } /* 
emlxs_mem_alloc() */ 7457 7458 7459 7460 /* 7461 * emlxs_mem_free: 7462 * 7463 * OS specific routine for memory de-allocation / unmapping 7464 * 7465 * The buf_info->flags field describes the memory operation requested. 7466 * 7467 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped 7468 * for DMA, but not freed. The mapped physical address to be unmapped is in 7469 * buf_info->phys 7470 * 7471 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only 7472 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in 7473 * buf_info->phys. The virtual address to be freed is in buf_info->virt 7474 */ 7475 /*ARGSUSED*/ 7476 extern void 7477 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info) 7478 { 7479 if (buf_info->flags & FC_MBUF_PHYSONLY) { 7480 7481 if (buf_info->dma_handle) { 7482 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7483 (void) ddi_dma_free_handle( 7484 (ddi_dma_handle_t *)&buf_info->dma_handle); 7485 buf_info->dma_handle = NULL; 7486 } 7487 7488 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) { 7489 7490 if (buf_info->dma_handle) { 7491 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7492 (void) ddi_dma_mem_free( 7493 (ddi_acc_handle_t *)&buf_info->data_handle); 7494 (void) ddi_dma_free_handle( 7495 (ddi_dma_handle_t *)&buf_info->dma_handle); 7496 buf_info->dma_handle = NULL; 7497 buf_info->data_handle = NULL; 7498 } 7499 7500 } else { /* allocate virtual memory */ 7501 7502 if (buf_info->virt) { 7503 kmem_free(buf_info->virt, (size_t)buf_info->size); 7504 buf_info->virt = NULL; 7505 } 7506 } 7507 7508 } /* emlxs_mem_free() */ 7509 7510 7511 /* 7512 * A channel has a association with a msi id. 7513 * One msi id could be associated with multiple channels. 
7514 */ 7515 static int 7516 emlxs_next_chan(emlxs_hba_t *hba, int msi_id) 7517 { 7518 emlxs_config_t *cfg = &CFG; 7519 EQ_DESC_t *eqp; 7520 int chan; 7521 int num_wq; 7522 7523 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 7524 /* For SLI4 round robin all WQs associated with the msi_id */ 7525 eqp = &hba->sli.sli4.eq[msi_id]; 7526 num_wq = cfg[CFG_NUM_WQ].current; 7527 7528 mutex_enter(&eqp->lastwq_lock); 7529 chan = eqp->lastwq; 7530 eqp->lastwq++; 7531 if (eqp->lastwq >= ((msi_id + 1) * num_wq)) { 7532 eqp->lastwq -= num_wq; 7533 } 7534 mutex_exit(&eqp->lastwq_lock); 7535 7536 } else { 7537 chan = hba->channel_fcp; 7538 } 7539 return (chan); 7540 } 7541 7542 7543 static int 7544 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset) 7545 { 7546 int channel; 7547 7548 7549 /* IO to FCP2 device or a device reset always use fcp channel */ 7550 if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) { 7551 return (hba->channel_fcp); 7552 } 7553 7554 channel = emlxs_next_chan(hba, 0); 7555 7556 7557 /* If channel is closed, then try fcp channel */ 7558 if (ndlp->nlp_flag[channel] & NLP_CLOSED) { 7559 channel = hba->channel_fcp; 7560 } 7561 return (channel); 7562 7563 } 7564 7565 static int32_t 7566 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp) 7567 { 7568 emlxs_hba_t *hba = HBA; 7569 fc_packet_t *pkt; 7570 emlxs_config_t *cfg; 7571 MAILBOXQ *mbq; 7572 MAILBOX *mb; 7573 uint32_t rc; 7574 7575 /* 7576 * This routine provides a alternative target reset provessing 7577 * method. Instead of sending an actual target reset to the 7578 * NPort, we will first unreg the login to that NPort. This 7579 * will cause all the outstanding IOs the quickly complete with 7580 * a NO RPI local error. Next we will force the ULP to relogin 7581 * to the NPort by sending an RSCN (for that NPort) to the 7582 * upper layer. 
This method should result in a fast target 7583 * reset, as far as IOs completing; however, since an actual 7584 * target reset is not sent to the NPort, it is not 100% 7585 * compatable. Things like reservations will not be broken. 7586 * By default this option is DISABLED, and its only enabled thru 7587 * a hidden configuration parameter (fast-tgt-reset). 7588 */ 7589 rc = FC_TRAN_BUSY; 7590 pkt = PRIV2PKT(sbp); 7591 cfg = &CFG; 7592 7593 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) { 7594 /* issue the mbox cmd to the sli */ 7595 mb = (MAILBOX *) mbq->mbox; 7596 bzero((void *) mb, MAILBOX_CMD_BSIZE); 7597 mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi; 7598 #ifdef SLI3_SUPPORT 7599 mb->un.varUnregLogin.vpi = port->vpi; 7600 #endif /* SLI3_SUPPORT */ 7601 mb->mbxCommand = MBX_UNREG_LOGIN; 7602 mb->mbxOwner = OWN_HOST; 7603 7604 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7605 "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi, 7606 cfg[CFG_FAST_TGT_RESET_TMR].current); 7607 7608 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) 7609 == MBX_SUCCESS) { 7610 7611 ndlp->nlp_Rpi = 0; 7612 7613 mutex_enter(&sbp->mtx); 7614 sbp->node = (void *)ndlp; 7615 sbp->did = ndlp->nlp_DID; 7616 mutex_exit(&sbp->mtx); 7617 7618 if (pkt->pkt_rsplen) { 7619 bzero((uint8_t *)pkt->pkt_resp, 7620 pkt->pkt_rsplen); 7621 } 7622 if (cfg[CFG_FAST_TGT_RESET_TMR].current) { 7623 ndlp->nlp_force_rscn = hba->timer_tics + 7624 cfg[CFG_FAST_TGT_RESET_TMR].current; 7625 } 7626 7627 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0); 7628 } 7629 7630 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq); 7631 rc = FC_SUCCESS; 7632 } 7633 return (rc); 7634 } 7635 7636 static int32_t 7637 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp) 7638 { 7639 emlxs_hba_t *hba = HBA; 7640 fc_packet_t *pkt; 7641 emlxs_config_t *cfg; 7642 IOCBQ *iocbq; 7643 IOCB *iocb; 7644 CHANNEL *cp; 7645 NODELIST *ndlp; 7646 char *cmd; 7647 uint16_t lun; 7648 FCP_CMND *fcp_cmd; 7649 uint32_t did; 
7650 uint32_t reset = 0; 7651 int channel; 7652 int32_t rval; 7653 7654 pkt = PRIV2PKT(sbp); 7655 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 7656 7657 /* Find target node object */ 7658 ndlp = emlxs_node_find_did(port, did); 7659 7660 if (!ndlp || !ndlp->nlp_active) { 7661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7662 "Node not found. did=%x", did); 7663 7664 return (FC_BADPACKET); 7665 } 7666 7667 /* When the fcp channel is closed we stop accepting any FCP cmd */ 7668 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) { 7669 return (FC_TRAN_BUSY); 7670 } 7671 7672 /* Snoop for target or lun reset first */ 7673 /* We always use FCP channel to send out target/lun reset fcp cmds */ 7674 /* interrupt affinity only applies to non tgt lun reset fcp cmd */ 7675 7676 cmd = (char *)pkt->pkt_cmd; 7677 lun = *((uint16_t *)cmd); 7678 lun = LE_SWAP16(lun); 7679 7680 iocbq = &sbp->iocbq; 7681 iocb = &iocbq->iocb; 7682 iocbq->node = (void *) ndlp; 7683 7684 /* Check for target reset */ 7685 if (cmd[10] & 0x20) { 7686 /* prepare iocb */ 7687 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7688 hba->channel_fcp)) != FC_SUCCESS) { 7689 7690 if (rval == 0xff) { 7691 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 7692 0, 1); 7693 rval = FC_SUCCESS; 7694 } 7695 7696 return (rval); 7697 } 7698 7699 mutex_enter(&sbp->mtx); 7700 sbp->pkt_flags |= PACKET_FCP_TGT_RESET; 7701 sbp->pkt_flags |= PACKET_POLLED; 7702 mutex_exit(&sbp->mtx); 7703 7704 #ifdef SAN_DIAG_SUPPORT 7705 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET, 7706 (HBA_WWN *)&ndlp->nlp_portname, -1); 7707 #endif /* SAN_DIAG_SUPPORT */ 7708 7709 iocbq->flag |= IOCB_PRIORITY; 7710 7711 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7712 "Target Reset: did=%x", did); 7713 7714 cfg = &CFG; 7715 if (cfg[CFG_FAST_TGT_RESET].current) { 7716 if (emlxs_fast_target_reset(port, sbp, ndlp) == 7717 FC_SUCCESS) { 7718 return (FC_SUCCESS); 7719 } 7720 } 7721 7722 /* Close the node for any further normal IO */ 7723 
emlxs_node_close(port, ndlp, hba->channel_fcp, 7724 pkt->pkt_timeout); 7725 7726 /* Flush the IO's on the tx queues */ 7727 (void) emlxs_tx_node_flush(port, ndlp, 7728 &hba->chan[hba->channel_fcp], 0, sbp); 7729 7730 /* This is the target reset fcp cmd */ 7731 reset = 1; 7732 } 7733 7734 /* Check for lun reset */ 7735 else if (cmd[10] & 0x10) { 7736 /* prepare iocb */ 7737 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7738 hba->channel_fcp)) != FC_SUCCESS) { 7739 7740 if (rval == 0xff) { 7741 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 7742 0, 1); 7743 rval = FC_SUCCESS; 7744 } 7745 7746 return (rval); 7747 } 7748 7749 mutex_enter(&sbp->mtx); 7750 sbp->pkt_flags |= PACKET_FCP_LUN_RESET; 7751 sbp->pkt_flags |= PACKET_POLLED; 7752 mutex_exit(&sbp->mtx); 7753 7754 #ifdef SAN_DIAG_SUPPORT 7755 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET, 7756 (HBA_WWN *)&ndlp->nlp_portname, lun); 7757 #endif /* SAN_DIAG_SUPPORT */ 7758 7759 iocbq->flag |= IOCB_PRIORITY; 7760 7761 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7762 "LUN Reset: did=%x LUN=%02x02x", did, cmd[0], cmd[1]); 7763 7764 /* Flush the IO's on the tx queues for this lun */ 7765 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp); 7766 7767 /* This is the lun reset fcp cmd */ 7768 reset = 1; 7769 } 7770 7771 channel = emlxs_select_fcp_channel(hba, ndlp, reset); 7772 7773 #ifdef SAN_DIAG_SUPPORT 7774 sbp->sd_start_time = gethrtime(); 7775 #endif /* SAN_DIAG_SUPPORT */ 7776 7777 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7778 emlxs_swap_fcp_pkt(sbp); 7779 #endif /* EMLXS_MODREV2X */ 7780 7781 fcp_cmd = (FCP_CMND *) pkt->pkt_cmd; 7782 7783 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) { 7784 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE; 7785 } 7786 7787 if (reset == 0) { 7788 /* 7789 * tgt lun reset fcp cmd has been prepared 7790 * separately in the beginning 7791 */ 7792 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7793 channel)) != FC_SUCCESS) { 7794 7795 if (rval == 0xff) { 7796 emlxs_pkt_complete(sbp, 
IOSTAT_LOCAL_REJECT, 7797 0, 1); 7798 rval = FC_SUCCESS; 7799 } 7800 7801 return (rval); 7802 } 7803 } 7804 7805 cp = &hba->chan[channel]; 7806 cp->ulpSendCmd++; 7807 7808 /* Initalize sbp */ 7809 mutex_enter(&sbp->mtx); 7810 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7811 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 7812 sbp->node = (void *)ndlp; 7813 sbp->lun = lun; 7814 sbp->class = iocb->ULPCLASS; 7815 sbp->did = ndlp->nlp_DID; 7816 mutex_exit(&sbp->mtx); 7817 7818 if (pkt->pkt_cmdlen) { 7819 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7820 DDI_DMA_SYNC_FORDEV); 7821 } 7822 7823 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) { 7824 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen, 7825 DDI_DMA_SYNC_FORDEV); 7826 } 7827 7828 HBASTATS.FcpIssued++; 7829 7830 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 7831 return (FC_SUCCESS); 7832 7833 } /* emlxs_send_fcp_cmd() */ 7834 7835 7836 7837 7838 #ifdef SFCT_SUPPORT 7839 static int32_t 7840 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp) 7841 { 7842 emlxs_hba_t *hba = HBA; 7843 fc_packet_t *pkt; 7844 IOCBQ *iocbq; 7845 IOCB *iocb; 7846 NODELIST *ndlp; 7847 CHANNEL *cp; 7848 uint16_t iotag; 7849 uint32_t did; 7850 ddi_dma_cookie_t *cp_cmd; 7851 7852 pkt = PRIV2PKT(sbp); 7853 7854 did = sbp->did; 7855 ndlp = sbp->node; 7856 7857 iocbq = &sbp->iocbq; 7858 iocb = &iocbq->iocb; 7859 7860 /* Make sure node is still active */ 7861 if (!ndlp->nlp_active) { 7862 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7863 "*Node not found. did=%x", did); 7864 7865 return (FC_BADPACKET); 7866 } 7867 7868 /* If gate is closed */ 7869 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) { 7870 return (FC_TRAN_BUSY); 7871 } 7872 7873 /* Get the iotag by registering the packet */ 7874 iotag = emlxs_register_pkt(sbp->channel, sbp); 7875 7876 if (!iotag) { 7877 /* No more command slots available, retry later */ 7878 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7879 "*Adapter Busy. 
Unable to allocate iotag: did=0x%x", did); 7880 7881 return (FC_TRAN_BUSY); 7882 } 7883 7884 /* Point of no return */ 7885 7886 cp = sbp->channel; 7887 cp->ulpSendCmd++; 7888 7889 #if (EMLXS_MODREV >= EMLXS_MODREV3) 7890 cp_cmd = pkt->pkt_cmd_cookie; 7891 #else 7892 cp_cmd = &pkt->pkt_cmd_cookie; 7893 #endif /* >= EMLXS_MODREV3 */ 7894 7895 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress); 7896 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress); 7897 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen; 7898 iocb->un.fcpt64.bdl.bdeFlags = 0; 7899 7900 if (hba->sli_mode < 3) { 7901 iocb->ULPBDECOUNT = 1; 7902 iocb->ULPLE = 1; 7903 } else { /* SLI3 */ 7904 7905 iocb->ULPBDECOUNT = 0; 7906 iocb->ULPLE = 0; 7907 iocb->unsli3.ext_iocb.ebde_count = 0; 7908 } 7909 7910 /* Initalize iocbq */ 7911 iocbq->port = (void *)port; 7912 iocbq->node = (void *)ndlp; 7913 iocbq->channel = (void *)cp; 7914 7915 /* Initalize iocb */ 7916 iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id; 7917 iocb->ULPIOTAG = iotag; 7918 iocb->ULPRSVDBYTE = 7919 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 7920 iocb->ULPOWNER = OWN_CHIP; 7921 iocb->ULPCLASS = sbp->class; 7922 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX; 7923 7924 /* Set the pkt timer */ 7925 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7926 ((pkt->pkt_timeout > 0xff) ? 
	    0 : 10);

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.FcpIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_fct_status() */


/*
 * Issue an abort (ABORT_XRI when the link is up, CLOSE_XRI otherwise)
 * for the exchange identified by pkt->pkt_cmd_fhdr.rx_id.
 * Returns FC_SUCCESS, FC_TRAN_BUSY or FC_BADPACKET.
 */
static int32_t
emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);

	did = sbp->did;
	ndlp = sbp->node;


	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Make sure node is still active */
	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "*Node not found. did=%x", did);

		return (FC_BADPACKET);
	}

	/* If gate is closed */
	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(sbp->channel, sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Point of no return */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *)sbp->channel;
	((CHANNEL *)sbp->channel)->ulpSendCmd++;

	/*
	 * Don't give the abort priority, we want the IOCB
	 * we are aborting to be processed first.
	 */
	iocbq->flag |= IOCB_SPECIAL;

	iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
	iocb->ULPIOTAG = iotag;
	iocb->ULPLE = 1;
	iocb->ULPCLASS = sbp->class;
	iocb->ULPOWNER = OWN_CHIP;

	if (hba->state >= FC_LINK_UP) {
		/* Create the abort IOCB */
		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;

	} else {
		/* Create the close IOCB (no ABTS on the wire at link down) */
		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;

	}

	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	/* Set the pkt timer */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_fct_abort() */

#endif /* SFCT_SUPPORT */


/*
 * Issue an IP-over-FC sequence on the IP channel.  Requires an existing
 * exchange (nlp_Xri) unless the destination is the broadcast DID; if none
 * exists, kicks off exchange creation and returns FC_TRAN_BUSY so the
 * caller retries.
 */
static int32_t
emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint32_t i;
	NODELIST *ndlp;
	uint32_t did;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[hba->channel_ip];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Check if node exists */
	/* Broadcast did is always a success */
	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	/* Check if gate is temporarily closed */
	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Check if an exchange has been created */
	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
		/* No exchange. Try creating one */
		(void) emlxs_create_xri(port, cp, ndlp);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Exchange not found. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
	/* on BROADCAST commands */
	if (pkt->pkt_cmdlen == 0) {
		/* Set the pkt_cmdlen to the cookie size */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
		}
#else
		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
#endif	/* >= EMLXS_MODREV3 */

	}

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	iocbq->node = (void *)ndlp;
	/* rval == 0xff means the SLI layer rejected the request locally */
	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {

		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp->ulpSendCmd++;

	/* Initialize sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ?
	    0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ip() */


/*
 * Issue an outbound ELS command on the ELS channel.  FLOGI on a virtual
 * port is converted to FDISC; PLOGI to one of our own port DIDs is
 * rejected; login-class commands get the latest service parameters
 * copied into the payload before transmission.
 */
static int32_t
emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_port_t *vport;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	CHANNEL *cp;
	uint32_t cmd;
	int i;
	ELS_PKT *els_pkt;
	NODELIST *ndlp;
	uint32_t did;
	char fcsp_msg[32];
	int rc;
	int32_t rval;

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Point of no return, except for ADISC & PLOGI */

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
		/* A virtual port logs into the fabric with FDISC, not FLOGI */
		if (port->vpi > 0) {
			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				if (!(port->flag & EMLXS_PORT_INIT_VPI_CMPL)) {
					(void) emlxs_mb_init_vpi(port);
					if (!(port->flag &
					    EMLXS_PORT_INIT_VPI_CMPL)) {
						pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
						emlxs_unswap_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

						return (FC_FAILURE);
					}
				}
			}
			cmd = ELS_CMD_FDISC;
			*((uint32_t *)pkt->pkt_cmd) = cmd;
		}
		ndlp = NULL;

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	case ELS_CMD_PLOGI:
		/* Make sure we don't log into ourself */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (!(vport->flag & EMLXS_PORT_BOUND)) {
				continue;
			}

			if (did == vport->did) {
				pkt->pkt_state = FC_PKT_NPORT_RJT;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
				emlxs_unswap_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

				return (FC_FAILURE);
			}
		}

		ndlp = NULL;

		/* Check if this is the first PLOGI */
		/* after a PT_TO_PT connection */
		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
			MAILBOXQ *mbox;

			/* ULP bug fix: derive a usable sid when ULP sent 0 */
			if (pkt->pkt_cmd_fhdr.s_id == 0) {
				pkt->pkt_cmd_fhdr.s_id =
				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
				    FP_DEFAULT_SID;
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
				    pkt->pkt_cmd_fhdr.s_id,
				    pkt->pkt_cmd_fhdr.d_id);
			}

			mutex_enter(&EMLXS_PORT_LOCK);
			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
			mutex_exit(&EMLXS_PORT_LOCK);

			/* Update our service parms */
			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
			    MEM_MBOX, 1))) {
				emlxs_mb_config_link(hba, mbox);

				rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
				    mbox, MBX_NOWAIT, 0);
				/* On BUSY/SUCCESS the SLI layer owns mbox */
				if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
					(void) emlxs_mem_put(hba, MEM_MBOX,
					    (uint8_t *)mbox);
				}

			}
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	default:
		ndlp = emlxs_node_find_did(port, did);

		/* If an ADISC is being sent and we have no node, */
		/* then we must fail the ADISC now */
		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {

			/* Build the LS_RJT response */
			els_pkt = (ELS_PKT *)pkt->pkt_resp;
			els_pkt->elsCode = 0x01;
			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
			    LSRJT_LOGICAL_ERR;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
			    LSEXP_NOTHING_MORE;
			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "ADISC Rejected. Node not found. did=0x%x", did);

			if (sbp->channel == NULL) {
				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					sbp->channel =
					    &hba->chan[hba->channel_els];
				} else {
					sbp->channel =
					    &hba->chan[FC_ELS_RING];
				}
			}

			/* Return this as rejected by the target */
			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);

			return (FC_SUCCESS);
		}
	}

	/* DID == BCAST_DID is special case to indicate that */
	/* RPI is being passed in seq_id field */
	/* This is used by emlxs_send_logo() for target mode */

	/* Initialize iocbq */
	iocbq->node = (void *)ndlp;
	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {

		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_els];
	cp->ulpSendCmd++;

	/* Check cmd */
	switch (cmd) {
	case ELS_CMD_PRLI:
	{
		/*
		 * if our firmware version is 3.20 or later,
		 * set the following bits for FC-TAPE support.
		 */

		if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
			els_pkt->un.prli.ConfmComplAllowed = 1;
			els_pkt->un.prli.Retry = 1;
			els_pkt->un.prli.TaskRetryIdReq = 1;
		} else {
			els_pkt->un.prli.ConfmComplAllowed = 0;
			els_pkt->un.prli.Retry = 0;
			els_pkt->un.prli.TaskRetryIdReq = 0;
		}

		break;
	}

	/* This is a patch for the ULP stack. */

	/*
	 * ULP only reads our service paramters once during bind_port,
	 * but the service parameters change due to topology.
	 */
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PLOGI:
	case ELS_CMD_PDISC:
	{
		/* Copy latest service parameters to payload */
		bcopy((void *) &port->sparam,
		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));

		if ((hba->flag & FC_NPIV_ENABLED) &&
		    (hba->flag & FC_NPIV_SUPPORTED) &&
		    (cmd == ELS_CMD_PLOGI)) {
			SERV_PARM *sp;
			emlxs_vvl_fmt_t *vvl;

			/* Advertise the Emulex OUI/vport in the vendor
			 * version fields of the PLOGI payload */
			sp = (SERV_PARM *)&els_pkt->un.logi;
			sp->VALID_VENDOR_VERSION = 1;
			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
			vvl->un0.w0.oui = 0x0000C9;
			vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
			vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
			vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
		}

#ifdef DHCHAP_SUPPORT
		emlxs_dhc_init_sp(port, did,
		    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
#endif	/* DHCHAP_SUPPORT */

		break;
	}

	}

	/* Initialize the sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ?
0 : 10); 8373 sbp->node = (void *)ndlp; 8374 sbp->lun = 0; 8375 sbp->did = did; 8376 mutex_exit(&sbp->mtx); 8377 8378 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s", 8379 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg); 8380 8381 if (pkt->pkt_cmdlen) { 8382 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8383 DDI_DMA_SYNC_FORDEV); 8384 } 8385 8386 /* Check node */ 8387 switch (cmd) { 8388 case ELS_CMD_FLOGI: 8389 if (port->ini_mode) { 8390 /* Make sure fabric node is destroyed */ 8391 /* It should already have been destroyed at link down */ 8392 /* Unregister the fabric did and attempt a deferred */ 8393 /* iocb send */ 8394 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 8395 if (emlxs_mb_unreg_did(port, FABRIC_DID, NULL, 8396 NULL, iocbq) == 0) { 8397 /* Deferring iocb tx until */ 8398 /* completion of unreg */ 8399 return (FC_SUCCESS); 8400 } 8401 } 8402 } 8403 break; 8404 8405 case ELS_CMD_PLOGI: 8406 8407 ndlp = emlxs_node_find_did(port, did); 8408 8409 if (ndlp && ndlp->nlp_active) { 8410 /* Close the node for any further normal IO */ 8411 emlxs_node_close(port, ndlp, hba->channel_fcp, 8412 pkt->pkt_timeout + 10); 8413 emlxs_node_close(port, ndlp, hba->channel_ip, 8414 pkt->pkt_timeout + 10); 8415 8416 /* Flush tx queues */ 8417 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8418 8419 /* Flush chip queues */ 8420 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8421 } 8422 8423 break; 8424 8425 case ELS_CMD_PRLI: 8426 8427 ndlp = emlxs_node_find_did(port, did); 8428 8429 if (ndlp && ndlp->nlp_active) { 8430 /* 8431 * Close the node for any further FCP IO; 8432 * Flush all outstanding I/O only if 8433 * "Establish Image Pair" bit is set. 
8434 */ 8435 emlxs_node_close(port, ndlp, hba->channel_fcp, 8436 pkt->pkt_timeout + 10); 8437 8438 if (els_pkt->un.prli.estabImagePair) { 8439 /* Flush tx queues */ 8440 (void) emlxs_tx_node_flush(port, ndlp, 8441 &hba->chan[hba->channel_fcp], 0, 0); 8442 8443 /* Flush chip queues */ 8444 (void) emlxs_chipq_node_flush(port, 8445 &hba->chan[hba->channel_fcp], ndlp, 0); 8446 } 8447 } 8448 8449 break; 8450 8451 } 8452 8453 HBASTATS.ElsCmdIssued++; 8454 8455 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 8456 8457 return (FC_SUCCESS); 8458 8459 } /* emlxs_send_els() */ 8460 8461 8462 8463 8464 static int32_t 8465 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 8466 { 8467 emlxs_hba_t *hba = HBA; 8468 emlxs_config_t *cfg = &CFG; 8469 fc_packet_t *pkt; 8470 IOCBQ *iocbq; 8471 IOCB *iocb; 8472 NODELIST *ndlp; 8473 CHANNEL *cp; 8474 int i; 8475 uint32_t cmd; 8476 uint32_t ucmd; 8477 ELS_PKT *els_pkt; 8478 fc_unsol_buf_t *ubp; 8479 emlxs_ub_priv_t *ub_priv; 8480 uint32_t did; 8481 char fcsp_msg[32]; 8482 uint8_t *ub_buffer; 8483 int32_t rval; 8484 8485 fcsp_msg[0] = 0; 8486 pkt = PRIV2PKT(sbp); 8487 els_pkt = (ELS_PKT *)pkt->pkt_cmd; 8488 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 8489 8490 iocbq = &sbp->iocbq; 8491 iocb = &iocbq->iocb; 8492 8493 /* Acquire the unsolicited command this pkt is replying to */ 8494 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) { 8495 /* This is for auto replies when no ub's are used */ 8496 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT; 8497 ubp = NULL; 8498 ub_priv = NULL; 8499 ub_buffer = NULL; 8500 8501 #ifdef SFCT_SUPPORT 8502 if (sbp->fct_cmd) { 8503 fct_els_t *els = 8504 (fct_els_t *)sbp->fct_cmd->cmd_specific; 8505 ub_buffer = (uint8_t *)els->els_req_payload; 8506 } 8507 #endif /* SFCT_SUPPORT */ 8508 8509 } else { 8510 /* Find the ub buffer that goes with this reply */ 8511 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) { 8512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 8513 "ELS reply: Invalid 
oxid=%x", 8514 pkt->pkt_cmd_fhdr.ox_id); 8515 return (FC_BADPACKET); 8516 } 8517 8518 ub_buffer = (uint8_t *)ubp->ub_buffer; 8519 ub_priv = ubp->ub_fca_private; 8520 ucmd = ub_priv->cmd; 8521 8522 ub_priv->flags |= EMLXS_UB_REPLY; 8523 8524 /* Reset oxid to ELS command */ 8525 /* We do this because the ub is only valid */ 8526 /* until we return from this thread */ 8527 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff; 8528 } 8529 8530 /* Save the result */ 8531 sbp->ucmd = ucmd; 8532 8533 if (sbp->channel == NULL) { 8534 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 8535 sbp->channel = &hba->chan[hba->channel_els]; 8536 } else { 8537 sbp->channel = &hba->chan[FC_ELS_RING]; 8538 } 8539 } 8540 8541 /* Check for interceptions */ 8542 switch (ucmd) { 8543 8544 #ifdef ULP_PATCH2 8545 case ELS_CMD_LOGO: 8546 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) { 8547 break; 8548 } 8549 8550 /* Check if this was generated by ULP and not us */ 8551 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 8552 8553 /* 8554 * Since we replied to this already, 8555 * we won't need to send this now 8556 */ 8557 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8558 8559 return (FC_SUCCESS); 8560 } 8561 8562 break; 8563 #endif /* ULP_PATCH2 */ 8564 8565 #ifdef ULP_PATCH3 8566 case ELS_CMD_PRLI: 8567 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) { 8568 break; 8569 } 8570 8571 /* Check if this was generated by ULP and not us */ 8572 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 8573 8574 /* 8575 * Since we replied to this already, 8576 * we won't need to send this now 8577 */ 8578 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8579 8580 return (FC_SUCCESS); 8581 } 8582 8583 break; 8584 #endif /* ULP_PATCH3 */ 8585 8586 8587 #ifdef ULP_PATCH4 8588 case ELS_CMD_PRLO: 8589 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) { 8590 break; 8591 } 8592 8593 /* Check if this was generated by ULP and not us */ 8594 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 8595 /* 8596 * Since we replied to 
this already, 8597 * we won't need to send this now 8598 */ 8599 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8600 8601 return (FC_SUCCESS); 8602 } 8603 8604 break; 8605 #endif /* ULP_PATCH4 */ 8606 8607 #ifdef ULP_PATCH6 8608 case ELS_CMD_RSCN: 8609 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) { 8610 break; 8611 } 8612 8613 /* Check if this RSCN was generated by us */ 8614 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 8615 cmd = *((uint32_t *)pkt->pkt_cmd); 8616 cmd = LE_SWAP32(cmd); 8617 cmd &= ELS_CMD_MASK; 8618 8619 /* 8620 * If ULP is accepting this, 8621 * then close affected node 8622 */ 8623 if (port->ini_mode && ub_buffer && cmd 8624 == ELS_CMD_ACC) { 8625 fc_rscn_t *rscn; 8626 uint32_t count; 8627 uint32_t *lp; 8628 8629 /* 8630 * Only the Leadville code path will 8631 * come thru here. The RSCN data is NOT 8632 * swapped properly for the Comstar code 8633 * path. 8634 */ 8635 lp = (uint32_t *)ub_buffer; 8636 rscn = (fc_rscn_t *)lp++; 8637 count = 8638 ((rscn->rscn_payload_len - 4) / 4); 8639 8640 /* Close affected ports */ 8641 for (i = 0; i < count; i++, lp++) { 8642 (void) emlxs_port_offline(port, 8643 *lp); 8644 } 8645 } 8646 8647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8648 "RSCN %s: did=%x oxid=%x rxid=%x. " 8649 "Intercepted.", emlxs_elscmd_xlate(cmd), 8650 did, pkt->pkt_cmd_fhdr.ox_id, 8651 pkt->pkt_cmd_fhdr.rx_id); 8652 8653 /* 8654 * Since we generated this RSCN, 8655 * we won't need to send this reply 8656 */ 8657 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8658 8659 return (FC_SUCCESS); 8660 } 8661 8662 break; 8663 #endif /* ULP_PATCH6 */ 8664 8665 case ELS_CMD_PLOGI: 8666 /* Check if this PLOGI was generated by us */ 8667 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 8668 cmd = *((uint32_t *)pkt->pkt_cmd); 8669 cmd = LE_SWAP32(cmd); 8670 cmd &= ELS_CMD_MASK; 8671 8672 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8673 "PLOGI %s: did=%x oxid=%x rxid=%x. 
" 8674 "Intercepted.", emlxs_elscmd_xlate(cmd), 8675 did, pkt->pkt_cmd_fhdr.ox_id, 8676 pkt->pkt_cmd_fhdr.rx_id); 8677 8678 /* 8679 * Since we generated this PLOGI, 8680 * we won't need to send this reply 8681 */ 8682 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 8683 8684 return (FC_SUCCESS); 8685 } 8686 8687 break; 8688 } 8689 8690 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8691 emlxs_swap_els_pkt(sbp); 8692 #endif /* EMLXS_MODREV2X */ 8693 8694 8695 cmd = *((uint32_t *)pkt->pkt_cmd); 8696 cmd &= ELS_CMD_MASK; 8697 8698 /* Check if modifications are needed */ 8699 switch (ucmd) { 8700 case (ELS_CMD_PRLI): 8701 8702 if (cmd == ELS_CMD_ACC) { 8703 /* This is a patch for the ULP stack. */ 8704 /* ULP does not keep track of FCP2 support */ 8705 8706 if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) { 8707 els_pkt->un.prli.ConfmComplAllowed = 1; 8708 els_pkt->un.prli.Retry = 1; 8709 els_pkt->un.prli.TaskRetryIdReq = 1; 8710 } else { 8711 els_pkt->un.prli.ConfmComplAllowed = 0; 8712 els_pkt->un.prli.Retry = 0; 8713 els_pkt->un.prli.TaskRetryIdReq = 0; 8714 } 8715 } 8716 8717 break; 8718 8719 case ELS_CMD_FLOGI: 8720 case ELS_CMD_PLOGI: 8721 case ELS_CMD_FDISC: 8722 case ELS_CMD_PDISC: 8723 8724 if (cmd == ELS_CMD_ACC) { 8725 /* This is a patch for the ULP stack. */ 8726 8727 /* 8728 * ULP only reads our service parameters 8729 * once during bind_port, but the service 8730 * parameters change due to topology. 
8731 */ 8732 8733 /* Copy latest service parameters to payload */ 8734 bcopy((void *)&port->sparam, 8735 (void *)&els_pkt->un.logi, sizeof (SERV_PARM)); 8736 8737 #ifdef DHCHAP_SUPPORT 8738 emlxs_dhc_init_sp(port, did, 8739 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg); 8740 #endif /* DHCHAP_SUPPORT */ 8741 8742 } 8743 8744 break; 8745 8746 } 8747 8748 /* Initalize iocbq */ 8749 iocbq->node = (void *)NULL; 8750 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) { 8751 8752 if (rval == 0xff) { 8753 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 8754 rval = FC_SUCCESS; 8755 } 8756 8757 return (rval); 8758 } 8759 8760 cp = &hba->chan[hba->channel_els]; 8761 cp->ulpSendCmd++; 8762 8763 /* Initalize sbp */ 8764 mutex_enter(&sbp->mtx); 8765 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8766 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 8767 sbp->node = (void *) NULL; 8768 sbp->lun = 0; 8769 sbp->class = iocb->ULPCLASS; 8770 sbp->did = did; 8771 mutex_exit(&sbp->mtx); 8772 8773 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 8774 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd), 8775 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id, 8776 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg); 8777 8778 /* Process nodes */ 8779 switch (ucmd) { 8780 case ELS_CMD_RSCN: 8781 { 8782 if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) { 8783 fc_rscn_t *rscn; 8784 uint32_t count; 8785 uint32_t *lp = NULL; 8786 8787 /* 8788 * Only the Leadville code path will come thru 8789 * here. The RSCN data is NOT swapped properly 8790 * for the Comstar code path. 
8791 */ 8792 lp = (uint32_t *)ub_buffer; 8793 rscn = (fc_rscn_t *)lp++; 8794 count = ((rscn->rscn_payload_len - 4) / 4); 8795 8796 /* Close affected ports */ 8797 for (i = 0; i < count; i++, lp++) { 8798 (void) emlxs_port_offline(port, *lp); 8799 } 8800 } 8801 break; 8802 } 8803 case ELS_CMD_PLOGI: 8804 8805 if (cmd == ELS_CMD_ACC) { 8806 ndlp = emlxs_node_find_did(port, did); 8807 8808 if (ndlp && ndlp->nlp_active) { 8809 /* Close the node for any further normal IO */ 8810 emlxs_node_close(port, ndlp, hba->channel_fcp, 8811 pkt->pkt_timeout + 10); 8812 emlxs_node_close(port, ndlp, hba->channel_ip, 8813 pkt->pkt_timeout + 10); 8814 8815 /* Flush tx queue */ 8816 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8817 8818 /* Flush chip queue */ 8819 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8820 } 8821 } 8822 8823 break; 8824 8825 case ELS_CMD_PRLI: 8826 8827 if (cmd == ELS_CMD_ACC) { 8828 ndlp = emlxs_node_find_did(port, did); 8829 8830 if (ndlp && ndlp->nlp_active) { 8831 /* Close the node for any further normal IO */ 8832 emlxs_node_close(port, ndlp, hba->channel_fcp, 8833 pkt->pkt_timeout + 10); 8834 8835 /* Flush tx queues */ 8836 (void) emlxs_tx_node_flush(port, ndlp, 8837 &hba->chan[hba->channel_fcp], 0, 0); 8838 8839 /* Flush chip queues */ 8840 (void) emlxs_chipq_node_flush(port, 8841 &hba->chan[hba->channel_fcp], ndlp, 0); 8842 } 8843 } 8844 8845 break; 8846 8847 case ELS_CMD_PRLO: 8848 8849 if (cmd == ELS_CMD_ACC) { 8850 ndlp = emlxs_node_find_did(port, did); 8851 8852 if (ndlp && ndlp->nlp_active) { 8853 /* Close the node for any further normal IO */ 8854 emlxs_node_close(port, ndlp, 8855 hba->channel_fcp, 60); 8856 8857 /* Flush tx queues */ 8858 (void) emlxs_tx_node_flush(port, ndlp, 8859 &hba->chan[hba->channel_fcp], 0, 0); 8860 8861 /* Flush chip queues */ 8862 (void) emlxs_chipq_node_flush(port, 8863 &hba->chan[hba->channel_fcp], ndlp, 0); 8864 } 8865 } 8866 8867 break; 8868 8869 case ELS_CMD_LOGO: 8870 8871 if (cmd == ELS_CMD_ACC) { 8872 ndlp 
= emlxs_node_find_did(port, did); 8873 8874 if (ndlp && ndlp->nlp_active) { 8875 /* Close the node for any further normal IO */ 8876 emlxs_node_close(port, ndlp, 8877 hba->channel_fcp, 60); 8878 emlxs_node_close(port, ndlp, 8879 hba->channel_ip, 60); 8880 8881 /* Flush tx queues */ 8882 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8883 8884 /* Flush chip queues */ 8885 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8886 } 8887 } 8888 8889 break; 8890 } 8891 8892 if (pkt->pkt_cmdlen) { 8893 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8894 DDI_DMA_SYNC_FORDEV); 8895 } 8896 8897 HBASTATS.ElsRspIssued++; 8898 8899 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 8900 8901 return (FC_SUCCESS); 8902 8903 } /* emlxs_send_els_rsp() */ 8904 8905 8906 #ifdef MENLO_SUPPORT 8907 static int32_t 8908 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp) 8909 { 8910 emlxs_hba_t *hba = HBA; 8911 fc_packet_t *pkt; 8912 IOCBQ *iocbq; 8913 IOCB *iocb; 8914 CHANNEL *cp; 8915 NODELIST *ndlp; 8916 uint32_t did; 8917 uint32_t *lp; 8918 int32_t rval; 8919 8920 pkt = PRIV2PKT(sbp); 8921 did = EMLXS_MENLO_DID; 8922 lp = (uint32_t *)pkt->pkt_cmd; 8923 8924 iocbq = &sbp->iocbq; 8925 iocb = &iocbq->iocb; 8926 8927 ndlp = emlxs_node_find_did(port, did); 8928 8929 if (!ndlp || !ndlp->nlp_active) { 8930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 8931 "Node not found. 
did=0x%x", did); 8932 8933 return (FC_BADPACKET); 8934 } 8935 8936 iocbq->node = (void *) ndlp; 8937 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 8938 8939 if (rval == 0xff) { 8940 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 8941 rval = FC_SUCCESS; 8942 } 8943 8944 return (rval); 8945 } 8946 8947 cp = &hba->chan[hba->channel_ct]; 8948 cp->ulpSendCmd++; 8949 8950 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) { 8951 /* Cmd phase */ 8952 8953 /* Initalize iocb */ 8954 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id; 8955 iocb->ULPCONTEXT = 0; 8956 iocb->ULPPU = 3; 8957 8958 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8959 "%s: [%08x,%08x,%08x,%08x]", 8960 emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]), 8961 BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4])); 8962 8963 } else { /* FC_PKT_OUTBOUND */ 8964 8965 /* MENLO_CMD_FW_DOWNLOAD Data Phase */ 8966 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX; 8967 8968 /* Initalize iocb */ 8969 iocb->un.genreq64.param = 0; 8970 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id; 8971 iocb->ULPPU = 1; 8972 8973 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8974 "%s: Data: rxid=0x%x size=%d", 8975 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD), 8976 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen); 8977 } 8978 8979 /* Initalize sbp */ 8980 mutex_enter(&sbp->mtx); 8981 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8982 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 8983 sbp->node = (void *) ndlp; 8984 sbp->lun = 0; 8985 sbp->class = iocb->ULPCLASS; 8986 sbp->did = did; 8987 mutex_exit(&sbp->mtx); 8988 8989 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8990 DDI_DMA_SYNC_FORDEV); 8991 8992 HBASTATS.CtCmdIssued++; 8993 8994 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 8995 8996 return (FC_SUCCESS); 8997 8998 } /* emlxs_send_menlo() */ 8999 #endif /* MENLO_SUPPORT */ 9000 9001 9002 static int32_t 9003 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp) 9004 { 9005 emlxs_hba_t *hba = HBA; 9006 fc_packet_t *pkt; 9007 IOCBQ *iocbq; 9008 IOCB *iocb; 9009 NODELIST *ndlp; 9010 uint32_t did; 9011 CHANNEL *cp; 9012 int32_t rval; 9013 9014 pkt = PRIV2PKT(sbp); 9015 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 9016 9017 iocbq = &sbp->iocbq; 9018 iocb = &iocbq->iocb; 9019 9020 ndlp = emlxs_node_find_did(port, did); 9021 9022 if (!ndlp || !ndlp->nlp_active) { 9023 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 9024 "Node not found. did=0x%x", did); 9025 9026 return (FC_BADPACKET); 9027 } 9028 9029 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9030 emlxs_swap_ct_pkt(sbp); 9031 #endif /* EMLXS_MODREV2X */ 9032 9033 iocbq->node = (void *)ndlp; 9034 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 9035 9036 if (rval == 0xff) { 9037 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 9038 rval = FC_SUCCESS; 9039 } 9040 9041 return (rval); 9042 } 9043 9044 cp = &hba->chan[hba->channel_ct]; 9045 cp->ulpSendCmd++; 9046 9047 /* Initalize sbp */ 9048 mutex_enter(&sbp->mtx); 9049 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 9050 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 9051 sbp->node = (void *)ndlp; 9052 sbp->lun = 0; 9053 sbp->class = iocb->ULPCLASS; 9054 sbp->did = did; 9055 mutex_exit(&sbp->mtx); 9056 9057 if (did == NAMESERVER_DID) { 9058 SLI_CT_REQUEST *CtCmd; 9059 uint32_t *lp0; 9060 9061 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 9062 lp0 = (uint32_t *)pkt->pkt_cmd; 9063 9064 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9065 "%s: did=%x [%08x,%08x]", 9066 emlxs_ctcmd_xlate( 9067 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 9068 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 9069 9070 if (hba->flag & FC_NPIV_DELAY_REQUIRED) { 9071 sbp->pkt_flags |= PACKET_DELAY_REQUIRED; 9072 } 9073 9074 } else if (did == FDMI_DID) { 9075 SLI_CT_REQUEST *CtCmd; 9076 uint32_t *lp0; 9077 9078 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 9079 lp0 = (uint32_t *)pkt->pkt_cmd; 9080 9081 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9082 "%s: did=%x [%08x,%08x]", 9083 emlxs_mscmd_xlate( 9084 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 9085 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 9086 } else { 9087 SLI_CT_REQUEST *CtCmd; 9088 uint32_t *lp0; 9089 9090 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 9091 lp0 = (uint32_t *)pkt->pkt_cmd; 9092 9093 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9094 "%s: did=%x [%08x,%08x]", 9095 emlxs_rmcmd_xlate( 9096 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 9097 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 9098 } 9099 9100 if (pkt->pkt_cmdlen) { 9101 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9102 DDI_DMA_SYNC_FORDEV); 9103 } 9104 9105 HBASTATS.CtCmdIssued++; 9106 9107 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9108 9109 return (FC_SUCCESS); 9110 9111 } /* emlxs_send_ct() */ 9112 9113 9114 static int32_t 9115 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 9116 { 9117 emlxs_hba_t *hba = HBA; 9118 fc_packet_t *pkt; 9119 CHANNEL *cp; 9120 IOCBQ *iocbq; 9121 IOCB *iocb; 9122 uint32_t *cmd; 9123 SLI_CT_REQUEST *CtCmd; 9124 int32_t rval; 9125 9126 pkt = PRIV2PKT(sbp); 9127 CtCmd = 
(SLI_CT_REQUEST *)pkt->pkt_cmd; 9128 cmd = (uint32_t *)pkt->pkt_cmd; 9129 9130 iocbq = &sbp->iocbq; 9131 iocb = &iocbq->iocb; 9132 9133 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9134 emlxs_swap_ct_pkt(sbp); 9135 #endif /* EMLXS_MODREV2X */ 9136 9137 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 9138 9139 if (rval == 0xff) { 9140 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 9141 rval = FC_SUCCESS; 9142 } 9143 9144 return (rval); 9145 } 9146 9147 cp = &hba->chan[hba->channel_ct]; 9148 cp->ulpSendCmd++; 9149 9150 /* Initalize sbp */ 9151 mutex_enter(&sbp->mtx); 9152 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 9153 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 9154 sbp->node = NULL; 9155 sbp->lun = 0; 9156 sbp->class = iocb->ULPCLASS; 9157 mutex_exit(&sbp->mtx); 9158 9159 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg, 9160 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ", 9161 emlxs_rmcmd_xlate(LE_SWAP16( 9162 CtCmd->CommandResponse.bits.CmdRsp)), 9163 CtCmd->ReasonCode, CtCmd->Explanation, 9164 LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]), 9165 pkt->pkt_cmd_fhdr.rx_id); 9166 9167 if (pkt->pkt_cmdlen) { 9168 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9169 DDI_DMA_SYNC_FORDEV); 9170 } 9171 9172 HBASTATS.CtRspIssued++; 9173 9174 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9175 9176 return (FC_SUCCESS); 9177 9178 } /* emlxs_send_ct_rsp() */ 9179 9180 9181 /* 9182 * emlxs_get_instance() 9183 * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst. 
9184 */ 9185 extern uint32_t 9186 emlxs_get_instance(int32_t ddiinst) 9187 { 9188 uint32_t i; 9189 uint32_t inst; 9190 9191 mutex_enter(&emlxs_device.lock); 9192 9193 inst = MAX_FC_BRDS; 9194 for (i = 0; i < emlxs_instance_count; i++) { 9195 if (emlxs_instance[i] == ddiinst) { 9196 inst = i; 9197 break; 9198 } 9199 } 9200 9201 mutex_exit(&emlxs_device.lock); 9202 9203 return (inst); 9204 9205 } /* emlxs_get_instance() */ 9206 9207 9208 /* 9209 * emlxs_add_instance() 9210 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst. 9211 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0. 9212 */ 9213 static uint32_t 9214 emlxs_add_instance(int32_t ddiinst) 9215 { 9216 uint32_t i; 9217 9218 mutex_enter(&emlxs_device.lock); 9219 9220 /* First see if the ddiinst already exists */ 9221 for (i = 0; i < emlxs_instance_count; i++) { 9222 if (emlxs_instance[i] == ddiinst) { 9223 break; 9224 } 9225 } 9226 9227 /* If it doesn't already exist, add it */ 9228 if (i >= emlxs_instance_count) { 9229 if ((i = emlxs_instance_count) < MAX_FC_BRDS) { 9230 emlxs_instance[i] = ddiinst; 9231 emlxs_instance_count++; 9232 emlxs_device.hba_count = emlxs_instance_count; 9233 } 9234 } 9235 9236 mutex_exit(&emlxs_device.lock); 9237 9238 return (i); 9239 9240 } /* emlxs_add_instance() */ 9241 9242 9243 /*ARGSUSED*/ 9244 extern void 9245 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat, 9246 uint32_t doneq) 9247 { 9248 emlxs_hba_t *hba; 9249 emlxs_port_t *port; 9250 emlxs_buf_t *fpkt; 9251 9252 port = sbp->port; 9253 9254 if (!port) { 9255 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg, 9256 "NULL port found. 
sbp=%p flags=%x", sbp, sbp->pkt_flags); 9257 9258 return; 9259 } 9260 9261 hba = HBA; 9262 9263 mutex_enter(&sbp->mtx); 9264 9265 /* Check for error conditions */ 9266 if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED | 9267 PACKET_IN_DONEQ | PACKET_IN_COMPLETION | 9268 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) { 9269 if (sbp->pkt_flags & PACKET_ULP_OWNED) { 9270 EMLXS_MSGF(EMLXS_CONTEXT, 9271 &emlxs_pkt_completion_error_msg, 9272 "Packet already returned. sbp=%p flags=%x", sbp, 9273 sbp->pkt_flags); 9274 } 9275 9276 else if (sbp->pkt_flags & PACKET_COMPLETED) { 9277 EMLXS_MSGF(EMLXS_CONTEXT, 9278 &emlxs_pkt_completion_error_msg, 9279 "Packet already completed. sbp=%p flags=%x", sbp, 9280 sbp->pkt_flags); 9281 } 9282 9283 else if (sbp->pkt_flags & PACKET_IN_DONEQ) { 9284 EMLXS_MSGF(EMLXS_CONTEXT, 9285 &emlxs_pkt_completion_error_msg, 9286 "Pkt already on done queue. sbp=%p flags=%x", sbp, 9287 sbp->pkt_flags); 9288 } 9289 9290 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) { 9291 EMLXS_MSGF(EMLXS_CONTEXT, 9292 &emlxs_pkt_completion_error_msg, 9293 "Packet already in completion. sbp=%p flags=%x", 9294 sbp, sbp->pkt_flags); 9295 } 9296 9297 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) { 9298 EMLXS_MSGF(EMLXS_CONTEXT, 9299 &emlxs_pkt_completion_error_msg, 9300 "Packet still on chip queue. sbp=%p flags=%x", 9301 sbp, sbp->pkt_flags); 9302 } 9303 9304 else if (sbp->pkt_flags & PACKET_IN_TXQ) { 9305 EMLXS_MSGF(EMLXS_CONTEXT, 9306 &emlxs_pkt_completion_error_msg, 9307 "Packet still on tx queue. 
sbp=%p flags=%x", sbp, 9308 sbp->pkt_flags); 9309 } 9310 9311 mutex_exit(&sbp->mtx); 9312 return; 9313 } 9314 9315 /* Packet is now in completion */ 9316 sbp->pkt_flags |= PACKET_IN_COMPLETION; 9317 9318 /* Set the state if not already set */ 9319 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) { 9320 emlxs_set_pkt_state(sbp, iostat, localstat, 0); 9321 } 9322 9323 /* Check for parent flush packet */ 9324 /* If pkt has a parent flush packet then adjust its count now */ 9325 fpkt = sbp->fpkt; 9326 if (fpkt) { 9327 /* 9328 * We will try to NULL sbp->fpkt inside the 9329 * fpkt's mutex if possible 9330 */ 9331 9332 if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) { 9333 mutex_enter(&fpkt->mtx); 9334 if (fpkt->flush_count) { 9335 fpkt->flush_count--; 9336 } 9337 sbp->fpkt = NULL; 9338 mutex_exit(&fpkt->mtx); 9339 } else { /* fpkt has been returned already */ 9340 9341 sbp->fpkt = NULL; 9342 } 9343 } 9344 9345 /* If pkt is polled, then wake up sleeping thread */ 9346 if (sbp->pkt_flags & PACKET_POLLED) { 9347 /* Don't set the PACKET_ULP_OWNED flag here */ 9348 /* because the polling thread will do it */ 9349 sbp->pkt_flags |= PACKET_COMPLETED; 9350 mutex_exit(&sbp->mtx); 9351 9352 /* Wake up sleeping thread */ 9353 mutex_enter(&EMLXS_PKT_LOCK); 9354 cv_broadcast(&EMLXS_PKT_CV); 9355 mutex_exit(&EMLXS_PKT_LOCK); 9356 } 9357 9358 /* If packet was generated by our driver, */ 9359 /* then complete it immediately */ 9360 else if (sbp->pkt_flags & PACKET_ALLOCATED) { 9361 mutex_exit(&sbp->mtx); 9362 9363 emlxs_iodone(sbp); 9364 } 9365 9366 /* Put the pkt on the done queue for callback */ 9367 /* completion in another thread */ 9368 else { 9369 sbp->pkt_flags |= PACKET_IN_DONEQ; 9370 sbp->next = NULL; 9371 mutex_exit(&sbp->mtx); 9372 9373 /* Put pkt on doneq, so I/O's will be completed in order */ 9374 mutex_enter(&EMLXS_PORT_LOCK); 9375 if (hba->iodone_tail == NULL) { 9376 hba->iodone_list = sbp; 9377 hba->iodone_count = 1; 9378 } else { 9379 hba->iodone_tail->next = sbp; 9380 
hba->iodone_count++; 9381 } 9382 hba->iodone_tail = sbp; 9383 mutex_exit(&EMLXS_PORT_LOCK); 9384 9385 /* Trigger a thread to service the doneq */ 9386 emlxs_thread_trigger1(&hba->iodone_thread, 9387 emlxs_iodone_server); 9388 } 9389 9390 return; 9391 9392 } /* emlxs_pkt_complete() */ 9393 9394 9395 #ifdef SAN_DIAG_SUPPORT 9396 /* 9397 * This routine is called with EMLXS_PORT_LOCK held so we can just increment 9398 * normally. Don't have to use atomic operations. 9399 */ 9400 extern void 9401 emlxs_update_sd_bucket(emlxs_buf_t *sbp) 9402 { 9403 emlxs_port_t *vport; 9404 fc_packet_t *pkt; 9405 uint32_t did; 9406 hrtime_t t; 9407 hrtime_t delta_time; 9408 int i; 9409 NODELIST *ndlp; 9410 9411 vport = sbp->port; 9412 9413 if ((sd_bucket.search_type == 0) || 9414 (vport->sd_io_latency_state != SD_COLLECTING)) 9415 return; 9416 9417 /* Compute the iolatency time in microseconds */ 9418 t = gethrtime(); 9419 delta_time = t - sbp->sd_start_time; 9420 pkt = PRIV2PKT(sbp); 9421 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 9422 ndlp = emlxs_node_find_did(vport, did); 9423 9424 if (ndlp) { 9425 if (delta_time >= 9426 sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) 9427 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1]. 
9428 count++; 9429 else if (delta_time <= sd_bucket.values[0]) 9430 ndlp->sd_dev_bucket[0].count++; 9431 else { 9432 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) { 9433 if ((delta_time > sd_bucket.values[i-1]) && 9434 (delta_time <= sd_bucket.values[i])) { 9435 ndlp->sd_dev_bucket[i].count++; 9436 break; 9437 } 9438 } 9439 } 9440 } 9441 } 9442 #endif /* SAN_DIAG_SUPPORT */ 9443 9444 /*ARGSUSED*/ 9445 static void 9446 emlxs_iodone_server(void *arg1, void *arg2, void *arg3) 9447 { 9448 emlxs_hba_t *hba = (emlxs_hba_t *)arg1; 9449 emlxs_buf_t *sbp; 9450 9451 mutex_enter(&EMLXS_PORT_LOCK); 9452 9453 /* Remove one pkt from the doneq head and complete it */ 9454 while ((sbp = hba->iodone_list) != NULL) { 9455 if ((hba->iodone_list = sbp->next) == NULL) { 9456 hba->iodone_tail = NULL; 9457 hba->iodone_count = 0; 9458 } else { 9459 hba->iodone_count--; 9460 } 9461 9462 mutex_exit(&EMLXS_PORT_LOCK); 9463 9464 /* Prepare the pkt for completion */ 9465 mutex_enter(&sbp->mtx); 9466 sbp->next = NULL; 9467 sbp->pkt_flags &= ~PACKET_IN_DONEQ; 9468 mutex_exit(&sbp->mtx); 9469 9470 /* Complete the IO now */ 9471 emlxs_iodone(sbp); 9472 9473 /* Reacquire lock and check if more work is to be done */ 9474 mutex_enter(&EMLXS_PORT_LOCK); 9475 } 9476 9477 mutex_exit(&EMLXS_PORT_LOCK); 9478 9479 return; 9480 9481 } /* End emlxs_iodone_server */ 9482 9483 9484 static void 9485 emlxs_iodone(emlxs_buf_t *sbp) 9486 { 9487 fc_packet_t *pkt; 9488 CHANNEL *cp; 9489 9490 pkt = PRIV2PKT(sbp); 9491 9492 /* Check one more time that the pkt has not already been returned */ 9493 if (sbp->pkt_flags & PACKET_ULP_OWNED) { 9494 return; 9495 } 9496 cp = (CHANNEL *)sbp->channel; 9497 9498 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9499 emlxs_unswap_pkt(sbp); 9500 #endif /* EMLXS_MODREV2X */ 9501 9502 mutex_enter(&sbp->mtx); 9503 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED); 9504 mutex_exit(&sbp->mtx); 9505 9506 if (pkt->pkt_comp) { 9507 cp->ulpCmplCmd++; 9508 (*pkt->pkt_comp) (pkt); 9509 } 9510 
9511 return; 9512 9513 } /* emlxs_iodone() */ 9514 9515 9516 9517 extern fc_unsol_buf_t * 9518 emlxs_ub_find(emlxs_port_t *port, uint32_t token) 9519 { 9520 emlxs_unsol_buf_t *pool; 9521 fc_unsol_buf_t *ubp; 9522 emlxs_ub_priv_t *ub_priv; 9523 9524 /* Check if this is a valid ub token */ 9525 if (token < EMLXS_UB_TOKEN_OFFSET) { 9526 return (NULL); 9527 } 9528 9529 mutex_enter(&EMLXS_UB_LOCK); 9530 9531 pool = port->ub_pool; 9532 while (pool) { 9533 /* Find a pool with the proper token range */ 9534 if (token >= pool->pool_first_token && 9535 token <= pool->pool_last_token) { 9536 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token - 9537 pool->pool_first_token)]; 9538 ub_priv = ubp->ub_fca_private; 9539 9540 if (ub_priv->token != token) { 9541 EMLXS_MSGF(EMLXS_CONTEXT, 9542 &emlxs_sfs_debug_msg, 9543 "ub_find: Invalid token=%x", ubp, token, 9544 ub_priv->token); 9545 9546 ubp = NULL; 9547 } 9548 9549 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) { 9550 EMLXS_MSGF(EMLXS_CONTEXT, 9551 &emlxs_sfs_debug_msg, 9552 "ub_find: Buffer not in use. 
buffer=%p "
			    "token=%x", ubp, token);

			ubp = NULL;
		}

		mutex_exit(&EMLXS_UB_LOCK);

		return (ubp);
	}

		pool = pool->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (NULL);

} /* emlxs_ub_find() */



/*
 * emlxs_ub_get
 *
 * Allocate one unsolicited buffer of at least 'size' bytes and of the
 * requested 'type' from the port's pool list.  When 'reserve' is set the
 * buffer is taken from the pool's reserve count first (falling back to the
 * normal free count when no reserve entries remain).  Returns the buffer
 * marked EMLXS_UB_IN_USE with a 5 minute timeout armed, or NULL when no
 * suitable buffer is available.  All pool accounting is done under
 * EMLXS_UB_LOCK.
 */
extern fc_unsol_buf_t *
emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
    uint32_t reserve)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *pool;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;
	uint32_t resv_flag;
	uint32_t pool_free;
	uint32_t pool_free_resv;

	mutex_enter(&EMLXS_UB_LOCK);

	pool = port->ub_pool;
	while (pool) {
		/* Find a pool of the appropriate type and size */
		if ((pool->pool_available == 0) ||
		    (pool->pool_type != type) ||
		    (pool->pool_buf_size < size)) {
			goto next_pool;
		}

		/* Adjust free counts based on availablity */
		/* The free reserve count gets first priority */
		pool_free_resv =
		    min(pool->pool_free_resv, pool->pool_available);
		pool_free =
		    min(pool->pool_free,
		    (pool->pool_available - pool_free_resv));

		/* Initialize reserve flag */
		resv_flag = reserve;

		if (resv_flag) {
			if (pool_free_resv == 0) {
				if (pool_free == 0) {
					goto next_pool;
				}
				/* No reserve entries left; take a normal one */
				resv_flag = 0;
			}
		} else if (pool_free == 0) {
			goto next_pool;
		}

		/* Find next available free buffer in this pool */
		for (i = 0; i < pool->pool_nentries; i++) {
			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
			ub_priv = ubp->ub_fca_private;

			if (!ub_priv->available ||
			    ub_priv->flags != EMLXS_UB_FREE) {
				continue;
			}

			ub_priv->time = hba->timer_tics;

			/* Timeout in 5 minutes */
			ub_priv->timeout = (5 * 60);

			ub_priv->flags = EMLXS_UB_IN_USE;

			/* Alloc the buffer from the pool */
			if (resv_flag) {
				ub_priv->flags |= EMLXS_UB_RESV;
				pool->pool_free_resv--;
			} else {
				pool->pool_free--;
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
			    ub_priv->token, pool->pool_nentries,
			    pool->pool_available, pool->pool_free,
			    pool->pool_free_resv);

			mutex_exit(&EMLXS_UB_LOCK);

			return (ubp);
		}
next_pool:

		pool = pool->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (NULL);

} /* emlxs_ub_get() */



/*
 * emlxs_set_pkt_state
 *
 * Translate an adapter I/O completion status (iostat, plus the local
 * reject reason 'localstat' when iostat == IOSTAT_LOCAL_REJECT) into the
 * ULP-visible fc_packet_t state/reason/explanation/action fields, using
 * the emlxs_iostat_tbl / emlxs_ioerr_tbl lookup tables.  Only the first
 * caller wins: once PACKET_STATE_VALID is set the packet state is left
 * untouched.  When 'lock' is nonzero sbp->mtx is held across the update.
 */
extern void
emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t lock)
{
	fc_packet_t *pkt;
	fcp_rsp_t *fcp_rsp;
	uint32_t i;
	emlxs_xlat_err_t *tptr;
	emlxs_xlat_err_t *entry;

	pkt = PRIV2PKT(sbp);

	if (lock) {
		mutex_enter(&sbp->mtx);
	}

	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		sbp->pkt_flags |= PACKET_STATE_VALID;

		/* Perform table lookup */
		entry = NULL;
		if (iostat != IOSTAT_LOCAL_REJECT) {
			tptr = emlxs_iostat_tbl;
			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
				if (iostat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		} else {	/* iostate == IOSTAT_LOCAL_REJECT */

			tptr = emlxs_ioerr_tbl;
			for (i = 0; i < IOERR_MAX; i++, tptr++) {
				if (localstat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		}

		if (entry) {
			pkt->pkt_state = entry->pkt_state;
			pkt->pkt_reason = entry->pkt_reason;
			pkt->pkt_expln = entry->pkt_expln;
			pkt->pkt_action = entry->pkt_action;
		} else {
			/* Set defaults */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_ABORTED;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;
		}

		/* Set the residual counts and response frame */
		/* Check if response frame was received from the chip */
		/* If so, then the residual counts will already be set */
		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
			/* We have to create the response frame */
			if (iostat == IOSTAT_SUCCESS) {
				pkt->pkt_resp_resid = 0;
				pkt->pkt_data_resid = 0;

				/*
				 * Fabricate a minimal good FCP response
				 * (8-byte response length) for SCSI-FCP
				 * packets that supplied a response buffer.
				 */
				if ((pkt->pkt_cmd_fhdr.type ==
				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
				    pkt->pkt_resp) {
					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;

					fcp_rsp->fcp_u.fcp_status.
					    rsp_len_set = 1;
					fcp_rsp->fcp_response_len = 8;
				}
			} else {
				/* Otherwise assume no data */
				/* and no response received */
				pkt->pkt_data_resid = pkt->pkt_datalen;
				pkt->pkt_resp_resid = pkt->pkt_rsplen;
			}
		}
	}

	if (lock) {
		mutex_exit(&sbp->mtx);
	}

	return;

} /* emlxs_set_pkt_state() */


#if (EMLXS_MODREVX == EMLXS_MODREV2X)

/*
 * emlxs_swap_service_params
 *
 * In-place byte swap of a login service-parameter block: the common
 * service parameters are swapped as 16-bit halves (with e_d_tov swapped
 * as a full 32-bit word), followed by the class 1-4 parameter blocks as
 * 16-bit halves.  Used only on little-endian/MODREV2X builds.
 */
extern void
emlxs_swap_service_params(SERV_PARM *sp)
{
	uint16_t *p;
	int size;
	int i;

	/* Skip the last 4 bytes of the CSP; e_d_tov is handled below */
	size = (sizeof (CSP) - 4) / 2;
	p = (uint16_t *)&sp->cmn;
	for (i = 0; i < size; i++) {
		p[i] = LE_SWAP16(p[i]);
	}
	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls1;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls2;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls3;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls4;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	return;

} /* emlxs_swap_service_params() */

/*
 * emlxs_unswap_pkt
 *
 * Undo a prior packet byte swap: dispatch on whichever PACKET_*_SWAPPED
 * flag is set (the swap routines toggle these flags, so calling the same
 * routine again restores the original byte order).
 */
extern void
emlxs_unswap_pkt(emlxs_buf_t *sbp)
{
	if
(sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		emlxs_swap_fcp_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		emlxs_swap_els_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		emlxs_swap_ct_pkt(sbp);
	}

} /* emlxs_unswap_pkt() */


/*
 * emlxs_swap_fcp_pkt
 *
 * In-place byte swap of an FCP command (and, when PACKET_FCP_RSP_VALID,
 * its response) between host and wire byte order.  The PACKET_FCP_SWAPPED
 * flag is toggled under sbp->mtx so a second call reverses the first.
 * Packets marked PACKET_ALLOCATED are never swapped.
 */
extern void
emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	FCP_CMND *cmd;
	fcp_rsp_t *rsp;
	uint16_t *lunp;
	uint32_t i;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swapped flag; the swap below is self-inverse */
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
	} else {
		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (FCP_CMND *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
	    (fcp_rsp_t *)pkt->pkt_resp : NULL;

	/* The size of data buffer needs to be swapped. */
	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);

	/*
	 * Swap first 2 words of FCP CMND payload.
	 */
	lunp = (uint16_t *)&cmd->fcpLunMsl;
	for (i = 0; i < 4; i++) {
		lunp[i] = LE_SWAP16(lunp[i]);
	}

	if (rsp) {
		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
	}

	return;

} /* emlxs_swap_fcp_pkt() */


/*
 * emlxs_swap_els_pkt
 *
 * In-place byte swap of an ELS command (and, when PACKET_ELS_RSP_VALID,
 * its response) between host and wire byte order.  The ELS command code
 * must be read in host order, so word 0 is swapped before or after the
 * code is extracted depending on the current swap direction.  The
 * PACKET_ELS_SWAPPED flag is toggled under sbp->mtx.
 */
extern void
emlxs_swap_els_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint16_t *c;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swapped flag and remember which direction we go */
	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	/* Extract the ELS command code in host byte order */
	if (!swapped) {
		cmd[0] = LE_SWAP32(cmd[0]);
		command = cmd[0] & ELS_CMD_MASK;
	} else {
		command = cmd[0] & ELS_CMD_MASK;
		cmd[0] = LE_SWAP32(cmd[0]);
	}

	if (rsp) {
		rsp[0] = LE_SWAP32(rsp[0]);
	}

	/* Swap the command-specific payload words */
	switch (command) {
	case ELS_CMD_ACC:
		if (sbp->ucmd == ELS_CMD_ADISC) {
			/* Hard address of originator */
			cmd[1] = LE_SWAP32(cmd[1]);

			/* N_Port ID of originator */
			cmd[6] = LE_SWAP32(cmd[6]);
		}
		break;

	case ELS_CMD_PLOGI:
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (rsp) {
			emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
		}
		break;

	case ELS_CMD_LOGO:
		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
		break;

	case ELS_CMD_RLS:
		cmd[1] = LE_SWAP32(cmd[1]);

		if (rsp) {
			/* Six words of link error status block */
			for (i = 0; i < 6; i++) {
				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
			}
		}
		break;

	case ELS_CMD_ADISC:
		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
		break;

	case ELS_CMD_PRLI:
		c = (uint16_t *)&cmd[1];
		c[1] = LE_SWAP16(c[1]);

		cmd[4] = LE_SWAP32(cmd[4]);

		if (rsp) {
			rsp[4] = LE_SWAP32(rsp[4]);
		}
		break;

	case ELS_CMD_SCR:
		cmd[1] = LE_SWAP32(cmd[1]);
		break;

	case ELS_CMD_LINIT:
		if (rsp) {
			rsp[1] = LE_SWAP32(rsp[1]);
		}
		break;

	default:
		break;
	}

	return;

} /* emlxs_swap_els_pkt() */


/*
 * emlxs_swap_ct_pkt
 *
 * In-place byte swap of a CT (common transport) command and, when
 * PACKET_CT_RSP_VALID, its response.  The CT command word must be read
 * in host order, so it is captured before the header swap on the way to
 * the wire and after the header swap on the way back.  The
 * PACKET_CT_SWAPPED flag is toggled under sbp->mtx.
 */
extern void
emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swapped flag and remember which direction we go */
	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_CT_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	if (!swapped) {
		/*
		 * Outbound: force the CT revision word and grab the
		 * command word while still in host order.
		 */
		cmd[0] = 0x01000000;
		command = cmd[2];
	}

	/* Swap the 4-word CT header */
	cmd[0] = LE_SWAP32(cmd[0]);
	cmd[1] = LE_SWAP32(cmd[1]);
	cmd[2] = LE_SWAP32(cmd[2]);
	cmd[3] = LE_SWAP32(cmd[3]);

	if (swapped) {
		/* Inbound: command word is in host order after the swap */
		command = cmd[2];
	}

	/* Swap the command-specific payload words */
	switch ((command >> 16)) {
	case SLI_CTNS_GA_NXT:
		cmd[4] = LE_SWAP32(cmd[4]);
		break;

	case SLI_CTNS_GPN_ID:
	case SLI_CTNS_GNN_ID:
	case SLI_CTNS_RPN_ID:
	case SLI_CTNS_RNN_ID:
	case SLI_CTNS_RSPN_ID:
		cmd[4] = LE_SWAP32(cmd[4]);
		break;

	case SLI_CTNS_RCS_ID:
	case SLI_CTNS_RPT_ID:
		cmd[4] = LE_SWAP32(cmd[4]);
		cmd[5] = LE_SWAP32(cmd[5]);
		break;

	case SLI_CTNS_RFT_ID:
		cmd[4] = LE_SWAP32(cmd[4]);

		/* Swap FC4 types */
		for (i = 0; i < 8; i++) {
			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
		}
		break;

	case SLI_CTNS_GFT_ID:
		if (rsp) {
			/* Swap FC4 types */
			for (i = 0; i < 8; i++) {
				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
			}
		}
		break;

	case SLI_CTNS_GCS_ID:
	case SLI_CTNS_GSPN_ID:
	case SLI_CTNS_GSNN_NN:
	case SLI_CTNS_GIP_NN:
	case SLI_CTNS_GIPA_NN:

	case SLI_CTNS_GPT_ID:
	case SLI_CTNS_GID_NN:
	case SLI_CTNS_GNN_IP:
	case SLI_CTNS_GIPA_IP:
	case SLI_CTNS_GID_FT:
	case SLI_CTNS_GID_PT:
	case SLI_CTNS_GID_PN:
	case SLI_CTNS_RIP_NN:
	case SLI_CTNS_RIPA_NN:
	case SLI_CTNS_RSNN_NN:
	case SLI_CTNS_DA_ID:
	case SLI_CT_RESPONSE_FS_RJT:
	case SLI_CT_RESPONSE_FS_ACC:

	default:
		break;
	}
	return;

} /* emlxs_swap_ct_pkt() */


/*
 * emlxs_swap_els_ub
 *
 * Byte swap an unsolicited ELS buffer in place before handing it to the
 * ULP.  Only RSCN payloads and login service-parameter payloads need
 * swapping here; LOGO/PRLI/PRLO/ADISC and everything else are passed
 * through untouched (the ULP handles them).
 */
extern void
emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
{
	emlxs_ub_priv_t *ub_priv;
	fc_rscn_t *rscn;
	uint32_t count;
	uint32_t i;
	uint32_t *lp;
	la_els_logi_t *logi;

	ub_priv = ubp->ub_fca_private;

	switch (ub_priv->cmd) {
	case ELS_CMD_RSCN:
		rscn = (fc_rscn_t *)ubp->ub_buffer;

		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);

		/* Swap each affected-port-ID page after the 4-byte header */
		count = ((rscn->rscn_payload_len - 4) / 4);
		lp = (uint32_t *)ubp->ub_buffer + 1;
		for (i = 0; i < count; i++, lp++) {
			*lp = LE_SWAP32(*lp);
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_PLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PDISC:
		logi = (la_els_logi_t *)ubp->ub_buffer;
		emlxs_swap_service_params(
		    (SERV_PARM *)&logi->common_service);
		break;

	/* ULP handles this */
	case ELS_CMD_LOGO:
	case ELS_CMD_PRLI:
	case ELS_CMD_PRLO:
	case ELS_CMD_ADISC:
	default:
		break;
	}

	return;

} /* emlxs_swap_els_ub() */


#endif /* EMLXS_MODREV2X */


/*
 * emlxs_elscmd_xlate
 *
 * Translate an ELS command code to its display string.  Unknown codes
 * are formatted into a static buffer, so the fallback result is not
 * reentrant and must be consumed before the next call.
 */
extern char *
emlxs_elscmd_xlate(uint32_t elscmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (elscmd == emlxs_elscmd_table[i].code) {
			return (emlxs_elscmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "ELS=0x%x", elscmd);
	return (buffer);

} /* emlxs_elscmd_xlate() */


/*
 * emlxs_ctcmd_xlate
 *
 * Translate a CT command code to its display string; unknown codes fall
 * back to a formatted static buffer (not reentrant).
 */
extern char *
emlxs_ctcmd_xlate(uint32_t ctcmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof
	    (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (ctcmd == emlxs_ctcmd_table[i].code) {
			return (emlxs_ctcmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
	return (buffer);

} /* emlxs_ctcmd_xlate() */


#ifdef MENLO_SUPPORT
/*
 * emlxs_menlo_cmd_xlate
 *
 * Translate a Menlo command code to its display string; unknown codes
 * fall back to a formatted static buffer (not reentrant).
 */
extern char *
emlxs_menlo_cmd_xlate(uint32_t cmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (cmd == emlxs_menlo_cmd_table[i].code) {
			return (emlxs_menlo_cmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "Cmd=0x%x", cmd);
	return (buffer);

} /* emlxs_menlo_cmd_xlate() */

/*
 * emlxs_menlo_rsp_xlate
 *
 * Translate a Menlo response code to its display string; unknown codes
 * fall back to a formatted static buffer (not reentrant).
 */
extern char *
emlxs_menlo_rsp_xlate(uint32_t rsp)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (rsp == emlxs_menlo_rsp_table[i].code) {
			return (emlxs_menlo_rsp_table[i].string);
		}
	}

	(void) sprintf(buffer, "Rsp=0x%x", rsp);
	return (buffer);

} /* emlxs_menlo_rsp_xlate() */

#endif /* MENLO_SUPPORT */


/*
 * emlxs_rmcmd_xlate
 *
 * Translate a remote-management command code to its display string;
 * unknown codes fall back to a formatted static buffer (not reentrant).
 */
extern char *
emlxs_rmcmd_xlate(uint32_t rmcmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (rmcmd == emlxs_rmcmd_table[i].code) {
			return (emlxs_rmcmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "RM=0x%x", rmcmd);
	return (buffer);

} /* emlxs_rmcmd_xlate() */



/*
 * emlxs_mscmd_xlate
 *
 * Translate a management-server command code to its display string;
 * unknown codes fall back to a formatted static buffer (not reentrant).
 */
extern char *
emlxs_mscmd_xlate(uint16_t mscmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (mscmd == emlxs_mscmd_table[i].code) {
			return (emlxs_mscmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
	return (buffer);

} /* emlxs_mscmd_xlate() */


/*
 * emlxs_state_xlate
 *
 * Translate a driver state code to its display string; unknown codes
 * fall back to a formatted static buffer (not reentrant).
 */
extern char *
emlxs_state_xlate(uint8_t state)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (state == emlxs_state_table[i].code) {
			return (emlxs_state_table[i].string);
		}
	}

	(void) sprintf(buffer, "State=0x%x", state);
	return (buffer);

} /* emlxs_state_xlate() */


/*
 * emlxs_error_xlate
 *
 * Translate a driver error code to its display string; unknown codes
 * fall back to a formatted static buffer (not reentrant).
 */
extern char *
emlxs_error_xlate(uint8_t errno)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (errno == emlxs_error_table[i].code) {
			return (emlxs_error_table[i].string);
		}
	}

	(void) sprintf(buffer, "Errno=0x%x", errno);
	return (buffer);

} /* emlxs_error_xlate() */


/*
 * emlxs_pm_lower_power
 *
 * Lower the adapter power level.  When kernel power management is
 * enabled (CFG_PM_SUPPORT) this goes through pm_lower_power(9F);
 * otherwise the driver's own emlxs_power() entry point is called
 * directly.  Returns the DDI status of whichever path was taken.
 */
static int
emlxs_pm_lower_power(dev_info_t *dip)
{
	int ddiinst;
	int emlxinst;
	emlxs_config_t *cfg;
	int32_t rval;
	emlxs_hba_t *hba;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];
	cfg = &CFG;

	rval = DDI_SUCCESS;

	/* Lower the power level */
	if (cfg[CFG_PM_SUPPORT].current) {
		rval =
		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
		    EMLXS_PM_ADAPTER_DOWN);
	} else {
		/* We do not have kernel support of power management enabled */
		/* therefore, call our power management routine
directly */ 10343 rval = 10344 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN); 10345 } 10346 10347 return (rval); 10348 10349 } /* emlxs_pm_lower_power() */ 10350 10351 10352 static int 10353 emlxs_pm_raise_power(dev_info_t *dip) 10354 { 10355 int ddiinst; 10356 int emlxinst; 10357 emlxs_config_t *cfg; 10358 int32_t rval; 10359 emlxs_hba_t *hba; 10360 10361 ddiinst = ddi_get_instance(dip); 10362 emlxinst = emlxs_get_instance(ddiinst); 10363 hba = emlxs_device.hba[emlxinst]; 10364 cfg = &CFG; 10365 10366 /* Raise the power level */ 10367 if (cfg[CFG_PM_SUPPORT].current) { 10368 rval = 10369 pm_raise_power(dip, EMLXS_PM_ADAPTER, 10370 EMLXS_PM_ADAPTER_UP); 10371 } else { 10372 /* We do not have kernel support of power management enabled */ 10373 /* therefore, call our power management routine directly */ 10374 rval = 10375 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP); 10376 } 10377 10378 return (rval); 10379 10380 } /* emlxs_pm_raise_power() */ 10381 10382 10383 #ifdef IDLE_TIMER 10384 10385 extern int 10386 emlxs_pm_busy_component(emlxs_hba_t *hba) 10387 { 10388 emlxs_config_t *cfg = &CFG; 10389 int rval; 10390 10391 hba->pm_active = 1; 10392 10393 if (hba->pm_busy) { 10394 return (DDI_SUCCESS); 10395 } 10396 10397 mutex_enter(&hba->pm_lock); 10398 10399 if (hba->pm_busy) { 10400 mutex_exit(&hba->pm_lock); 10401 return (DDI_SUCCESS); 10402 } 10403 hba->pm_busy = 1; 10404 10405 mutex_exit(&hba->pm_lock); 10406 10407 /* Attempt to notify system that we are busy */ 10408 if (cfg[CFG_PM_SUPPORT].current) { 10409 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10410 "pm_busy_component."); 10411 10412 rval = pm_busy_component(dip, EMLXS_PM_ADAPTER); 10413 10414 if (rval != DDI_SUCCESS) { 10415 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10416 "pm_busy_component failed. 
ret=%d", rval); 10417 10418 /* If this attempt failed then clear our flags */ 10419 mutex_enter(&hba->pm_lock); 10420 hba->pm_busy = 0; 10421 mutex_exit(&hba->pm_lock); 10422 10423 return (rval); 10424 } 10425 } 10426 10427 return (DDI_SUCCESS); 10428 10429 } /* emlxs_pm_busy_component() */ 10430 10431 10432 extern int 10433 emlxs_pm_idle_component(emlxs_hba_t *hba) 10434 { 10435 emlxs_config_t *cfg = &CFG; 10436 int rval; 10437 10438 if (!hba->pm_busy) { 10439 return (DDI_SUCCESS); 10440 } 10441 10442 mutex_enter(&hba->pm_lock); 10443 10444 if (!hba->pm_busy) { 10445 mutex_exit(&hba->pm_lock); 10446 return (DDI_SUCCESS); 10447 } 10448 hba->pm_busy = 0; 10449 10450 mutex_exit(&hba->pm_lock); 10451 10452 if (cfg[CFG_PM_SUPPORT].current) { 10453 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10454 "pm_idle_component."); 10455 10456 rval = pm_idle_component(dip, EMLXS_PM_ADAPTER); 10457 10458 if (rval != DDI_SUCCESS) { 10459 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 10460 "pm_idle_component failed. 
ret=%d", rval);

			/* If this attempt failed then */
			/* reset our flags for another attempt */
			mutex_enter(&hba->pm_lock);
			hba->pm_busy = 1;
			mutex_exit(&hba->pm_lock);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_idle_component() */


/*
 * emlxs_pm_idle_timer
 *
 * Periodic PM housekeeping driven by the driver timer: while the
 * adapter shows activity the idle deadline keeps sliding forward by
 * CFG_PM_IDLE ticks; once the deadline passes, the adapter is marked
 * idle via emlxs_pm_idle_component() and the deadline is re-armed.
 */
extern void
emlxs_pm_idle_timer(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;

	if (hba->pm_active) {
		/* Clear active flag and reset idle timer */
		mutex_enter(&hba->pm_lock);
		hba->pm_active = 0;
		hba->pm_idle_timer =
		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
		mutex_exit(&hba->pm_lock);
	}

	/* Check for idle timeout */
	else if (hba->timer_tics >= hba->pm_idle_timer) {
		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
			mutex_enter(&hba->pm_lock);
			hba->pm_idle_timer =
			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
			mutex_exit(&hba->pm_lock);
		}
	}

	return;

} /* emlxs_pm_idle_timer() */

#endif /* IDLE_TIMER */


/*
 * emlxs_read_vport_prop
 *
 * Parse the "<driver><inst>-vport" (or global "vport") .conf property
 * and pre-configure virtual ports.  Each property entry has the form
 *
 *	<pwwpn>:<wwnn>:<wwpn>:<vpi>
 *
 * where the three names are 16 hex digits each.  Entries are skipped
 * (not fatal) when they contain bad hex digits, do not match this
 * adapter's physical WWPN, use an out-of-range or duplicate VPI, or
 * target an already configured port.  Matching entries have their
 * WWNN/WWPN recorded and are flagged EMLXS_PORT_CONFIG|EMLXS_PORT_ENABLE
 * (plus EMLXS_PORT_RESTRICTED when CFG_VPORT_RESTRICTED is set).
 * A malformed delimiter or VPI aborts all further parsing (goto out).
 */
static void
emlxs_read_vport_prop(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	char **arrayp;
	uint8_t *s;
	uint8_t *np;
	NAME_TYPE pwwpn;
	NAME_TYPE wwnn;
	NAME_TYPE wwpn;
	uint32_t vpi;
	uint32_t cnt;
	uint32_t rval;
	uint32_t i;
	uint32_t j;
	uint32_t c1;
	uint32_t sum;
	uint32_t errors;
	char buffer[64];

	/* Check for the per adapter vport setting */
	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
	cnt = 0;
	arrayp = NULL;
	rval =
	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		/* Check for the global vport setting */
		cnt = 0;
		arrayp = NULL;
		rval =
		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
	}

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		return;
	}

	for (i = 0; i < cnt; i++) {
		errors = 0;
		s = (uint8_t *)arrayp[i];

		if (!s) {
			break;
		}

		/* Parse the physical-port WWPN (two hex digits per byte) */
		np = (uint8_t *)&pwwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after PWWPN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport WWNN */
		np = (uint8_t *)&wwnn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWNN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport WWPN */
		np = (uint8_t *)&wwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);

				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);

				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWPN. "
			    "entry=%d", i);

			goto out;
		}

		/* Parse the decimal VPI */
		sum = 0;
		do {
			c1 = *s++;
			if ((c1 < '0') || (c1 > '9')) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid VPI found. "
				    "entry=%d c=%c vpi=%d", i, c1, sum);

				goto out;
			}

			sum = (sum * 10) + (c1 - '0');

		} while (*s != 0);

		vpi = sum;

		if (errors) {
			continue;
		}

		/* Entry has been read */

		/* Check if the physical port wwpn */
		/* matches our physical port wwpn */
		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
			continue;
		}

		/* Check vpi range */
		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
			continue;
		}

		/* Check if port has already been configured */
		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
			continue;
		}

		/* Set the highest configured vpi */
		if (vpi > hba->vpi_high) {
			hba->vpi_high = vpi;
		}

		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
		    sizeof (NAME_TYPE));

		/*
		 * NOTE(review): strncpy(.., 256) does not guarantee NUL
		 * termination if hba->snn fills its buffer, and the sprintf
		 * below is unbounded relative to the spn buffer size —
		 * verify the sizes of port[].snn/spn vs hba->snn/spn.
		 */
		if (hba->port[vpi].snn[0] == 0) {
			(void) strncpy((caddr_t)hba->port[vpi].snn,
			    (caddr_t)hba->snn, 256);
		}

		if (hba->port[vpi].spn[0] == 0) {
			(void) sprintf((caddr_t)hba->port[vpi].spn,
			    "%s VPort-%d",
			    (caddr_t)hba->spn, vpi);
		}

		hba->port[vpi].flag |=
		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);

		if (cfg[CFG_VPORT_RESTRICTED].current) {
			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
		}
	}

out:

	(void) ddi_prop_free((void *) arrayp);
	return;

} /* emlxs_read_vport_prop() */


/*
 * emlxs_wwn_xlate
 *
 * Format an 8-byte world wide name as 16 lowercase hex digits into the
 * caller-supplied buffer (which must hold at least 17 bytes) and return
 * that buffer for convenience.
 */
extern char *
emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
{
	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);

	return (buffer);

} /* emlxs_wwn_xlate() */


/* This is called at port online and offline */
extern void
emlxs_ub_flush(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	emlxs_ub_priv_t *next;

	/* Return if nothing to do */
	if (!port->ub_wait_head) {
		return;
	}

	/* Detach the entire wait list under the port lock */
	mutex_enter(&EMLXS_PORT_LOCK);
	ub_priv = port->ub_wait_head;
	port->ub_wait_head = NULL;
	port->ub_wait_tail = NULL;
	mutex_exit(&EMLXS_PORT_LOCK);

	while (ub_priv) {
		next = ub_priv->next;
		ubp = ub_priv->ubp;

		/* Check if ULP is online and we have a callback function */
		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
		    port->ulp_unsol_cb) {
			/* Send ULP the ub buffer */
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {	/* Drop the buffer */

			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		ub_priv = next;

	}	/* while () */

	return;

} /* 
emlxs_ub_flush() */


/*
 * emlxs_ub_callback
 *
 * Deliver an unsolicited buffer to the ULP.  If the ULP is online the
 * buffer is handed to its unsolicited callback (or released when no
 * callback is registered).  If the ULP is offline but the link is up,
 * the buffer is parked on the port's wait queue for emlxs_ub_flush()
 * to deliver later; otherwise it is released immediately.
 */
extern void
emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;

	ub_priv = ubp->ub_fca_private;

	/* Check if ULP is online */
	if (port->ulp_statec != FC_STATE_OFFLINE) {
		if (port->ulp_unsol_cb) {
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		return;
	} else {	/* ULP offline */

		if (hba->state >= FC_LINK_UP) {
			/*
			 * Add buffer to queue tail.
			 *
			 * NOTE(review): ub_priv->next is not cleared here
			 * before linking; this assumes 'next' is NULL for a
			 * buffer not already on the list — verify against
			 * the release/alloc paths.
			 */
			mutex_enter(&EMLXS_PORT_LOCK);

			if (port->ub_wait_tail) {
				port->ub_wait_tail->next = ub_priv;
			}
			port->ub_wait_tail = ub_priv;

			if (!port->ub_wait_head) {
				port->ub_wait_head = ub_priv;
			}

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}
	}

	return;

} /* emlxs_ub_callback() */


/*
 * emlxs_integrity_check
 *
 * Compile-environment sanity check: verify that the sizes of the
 * SLI/mailbox structures shared with the adapter firmware match their
 * expected on-chip layouts.  A warning is logged for each mismatch and
 * the total number of mismatches is returned (0 == everything OK).
 */
static uint32_t
emlxs_integrity_check(emlxs_hba_t *hba)
{
	uint32_t size;
	uint32_t errors = 0;
	int ddiinst = hba->ddiinst;

	size = 16;
	if (sizeof (ULP_BDL) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));

		errors++;
	}
	size = 8;
	if (sizeof (ULP_BDE) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));

		errors++;
	}
	size = 12;
	if (sizeof (ULP_BDE64) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));

		errors++;
	}
	size = 16;
	if (sizeof (HBQE_t) != size) {
		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));

		errors++;
	}
	size = 8;
	if (sizeof (HGP) != size) {
		cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));

		errors++;
	}
	if (sizeof (PGP) != size) {
		cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));

		errors++;
	}
	size = 4;
	if (sizeof (WORD5) != size) {
		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));

		errors++;
	}
	size = 124;
	if (sizeof (MAILVARIANTS) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
		    "%d != 124", DRIVER_NAME, ddiinst,
		    (int)sizeof (MAILVARIANTS));

		errors++;
	}
	size = 128;
	if (sizeof (SLI1_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));

		errors++;
	}
	if (sizeof (SLI2_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));

		errors++;
	}
	size = MBOX_SIZE;
	if (sizeof (MAILBOX) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);

		errors++;
	}
	size = PCB_SIZE;
	if (sizeof (PCB) != size) {
		cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);

		errors++;
	}
	size = 260;
	if (sizeof (ATTRIBUTE_ENTRY) != size) {
		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. 
" 10963 "%d != 260", DRIVER_NAME, ddiinst, 10964 (int)sizeof (ATTRIBUTE_ENTRY)); 10965 10966 errors++; 10967 } 10968 size = SLI_SLIM1_SIZE; 10969 if (sizeof (SLIM1) != size) { 10970 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d", 10971 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE); 10972 10973 errors++; 10974 } 10975 size = SLI3_IOCB_CMD_SIZE; 10976 if (sizeof (IOCB) != size) { 10977 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d", 10978 DRIVER_NAME, ddiinst, (int)sizeof (IOCB), 10979 SLI3_IOCB_CMD_SIZE); 10980 10981 errors++; 10982 } 10983 10984 size = SLI_SLIM2_SIZE; 10985 if (sizeof (SLIM2) != size) { 10986 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d", 10987 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2), 10988 SLI_SLIM2_SIZE); 10989 10990 errors++; 10991 } 10992 return (errors); 10993 10994 } /* emlxs_integrity_check() */ 10995 10996 10997 #ifdef FMA_SUPPORT 10998 /* 10999 * FMA support 11000 */ 11001 11002 extern void 11003 emlxs_fm_init(emlxs_hba_t *hba) 11004 { 11005 ddi_iblock_cookie_t iblk; 11006 11007 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 11008 return; 11009 } 11010 11011 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 11012 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 11013 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 11014 } else { 11015 emlxs_dev_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 11016 emlxs_data_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 11017 } 11018 11019 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) { 11020 hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 11021 hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR; 11022 hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR; 11023 hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR; 11024 } else { 11025 hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 11026 hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR; 11027 hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR; 11028 hba->dma_attr_fcip_rsp.dma_attr_flags &= 
~DDI_DMA_FLAGERR; 11029 } 11030 11031 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk); 11032 11033 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 11034 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11035 pci_ereport_setup(hba->dip); 11036 } 11037 11038 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11039 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb, 11040 (void *)hba); 11041 } 11042 11043 } /* emlxs_fm_init() */ 11044 11045 11046 extern void 11047 emlxs_fm_fini(emlxs_hba_t *hba) 11048 { 11049 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 11050 return; 11051 } 11052 11053 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 11054 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11055 pci_ereport_teardown(hba->dip); 11056 } 11057 11058 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11059 ddi_fm_handler_unregister(hba->dip); 11060 } 11061 11062 (void) ddi_fm_fini(hba->dip); 11063 11064 } /* emlxs_fm_fini() */ 11065 11066 11067 extern int 11068 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle) 11069 { 11070 ddi_fm_error_t err; 11071 11072 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 11073 return (DDI_FM_OK); 11074 } 11075 11076 /* Some S10 versions do not define the ahi_err structure */ 11077 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) { 11078 return (DDI_FM_OK); 11079 } 11080 11081 err.fme_status = DDI_FM_OK; 11082 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION); 11083 11084 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */ 11085 if ((void *)&ddi_fm_acc_err_clear != NULL) { 11086 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 11087 } 11088 11089 return (err.fme_status); 11090 11091 } /* emlxs_fm_check_acc_handle() */ 11092 11093 11094 extern int 11095 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle) 11096 { 11097 ddi_fm_error_t err; 11098 11099 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 11100 return (DDI_FM_OK); 11101 } 11102 11103 err.fme_status = DDI_FM_OK; 11104 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION); 11105 11106 return (err.fme_status); 
11107 11108 } /* emlxs_fm_check_dma_handle() */ 11109 11110 11111 extern void 11112 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail) 11113 { 11114 uint64_t ena; 11115 char buf[FM_MAX_CLASS]; 11116 11117 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 11118 return; 11119 } 11120 11121 if (detail == NULL) { 11122 return; 11123 } 11124 11125 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 11126 ena = fm_ena_generate(0, FM_ENA_FMT1); 11127 11128 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP, 11129 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 11130 11131 } /* emlxs_fm_ereport() */ 11132 11133 11134 extern void 11135 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact) 11136 { 11137 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 11138 return; 11139 } 11140 11141 if (impact == NULL) { 11142 return; 11143 } 11144 11145 if ((hba->pm_state & EMLXS_PM_IN_DETACH) && 11146 (impact == DDI_SERVICE_DEGRADED)) { 11147 impact = DDI_SERVICE_UNAFFECTED; 11148 } 11149 11150 ddi_fm_service_impact(hba->dip, impact); 11151 11152 } /* emlxs_fm_service_impact() */ 11153 11154 11155 /* 11156 * The I/O fault service error handling callback function 11157 */ 11158 /*ARGSUSED*/ 11159 extern int 11160 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 11161 const void *impl_data) 11162 { 11163 /* 11164 * as the driver can always deal with an error 11165 * in any dma or access handle, we can just return 11166 * the fme_status value. 11167 */ 11168 pci_ereport_post(dip, err, NULL); 11169 return (err->fme_status); 11170 11171 } /* emlxs_fm_error_cb() */ 11172 #endif /* FMA_SUPPORT */ 11173 11174 11175 extern void 11176 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size) 11177 { 11178 uint32_t word; 11179 uint32_t *wptr; 11180 uint32_t i; 11181 11182 wptr = (uint32_t *)buffer; 11183 11184 size += (size%4)? 
(4-(size%4)):0; 11185 for (i = 0; i < size / 4; i++) { 11186 word = *wptr; 11187 *wptr++ = SWAP32(word); 11188 } 11189 11190 return; 11191 11192 } /* emlxs_swap32_buffer() */ 11193 11194 11195 extern void 11196 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size) 11197 { 11198 uint32_t word; 11199 uint32_t *sptr; 11200 uint32_t *dptr; 11201 uint32_t i; 11202 11203 sptr = (uint32_t *)src; 11204 dptr = (uint32_t *)dst; 11205 11206 size += (size%4)? (4-(size%4)):0; 11207 for (i = 0; i < size / 4; i++) { 11208 word = *sptr++; 11209 *dptr++ = SWAP32(word); 11210 } 11211 11212 return; 11213 11214 } /* emlxs_swap32_buffer() */ 11215