1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Emulex. All rights reserved. 24 * Use is subject to License terms. 25 */ 26 27 #define DEF_ICFG 1 28 29 #include <emlxs.h> 30 #include <emlxs_version.h> 31 32 char emlxs_revision[] = EMLXS_REVISION; 33 char emlxs_version[] = EMLXS_VERSION; 34 char emlxs_name[] = EMLXS_NAME; 35 char emlxs_label[] = EMLXS_LABEL; 36 37 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 38 EMLXS_MSG_DEF(EMLXS_SOLARIS_C); 39 40 #ifdef MENLO_SUPPORT 41 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp); 42 #endif /* MENLO_SUPPORT */ 43 44 static void emlxs_fca_attach(emlxs_hba_t *hba); 45 static void emlxs_fca_detach(emlxs_hba_t *hba); 46 static void emlxs_drv_banner(emlxs_hba_t *hba); 47 48 static int32_t emlxs_get_props(emlxs_hba_t *hba); 49 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp); 50 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp); 51 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp); 52 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp); 53 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t 
*sbp); 54 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 55 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp); 56 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 57 static uint32_t emlxs_add_instance(int32_t ddiinst); 58 static void emlxs_iodone(emlxs_buf_t *sbp); 59 static int emlxs_pm_lower_power(dev_info_t *dip); 60 static int emlxs_pm_raise_power(dev_info_t *dip); 61 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, 62 uint32_t failed); 63 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3); 64 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba); 65 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code, 66 uint32_t args, uint32_t *arg); 67 68 #ifdef SLI3_SUPPORT 69 static void emlxs_read_vport_prop(emlxs_hba_t *hba); 70 #endif /* SLI3_SUPPORT */ 71 72 73 /* 74 * Driver Entry Routines. 75 */ 76 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t); 77 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t); 78 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *); 79 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *); 80 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t, 81 cred_t *, int32_t *); 82 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **); 83 84 85 /* 86 * FC_AL Transport Functions. 
87 */ 88 static opaque_t emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *, 89 fc_fca_bind_info_t *); 90 static void emlxs_unbind_port(opaque_t); 91 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *); 92 static int32_t emlxs_get_cap(opaque_t, char *, void *); 93 static int32_t emlxs_set_cap(opaque_t, char *, void *); 94 static int32_t emlxs_get_map(opaque_t, fc_lilpmap_t *); 95 static int32_t emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t, 96 uint32_t *, uint32_t); 97 static int32_t emlxs_ub_free(opaque_t, uint32_t, uint64_t *); 98 99 static opaque_t emlxs_get_device(opaque_t, fc_portid_t); 100 static int32_t emlxs_notify(opaque_t, uint32_t); 101 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *); 102 103 /* 104 * Driver Internal Functions. 105 */ 106 107 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *); 108 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t); 109 #ifdef EMLXS_I386 110 #ifdef S11 111 static int32_t emlxs_quiesce(dev_info_t *); 112 #endif 113 #endif 114 static int32_t emlxs_hba_resume(dev_info_t *); 115 static int32_t emlxs_hba_suspend(dev_info_t *); 116 static int32_t emlxs_hba_detach(dev_info_t *); 117 static int32_t emlxs_hba_attach(dev_info_t *); 118 static void emlxs_lock_destroy(emlxs_hba_t *); 119 static void emlxs_lock_init(emlxs_hba_t *); 120 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *, 121 uint32_t, uint8_t); 122 123 char *emlxs_pm_components[] = { 124 "NAME=emlxx000", 125 "0=Device D3 State", 126 "1=Device D0 State" 127 }; 128 129 130 /* 131 * Default emlx dma limits 132 */ 133 ddi_dma_lim_t emlxs_dma_lim = { 134 (uint32_t)0, /* dlim_addr_lo */ 135 (uint32_t)0xffffffff, /* dlim_addr_hi */ 136 (uint_t)0x00ffffff, /* dlim_cntr_max */ 137 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */ 138 1, /* dlim_minxfer */ 139 0x00ffffff /* dlim_dmaspeed */ 140 }; 141 142 /* 143 * Be careful when using these attributes; the defaults listed below are 144 * (almost) the most general 
case, permitting allocation in almost any 145 * way supported by the LightPulse family. The sole exception is the 146 * alignment specified as requiring memory allocation on a 4-byte boundary; 147 * the Lightpulse can DMA memory on any byte boundary. 148 * 149 * The LightPulse family currently is limited to 16M transfers; 150 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields. 151 */ 152 ddi_dma_attr_t emlxs_dma_attr = { 153 DMA_ATTR_V0, /* dma_attr_version */ 154 (uint64_t)0, /* dma_attr_addr_lo */ 155 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 156 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 157 1, /* dma_attr_align */ 158 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 159 1, /* dma_attr_minxfer */ 160 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 161 (uint64_t)0xffffffff, /* dma_attr_seg */ 162 EMLXS_SGLLEN, /* dma_attr_sgllen */ 163 1, /* dma_attr_granular */ 164 0 /* dma_attr_flags */ 165 }; 166 167 ddi_dma_attr_t emlxs_dma_attr_ro = { 168 DMA_ATTR_V0, /* dma_attr_version */ 169 (uint64_t)0, /* dma_attr_addr_lo */ 170 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 171 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 172 1, /* dma_attr_align */ 173 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 174 1, /* dma_attr_minxfer */ 175 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 176 (uint64_t)0xffffffff, /* dma_attr_seg */ 177 EMLXS_SGLLEN, /* dma_attr_sgllen */ 178 1, /* dma_attr_granular */ 179 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */ 180 }; 181 182 ddi_dma_attr_t emlxs_dma_attr_1sg = { 183 DMA_ATTR_V0, /* dma_attr_version */ 184 (uint64_t)0, /* dma_attr_addr_lo */ 185 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 186 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 187 1, /* dma_attr_align */ 188 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 189 1, /* dma_attr_minxfer */ 190 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 191 (uint64_t)0xffffffff, /* dma_attr_seg 
*/ 192 1, /* dma_attr_sgllen */ 193 1, /* dma_attr_granular */ 194 0 /* dma_attr_flags */ 195 }; 196 197 #if (EMLXS_MODREV >= EMLXS_MODREV3) 198 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = { 199 DMA_ATTR_V0, /* dma_attr_version */ 200 (uint64_t)0, /* dma_attr_addr_lo */ 201 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */ 202 (uint64_t)0x00ffffff, /* dma_attr_count_max */ 203 1, /* dma_attr_align */ 204 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */ 205 1, /* dma_attr_minxfer */ 206 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */ 207 (uint64_t)0xffffffff, /* dma_attr_seg */ 208 EMLXS_SGLLEN, /* dma_attr_sgllen */ 209 1, /* dma_attr_granular */ 210 0 /* dma_attr_flags */ 211 }; 212 #endif /* >= EMLXS_MODREV3 */ 213 214 /* 215 * DDI access attributes for device 216 */ 217 ddi_device_acc_attr_t emlxs_dev_acc_attr = { 218 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */ 219 DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */ 220 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */ 221 DDI_DEFAULT_ACC /* devacc_attr_access */ 222 }; 223 224 /* 225 * DDI access attributes for data 226 */ 227 ddi_device_acc_attr_t emlxs_data_acc_attr = { 228 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */ 229 DDI_NEVERSWAP_ACC, /* don't swap for Data */ 230 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */ 231 DDI_DEFAULT_ACC /* devacc_attr_access */ 232 }; 233 234 /* 235 * Fill in the FC Transport structure, 236 * as defined in the Fibre Channel Transport Programmming Guide. 
237 */ 238 #if (EMLXS_MODREV == EMLXS_MODREV5) 239 static fc_fca_tran_t emlxs_fca_tran = { 240 FCTL_FCA_MODREV_5, /* fca_version, with SUN NPIV support */ 241 MAX_VPORTS, /* fca numerb of ports */ 242 sizeof (emlxs_buf_t), /* fca pkt size */ 243 2048, /* fca cmd max */ 244 &emlxs_dma_lim, /* fca dma limits */ 245 0, /* fca iblock, to be filled in later */ 246 &emlxs_dma_attr, /* fca dma attributes */ 247 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */ 248 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */ 249 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */ 250 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */ 251 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */ 252 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */ 253 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */ 254 &emlxs_data_acc_attr, /* fca access atributes */ 255 0, /* fca_num_npivports */ 256 {0, 0, 0, 0, 0, 0, 0, 0}, /* Physical port WWPN */ 257 emlxs_bind_port, 258 emlxs_unbind_port, 259 emlxs_pkt_init, 260 emlxs_pkt_uninit, 261 emlxs_transport, 262 emlxs_get_cap, 263 emlxs_set_cap, 264 emlxs_get_map, 265 emlxs_transport, 266 emlxs_ub_alloc, 267 emlxs_ub_free, 268 emlxs_ub_release, 269 emlxs_pkt_abort, 270 emlxs_reset, 271 emlxs_port_manage, 272 emlxs_get_device, 273 emlxs_notify 274 }; 275 #endif /* EMLXS_MODREV5 */ 276 277 278 #if (EMLXS_MODREV == EMLXS_MODREV4) 279 static fc_fca_tran_t emlxs_fca_tran = { 280 FCTL_FCA_MODREV_4, /* fca_version */ 281 MAX_VPORTS, /* fca numerb of ports */ 282 sizeof (emlxs_buf_t), /* fca pkt size */ 283 2048, /* fca cmd max */ 284 &emlxs_dma_lim, /* fca dma limits */ 285 0, /* fca iblock, to be filled in later */ 286 &emlxs_dma_attr, /* fca dma attributes */ 287 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */ 288 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */ 289 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */ 290 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */ 291 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip 
rsp attributes */ 292 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */ 293 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */ 294 &emlxs_data_acc_attr, /* fca access atributes */ 295 emlxs_bind_port, 296 emlxs_unbind_port, 297 emlxs_pkt_init, 298 emlxs_pkt_uninit, 299 emlxs_transport, 300 emlxs_get_cap, 301 emlxs_set_cap, 302 emlxs_get_map, 303 emlxs_transport, 304 emlxs_ub_alloc, 305 emlxs_ub_free, 306 emlxs_ub_release, 307 emlxs_pkt_abort, 308 emlxs_reset, 309 emlxs_port_manage, 310 emlxs_get_device, 311 emlxs_notify 312 }; 313 #endif /* EMLXS_MODEREV4 */ 314 315 316 #if (EMLXS_MODREV == EMLXS_MODREV3) 317 static fc_fca_tran_t emlxs_fca_tran = { 318 FCTL_FCA_MODREV_3, /* fca_version */ 319 MAX_VPORTS, /* fca numerb of ports */ 320 sizeof (emlxs_buf_t), /* fca pkt size */ 321 2048, /* fca cmd max */ 322 &emlxs_dma_lim, /* fca dma limits */ 323 0, /* fca iblock, to be filled in later */ 324 &emlxs_dma_attr, /* fca dma attributes */ 325 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */ 326 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */ 327 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */ 328 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */ 329 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */ 330 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */ 331 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */ 332 &emlxs_data_acc_attr, /* fca access atributes */ 333 emlxs_bind_port, 334 emlxs_unbind_port, 335 emlxs_pkt_init, 336 emlxs_pkt_uninit, 337 emlxs_transport, 338 emlxs_get_cap, 339 emlxs_set_cap, 340 emlxs_get_map, 341 emlxs_transport, 342 emlxs_ub_alloc, 343 emlxs_ub_free, 344 emlxs_ub_release, 345 emlxs_pkt_abort, 346 emlxs_reset, 347 emlxs_port_manage, 348 emlxs_get_device, 349 emlxs_notify 350 }; 351 #endif /* EMLXS_MODREV3 */ 352 353 354 #if (EMLXS_MODREV == EMLXS_MODREV2) 355 static fc_fca_tran_t emlxs_fca_tran = { 356 FCTL_FCA_MODREV_2, /* fca_version */ 357 MAX_VPORTS, /* number of ports */ 358 sizeof (emlxs_buf_t), /* 
pkt size */ 359 2048, /* max cmds */ 360 &emlxs_dma_lim, /* DMA limits */ 361 0, /* iblock, to be filled in later */ 362 &emlxs_dma_attr, /* dma attributes */ 363 &emlxs_data_acc_attr, /* access atributes */ 364 emlxs_bind_port, 365 emlxs_unbind_port, 366 emlxs_pkt_init, 367 emlxs_pkt_uninit, 368 emlxs_transport, 369 emlxs_get_cap, 370 emlxs_set_cap, 371 emlxs_get_map, 372 emlxs_transport, 373 emlxs_ub_alloc, 374 emlxs_ub_free, 375 emlxs_ub_release, 376 emlxs_pkt_abort, 377 emlxs_reset, 378 emlxs_port_manage, 379 emlxs_get_device, 380 emlxs_notify 381 }; 382 #endif /* EMLXS_MODREV2 */ 383 384 /* 385 * This is needed when the module gets loaded by the kernel 386 * so ddi library calls get resolved. 387 */ 388 #ifndef MODSYM_SUPPORT 389 char _depends_on[] = "misc/fctl"; 390 #endif /* MODSYM_SUPPORT */ 391 392 /* 393 * state pointer which the implementation uses as a place to 394 * hang a set of per-driver structures; 395 * 396 */ 397 void *emlxs_soft_state = NULL; 398 399 /* 400 * Driver Global variables. 401 */ 402 int32_t emlxs_scsi_reset_delay = 3000; /* milliseconds */ 403 404 emlxs_device_t emlxs_device; 405 406 uint32_t emlxs_instance[MAX_FC_BRDS]; /* uses emlxs_device.lock */ 407 uint32_t emlxs_instance_count = 0; /* uses emlxs_device.lock */ 408 409 410 /* 411 * Single private "global" lock used to gain access to 412 * the hba_list and/or any other case where we want need to be 413 * single-threaded. 414 */ 415 uint32_t emlxs_diag_state; 416 417 /* 418 * CB ops vector. Used for administration only. 
419 */ 420 static struct cb_ops emlxs_cb_ops = { 421 emlxs_open, /* cb_open */ 422 emlxs_close, /* cb_close */ 423 nodev, /* cb_strategy */ 424 nodev, /* cb_print */ 425 nodev, /* cb_dump */ 426 nodev, /* cb_read */ 427 nodev, /* cb_write */ 428 emlxs_ioctl, /* cb_ioctl */ 429 nodev, /* cb_devmap */ 430 nodev, /* cb_mmap */ 431 nodev, /* cb_segmap */ 432 nochpoll, /* cb_chpoll */ 433 ddi_prop_op, /* cb_prop_op */ 434 0, /* cb_stream */ 435 #ifdef _LP64 436 D_64BIT | D_HOTPLUG | D_MP | D_NEW, /* cb_flag */ 437 #else 438 D_HOTPLUG | D_MP | D_NEW, /* cb_flag */ 439 #endif 440 CB_REV, /* rev */ 441 nodev, /* cb_aread */ 442 nodev /* cb_awrite */ 443 }; 444 445 /* Generic bus ops */ 446 static struct bus_ops emlxs_bus_ops = { 447 BUSO_REV, 448 nullbusmap, /* bus_map */ 449 NULL, /* bus_get_intrspec */ 450 NULL, /* bus_add_intrspec */ 451 NULL, /* bus_remove_intrspec */ 452 i_ddi_map_fault, /* bus_map_fault */ 453 ddi_dma_map, /* bus_dma_map */ 454 ddi_dma_allochdl, /* bus_dma_allochdl */ 455 ddi_dma_freehdl, /* bus_dma_freehdl */ 456 ddi_dma_bindhdl, /* bus_dma_bindhdl */ 457 ddi_dma_unbindhdl, /* bus_unbindhdl */ 458 ddi_dma_flush, /* bus_dma_flush */ 459 ddi_dma_win, /* bus_dma_win */ 460 ddi_dma_mctl, /* bus_dma_ctl */ 461 ddi_ctlops, /* bus_ctl */ 462 ddi_bus_prop_op, /* bus_prop_op */ 463 }; 464 465 static struct dev_ops emlxs_ops = { 466 DEVO_REV, /* rev */ 467 0, /* refcnt */ 468 emlxs_info, /* getinfo */ 469 nulldev, /* identify */ 470 nulldev, /* probe */ 471 emlxs_attach, /* attach */ 472 emlxs_detach, /* detach */ 473 nodev, /* reset */ 474 &emlxs_cb_ops, /* devo_cb_ops */ 475 &emlxs_bus_ops, /* bus ops - Gets replaced by */ 476 /* fctl_fca_busops in fc_fca_init */ 477 emlxs_power, /* power ops */ 478 #ifdef EMLXS_I386 479 #ifdef S11 480 emlxs_quiesce, /* quiesce */ 481 #endif 482 #endif 483 }; 484 485 #include <sys/modctl.h> 486 extern struct mod_ops mod_driverops; 487 488 #ifdef SAN_DIAG_SUPPORT 489 extern kmutex_t sd_bucket_mutex; 490 extern 
sd_bucket_info_t sd_bucket; 491 #endif /* SAN_DIAG_SUPPORT */ 492 493 /* 494 * Module linkage information for the kernel. 495 */ 496 static struct modldrv emlxs_modldrv = { 497 &mod_driverops, /* module type - driver */ 498 emlxs_name, /* module name */ 499 &emlxs_ops, /* driver ops */ 500 }; 501 502 503 /* 504 * Driver module linkage structure 505 */ 506 static struct modlinkage emlxs_modlinkage = { 507 MODREV_1, /* ml_rev - must be MODREV_1 */ 508 &emlxs_modldrv, /* ml_linkage */ 509 NULL /* end of driver linkage */ 510 }; 511 512 513 /* We only need to add entries for non-default return codes. */ 514 /* Entries do not need to be in order. */ 515 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 516 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */ 517 518 emlxs_xlat_err_t emlxs_iostat_tbl[] = { 519 /* {f/w code, pkt_state, pkt_reason, */ 520 /* pkt_expln, pkt_action} */ 521 522 /* 0x00 - Do not remove */ 523 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE, 524 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 525 526 /* 0x01 - Do not remove */ 527 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE, 528 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 529 530 /* 0x02 */ 531 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS, 532 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE}, 533 534 /* 535 * This is a default entry. 
536 * The real codes are written dynamically in emlxs_els.c 537 */ 538 /* 0x09 */ 539 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE, 540 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 541 542 /* Special error code */ 543 /* 0x10 */ 544 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 545 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 546 547 /* Special error code */ 548 /* 0x11 */ 549 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, 550 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 551 552 /* CLASS 2 only */ 553 /* 0x04 */ 554 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR, 555 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 556 557 /* CLASS 2 only */ 558 /* 0x05 */ 559 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR, 560 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 561 562 /* CLASS 2 only */ 563 /* 0x06 */ 564 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY, 565 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 566 567 /* CLASS 2 only */ 568 /* 0x07 */ 569 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY, 570 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY}, 571 }; 572 573 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t)) 574 575 576 /* We only need to add entries for non-default return codes. */ 577 /* Entries do not need to be in order. 
*/ 578 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 579 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */ 580 581 emlxs_xlat_err_t emlxs_ioerr_tbl[] = { 582 /* {f/w code, pkt_state, pkt_reason, */ 583 /* pkt_expln, pkt_action} */ 584 585 /* 0x01 */ 586 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 587 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 588 589 /* 0x02 */ 590 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 591 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 592 593 /* 0x04 */ 594 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 595 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 596 597 /* 0x05 */ 598 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 599 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 600 601 /* 0x06 */ 602 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 603 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 604 605 /* 0x07 */ 606 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 607 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 608 609 /* 0x08 */ 610 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 611 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 612 613 /* 0x0B */ 614 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 615 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 616 617 /* 0x0D */ 618 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 619 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 620 621 /* 0x0E */ 622 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 623 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 624 625 /* 0x0F */ 626 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME, 627 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 628 629 /* 0x11 */ 630 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 631 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 632 633 /* 0x13 */ 634 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH, 635 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 636 637 /* 0x14 */ 638 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED, 639 FC_EXPLN_NONE, 
FC_ACTION_RETRYABLE}, 640 641 /* 0x15 */ 642 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 643 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 644 645 /* 0x16 */ 646 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 647 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 648 649 /* 0x17 */ 650 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT, 651 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 652 653 /* 0x18 */ 654 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL, 655 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 656 657 /* 0x1A */ 658 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 659 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 660 661 /* 0x21 */ 662 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID, 663 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 664 665 /* Occurs at link down */ 666 /* 0x28 */ 667 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 668 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 669 670 /* 0xF0 */ 671 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 672 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 673 }; 674 675 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t)) 676 677 678 679 emlxs_table_t emlxs_error_table[] = { 680 {IOERR_SUCCESS, "No error."}, 681 {IOERR_MISSING_CONTINUE, "Missing continue."}, 682 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."}, 683 {IOERR_INTERNAL_ERROR, "Internal error."}, 684 {IOERR_INVALID_RPI, "Invalid RPI."}, 685 {IOERR_NO_XRI, "No XRI."}, 686 {IOERR_ILLEGAL_COMMAND, "Illegal command."}, 687 {IOERR_XCHG_DROPPED, "Exchange dropped."}, 688 {IOERR_ILLEGAL_FIELD, "Illegal field."}, 689 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."}, 690 {IOERR_TX_DMA_FAILED, "TX DMA failed."}, 691 {IOERR_RX_DMA_FAILED, "RX DMA failed."}, 692 {IOERR_ILLEGAL_FRAME, "Illegal frame."}, 693 {IOERR_NO_RESOURCES, "No resources."}, 694 {IOERR_ILLEGAL_LENGTH, "Illegal length."}, 695 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."}, 696 {IOERR_ABORT_IN_PROGRESS, "Abort in progess."}, 
697 {IOERR_ABORT_REQUESTED, "Abort requested."}, 698 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."}, 699 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."}, 700 {IOERR_RING_RESET, "Ring reset."}, 701 {IOERR_LINK_DOWN, "Link down."}, 702 {IOERR_CORRUPTED_DATA, "Corrupted data."}, 703 {IOERR_CORRUPTED_RPI, "Corrupted RPI."}, 704 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."}, 705 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."}, 706 {IOERR_DUP_FRAME, "Duplicate frame."}, 707 {IOERR_LINK_CONTROL_FRAME, "Link control frame."}, 708 {IOERR_BAD_HOST_ADDRESS, "Bad host address."}, 709 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."}, 710 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."}, 711 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."}, 712 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."}, 713 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."}, 714 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"}, 715 {IOERR_XRIBUF_MISSING, "XRI buffer missing"}, 716 {IOERR_ROFFSET_INVAL, "Relative offset invalid."}, 717 {IOERR_ROFFSET_MISSING, "Relative offset missing."}, 718 {IOERR_INSUF_BUFFER, "Buffer too small."}, 719 {IOERR_MISSING_SI, "ELS frame missing SI"}, 720 {IOERR_MISSING_ES, "Exhausted burst without ES"}, 721 {IOERR_INCOMP_XFER, "Transfer incomplete."}, 722 {IOERR_ABORT_TIMEOUT, "Abort timeout."} 723 724 }; /* emlxs_error_table */ 725 726 727 emlxs_table_t emlxs_state_table[] = { 728 {IOSTAT_SUCCESS, "Success."}, 729 {IOSTAT_FCP_RSP_ERROR, "FCP response error."}, 730 {IOSTAT_REMOTE_STOP, "Remote stop."}, 731 {IOSTAT_LOCAL_REJECT, "Local reject."}, 732 {IOSTAT_NPORT_RJT, "NPort reject."}, 733 {IOSTAT_FABRIC_RJT, "Fabric reject."}, 734 {IOSTAT_NPORT_BSY, "Nport busy."}, 735 {IOSTAT_FABRIC_BSY, "Fabric busy."}, 736 {IOSTAT_INTERMED_RSP, "Intermediate response."}, 737 {IOSTAT_LS_RJT, "LS reject."}, 738 {IOSTAT_CMD_REJECT, "Cmd reject."}, 739 {IOSTAT_FCP_TGT_LENCHK, "TGT length check."}, 740 {IOSTAT_NEED_BUF_ENTRY, "Need buffer entry."}, 741 
{IOSTAT_ILLEGAL_FRAME_RCVD, "Illegal frame."}, 742 {IOSTAT_DATA_UNDERRUN, "Data underrun."}, 743 {IOSTAT_DATA_OVERRUN, "Data overrun."}, 744 745 }; /* emlxs_state_table */ 746 747 748 #ifdef MENLO_SUPPORT 749 emlxs_table_t emlxs_menlo_cmd_table[] = { 750 {MENLO_CMD_INITIALIZE, "MENLO_INIT"}, 751 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"}, 752 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"}, 753 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"}, 754 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"}, 755 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"}, 756 757 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"}, 758 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"}, 759 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"}, 760 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"}, 761 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"}, 762 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"}, 763 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"}, 764 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"}, 765 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"}, 766 767 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"}, 768 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"}, 769 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"}, 770 771 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"}, 772 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"}, 773 774 {MENLO_CMD_RESET, "MENLO_RESET"}, 775 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"} 776 777 }; /* emlxs_menlo_cmd_table */ 778 779 emlxs_table_t emlxs_menlo_rsp_table[] = { 780 {MENLO_RSP_SUCCESS, "SUCCESS"}, 781 {MENLO_ERR_FAILED, "FAILED"}, 782 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"}, 783 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"}, 784 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"}, 785 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"}, 786 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"}, 787 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"}, 788 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"}, 789 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"}, 790 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"}, 791 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"}, 792 
{MENLO_ERR_INVALID_MASK, "INVALID_MASK"}, 793 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"}, 794 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"}, 795 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"}, 796 {MENLO_ERR_BUSY, "BUSY"}, 797 798 }; /* emlxs_menlo_rsp_table */ 799 800 #endif /* MENLO_SUPPORT */ 801 802 803 emlxs_table_t emlxs_mscmd_table[] = { 804 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 805 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 806 {MS_GTIN, "MS_GTIN"}, 807 {MS_GIEL, "MS_GIEL"}, 808 {MS_GIET, "MS_GIET"}, 809 {MS_GDID, "MS_GDID"}, 810 {MS_GMID, "MS_GMID"}, 811 {MS_GFN, "MS_GFN"}, 812 {MS_GIELN, "MS_GIELN"}, 813 {MS_GMAL, "MS_GMAL"}, 814 {MS_GIEIL, "MS_GIEIL"}, 815 {MS_GPL, "MS_GPL"}, 816 {MS_GPT, "MS_GPT"}, 817 {MS_GPPN, "MS_GPPN"}, 818 {MS_GAPNL, "MS_GAPNL"}, 819 {MS_GPS, "MS_GPS"}, 820 {MS_GPSC, "MS_GPSC"}, 821 {MS_GATIN, "MS_GATIN"}, 822 {MS_GSES, "MS_GSES"}, 823 {MS_GPLNL, "MS_GPLNL"}, 824 {MS_GPLT, "MS_GPLT"}, 825 {MS_GPLML, "MS_GPLML"}, 826 {MS_GPAB, "MS_GPAB"}, 827 {MS_GNPL, "MS_GNPL"}, 828 {MS_GPNL, "MS_GPNL"}, 829 {MS_GPFCP, "MS_GPFCP"}, 830 {MS_GPLI, "MS_GPLI"}, 831 {MS_GNID, "MS_GNID"}, 832 {MS_RIELN, "MS_RIELN"}, 833 {MS_RPL, "MS_RPL"}, 834 {MS_RPLN, "MS_RPLN"}, 835 {MS_RPLT, "MS_RPLT"}, 836 {MS_RPLM, "MS_RPLM"}, 837 {MS_RPAB, "MS_RPAB"}, 838 {MS_RPFCP, "MS_RPFCP"}, 839 {MS_RPLI, "MS_RPLI"}, 840 {MS_DPL, "MS_DPL"}, 841 {MS_DPLN, "MS_DPLN"}, 842 {MS_DPLM, "MS_DPLM"}, 843 {MS_DPLML, "MS_DPLML"}, 844 {MS_DPLI, "MS_DPLI"}, 845 {MS_DPAB, "MS_DPAB"}, 846 {MS_DPALL, "MS_DPALL"} 847 848 }; /* emlxs_mscmd_table */ 849 850 851 emlxs_table_t emlxs_ctcmd_table[] = { 852 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 853 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 854 {SLI_CTNS_GA_NXT, "GA_NXT"}, 855 {SLI_CTNS_GPN_ID, "GPN_ID"}, 856 {SLI_CTNS_GNN_ID, "GNN_ID"}, 857 {SLI_CTNS_GCS_ID, "GCS_ID"}, 858 {SLI_CTNS_GFT_ID, "GFT_ID"}, 859 {SLI_CTNS_GSPN_ID, "GSPN_ID"}, 860 {SLI_CTNS_GPT_ID, "GPT_ID"}, 861 {SLI_CTNS_GID_PN, "GID_PN"}, 862 {SLI_CTNS_GID_NN, "GID_NN"}, 863 {SLI_CTNS_GIP_NN, "GIP_NN"}, 864 
{SLI_CTNS_GIPA_NN, "GIPA_NN"}, 865 {SLI_CTNS_GSNN_NN, "GSNN_NN"}, 866 {SLI_CTNS_GNN_IP, "GNN_IP"}, 867 {SLI_CTNS_GIPA_IP, "GIPA_IP"}, 868 {SLI_CTNS_GID_FT, "GID_FT"}, 869 {SLI_CTNS_GID_PT, "GID_PT"}, 870 {SLI_CTNS_RPN_ID, "RPN_ID"}, 871 {SLI_CTNS_RNN_ID, "RNN_ID"}, 872 {SLI_CTNS_RCS_ID, "RCS_ID"}, 873 {SLI_CTNS_RFT_ID, "RFT_ID"}, 874 {SLI_CTNS_RSPN_ID, "RSPN_ID"}, 875 {SLI_CTNS_RPT_ID, "RPT_ID"}, 876 {SLI_CTNS_RIP_NN, "RIP_NN"}, 877 {SLI_CTNS_RIPA_NN, "RIPA_NN"}, 878 {SLI_CTNS_RSNN_NN, "RSNN_NN"}, 879 {SLI_CTNS_DA_ID, "DA_ID"}, 880 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 881 882 }; /* emlxs_ctcmd_table */ 883 884 885 886 emlxs_table_t emlxs_rmcmd_table[] = { 887 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 888 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 889 {CT_OP_GSAT, "RM_GSAT"}, 890 {CT_OP_GHAT, "RM_GHAT"}, 891 {CT_OP_GPAT, "RM_GPAT"}, 892 {CT_OP_GDAT, "RM_GDAT"}, 893 {CT_OP_GPST, "RM_GPST"}, 894 {CT_OP_GDP, "RM_GDP"}, 895 {CT_OP_GDPG, "RM_GDPG"}, 896 {CT_OP_GEPS, "RM_GEPS"}, 897 {CT_OP_GLAT, "RM_GLAT"}, 898 {CT_OP_SSAT, "RM_SSAT"}, 899 {CT_OP_SHAT, "RM_SHAT"}, 900 {CT_OP_SPAT, "RM_SPAT"}, 901 {CT_OP_SDAT, "RM_SDAT"}, 902 {CT_OP_SDP, "RM_SDP"}, 903 {CT_OP_SBBS, "RM_SBBS"}, 904 {CT_OP_RPST, "RM_RPST"}, 905 {CT_OP_VFW, "RM_VFW"}, 906 {CT_OP_DFW, "RM_DFW"}, 907 {CT_OP_RES, "RM_RES"}, 908 {CT_OP_RHD, "RM_RHD"}, 909 {CT_OP_UFW, "RM_UFW"}, 910 {CT_OP_RDP, "RM_RDP"}, 911 {CT_OP_GHDR, "RM_GHDR"}, 912 {CT_OP_CHD, "RM_CHD"}, 913 {CT_OP_SSR, "RM_SSR"}, 914 {CT_OP_RSAT, "RM_RSAT"}, 915 {CT_OP_WSAT, "RM_WSAT"}, 916 {CT_OP_RSAH, "RM_RSAH"}, 917 {CT_OP_WSAH, "RM_WSAH"}, 918 {CT_OP_RACT, "RM_RACT"}, 919 {CT_OP_WACT, "RM_WACT"}, 920 {CT_OP_RKT, "RM_RKT"}, 921 {CT_OP_WKT, "RM_WKT"}, 922 {CT_OP_SSC, "RM_SSC"}, 923 {CT_OP_QHBA, "RM_QHBA"}, 924 {CT_OP_GST, "RM_GST"}, 925 {CT_OP_GFTM, "RM_GFTM"}, 926 {CT_OP_SRL, "RM_SRL"}, 927 {CT_OP_SI, "RM_SI"}, 928 {CT_OP_SRC, "RM_SRC"}, 929 {CT_OP_GPB, "RM_GPB"}, 930 {CT_OP_SPB, "RM_SPB"}, 931 {CT_OP_RPB, "RM_RPB"}, 932 {CT_OP_RAPB, "RM_RAPB"}, 933 
{CT_OP_GBC, "RM_GBC"}, 934 {CT_OP_GBS, "RM_GBS"}, 935 {CT_OP_SBS, "RM_SBS"}, 936 {CT_OP_GANI, "RM_GANI"}, 937 {CT_OP_GRV, "RM_GRV"}, 938 {CT_OP_GAPBS, "RM_GAPBS"}, 939 {CT_OP_APBC, "RM_APBC"}, 940 {CT_OP_GDT, "RM_GDT"}, 941 {CT_OP_GDLMI, "RM_GDLMI"}, 942 {CT_OP_GANA, "RM_GANA"}, 943 {CT_OP_GDLV, "RM_GDLV"}, 944 {CT_OP_GWUP, "RM_GWUP"}, 945 {CT_OP_GLM, "RM_GLM"}, 946 {CT_OP_GABS, "RM_GABS"}, 947 {CT_OP_SABS, "RM_SABS"}, 948 {CT_OP_RPR, "RM_RPR"}, 949 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 950 951 }; /* emlxs_rmcmd_table */ 952 953 954 emlxs_table_t emlxs_elscmd_table[] = { 955 {ELS_CMD_ACC, "ACC"}, 956 {ELS_CMD_LS_RJT, "LS_RJT"}, 957 {ELS_CMD_PLOGI, "PLOGI"}, 958 {ELS_CMD_FLOGI, "FLOGI"}, 959 {ELS_CMD_LOGO, "LOGO"}, 960 {ELS_CMD_ABTX, "ABTX"}, 961 {ELS_CMD_RCS, "RCS"}, 962 {ELS_CMD_RES, "RES"}, 963 {ELS_CMD_RSS, "RSS"}, 964 {ELS_CMD_RSI, "RSI"}, 965 {ELS_CMD_ESTS, "ESTS"}, 966 {ELS_CMD_ESTC, "ESTC"}, 967 {ELS_CMD_ADVC, "ADVC"}, 968 {ELS_CMD_RTV, "RTV"}, 969 {ELS_CMD_RLS, "RLS"}, 970 {ELS_CMD_ECHO, "ECHO"}, 971 {ELS_CMD_TEST, "TEST"}, 972 {ELS_CMD_RRQ, "RRQ"}, 973 {ELS_CMD_PRLI, "PRLI"}, 974 {ELS_CMD_PRLO, "PRLO"}, 975 {ELS_CMD_SCN, "SCN"}, 976 {ELS_CMD_TPLS, "TPLS"}, 977 {ELS_CMD_GPRLO, "GPRLO"}, 978 {ELS_CMD_GAID, "GAID"}, 979 {ELS_CMD_FACT, "FACT"}, 980 {ELS_CMD_FDACT, "FDACT"}, 981 {ELS_CMD_NACT, "NACT"}, 982 {ELS_CMD_NDACT, "NDACT"}, 983 {ELS_CMD_QoSR, "QoSR"}, 984 {ELS_CMD_RVCS, "RVCS"}, 985 {ELS_CMD_PDISC, "PDISC"}, 986 {ELS_CMD_FDISC, "FDISC"}, 987 {ELS_CMD_ADISC, "ADISC"}, 988 {ELS_CMD_FARP, "FARP"}, 989 {ELS_CMD_FARPR, "FARPR"}, 990 {ELS_CMD_FAN, "FAN"}, 991 {ELS_CMD_RSCN, "RSCN"}, 992 {ELS_CMD_SCR, "SCR"}, 993 {ELS_CMD_LINIT, "LINIT"}, 994 {ELS_CMD_RNID, "RNID"}, 995 {ELS_CMD_AUTH, "AUTH"} 996 997 }; /* emlxs_elscmd_table */ 998 999 1000 /* 1001 * 1002 * Device Driver Entry Routines 1003 * 1004 */ 1005 1006 #ifdef MODSYM_SUPPORT 1007 static void emlxs_fca_modclose(); 1008 static int emlxs_fca_modopen(); 1009 emlxs_modsym_t emlxs_modsym; 1010 
1011 static int 1012 emlxs_fca_modopen() 1013 { 1014 int err; 1015 1016 if (emlxs_modsym.mod_fctl) { 1017 return (EEXIST); 1018 } 1019 1020 /* Leadville (fctl) */ 1021 err = 0; 1022 emlxs_modsym.mod_fctl = 1023 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err); 1024 if (!emlxs_modsym.mod_fctl) { 1025 cmn_err(CE_WARN, 1026 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d", 1027 DRIVER_NAME, err); 1028 1029 goto failed; 1030 } 1031 1032 err = 0; 1033 /* Check if the fctl fc_fca_attach is present */ 1034 emlxs_modsym.fc_fca_attach = 1035 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach", 1036 &err); 1037 if ((void *)emlxs_modsym.fc_fca_attach == NULL) { 1038 cmn_err(CE_WARN, 1039 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME); 1040 goto failed; 1041 } 1042 1043 err = 0; 1044 /* Check if the fctl fc_fca_detach is present */ 1045 emlxs_modsym.fc_fca_detach = 1046 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach", 1047 &err); 1048 if ((void *)emlxs_modsym.fc_fca_detach == NULL) { 1049 cmn_err(CE_WARN, 1050 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME); 1051 goto failed; 1052 } 1053 1054 err = 0; 1055 /* Check if the fctl fc_fca_init is present */ 1056 emlxs_modsym.fc_fca_init = 1057 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err); 1058 if ((void *)emlxs_modsym.fc_fca_init == NULL) { 1059 cmn_err(CE_WARN, 1060 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME); 1061 goto failed; 1062 } 1063 1064 return (0); 1065 1066 failed: 1067 1068 emlxs_fca_modclose(); 1069 1070 return (ENODEV); 1071 1072 1073 } /* emlxs_fca_modopen() */ 1074 1075 1076 static void 1077 emlxs_fca_modclose() 1078 { 1079 if (emlxs_modsym.mod_fctl) { 1080 (void) ddi_modclose(emlxs_modsym.mod_fctl); 1081 emlxs_modsym.mod_fctl = 0; 1082 } 1083 1084 emlxs_modsym.fc_fca_attach = NULL; 1085 emlxs_modsym.fc_fca_detach = NULL; 1086 emlxs_modsym.fc_fca_init = NULL; 1087 1088 return; 1089 1090 } /* emlxs_fca_modclose() */ 1091 1092 #endif /* 
MODSYM_SUPPORT */ 1093 1094 1095 1096 /* 1097 * Global driver initialization, called once when driver is loaded 1098 */ 1099 int 1100 _init(void) 1101 { 1102 int ret; 1103 char buf[64]; 1104 1105 /* 1106 * First init call for this driver, 1107 * so initialize the emlxs_dev_ctl structure. 1108 */ 1109 bzero(&emlxs_device, sizeof (emlxs_device)); 1110 1111 #ifdef MODSYM_SUPPORT 1112 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t)); 1113 #endif /* MODSYM_SUPPORT */ 1114 1115 (void) sprintf(buf, "%s_device mutex", DRIVER_NAME); 1116 mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL); 1117 1118 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp); 1119 emlxs_device.drv_timestamp = ddi_get_time(); 1120 1121 for (ret = 0; ret < MAX_FC_BRDS; ret++) { 1122 emlxs_instance[ret] = (uint32_t)-1; 1123 } 1124 1125 /* 1126 * Provide for one ddiinst of the emlxs_dev_ctl structure 1127 * for each possible board in the system. 1128 */ 1129 if ((ret = ddi_soft_state_init(&emlxs_soft_state, 1130 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) { 1131 cmn_err(CE_WARN, 1132 "?%s: _init: ddi_soft_state_init failed. rval=%x", 1133 DRIVER_NAME, ret); 1134 1135 return (ret); 1136 } 1137 1138 if ((ret = mod_install(&emlxs_modlinkage)) != 0) { 1139 (void) ddi_soft_state_fini(&emlxs_soft_state); 1140 } 1141 1142 #ifdef SAN_DIAG_SUPPORT 1143 (void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME); 1144 mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL); 1145 #endif /* SAN_DIAG_SUPPORT */ 1146 1147 return (ret); 1148 1149 } /* _init() */ 1150 1151 1152 /* 1153 * Called when driver is unloaded. 
1154 */ 1155 int 1156 _fini(void) 1157 { 1158 int ret; 1159 1160 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) { 1161 return (ret); 1162 } 1163 #ifdef MODSYM_SUPPORT 1164 /* Close SFS */ 1165 emlxs_fca_modclose(); 1166 #ifdef SFCT_SUPPORT 1167 /* Close FCT */ 1168 emlxs_fct_modclose(); 1169 #endif /* SFCT_SUPPORT */ 1170 #endif /* MODSYM_SUPPORT */ 1171 1172 /* 1173 * Destroy the soft state structure 1174 */ 1175 (void) ddi_soft_state_fini(&emlxs_soft_state); 1176 1177 /* Destroy the global device lock */ 1178 mutex_destroy(&emlxs_device.lock); 1179 1180 #ifdef SAN_DIAG_SUPPORT 1181 mutex_destroy(&sd_bucket_mutex); 1182 #endif /* SAN_DIAG_SUPPORT */ 1183 1184 return (ret); 1185 1186 } /* _fini() */ 1187 1188 1189 1190 int 1191 _info(struct modinfo *modinfop) 1192 { 1193 1194 return (mod_info(&emlxs_modlinkage, modinfop)); 1195 1196 } /* _info() */ 1197 1198 1199 /* 1200 * Attach an ddiinst of an emlx host adapter. 1201 * Allocate data structures, initialize the adapter and we're ready to fly. 
1202 */ 1203 static int 1204 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 1205 { 1206 emlxs_hba_t *hba; 1207 int ddiinst; 1208 int emlxinst; 1209 int rval; 1210 1211 switch (cmd) { 1212 case DDI_ATTACH: 1213 /* If successful this will set EMLXS_PM_IN_ATTACH */ 1214 rval = emlxs_hba_attach(dip); 1215 break; 1216 1217 case DDI_PM_RESUME: 1218 /* This will resume the driver */ 1219 rval = emlxs_pm_raise_power(dip); 1220 break; 1221 1222 case DDI_RESUME: 1223 /* This will resume the driver */ 1224 rval = emlxs_hba_resume(dip); 1225 break; 1226 1227 default: 1228 rval = DDI_FAILURE; 1229 } 1230 1231 if (rval == DDI_SUCCESS) { 1232 ddiinst = ddi_get_instance(dip); 1233 emlxinst = emlxs_get_instance(ddiinst); 1234 hba = emlxs_device.hba[emlxinst]; 1235 1236 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) { 1237 1238 /* Enable driver dump feature */ 1239 mutex_enter(&EMLXS_PORT_LOCK); 1240 hba->flag |= FC_DUMP_SAFE; 1241 mutex_exit(&EMLXS_PORT_LOCK); 1242 } 1243 } 1244 1245 return (rval); 1246 1247 } /* emlxs_attach() */ 1248 1249 1250 /* 1251 * Detach/prepare driver to unload (see detach(9E)). 
1252 */ 1253 static int 1254 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1255 { 1256 emlxs_hba_t *hba; 1257 emlxs_port_t *port; 1258 int ddiinst; 1259 int emlxinst; 1260 int rval; 1261 1262 ddiinst = ddi_get_instance(dip); 1263 emlxinst = emlxs_get_instance(ddiinst); 1264 hba = emlxs_device.hba[emlxinst]; 1265 1266 if (hba == NULL) { 1267 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME); 1268 1269 return (DDI_FAILURE); 1270 } 1271 1272 if (hba == (emlxs_hba_t *)-1) { 1273 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.", 1274 DRIVER_NAME); 1275 1276 return (DDI_FAILURE); 1277 } 1278 1279 port = &PPORT; 1280 rval = DDI_SUCCESS; 1281 1282 /* Check driver dump */ 1283 mutex_enter(&EMLXS_PORT_LOCK); 1284 1285 if (hba->flag & FC_DUMP_ACTIVE) { 1286 mutex_exit(&EMLXS_PORT_LOCK); 1287 1288 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1289 "emlxs_detach: Driver busy. Driver dump active."); 1290 1291 return (DDI_FAILURE); 1292 } 1293 1294 hba->flag &= ~FC_DUMP_SAFE; 1295 mutex_exit(&EMLXS_PORT_LOCK); 1296 1297 switch (cmd) { 1298 case DDI_DETACH: 1299 1300 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1301 "DDI_DETACH"); 1302 1303 rval = emlxs_hba_detach(dip); 1304 1305 if (rval != DDI_SUCCESS) { 1306 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1307 "Unable to detach."); 1308 } 1309 break; 1310 1311 1312 case DDI_PM_SUSPEND: 1313 1314 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1315 "DDI_PM_SUSPEND"); 1316 1317 /* This will suspend the driver */ 1318 rval = emlxs_pm_lower_power(dip); 1319 1320 if (rval != DDI_SUCCESS) { 1321 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1322 "Unable to lower power."); 1323 } 1324 1325 break; 1326 1327 1328 case DDI_SUSPEND: 1329 1330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, 1331 "DDI_SUSPEND"); 1332 1333 /* Suspend the driver */ 1334 rval = emlxs_hba_suspend(dip); 1335 1336 if (rval != DDI_SUCCESS) { 1337 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 1338 "Unable to 
suspend driver."); 1339 } 1340 break; 1341 1342 1343 default: 1344 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x", 1345 DRIVER_NAME, cmd); 1346 rval = DDI_FAILURE; 1347 } 1348 1349 if (rval == DDI_FAILURE) { 1350 /* Re-Enable driver dump feature */ 1351 mutex_enter(&EMLXS_PORT_LOCK); 1352 hba->flag |= FC_DUMP_SAFE; 1353 mutex_exit(&EMLXS_PORT_LOCK); 1354 } 1355 1356 return (rval); 1357 1358 } /* emlxs_detach() */ 1359 1360 1361 /* EMLXS_PORT_LOCK must be held when calling this */ 1362 extern void 1363 emlxs_port_init(emlxs_port_t *port) 1364 { 1365 emlxs_hba_t *hba = HBA; 1366 1367 /* Initialize the base node */ 1368 bzero((caddr_t)&port->node_base, sizeof (NODELIST)); 1369 port->node_base.nlp_Rpi = 0; 1370 port->node_base.nlp_DID = 0xffffff; 1371 port->node_base.nlp_list_next = NULL; 1372 port->node_base.nlp_list_prev = NULL; 1373 port->node_base.nlp_active = 1; 1374 port->node_base.nlp_base = 1; 1375 port->node_count = 0; 1376 1377 if (!(port->flag & EMLXS_PORT_ENABLE)) { 1378 uint8_t dummy_wwn[8] = 1379 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 1380 1381 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn, 1382 sizeof (NAME_TYPE)); 1383 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn, 1384 sizeof (NAME_TYPE)); 1385 } 1386 1387 if (!(port->flag & EMLXS_PORT_CONFIG)) { 1388 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256); 1389 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256); 1390 } 1391 1392 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam, 1393 sizeof (SERV_PARM)); 1394 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName, 1395 sizeof (NAME_TYPE)); 1396 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName, 1397 sizeof (NAME_TYPE)); 1398 1399 return; 1400 1401 } /* emlxs_port_init() */ 1402 1403 1404 1405 /* 1406 * emlxs_bind_port 1407 * 1408 * Arguments: 1409 * 1410 * dip: the dev_info pointer for the ddiinst 1411 * port_info: pointer to info handed back to the transport 1412 * bind_info: pointer to info 
from the transport 1413 * 1414 * Return values: a port handle for this port, NULL for failure 1415 * 1416 */ 1417 static opaque_t 1418 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info, 1419 fc_fca_bind_info_t *bind_info) 1420 { 1421 emlxs_hba_t *hba; 1422 emlxs_port_t *port; 1423 emlxs_port_t *vport; 1424 int ddiinst; 1425 emlxs_vpd_t *vpd; 1426 emlxs_config_t *cfg; 1427 char *dptr; 1428 char buffer[16]; 1429 uint32_t length; 1430 uint32_t len; 1431 char topology[32]; 1432 char linkspeed[32]; 1433 1434 ddiinst = ddi_get_instance(dip); 1435 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 1436 port = &PPORT; 1437 1438 ddiinst = hba->ddiinst; 1439 vpd = &VPD; 1440 cfg = &CFG; 1441 1442 mutex_enter(&EMLXS_PORT_LOCK); 1443 1444 if (bind_info->port_num > 0) { 1445 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1446 if (!(hba->flag & FC_NPIV_ENABLED) || 1447 !(bind_info->port_npiv) || 1448 (bind_info->port_num > hba->vpi_max)) 1449 #elif (EMLXS_MODREV >= EMLXS_MODREV3) 1450 if (!(hba->flag & FC_NPIV_ENABLED) || 1451 (bind_info->port_num > hba->vpi_high)) 1452 #endif 1453 { 1454 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1455 "emlxs_port_bind: Port %d not supported.", 1456 bind_info->port_num); 1457 1458 mutex_exit(&EMLXS_PORT_LOCK); 1459 1460 port_info->pi_error = FC_OUTOFBOUNDS; 1461 return (NULL); 1462 } 1463 } 1464 1465 /* Get true port pointer */ 1466 port = &VPORT(bind_info->port_num); 1467 1468 if (port->tgt_mode) { 1469 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1470 "emlxs_port_bind: Port %d is in target mode.", 1471 bind_info->port_num); 1472 1473 mutex_exit(&EMLXS_PORT_LOCK); 1474 1475 port_info->pi_error = FC_OUTOFBOUNDS; 1476 return (NULL); 1477 } 1478 1479 if (!port->ini_mode) { 1480 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1481 "emlxs_port_bind: Port %d is not in initiator mode.", 1482 bind_info->port_num); 1483 1484 mutex_exit(&EMLXS_PORT_LOCK); 1485 1486 port_info->pi_error = FC_OUTOFBOUNDS; 1487 return (NULL); 1488 } 1489 1490 /* 
Make sure the port is not already bound to the transport */ 1491 if (port->flag & EMLXS_PORT_BOUND) { 1492 1493 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1494 "emlxs_port_bind: Port %d already bound. flag=%x", 1495 bind_info->port_num, port->flag); 1496 1497 mutex_exit(&EMLXS_PORT_LOCK); 1498 1499 port_info->pi_error = FC_ALREADY; 1500 return (NULL); 1501 } 1502 1503 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1504 "fca_bind_port: Port %d: port_info=%p bind_info=%p", 1505 bind_info->port_num, port_info, bind_info); 1506 1507 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1508 if (bind_info->port_npiv) { 1509 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn, 1510 sizeof (NAME_TYPE)); 1511 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn, 1512 sizeof (NAME_TYPE)); 1513 if (port->snn[0] == 0) { 1514 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 1515 256); 1516 } 1517 1518 if (port->spn[0] == 0) { 1519 (void) sprintf((caddr_t)port->spn, "%s VPort-%d", 1520 (caddr_t)hba->spn, port->vpi); 1521 } 1522 port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 1523 } 1524 #endif /* >= EMLXS_MODREV5 */ 1525 1526 /* 1527 * Restricted login should apply both physical and 1528 * virtual ports. 
1529 */ 1530 if (cfg[CFG_VPORT_RESTRICTED].current) { 1531 port->flag |= EMLXS_PORT_RESTRICTED; 1532 } 1533 1534 /* Perform generic port initialization */ 1535 emlxs_port_init(port); 1536 1537 /* Perform SFS specific initialization */ 1538 port->ulp_handle = bind_info->port_handle; 1539 port->ulp_statec_cb = bind_info->port_statec_cb; 1540 port->ulp_unsol_cb = bind_info->port_unsol_cb; 1541 port->ub_count = EMLXS_UB_TOKEN_OFFSET; 1542 port->ub_pool = NULL; 1543 1544 /* Update the port info structure */ 1545 1546 /* Set the topology and state */ 1547 if ((hba->state < FC_LINK_UP) || 1548 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) || 1549 !(hba->flag & FC_NPIV_SUPPORTED)))) { 1550 port_info->pi_port_state = FC_STATE_OFFLINE; 1551 port_info->pi_topology = FC_TOP_UNKNOWN; 1552 } 1553 #ifdef MENLO_SUPPORT 1554 else if (hba->flag & FC_MENLO_MODE) { 1555 port_info->pi_port_state = FC_STATE_OFFLINE; 1556 port_info->pi_topology = FC_TOP_UNKNOWN; 1557 } 1558 #endif /* MENLO_SUPPORT */ 1559 else { 1560 /* Check for loop topology */ 1561 if (hba->topology == TOPOLOGY_LOOP) { 1562 port_info->pi_port_state = FC_STATE_LOOP; 1563 (void) strcpy(topology, ", loop"); 1564 1565 if (hba->flag & FC_FABRIC_ATTACHED) { 1566 port_info->pi_topology = FC_TOP_PUBLIC_LOOP; 1567 } else { 1568 port_info->pi_topology = FC_TOP_PRIVATE_LOOP; 1569 } 1570 } else { 1571 port_info->pi_topology = FC_TOP_FABRIC; 1572 port_info->pi_port_state = FC_STATE_ONLINE; 1573 (void) strcpy(topology, ", fabric"); 1574 } 1575 1576 /* Set the link speed */ 1577 switch (hba->linkspeed) { 1578 case 0: 1579 (void) strcpy(linkspeed, "Gb"); 1580 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1581 break; 1582 1583 case LA_1GHZ_LINK: 1584 (void) strcpy(linkspeed, "1Gb"); 1585 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1586 break; 1587 case LA_2GHZ_LINK: 1588 (void) strcpy(linkspeed, "2Gb"); 1589 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED; 1590 break; 1591 case LA_4GHZ_LINK: 1592 (void) 
strcpy(linkspeed, "4Gb"); 1593 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED; 1594 break; 1595 case LA_8GHZ_LINK: 1596 (void) strcpy(linkspeed, "8Gb"); 1597 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED; 1598 break; 1599 case LA_10GHZ_LINK: 1600 (void) strcpy(linkspeed, "10Gb"); 1601 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED; 1602 break; 1603 default: 1604 (void) sprintf(linkspeed, "unknown(0x%x)", 1605 hba->linkspeed); 1606 break; 1607 } 1608 1609 /* Adjusting port context for link up messages */ 1610 vport = port; 1611 port = &PPORT; 1612 if (vport->vpi == 0) { 1613 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s", 1614 linkspeed, topology); 1615 } else if (!(hba->flag & FC_NPIV_LINKUP)) { 1616 hba->flag |= FC_NPIV_LINKUP; 1617 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg, 1618 "%s%s", linkspeed, topology); 1619 } 1620 port = vport; 1621 1622 } 1623 1624 /* Save initial state */ 1625 port->ulp_statec = port_info->pi_port_state; 1626 1627 /* 1628 * The transport needs a copy of the common service parameters 1629 * for this port. The transport can get any updates through 1630 * the getcap entry point. 1631 */ 1632 bcopy((void *) &port->sparam, 1633 (void *) &port_info->pi_login_params.common_service, 1634 sizeof (SERV_PARM)); 1635 1636 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 1637 /* Swap the service parameters for ULP */ 1638 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params. 1639 common_service); 1640 #endif /* EMLXS_MODREV2X */ 1641 1642 port_info->pi_login_params.common_service.btob_credit = 0xffff; 1643 1644 bcopy((void *) &port->wwnn, 1645 (void *) &port_info->pi_login_params.node_ww_name, 1646 sizeof (NAME_TYPE)); 1647 1648 bcopy((void *) &port->wwpn, 1649 (void *) &port_info->pi_login_params.nport_ww_name, 1650 sizeof (NAME_TYPE)); 1651 1652 /* 1653 * We need to turn off CLASS2 support. 1654 * Otherwise, FC transport will use CLASS2 as default class 1655 * and never try with CLASS3. 
1656 */ 1657 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1658 #if (EMLXS_MODREVX >= EMLXS_MODREV3X) 1659 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) { 1660 port_info->pi_login_params.class_1.class_opt &= ~0x0080; 1661 } 1662 1663 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) { 1664 port_info->pi_login_params.class_2.class_opt &= ~0x0080; 1665 } 1666 #else /* EMLXS_SPARC or EMLXS_MODREV2X */ 1667 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) { 1668 port_info->pi_login_params.class_1.class_opt &= ~0x8000; 1669 } 1670 1671 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) { 1672 port_info->pi_login_params.class_2.class_opt &= ~0x8000; 1673 } 1674 #endif /* >= EMLXS_MODREV3X */ 1675 #endif /* >= EMLXS_MODREV3 */ 1676 1677 1678 #if (EMLXS_MODREV <= EMLXS_MODREV2) 1679 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) { 1680 port_info->pi_login_params.class_1.data[0] &= ~0x80; 1681 } 1682 1683 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) { 1684 port_info->pi_login_params.class_2.data[0] &= ~0x80; 1685 } 1686 #endif /* <= EMLXS_MODREV2 */ 1687 1688 /* Additional parameters */ 1689 port_info->pi_s_id.port_id = port->did; 1690 port_info->pi_s_id.priv_lilp_posit = 0; 1691 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current; 1692 1693 /* Initialize the RNID parameters */ 1694 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params)); 1695 1696 (void) sprintf((char *)port_info->pi_rnid_params.params.global_id, 1697 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType, 1698 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 1699 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 1700 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 1701 1702 port_info->pi_rnid_params.params.unit_type = RNID_HBA; 1703 port_info->pi_rnid_params.params.port_id = port->did; 1704 port_info->pi_rnid_params.params.ip_version = RNID_IPV4; 1705 1706 /* Initialize the port attributes */ 1707 
bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs)); 1708 1709 (void) strcpy(port_info->pi_attrs.manufacturer, "Emulex"); 1710 1711 port_info->pi_rnid_params.status = FC_SUCCESS; 1712 1713 (void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num); 1714 1715 (void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)", 1716 vpd->fw_version, vpd->fw_label); 1717 1718 #ifdef EMLXS_I386 1719 (void) sprintf(port_info->pi_attrs.option_rom_version, 1720 "Boot:%s", vpd->boot_version); 1721 #else /* EMLXS_SPARC */ 1722 (void) sprintf(port_info->pi_attrs.option_rom_version, 1723 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version); 1724 #endif /* EMLXS_I386 */ 1725 1726 1727 (void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)", 1728 emlxs_version, emlxs_revision); 1729 1730 (void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME); 1731 1732 port_info->pi_attrs.vendor_specific_id = 1733 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX); 1734 1735 port_info->pi_attrs.supported_cos = SWAP_DATA32(FC_NS_CLASS3); 1736 1737 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE; 1738 1739 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1740 1741 port_info->pi_rnid_params.params.num_attached = 0; 1742 1743 /* 1744 * Copy the serial number string (right most 16 chars) into the right 1745 * justified local buffer 1746 */ 1747 bzero(buffer, sizeof (buffer)); 1748 length = strlen(vpd->serial_num); 1749 len = (length > 16) ? 
16 : length; 1750 bcopy(&vpd->serial_num[(length - len)], 1751 &buffer[(sizeof (buffer) - len)], len); 1752 1753 port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index; 1754 1755 #endif /* >= EMLXS_MODREV5 */ 1756 1757 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLX_MODREV == EMLXS_MODREV4)) 1758 1759 port_info->pi_rnid_params.params.num_attached = 0; 1760 1761 if (hba->flag & FC_NPIV_ENABLED) { 1762 uint8_t byte; 1763 uint8_t *wwpn; 1764 uint32_t i; 1765 uint32_t j; 1766 1767 /* Copy the WWPN as a string into the local buffer */ 1768 wwpn = (uint8_t *)&hba->wwpn; 1769 for (i = 0; i < 16; i++) { 1770 byte = *wwpn++; 1771 j = ((byte & 0xf0) >> 4); 1772 if (j <= 9) { 1773 buffer[i] = 1774 (char)((uint8_t)'0' + (uint8_t)j); 1775 } else { 1776 buffer[i] = 1777 (char)((uint8_t)'A' + (uint8_t)(j - 1778 10)); 1779 } 1780 1781 i++; 1782 j = (byte & 0xf); 1783 if (j <= 9) { 1784 buffer[i] = 1785 (char)((uint8_t)'0' + (uint8_t)j); 1786 } else { 1787 buffer[i] = 1788 (char)((uint8_t)'A' + (uint8_t)(j - 1789 10)); 1790 } 1791 } 1792 1793 port_info->pi_attrs.hba_fru_details.port_index = port->vpi; 1794 } else { 1795 /* Copy the serial number string (right most 16 chars) */ 1796 /* into the right justified local buffer */ 1797 bzero(buffer, sizeof (buffer)); 1798 length = strlen(vpd->serial_num); 1799 len = (length > 16) ? 
16 : length; 1800 bcopy(&vpd->serial_num[(length - len)], 1801 &buffer[(sizeof (buffer) - len)], len); 1802 1803 port_info->pi_attrs.hba_fru_details.port_index = 1804 vpd->port_index; 1805 } 1806 1807 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */ 1808 1809 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1810 1811 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high; 1812 dptr[0] = buffer[0]; 1813 dptr[1] = buffer[1]; 1814 dptr[2] = buffer[2]; 1815 dptr[3] = buffer[3]; 1816 dptr[4] = buffer[4]; 1817 dptr[5] = buffer[5]; 1818 dptr[6] = buffer[6]; 1819 dptr[7] = buffer[7]; 1820 port_info->pi_attrs.hba_fru_details.high = 1821 SWAP_DATA64(port_info->pi_attrs.hba_fru_details.high); 1822 1823 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low; 1824 dptr[0] = buffer[8]; 1825 dptr[1] = buffer[9]; 1826 dptr[2] = buffer[10]; 1827 dptr[3] = buffer[11]; 1828 dptr[4] = buffer[12]; 1829 dptr[5] = buffer[13]; 1830 dptr[6] = buffer[14]; 1831 dptr[7] = buffer[15]; 1832 port_info->pi_attrs.hba_fru_details.low = 1833 SWAP_DATA64(port_info->pi_attrs.hba_fru_details.low); 1834 1835 #endif /* >= EMLXS_MODREV3 */ 1836 1837 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1838 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name, 1839 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN); 1840 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name, 1841 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN); 1842 #endif /* >= EMLXS_MODREV4 */ 1843 1844 (void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev); 1845 1846 /* Set the hba speed limit */ 1847 if (vpd->link_speed & LMT_10GB_CAPABLE) { 1848 port_info->pi_attrs.supported_speed |= 1849 FC_HBA_PORTSPEED_10GBIT; 1850 } 1851 if (vpd->link_speed & LMT_8GB_CAPABLE) { 1852 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT; 1853 } 1854 if (vpd->link_speed & LMT_4GB_CAPABLE) { 1855 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT; 1856 } 1857 if (vpd->link_speed & LMT_2GB_CAPABLE) { 1858 port_info->pi_attrs.supported_speed |= 
FC_HBA_PORTSPEED_2GBIT; 1859 } 1860 if (vpd->link_speed & LMT_1GB_CAPABLE) { 1861 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT; 1862 } 1863 1864 /* Set the hba model info */ 1865 (void) strcpy(port_info->pi_attrs.model, hba->model_info.model); 1866 (void) strcpy(port_info->pi_attrs.model_description, 1867 hba->model_info.model_desc); 1868 1869 1870 /* Log information */ 1871 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1872 "Bind info: port_num = %d", bind_info->port_num); 1873 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1874 "Bind info: port_handle = %p", bind_info->port_handle); 1875 1876 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1877 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1878 "Bind info: port_npiv = %d", bind_info->port_npiv); 1879 #endif /* >= EMLXS_MODREV5 */ 1880 1881 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1882 "Port info: pi_topology = %x", port_info->pi_topology); 1883 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1884 "Port info: pi_error = %x", port_info->pi_error); 1885 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1886 "Port info: pi_port_state = %x", port_info->pi_port_state); 1887 1888 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1889 "Port info: port_id = %x", port_info->pi_s_id.port_id); 1890 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1891 "Port info: priv_lilp_posit = %x", 1892 port_info->pi_s_id.priv_lilp_posit); 1893 1894 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1895 "Port info: hard_addr = %x", 1896 port_info->pi_hard_addr.hard_addr); 1897 1898 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1899 "Port info: rnid.status = %x", 1900 port_info->pi_rnid_params.status); 1901 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1902 "Port info: rnid.global_id = %16s", 1903 port_info->pi_rnid_params.params.global_id); 1904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1905 "Port info: rnid.unit_type = %x", 1906 port_info->pi_rnid_params.params.unit_type); 1907 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1908 "Port info: rnid.port_id = %x", 1909 port_info->pi_rnid_params.params.port_id); 1910 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1911 "Port info: rnid.num_attached = %x", 1912 port_info->pi_rnid_params.params.num_attached); 1913 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1914 "Port info: rnid.ip_version = %x", 1915 port_info->pi_rnid_params.params.ip_version); 1916 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1917 "Port info: rnid.udp_port = %x", 1918 port_info->pi_rnid_params.params.udp_port); 1919 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1920 "Port info: rnid.ip_addr = %16s", 1921 port_info->pi_rnid_params.params.ip_addr); 1922 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1923 "Port info: rnid.spec_id_resv = %x", 1924 port_info->pi_rnid_params.params.specific_id_resv); 1925 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1926 "Port info: rnid.topo_flags = %x", 1927 port_info->pi_rnid_params.params.topo_flags); 1928 1929 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1930 "Port info: manufacturer = %s", 1931 port_info->pi_attrs.manufacturer); 1932 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1933 "Port info: serial_num = %s", 1934 port_info->pi_attrs.serial_number); 1935 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1936 "Port info: model = %s", port_info->pi_attrs.model); 1937 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1938 "Port info: model_description = %s", 1939 port_info->pi_attrs.model_description); 1940 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1941 "Port info: hardware_version = %s", 1942 port_info->pi_attrs.hardware_version); 1943 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1944 "Port info: driver_version = %s", 1945 port_info->pi_attrs.driver_version); 1946 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1947 "Port info: option_rom_version = %s", 1948 port_info->pi_attrs.option_rom_version); 1949 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1950 "Port info: firmware_version = %s", 1951 port_info->pi_attrs.firmware_version); 1952 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1953 "Port info: driver_name = %s", 1954 port_info->pi_attrs.driver_name); 1955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1956 "Port info: vendor_specific_id = %x", 1957 port_info->pi_attrs.vendor_specific_id); 1958 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1959 "Port info: supported_cos = %x", 1960 port_info->pi_attrs.supported_cos); 1961 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1962 "Port info: supported_speed = %x", 1963 port_info->pi_attrs.supported_speed); 1964 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1965 "Port info: max_frame_size = %x", 1966 port_info->pi_attrs.max_frame_size); 1967 1968 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1969 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1970 "Port info: fru_port_index = %x", 1971 port_info->pi_attrs.hba_fru_details.port_index); 1972 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1973 "Port info: fru_high = %llx", 1974 port_info->pi_attrs.hba_fru_details.high); 1975 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1976 "Port info: fru_low = %llx", 1977 port_info->pi_attrs.hba_fru_details.low); 1978 #endif /* >= EMLXS_MODREV3 */ 1979 1980 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1982 "Port info: sym_node_name = %s", 1983 port_info->pi_attrs.sym_node_name); 1984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1985 "Port info: sym_port_name = %s", 1986 port_info->pi_attrs.sym_port_name); 1987 #endif /* >= EMLXS_MODREV4 */ 1988 1989 /* Set the bound flag */ 1990 port->flag |= EMLXS_PORT_BOUND; 1991 hba->num_of_ports++; 1992 1993 mutex_exit(&EMLXS_PORT_LOCK); 1994 1995 return ((opaque_t)port); 1996 1997 } /* emlxs_bind_port() */ 1998 1999 2000 static void 2001 emlxs_unbind_port(opaque_t fca_port_handle) 2002 { 2003 emlxs_port_t *port = 
(emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	uint32_t count;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_unbind_port: port=%p", port);

	/* Check ub buffer pools */
	if (port->ub_pool) {
		mutex_enter(&EMLXS_UB_LOCK);

		/* Wait up to 10 seconds for all ub pools to be freed */
		/* (lock is dropped around each delay so the pools can drain) */
		count = 10 * 2;
		while (port->ub_pool && count) {
			mutex_exit(&EMLXS_UB_LOCK);
			delay(drv_usectohz(500000));	/* half second wait */
			count--;
			mutex_enter(&EMLXS_UB_LOCK);
		}

		if (port->ub_pool) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_unbind_port: Unsolicited buffers still "
			    "active. port=%p. Destroying...", port);

			/* Destroy all pools */
			while (port->ub_pool) {
				emlxs_ub_destroy(port, port->ub_pool);
			}
		}

		mutex_exit(&EMLXS_UB_LOCK);
	}

	/* Destroy & flush all port nodes, if they exist */
	if (port->node_count) {
		(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
	}
#if (EMLXS_MODREV >= EMLXS_MODREV5)
	if ((hba->flag & FC_NPIV_ENABLED) &&
	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
		(void) emlxs_mb_unreg_vpi(port);
	}
#endif

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Nothing more to do if the port was never (or already un-) bound */
	if (!(port->flag & EMLXS_PORT_BOUND)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	port->flag &= ~EMLXS_PORT_BOUND;
	hba->num_of_ports--;

	/* Drop all references to the ULP callbacks/handle */
	port->ulp_handle = 0;
	port->ulp_statec = FC_STATE_OFFLINE;
	port->ulp_statec_cb = NULL;
	port->ulp_unsol_cb = NULL;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_unbind_port() */


/*ARGSUSED*/
extern int
emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;

	if 
(!sbp) { 2079 return (FC_FAILURE); 2080 } 2081 2082 bzero((void *)sbp, sizeof (emlxs_buf_t)); 2083 2084 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg); 2085 sbp->pkt_flags = 2086 PACKET_VALID | PACKET_RETURNED; 2087 sbp->port = port; 2088 sbp->pkt = pkt; 2089 sbp->iocbq.sbp = sbp; 2090 2091 return (FC_SUCCESS); 2092 2093 } /* emlxs_pkt_init() */ 2094 2095 2096 2097 static void 2098 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp) 2099 { 2100 emlxs_hba_t *hba = HBA; 2101 emlxs_config_t *cfg = &CFG; 2102 fc_packet_t *pkt = PRIV2PKT(sbp); 2103 uint32_t *iptr; 2104 2105 mutex_enter(&sbp->mtx); 2106 2107 /* Reinitialize */ 2108 sbp->pkt = pkt; 2109 sbp->port = port; 2110 sbp->bmp = NULL; 2111 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED); 2112 sbp->iotag = 0; 2113 sbp->ticks = 0; 2114 sbp->abort_attempts = 0; 2115 sbp->fpkt = NULL; 2116 sbp->flush_count = 0; 2117 sbp->next = NULL; 2118 2119 if (!port->tgt_mode) { 2120 sbp->node = NULL; 2121 sbp->did = 0; 2122 sbp->lun = 0; 2123 sbp->class = 0; 2124 sbp->ring = NULL; 2125 sbp->class = 0; 2126 } 2127 2128 bzero((void *)&sbp->iocbq, sizeof (IOCBQ)); 2129 sbp->iocbq.sbp = sbp; 2130 2131 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp || 2132 ddi_in_panic()) { 2133 sbp->pkt_flags |= PACKET_POLLED; 2134 } 2135 2136 /* Prepare the fc packet */ 2137 pkt->pkt_state = FC_PKT_SUCCESS; 2138 pkt->pkt_reason = 0; 2139 pkt->pkt_action = 0; 2140 pkt->pkt_expln = 0; 2141 pkt->pkt_data_resid = 0; 2142 pkt->pkt_resp_resid = 0; 2143 2144 /* Make sure all pkt's have a proper timeout */ 2145 if (!cfg[CFG_TIMEOUT_ENABLE].current) { 2146 /* This disables all IOCB on chip timeouts */ 2147 pkt->pkt_timeout = 0x80000000; 2148 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) { 2149 pkt->pkt_timeout = 60; 2150 } 2151 2152 /* Clear the response buffer */ 2153 if (pkt->pkt_rsplen) { 2154 /* Check for FCP commands */ 2155 if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) || 2156 
(pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) { 2157 iptr = (uint32_t *)pkt->pkt_resp; 2158 iptr[2] = 0; 2159 iptr[3] = 0; 2160 } else { 2161 bzero(pkt->pkt_resp, pkt->pkt_rsplen); 2162 } 2163 } 2164 2165 mutex_exit(&sbp->mtx); 2166 2167 return; 2168 2169 } /* emlxs_initialize_pkt() */ 2170 2171 2172 2173 /* 2174 * We may not need this routine 2175 */ 2176 /*ARGSUSED*/ 2177 extern int 2178 emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt) 2179 { 2180 emlxs_buf_t *sbp = PKT2PRIV(pkt); 2181 2182 if (!sbp) { 2183 return (FC_FAILURE); 2184 } 2185 2186 if (!(sbp->pkt_flags & PACKET_VALID)) { 2187 return (FC_FAILURE); 2188 } 2189 2190 sbp->pkt_flags &= ~PACKET_VALID; 2191 mutex_destroy(&sbp->mtx); 2192 2193 return (FC_SUCCESS); 2194 2195 } /* emlxs_pkt_uninit() */ 2196 2197 2198 static int 2199 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2200 { 2201 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2202 emlxs_hba_t *hba = HBA; 2203 int32_t rval; 2204 2205 if (!(port->flag & EMLXS_PORT_BOUND)) { 2206 return (FC_CAP_ERROR); 2207 } 2208 2209 if (strcmp(cap, FC_NODE_WWN) == 0) { 2210 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2211 "fca_get_cap: FC_NODE_WWN"); 2212 2213 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE)); 2214 rval = FC_CAP_FOUND; 2215 2216 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2217 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2218 "fca_get_cap: FC_LOGIN_PARAMS"); 2219 2220 /* 2221 * We need to turn off CLASS2 support. 2222 * Otherwise, FC transport will use CLASS2 as default class 2223 * and never try with CLASS3. 
 */
		hba->sparam.cls2.classValid = 0;

		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		int32_t *num_bufs;
		emlxs_config_t *cfg = &CFG;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
		    cfg[CFG_UB_BUFS].current);

		num_bufs = (int32_t *)ptr;

		/* We multiply by MAX_VPORTS because ULP uses a */
		/* formula to calculate ub bufs from this */
		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		int32_t *size;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");

		/* -1 advertises no payload size limit to the transport */
		size = (int32_t *)ptr;
		*size = -1;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		fc_reset_action_t *action;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");

		action = (fc_reset_action_t *)ptr;
		*action = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
		fc_dma_behavior_t *behavior;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");

		behavior = (fc_dma_behavior_t *)ptr;
		*behavior = FC_ALLOW_STREAMING;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		fc_fcp_dma_t *fcp_dma;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_FCP_DMA");

		fcp_dma = (fc_fcp_dma_t *)ptr;
		*fcp_dma = FC_DVMA_SPACE;
		rval = FC_CAP_FOUND;

	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: Unknown capability. [%s]", cap);

		rval = FC_CAP_ERROR;

	}

	return (rval);

} /* emlxs_get_cap() */



/*
 * emlxs_set_cap
 *
 * FCA set_cap entry point.  No capability is settable on this FCA;
 * the request is logged and FC_CAP_ERROR is always returned.
 */
static int
emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);

	return (FC_CAP_ERROR);

} /* emlxs_set_cap() */


/*
 * emlxs_get_device
 *
 * FCA get_device entry point.  Not supported by this driver; the
 * request is logged and NULL is always returned.
 */
static opaque_t
emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_get_device: did=%x", d_id);

	return (NULL);

} /* emlxs_get_device() */


/*
 * emlxs_notify
 *
 * FCA notify entry point.  The command is only logged; no action is
 * taken and FC_SUCCESS is always returned.
 */
static int32_t
emlxs_notify(opaque_t fca_port_handle, uint32_t cmd)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
	    cmd);

	return (FC_SUCCESS);

} /* emlxs_notify */



/*
 * emlxs_get_map
 *
 * FCA get_map entry point: returns the loop (LILP) map for a bound
 * port on a loop topology.  Returns FC_NOMAP when the port is not
 * bound or the topology is not a loop.  When no ALPA map is cached,
 * lilp_magic is set to MAGIC_LISA which triggers an ALPA scan in ULP.
 */
static int
emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	uint32_t lilp_length;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
	    port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
	    port->alpa_map[3], port->alpa_map[4]);

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_NOMAP);
	}

	if (hba->topology != TOPOLOGY_LOOP) {
		return (FC_NOMAP);
	}

	/* Check if alpa map is available */
	if (port->alpa_map[0] != 0) {
		mapbuf->lilp_magic = MAGIC_LILP;
	} else {	/* No LILP map available */

		/* Set lilp_magic to MAGIC_LISA and this will */
		/* trigger an ALPA scan in ULP */
		mapbuf->lilp_magic = MAGIC_LISA;
	}

	mapbuf->lilp_myalpa = port->did;

	/* The first byte of the alpa_map is the lilp map length */
	/* Add one to include the lilp length byte itself */
	lilp_length = (uint32_t)port->alpa_map[0] + 1;

	/* Make sure the max transfer is 128 bytes */
	if (lilp_length > 128) {
		lilp_length = 128;
	}

	/* We start copying from the lilp_length field */
	/* in order to get a word aligned address */
	bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
	    lilp_length);

	return (FC_SUCCESS);

} /* emlxs_get_map() */



/*
 * emlxs_transport
 *
 * FCA transport entry point: validates the packet, prepares it via
 * emlxs_initialize_pkt(), and dispatches it to the appropriate send
 * routine based on pkt_tran_type (and FC-4 type for exchanges and
 * outbound responses).  On send failure the packet is handed back to
 * ULP (PACKET_RETURNED) and the error returned.  On success, polled
 * packets are waited on here via emlxs_poll() before returning.
 */
extern int
emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t rval;
	uint32_t pkt_flags;

	/* Make sure adapter is online */
	if (!(hba->flag & FC_ONLINE_MODE)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Adapter offline.");

		return (FC_OFFLINE);
	}

	/* Validate packet */
	sbp = PKT2PRIV(pkt);

	/* Make sure ULP was told that the port was online */
	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Port offline.");

		return (FC_OFFLINE);
	}

	if (sbp->port != port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}

	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_RETURNED))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}
#ifdef SFCT_SUPPORT
	if (port->tgt_mode && !sbp->fct_cmd &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Packet blocked. Target mode.");
		return (FC_TRANSPORT_ERROR);
	}
#endif /* SFCT_SUPPORT */

#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif /* IDLE_TIMER */

	/* Prepare the packet for transport */
	emlxs_initialize_pkt(port, sbp);

	/* Save a copy of the pkt flags. */
	/* We will check the polling flag later */
	pkt_flags = sbp->pkt_flags;

	/* Send the packet */
	switch (pkt->pkt_tran_type) {
	case FC_PKT_FCP_READ:
	case FC_PKT_FCP_WRITE:
		rval = emlxs_send_fcp_cmd(port, sbp);
		break;

	case FC_PKT_IP_WRITE:
	case FC_PKT_BROADCAST:
		rval = emlxs_send_ip(port, sbp);
		break;

	case FC_PKT_EXCHANGE:
		switch (pkt->pkt_cmd_fhdr.type) {
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fcp_cmd(port, sbp);
			break;

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct(port, sbp);
			break;

#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els(port, sbp);
		}
		break;

	case FC_PKT_OUTBOUND:
		switch (pkt->pkt_cmd_fhdr.type) {
#ifdef SFCT_SUPPORT
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fct_status(port, sbp);
			break;

		case FC_TYPE_BASIC_LS:
			rval = emlxs_send_fct_abort(port, sbp);
			break;
#endif /* SFCT_SUPPORT */

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct_rsp(port, sbp);
			break;
#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els_rsp(port, sbp);
		}
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
		rval = FC_TRANSPORT_ERROR;
		break;
	}

	/* Check if send was not successful */
	if (rval != FC_SUCCESS) {
		/* Return packet to ULP */
		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_RETURNED;
		mutex_exit(&sbp->mtx);

		return (rval);
	}

	/* Check if this packet should be polled for completion before */
	/* returning. This check must be done with a saved copy of the */
	/* pkt_flags because the packet itself could already be freed from */
	/* memory if it was not polled. */
	if (pkt_flags & PACKET_POLLED) {
		emlxs_poll(port, sbp);
	}

	return (FC_SUCCESS);

} /* emlxs_transport() */



/*
 * emlxs_poll
 *
 * Waits for completion of a polled packet.  In panic context the chip
 * is polled manually (no interrupts or timers run during a panic);
 * otherwise this blocks on EMLXS_PKT_CV until the packet completes.
 * For FCP reset packets it then flushes outstanding chip I/Os,
 * escalating from flush wait, to link reset, to HBA reset.  Finally
 * the packet is marked PACKET_RETURNED and the ULP completion callback
 * (if any) is invoked.
 */
static void
emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt = PRIV2PKT(sbp);
	clock_t timeout;
	clock_t time;
	uint32_t att_bit;
	emlxs_ring_t *rp;

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Check for panic situation */
	if (ddi_in_panic()) {
		/*
		 * In panic situations there will be one thread with
		 * no interrupts (hard or soft) and no timers
		 */

		/*
		 * We must manually poll everything in this thread
		 * to keep the driver going.
		 */

		/*
		 * NOTE(review): att_bit is left uninitialized if
		 * rp->ringno is not one of the four known rings —
		 * confirm ringno is always FCP/IP/ELS/CT here.
		 */
		rp = (emlxs_ring_t *)sbp->ring;
		switch (rp->ringno) {
		case FC_FCP_RING:
			att_bit = HA_R0ATT;
			break;

		case FC_IP_RING:
			att_bit = HA_R1ATT;
			break;

		case FC_ELS_RING:
			att_bit = HA_R2ATT;
			break;

		case FC_CT_RING:
			att_bit = HA_R3ATT;
			break;
		}

		/* Keep polling the chip until our IO is completed */
		/* Driver's timer will not function during panics.
 */
		/* Therefore, timer checks must be performed manually. */
		(void) drv_getparm(LBOLT, &time);
		timeout = time + drv_usectohz(1000000);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			emlxs_sli_poll_intr(hba, att_bit);
			(void) drv_getparm(LBOLT, &time);

			/* Trigger timer checks periodically */
			if (time >= timeout) {
				emlxs_timer_checks(hba);
				timeout = time + drv_usectohz(1000000);
			}
		}
	} else {
		/* Wait for IO completion */
		/* The driver's timer will detect */
		/* any timeout and abort the I/O. */
		mutex_enter(&EMLXS_PKT_LOCK);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
		}
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* Check for fcp reset pkt */
	if (sbp->pkt_flags & PACKET_FCP_RESET) {
		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, FC_FCP_RING);
			goto done;
		}

		/* Set the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		/* Poll every half second until the flush drains or times out */
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, FC_FCP_RING);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Waiting...", sbp,
		    sbp->flush_count);

		/* Let's try this one more time */

		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->ring[FC_FCP_RING], sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		/* Reset the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, FC_FCP_RING);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting link.", sbp,
		    sbp->flush_count);

		/* Let's first try to reset the link */
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);

		if (sbp->flush_count == 0) {
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
		    sbp->flush_count);

		/* If that doesn't work, reset the adapter */
		(void) emlxs_reset(port, FC_FCA_RESET);

		if (sbp->flush_count != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
			    "sbp=%p flush_count=%d. Giving up.", sbp,
			    sbp->flush_count);
		}

	}
	/* PACKET_FCP_RESET */
done:

	/* Packet has been declared completed and is now ready to be returned */

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	mutex_enter(&sbp->mtx);
	sbp->pkt_flags |= PACKET_RETURNED;
	mutex_exit(&sbp->mtx);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count--;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Make ULP completion callback if required */
	if (pkt->pkt_comp) {
		(*pkt->pkt_comp) (pkt);
	}

	return;

} /* emlxs_poll() */


/*
 * emlxs_ub_alloc
 *
 * FCA ub_alloc entry point: creates a pool of *count unsolicited
 * buffers of the given size for the given FC-4 type and returns their
 * tokens in tokens[].  In target mode the token array is just zeroed
 * and FC_SUCCESS returned.  Fails when the port is unbound, too many
 * buffers are requested, the token array is NULL, or a pool of the
 * same type and size already exists.
 */
static int
emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;

	char *err = NULL;
	emlxs_unsol_buf_t *pool;
	emlxs_unsol_buf_t *new_pool;
	int32_t i;
	int result;
	uint32_t free_resv;
	uint32_t free;
	emlxs_config_t *cfg = &CFG;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;

	if (port->tgt_mode) {
		if (tokens && count) {
			bzero(tokens, (sizeof (uint64_t) * (*count)));
		}
		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_alloc failed: Port not bound! size=%x count=%d "
		    "type=%x", size, *count, type);

		return (FC_FAILURE);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "ub_alloc: size=%x count=%d type=%x", size, *count, type);

	if (count && (*count > EMLXS_MAX_UBUFS)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "ub_alloc failed: Too many unsolicted buffers requested. "
		    "count=%x", *count);

		return (FC_FAILURE);

	}

	if (tokens == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "ub_alloc failed: Token array is NULL.");

		return (FC_FAILURE);
	}

	/* Clear the token array */
	bzero(tokens, (sizeof (uint64_t) * (*count)));

	free_resv = 0;
	free = *count;
	switch (type) {
	case FC_TYPE_BASIC_LS:
		err = "BASIC_LS";
		break;
	case FC_TYPE_EXTENDED_LS:
		err = "EXTENDED_LS";
		free = *count / 2;	/* Hold 50% for normal use */
		free_resv = *count - free;	/* Reserve 50% for RSCN use */
		break;
	case FC_TYPE_IS8802:
		err = "IS8802";
		break;
	case FC_TYPE_IS8802_SNAP:
		err = "IS8802_SNAP";

		if (cfg[CFG_NETWORK_ON].current == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: IP support is disabled.");

			return (FC_FAILURE);
		}
		break;
	case FC_TYPE_SCSI_FCP:
		err = "SCSI_FCP";
		break;
	case FC_TYPE_SCSI_GPP:
		err = "SCSI_GPP";
		break;
	case FC_TYPE_HIPP_FP:
		err = "HIPP_FP";
		break;
	case FC_TYPE_IPI3_MASTER:
		err = "IPI3_MASTER";
		break;
	case FC_TYPE_IPI3_SLAVE:
		err = "IPI3_SLAVE";
		break;
	case FC_TYPE_IPI3_PEER:
		err = "IPI3_PEER";
		break;
	case FC_TYPE_FC_SERVICES:
		err = "FC_SERVICES";
		break;
	}

	mutex_enter(&EMLXS_UB_LOCK);

	/*
	 * Walk through the list of the unsolicited buffers
	 * for this ddiinst of emlx.
	 */

	pool = port->ub_pool;

	/*
	 * The emlxs_ub_alloc() can be called more than once with different
	 * size. We will reject the call if there are
	 * duplicate size with the same FC-4 type.
 */
	while (pool) {
		if ((pool->pool_type == type) &&
		    (pool->pool_buf_size == size)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: Unsolicited buffer pool for %s "
			    "of size 0x%x bytes already exists.", err, size);

			/*
			 * NOTE(review): this goto reaches the fail path
			 * before new_pool is initialized, and the fail path
			 * dereferences new_pool — confirm this path cannot
			 * be taken, or initialize new_pool to NULL and guard.
			 */
			result = FC_FAILURE;
			goto fail;
		}

		pool = pool->pool_next;
	}

	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
	    KM_SLEEP);
	/* With KM_SLEEP this check can never trigger */
	if (new_pool == NULL) {
		result = FC_FAILURE;
		goto fail;
	}

	new_pool->pool_next = NULL;
	new_pool->pool_type = type;
	new_pool->pool_buf_size = size;
	new_pool->pool_nentries = *count;
	new_pool->pool_available = new_pool->pool_nentries;
	new_pool->pool_free = free;
	new_pool->pool_free_resv = free_resv;
	new_pool->fc_ubufs =
	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);

	/* With KM_SLEEP this check can never trigger */
	if (new_pool->fc_ubufs == NULL) {
		/*
		 * NOTE(review): new_pool is freed here and then the fail
		 * path dereferences it again — dead code with KM_SLEEP,
		 * but would double-free if ever reached.
		 */
		kmem_free(new_pool, sizeof (emlxs_unsol_buf_t));
		result = FC_FAILURE;
		goto fail;
	}

	new_pool->pool_first_token = port->ub_count;
	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;

	/* Create and initialize each buffer object in the pool */
	for (i = 0; i < new_pool->pool_nentries; i++) {
		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
		ubp->ub_port_handle = port->ulp_handle;
		/* The token is simply the buffer object's address */
		ubp->ub_token = (uint64_t)((unsigned long)ubp);
		ubp->ub_bufsize = size;
		ubp->ub_class = FC_TRAN_CLASS3;
		ubp->ub_port_private = NULL;
		ubp->ub_fca_private =
		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
		    KM_SLEEP);

		if (ubp->ub_fca_private == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: Unable to allocate fca_private "
			    "object.");

			result = FC_FAILURE;
			goto fail;
		}

		/*
		 * Initialize emlxs_ub_priv_t
		 */
		ub_priv = ubp->ub_fca_private;
		ub_priv->ubp = ubp;
		ub_priv->port = port;
		ub_priv->flags = EMLXS_UB_FREE;
		ub_priv->available = 1;
		ub_priv->pool = new_pool;
		ub_priv->time = 0;
		ub_priv->timeout = 0;
		ub_priv->token = port->ub_count;
		ub_priv->cmd = 0;

		/* Allocate the actual buffer */
		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);

		/* Check if we were not successful */
		if (ubp->ub_buffer == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "ub_alloc failed: Unable to allocate buffer.");

			/* Free the private area of the current object */
			kmem_free(ubp->ub_fca_private,
			    sizeof (emlxs_ub_priv_t));

			result = FC_FAILURE;
			goto fail;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp,
		    ub_priv->token, ubp->ub_bufsize, type);

		tokens[i] = (uint64_t)((unsigned long)ubp);
		port->ub_count++;
	}

	/* Add the pool to the top of the pool list */
	new_pool->pool_prev = NULL;
	new_pool->pool_next = port->ub_pool;

	if (port->ub_pool) {
		port->ub_pool->pool_prev = new_pool;
	}
	port->ub_pool = new_pool;

	/* Set the post counts */
	if (type == FC_TYPE_IS8802_SNAP) {
		MAILBOXQ *mbox;

		port->ub_post[FC_IP_RING] += new_pool->pool_nentries;

		/* Issue a CONFIG_FARP mailbox command for IP support */
		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX | MEM_PRI))) {
			emlxs_mb_config_farp(hba, (MAILBOX *)mbox);
			if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mbox,
			    MBX_NOWAIT, 0) != MBX_BUSY) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
		port->flag |= EMLXS_PORT_IP_UP;
	} else if (type == FC_TYPE_EXTENDED_LS) {
		port->ub_post[FC_ELS_RING] += new_pool->pool_nentries;
	} else if (type == FC_TYPE_FC_SERVICES) {
		port->ub_post[FC_CT_RING] += new_pool->pool_nentries;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
	    *count, err, size);

	return (FC_SUCCESS);

fail:

	/* Clean the pool */
	for (i = 0; tokens[i] != NULL; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_alloc failed: Freed buffer=%p token=%x size=%x "
		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);

		/* Free the actual buffer */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));

		tokens[i] = 0;
		port->ub_count--;
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)new_pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));

	mutex_exit(&EMLXS_UB_LOCK);

	return (result);

} /* emlxs_ub_alloc() */


/*
 * emlxs_ub_els_reject
 *
 * Sends an LS_RJT (reason LSRJT_UNABLE_TPC) back to the originator of
 * an unsolicited ELS request whose buffer is being released without a
 * reply from ULP.  Silently returns when the link is down or a packet
 * cannot be allocated.  The reject is queued for transmission in
 * another thread via emlxs_pkt_send().
 */
static void
emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;
	fc_packet_t *pkt;
	ELS_PKT *els;
	uint32_t sid;

	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

	if (hba->state <= FC_LINK_DOWN) {
		return;
	}

	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
		return;
	}

	sid = SWAP_DATA24_LO(ubp->ub_frame.s_id);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
	    "%s dropped: sid=%x. Rejecting.",
	    emlxs_elscmd_xlate(ub_priv->cmd), sid);

	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
	pkt->pkt_timeout = (2 * hba->fc_ratov);

	/* Reply in class 2 if the request arrived in class 2 */
	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
	}

	/* Build the fc header */
	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
	pkt->pkt_cmd_fhdr.r_ctl =
	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
	pkt->pkt_cmd_fhdr.s_id = SWAP_DATA24_LO(port->did);
	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
	pkt->pkt_cmd_fhdr.f_ctl =
	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
	pkt->pkt_cmd_fhdr.seq_id = 0;
	pkt->pkt_cmd_fhdr.df_ctl = 0;
	pkt->pkt_cmd_fhdr.seq_cnt = 0;
	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
	pkt->pkt_cmd_fhdr.ro = 0;

	/* Build the command */
	els = (ELS_PKT *) pkt->pkt_cmd;
	els->elsCode = 0x01;	/* LS_RJT */
	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	els->un.lsRjt.un.b.vendorUnique = 0x02;

	/* Send the pkt later in another thread */
	(void) emlxs_pkt_send(pkt, 0);

	return;

} /* emlxs_ub_els_reject() */

/*
 * emlxs_ub_release
 *
 * FCA ub_release entry point: returns unsolicited buffers (identified
 * by token) to their pool.  Dropped unsolicited ELS requests that ULP
 * never replied to are rejected via emlxs_ub_els_reject() first.  When
 * a pool's buffers are all free and none remain available, the pool is
 * destroyed.  Returns FC_UB_BADTOKEN for a NULL or dead token.
 */
extern int
emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;
	uint32_t time;
	emlxs_unsol_buf_t *pool;

	if (count == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release: Nothing to do. count=%d", count);

		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release failed: Port not bound. count=%d token[0]=%p",
		    count, tokens[0]);

		return (FC_UNBOUND);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_release failed: No pools! count=%d token[0]=%p",
		    count, tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	for (i = 0; i < count; i++) {
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release failed: count=%d tokens[%d]=0", count,
			    i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release failed: Dead buffer found. ubp=%p",
			    ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		if (ub_priv->flags == EMLXS_UB_FREE) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_release: Buffer already free! ubp=%p token=%x",
			    ubp, ub_priv->token);

			continue;
		}

		/* Check for dropped els buffer */
		/* ULP will do this sometimes without sending a reply */
		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
			emlxs_ub_els_reject(port, ubp);
		}

		/* Mark the buffer free */
		ub_priv->flags = EMLXS_UB_FREE;
		bzero(ubp->ub_buffer, ubp->ub_bufsize);

		/* time is only used for the debug message below */
		time = hba->timer_tics - ub_priv->time;
		ub_priv->time = 0;
		ub_priv->timeout = 0;

		pool = ub_priv->pool;

		/*
		 * NOTE(review): flags was just overwritten with
		 * EMLXS_UB_FREE above, so this EMLXS_UB_RESV test can
		 * never be true here and pool_free_resv is never
		 * incremented on this path — confirm whether the RESV
		 * check was intended to precede the flags assignment.
		 */
		if (ub_priv->flags & EMLXS_UB_RESV) {
			pool->pool_free_resv++;
		} else {
			pool->pool_free++;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)",
		    ubp, ub_priv->token, time, ub_priv->available,
		    pool->pool_nentries, pool->pool_available,
		    pool->pool_free, pool->pool_free_resv);

		/* Check if pool can be destroyed now */
		if ((pool->pool_available == 0) &&
		    (pool->pool_free + pool->pool_free_resv ==
		    pool->pool_nentries)) {
			emlxs_ub_destroy(port, pool);
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_ub_release() */


/*
 * emlxs_ub_free
 *
 * FCA ub_free entry point: marks the given unsolicited buffers
 * unavailable and decrements their pool's available count.  The pool
 * itself is destroyed once no buffers remain available and all are
 * free.  Returns FC_UB_BADTOKEN for a NULL or dead token; a no-op
 * (FC_SUCCESS) in target mode, with count == 0, or when unbound.
 */
static int
emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_unsol_buf_t *pool;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;

	if (port->tgt_mode) {
		return (FC_SUCCESS);
	}

	if (count == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free: Nothing to do. count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free: Port not bound. count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "ub_free failed: No pools! count=%d token[0]=%p", count,
		    tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	/* Process buffer list */
	for (i = 0; i < count; i++) {
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_free failed: count=%d tokens[%d]=0", count,
			    i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		/* Mark buffer unavailable */
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "ub_free failed: Dead buffer found. ubp=%p", ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv->available = 0;

		/* Mark one less buffer available in the parent pool */
		pool = ub_priv->pool;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
		    ub_priv->token, pool->pool_nentries,
		    pool->pool_available - 1, pool->pool_free,
		    pool->pool_free_resv);

		if (pool->pool_available) {
			pool->pool_available--;

			/* Check if pool can be destroyed */
			if ((pool->pool_available == 0) &&
			    (pool->pool_free + pool->pool_free_resv ==
			    pool->pool_nentries)) {
				emlxs_ub_destroy(port, pool);
			}
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_ub_free() */


/*
 * emlxs_ub_destroy
 *
 * Unlinks an unsolicited buffer pool from the port's pool list,
 * adjusts the ring post counts for its FC-4 type, and frees every
 * buffer, each buffer's private area, the buffer array, and the pool
 * object itself.
 *
 * EMLXS_UB_LOCK must be held when calling this routine
 */
extern void
emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
{
	emlxs_unsol_buf_t *next;
	emlxs_unsol_buf_t *prev;
	fc_unsol_buf_t *ubp;
	uint32_t i;

	/* Remove the pool object from the pool list */
	next = pool->pool_next;
	prev = pool->pool_prev;

	if (port->ub_pool == pool) {
		port->ub_pool = next;
	}

	if (prev) {
		prev->pool_next = next;
	}

	if (next) {
		next->pool_prev = prev;
	}

	pool->pool_prev = NULL;
	pool->pool_next = NULL;

	/* Clear the post counts */
	switch (pool->pool_type) {
	case FC_TYPE_IS8802_SNAP:
		port->ub_post[FC_IP_RING] -= pool->pool_nentries;
		break;

	case FC_TYPE_EXTENDED_LS:
		port->ub_post[FC_ELS_RING] -= pool->pool_nentries;
		break;

	case FC_TYPE_FC_SERVICES:
		port->ub_post[FC_CT_RING] -= pool->pool_nentries;
		break;
	}

	/* Now free the pool memory */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);

	/* Process the array of buffer objects in the pool */
	for (i = 0; i < pool->pool_nentries; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];

		/* Free the memory the buffer object represents */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));

	return;

} /* emlxs_ub_destroy() */


/*ARGSUSED*/
extern int
emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
3364 emlxs_buf_t *sbp; 3365 NODELIST *nlp; 3366 NODELIST *prev_nlp; 3367 uint8_t ringno; 3368 RING *rp; 3369 clock_t timeout; 3370 clock_t time; 3371 int32_t pkt_ret; 3372 IOCBQ *iocbq; 3373 IOCBQ *next; 3374 IOCBQ *prev; 3375 uint32_t found; 3376 uint32_t att_bit; 3377 uint32_t pass = 0; 3378 3379 sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 3380 iocbq = &sbp->iocbq; 3381 nlp = (NODELIST *)sbp->node; 3382 rp = (RING *)sbp->ring; 3383 ringno = (rp) ? rp->ringno : 0; 3384 3385 if (!(port->flag & EMLXS_PORT_BOUND)) { 3386 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3387 "Port not bound."); 3388 return (FC_UNBOUND); 3389 } 3390 3391 if (!(hba->flag & FC_ONLINE_MODE)) { 3392 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3393 "Adapter offline."); 3394 return (FC_OFFLINE); 3395 } 3396 3397 /* ULP requires the aborted pkt to be completed */ 3398 /* back to ULP before returning from this call. */ 3399 /* SUN knows of problems with this call so they suggested that we */ 3400 /* always return a FC_FAILURE for this call, until it is worked out. */ 3401 3402 /* Check if pkt is no good */ 3403 if (!(sbp->pkt_flags & PACKET_VALID) || 3404 (sbp->pkt_flags & PACKET_RETURNED)) { 3405 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3406 "Bad sbp. flags=%x", sbp->pkt_flags); 3407 return (FC_FAILURE); 3408 } 3409 3410 /* Tag this now */ 3411 /* This will prevent any thread except ours from completing it */ 3412 mutex_enter(&sbp->mtx); 3413 3414 /* Check again if we still own this */ 3415 if (!(sbp->pkt_flags & PACKET_VALID) || 3416 (sbp->pkt_flags & PACKET_RETURNED)) { 3417 mutex_exit(&sbp->mtx); 3418 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3419 "Bad sbp. 
flags=%x", sbp->pkt_flags); 3420 return (FC_FAILURE); 3421 } 3422 3423 /* Check if pkt is a real polled command */ 3424 if (!(sbp->pkt_flags & PACKET_IN_ABORT) && 3425 (sbp->pkt_flags & PACKET_POLLED)) { 3426 mutex_exit(&sbp->mtx); 3427 3428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3429 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp, 3430 sbp->pkt_flags); 3431 return (FC_FAILURE); 3432 } 3433 3434 sbp->pkt_flags |= PACKET_POLLED; 3435 sbp->pkt_flags |= PACKET_IN_ABORT; 3436 3437 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH | 3438 PACKET_IN_TIMEOUT)) { 3439 mutex_exit(&sbp->mtx); 3440 3441 /* Do nothing, pkt already on its way out */ 3442 goto done; 3443 } 3444 3445 mutex_exit(&sbp->mtx); 3446 3447 begin: 3448 pass++; 3449 3450 mutex_enter(&EMLXS_RINGTX_LOCK); 3451 3452 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3453 /* Find it on the queue */ 3454 found = 0; 3455 if (iocbq->flag & IOCB_PRIORITY) { 3456 /* Search the priority queue */ 3457 prev = NULL; 3458 next = (IOCBQ *) nlp->nlp_ptx[ringno].q_first; 3459 3460 while (next) { 3461 if (next == iocbq) { 3462 /* Remove it */ 3463 if (prev) { 3464 prev->next = iocbq->next; 3465 } 3466 3467 if (nlp->nlp_ptx[ringno].q_last == 3468 (void *)iocbq) { 3469 nlp->nlp_ptx[ringno].q_last = 3470 (void *)prev; 3471 } 3472 3473 if (nlp->nlp_ptx[ringno].q_first == 3474 (void *)iocbq) { 3475 nlp->nlp_ptx[ringno].q_first = 3476 (void *)iocbq->next; 3477 } 3478 3479 nlp->nlp_ptx[ringno].q_cnt--; 3480 iocbq->next = NULL; 3481 found = 1; 3482 break; 3483 } 3484 3485 prev = next; 3486 next = next->next; 3487 } 3488 } else { 3489 /* Search the normal queue */ 3490 prev = NULL; 3491 next = (IOCBQ *) nlp->nlp_tx[ringno].q_first; 3492 3493 while (next) { 3494 if (next == iocbq) { 3495 /* Remove it */ 3496 if (prev) { 3497 prev->next = iocbq->next; 3498 } 3499 3500 if (nlp->nlp_tx[ringno].q_last == 3501 (void *)iocbq) { 3502 nlp->nlp_tx[ringno].q_last = 3503 (void *)prev; 3504 } 3505 3506 if 
(nlp->nlp_tx[ringno].q_first == 3507 (void *)iocbq) { 3508 nlp->nlp_tx[ringno].q_first = 3509 (void *)iocbq->next; 3510 } 3511 3512 nlp->nlp_tx[ringno].q_cnt--; 3513 iocbq->next = NULL; 3514 found = 1; 3515 break; 3516 } 3517 3518 prev = next; 3519 next = (IOCBQ *) next->next; 3520 } 3521 } 3522 3523 if (!found) { 3524 mutex_exit(&EMLXS_RINGTX_LOCK); 3525 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg, 3526 "I/O not found in driver. sbp=%p flags=%x", sbp, 3527 sbp->pkt_flags); 3528 goto done; 3529 } 3530 3531 /* Check if node still needs servicing */ 3532 if ((nlp->nlp_ptx[ringno].q_first) || 3533 (nlp->nlp_tx[ringno].q_first && 3534 !(nlp->nlp_flag[ringno] & NLP_CLOSED))) { 3535 3536 /* 3537 * If this is the base node, 3538 * then don't shift the pointers 3539 */ 3540 /* We want to drain the base node before moving on */ 3541 if (!nlp->nlp_base) { 3542 /* Just shift ring queue */ 3543 /* pointers to next node */ 3544 rp->nodeq.q_last = (void *) nlp; 3545 rp->nodeq.q_first = nlp->nlp_next[ringno]; 3546 } 3547 } else { 3548 /* Remove node from ring queue */ 3549 3550 /* If this is the only node on list */ 3551 if (rp->nodeq.q_first == (void *)nlp && 3552 rp->nodeq.q_last == (void *)nlp) { 3553 rp->nodeq.q_last = NULL; 3554 rp->nodeq.q_first = NULL; 3555 rp->nodeq.q_cnt = 0; 3556 } else if (rp->nodeq.q_first == (void *)nlp) { 3557 rp->nodeq.q_first = nlp->nlp_next[ringno]; 3558 ((NODELIST *) rp->nodeq.q_last)-> 3559 nlp_next[ringno] = rp->nodeq.q_first; 3560 rp->nodeq.q_cnt--; 3561 } else { 3562 /* 3563 * This is a little more difficult find the 3564 * previous node in the circular ring queue 3565 */ 3566 prev_nlp = nlp; 3567 while (prev_nlp->nlp_next[ringno] != nlp) { 3568 prev_nlp = prev_nlp->nlp_next[ringno]; 3569 } 3570 3571 prev_nlp->nlp_next[ringno] = 3572 nlp->nlp_next[ringno]; 3573 3574 if (rp->nodeq.q_last == (void *)nlp) { 3575 rp->nodeq.q_last = (void *)prev_nlp; 3576 } 3577 rp->nodeq.q_cnt--; 3578 3579 } 3580 3581 /* Clear node */ 3582 
nlp->nlp_next[ringno] = NULL; 3583 } 3584 3585 mutex_enter(&sbp->mtx); 3586 3587 if (sbp->pkt_flags & PACKET_IN_TXQ) { 3588 sbp->pkt_flags &= ~PACKET_IN_TXQ; 3589 hba->ring_tx_count[ringno]--; 3590 } 3591 3592 mutex_exit(&sbp->mtx); 3593 3594 /* Free the ulpIoTag and the bmp */ 3595 (void) emlxs_unregister_pkt(rp, sbp->iotag, 0); 3596 3597 mutex_exit(&EMLXS_RINGTX_LOCK); 3598 3599 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 3600 IOERR_ABORT_REQUESTED, 1); 3601 3602 goto done; 3603 } 3604 3605 mutex_exit(&EMLXS_RINGTX_LOCK); 3606 3607 3608 /* Check the chip queue */ 3609 mutex_enter(&EMLXS_FCTAB_LOCK(ringno)); 3610 3611 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) && 3612 !(sbp->pkt_flags & PACKET_XRI_CLOSED) && 3613 (sbp == rp->fc_table[sbp->iotag])) { 3614 3615 /* Create the abort IOCB */ 3616 if (hba->state >= FC_LINK_UP) { 3617 iocbq = 3618 emlxs_create_abort_xri_cn(port, sbp->node, 3619 sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS); 3620 3621 mutex_enter(&sbp->mtx); 3622 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3623 sbp->ticks = 3624 hba->timer_tics + (4 * hba->fc_ratov) + 10; 3625 sbp->abort_attempts++; 3626 mutex_exit(&sbp->mtx); 3627 } else { 3628 iocbq = 3629 emlxs_create_close_xri_cn(port, sbp->node, 3630 sbp->iotag, rp); 3631 3632 mutex_enter(&sbp->mtx); 3633 sbp->pkt_flags |= PACKET_XRI_CLOSED; 3634 sbp->ticks = hba->timer_tics + 30; 3635 sbp->abort_attempts++; 3636 mutex_exit(&sbp->mtx); 3637 } 3638 3639 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3640 3641 /* Send this iocbq */ 3642 if (iocbq) { 3643 emlxs_sli_issue_iocb_cmd(hba, rp, iocbq); 3644 iocbq = NULL; 3645 } 3646 3647 goto done; 3648 } 3649 3650 mutex_exit(&EMLXS_FCTAB_LOCK(ringno)); 3651 3652 /* Pkt was not on any queues */ 3653 3654 /* Check again if we still own this */ 3655 if (!(sbp->pkt_flags & PACKET_VALID) || 3656 (sbp->pkt_flags & 3657 (PACKET_RETURNED | PACKET_IN_COMPLETION | 3658 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3659 goto done; 3660 } 3661 3662 /* Apparently the pkt was not found. 
Let's delay and try again */ 3663 if (pass < 5) { 3664 delay(drv_usectohz(5000000)); /* 5 seconds */ 3665 3666 /* Check again if we still own this */ 3667 if (!(sbp->pkt_flags & PACKET_VALID) || 3668 (sbp->pkt_flags & 3669 (PACKET_RETURNED | PACKET_IN_COMPLETION | 3670 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) { 3671 goto done; 3672 } 3673 3674 goto begin; 3675 } 3676 3677 force_it: 3678 3679 /* Force the completion now */ 3680 3681 /* Unregister the pkt */ 3682 (void) emlxs_unregister_pkt(rp, sbp->iotag, 1); 3683 3684 /* Now complete it */ 3685 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED, 3686 1); 3687 3688 done: 3689 3690 /* Now wait for the pkt to complete */ 3691 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3692 /* Set thread timeout */ 3693 timeout = emlxs_timeout(hba, 30); 3694 3695 /* Check for panic situation */ 3696 if (ddi_in_panic()) { 3697 3698 /* 3699 * In panic situations there will be one thread with no 3700 * interrrupts (hard or soft) and no timers 3701 */ 3702 3703 /* 3704 * We must manually poll everything in this thread 3705 * to keep the driver going. 
3706 */ 3707 3708 rp = (emlxs_ring_t *)sbp->ring; 3709 switch (rp->ringno) { 3710 case FC_FCP_RING: 3711 att_bit = HA_R0ATT; 3712 break; 3713 3714 case FC_IP_RING: 3715 att_bit = HA_R1ATT; 3716 break; 3717 3718 case FC_ELS_RING: 3719 att_bit = HA_R2ATT; 3720 break; 3721 3722 case FC_CT_RING: 3723 att_bit = HA_R3ATT; 3724 break; 3725 } 3726 3727 /* Keep polling the chip until our IO is completed */ 3728 (void) drv_getparm(LBOLT, &time); 3729 while ((time < timeout) && 3730 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3731 emlxs_sli_poll_intr(hba, att_bit); 3732 (void) drv_getparm(LBOLT, &time); 3733 } 3734 } else { 3735 /* Wait for IO completion or timeout */ 3736 mutex_enter(&EMLXS_PKT_LOCK); 3737 pkt_ret = 0; 3738 while ((pkt_ret != -1) && 3739 !(sbp->pkt_flags & PACKET_COMPLETED)) { 3740 pkt_ret = 3741 cv_timedwait(&EMLXS_PKT_CV, 3742 &EMLXS_PKT_LOCK, timeout); 3743 } 3744 mutex_exit(&EMLXS_PKT_LOCK); 3745 } 3746 3747 /* Check if timeout occured. This is not good. */ 3748 /* Something happened to our IO. 
*/ 3749 if (!(sbp->pkt_flags & PACKET_COMPLETED)) { 3750 /* Force the completion now */ 3751 goto force_it; 3752 } 3753 } 3754 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 3755 emlxs_unswap_pkt(sbp); 3756 #endif /* EMLXS_MODREV2X */ 3757 3758 /* Check again if we still own this */ 3759 if ((sbp->pkt_flags & PACKET_VALID) && 3760 !(sbp->pkt_flags & PACKET_RETURNED)) { 3761 mutex_enter(&sbp->mtx); 3762 if ((sbp->pkt_flags & PACKET_VALID) && 3763 !(sbp->pkt_flags & PACKET_RETURNED)) { 3764 sbp->pkt_flags |= PACKET_RETURNED; 3765 } 3766 mutex_exit(&sbp->mtx); 3767 } 3768 #ifdef ULP_PATCH5 3769 return (FC_FAILURE); 3770 3771 #else 3772 return (FC_SUCCESS); 3773 3774 #endif /* ULP_PATCH5 */ 3775 3776 3777 } /* emlxs_pkt_abort() */ 3778 3779 3780 extern int32_t 3781 emlxs_reset(opaque_t fca_port_handle, uint32_t cmd) 3782 { 3783 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3784 emlxs_hba_t *hba = HBA; 3785 int rval; 3786 int ret; 3787 clock_t timeout; 3788 3789 if (!(port->flag & EMLXS_PORT_BOUND)) { 3790 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3791 "fca_reset failed. 
Port not bound."); 3792 3793 return (FC_UNBOUND); 3794 } 3795 3796 switch (cmd) { 3797 case FC_FCA_LINK_RESET: 3798 3799 if (!(hba->flag & FC_ONLINE_MODE) || 3800 (hba->state <= FC_LINK_DOWN)) { 3801 return (FC_SUCCESS); 3802 } 3803 3804 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3805 "fca_reset: Resetting Link."); 3806 3807 mutex_enter(&EMLXS_LINKUP_LOCK); 3808 hba->linkup_wait_flag = TRUE; 3809 mutex_exit(&EMLXS_LINKUP_LOCK); 3810 3811 if (emlxs_reset_link(hba, 1)) { 3812 mutex_enter(&EMLXS_LINKUP_LOCK); 3813 hba->linkup_wait_flag = FALSE; 3814 mutex_exit(&EMLXS_LINKUP_LOCK); 3815 3816 return (FC_FAILURE); 3817 } 3818 3819 mutex_enter(&EMLXS_LINKUP_LOCK); 3820 timeout = emlxs_timeout(hba, 60); 3821 ret = 0; 3822 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) { 3823 ret = 3824 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK, 3825 timeout); 3826 } 3827 3828 hba->linkup_wait_flag = FALSE; 3829 mutex_exit(&EMLXS_LINKUP_LOCK); 3830 3831 if (ret == -1) { 3832 return (FC_FAILURE); 3833 } 3834 3835 return (FC_SUCCESS); 3836 3837 case FC_FCA_CORE: 3838 #ifdef DUMP_SUPPORT 3839 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3840 "fca_reset: Core dump."); 3841 3842 /* Schedule a USER dump */ 3843 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 3844 3845 /* Wait for dump to complete */ 3846 emlxs_dump_wait(hba); 3847 3848 return (FC_SUCCESS); 3849 #endif /* DUMP_SUPPORT */ 3850 3851 case FC_FCA_RESET: 3852 case FC_FCA_RESET_CORE: 3853 3854 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3855 "fca_reset: Resetting Adapter."); 3856 3857 rval = FC_SUCCESS; 3858 3859 if (emlxs_offline(hba) == 0) { 3860 (void) emlxs_online(hba); 3861 } else { 3862 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3863 "fca_reset: Adapter reset failed. Device busy."); 3864 3865 rval = FC_DEVICE_BUSY; 3866 } 3867 3868 return (rval); 3869 3870 default: 3871 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3872 "fca_reset: Unknown command. 
cmd=%x", cmd); 3873 3874 break; 3875 } 3876 3877 return (FC_FAILURE); 3878 3879 } /* emlxs_reset() */ 3880 3881 3882 extern uint32_t emlxs_core_dump(emlxs_hba_t *hba, char *buffer, 3883 uint32_t size); 3884 extern uint32_t emlxs_core_size(emlxs_hba_t *hba); 3885 3886 extern int 3887 emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm) 3888 { 3889 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3890 emlxs_hba_t *hba = HBA; 3891 int32_t ret; 3892 emlxs_vpd_t *vpd = &VPD; 3893 3894 3895 ret = FC_SUCCESS; 3896 3897 if (!(port->flag & EMLXS_PORT_BOUND)) { 3898 return (FC_UNBOUND); 3899 } 3900 3901 3902 #ifdef IDLE_TIMER 3903 emlxs_pm_busy_component(hba); 3904 #endif /* IDLE_TIMER */ 3905 3906 switch (pm->pm_cmd_code) { 3907 3908 case FC_PORT_GET_FW_REV: 3909 { 3910 char buffer[128]; 3911 3912 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3913 "fca_port_manage: FC_PORT_GET_FW_REV"); 3914 3915 (void) sprintf(buffer, "%s %s", hba->model_info.model, 3916 vpd->fw_version); 3917 bzero(pm->pm_data_buf, pm->pm_data_len); 3918 3919 if (pm->pm_data_len < strlen(buffer) + 1) { 3920 ret = FC_NOMEM; 3921 3922 break; 3923 } 3924 3925 (void) strcpy(pm->pm_data_buf, buffer); 3926 break; 3927 } 3928 3929 case FC_PORT_GET_FCODE_REV: 3930 { 3931 char buffer[128]; 3932 3933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3934 "fca_port_manage: FC_PORT_GET_FCODE_REV"); 3935 3936 /* Force update here just to be sure */ 3937 emlxs_get_fcode_version(hba); 3938 3939 (void) sprintf(buffer, "%s %s", hba->model_info.model, 3940 vpd->fcode_version); 3941 bzero(pm->pm_data_buf, pm->pm_data_len); 3942 3943 if (pm->pm_data_len < strlen(buffer) + 1) { 3944 ret = FC_NOMEM; 3945 break; 3946 } 3947 3948 (void) strcpy(pm->pm_data_buf, buffer); 3949 break; 3950 } 3951 3952 case FC_PORT_GET_DUMP_SIZE: 3953 { 3954 #ifdef DUMP_SUPPORT 3955 uint32_t dump_size = 0; 3956 3957 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3958 "fca_port_manage: FC_PORT_GET_DUMP_SIZE"); 3959 3960 if 
(pm->pm_data_len < sizeof (uint32_t)) { 3961 ret = FC_NOMEM; 3962 break; 3963 } 3964 3965 (void) emlxs_get_dump(hba, NULL, &dump_size); 3966 3967 *((uint32_t *)pm->pm_data_buf) = dump_size; 3968 3969 #else 3970 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3971 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported."); 3972 3973 #endif /* DUMP_SUPPORT */ 3974 3975 break; 3976 } 3977 3978 case FC_PORT_GET_DUMP: 3979 { 3980 #ifdef DUMP_SUPPORT 3981 uint32_t dump_size = 0; 3982 3983 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3984 "fca_port_manage: FC_PORT_GET_DUMP"); 3985 3986 (void) emlxs_get_dump(hba, NULL, &dump_size); 3987 3988 if (pm->pm_data_len < dump_size) { 3989 ret = FC_NOMEM; 3990 break; 3991 } 3992 3993 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf, 3994 (uint32_t *)&dump_size); 3995 #else 3996 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3997 "fca_port_manage: FC_PORT_GET_DUMP unsupported."); 3998 3999 #endif /* DUMP_SUPPORT */ 4000 4001 break; 4002 } 4003 4004 case FC_PORT_FORCE_DUMP: 4005 { 4006 #ifdef DUMP_SUPPORT 4007 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4008 "fca_port_manage: FC_PORT_FORCE_DUMP"); 4009 4010 /* Schedule a USER dump */ 4011 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 4012 4013 /* Wait for dump to complete */ 4014 emlxs_dump_wait(hba); 4015 #else 4016 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4017 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported."); 4018 4019 #endif /* DUMP_SUPPORT */ 4020 break; 4021 } 4022 4023 case FC_PORT_LINK_STATE: 4024 { 4025 uint32_t *link_state; 4026 4027 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4028 "fca_port_manage: FC_PORT_LINK_STATE"); 4029 4030 if (pm->pm_stat_len != sizeof (*link_state)) { 4031 ret = FC_NOMEM; 4032 break; 4033 } 4034 4035 if (pm->pm_cmd_buf != NULL) { 4036 /* 4037 * Can't look beyond the FCA port. 
4038 */ 4039 ret = FC_INVALID_REQUEST; 4040 break; 4041 } 4042 4043 link_state = (uint32_t *)pm->pm_stat_buf; 4044 4045 /* Set the state */ 4046 if (hba->state >= FC_LINK_UP) { 4047 /* Check for loop topology */ 4048 if (hba->topology == TOPOLOGY_LOOP) { 4049 *link_state = FC_STATE_LOOP; 4050 } else { 4051 *link_state = FC_STATE_ONLINE; 4052 } 4053 4054 /* Set the link speed */ 4055 switch (hba->linkspeed) { 4056 case LA_2GHZ_LINK: 4057 *link_state |= FC_STATE_2GBIT_SPEED; 4058 break; 4059 case LA_4GHZ_LINK: 4060 *link_state |= FC_STATE_4GBIT_SPEED; 4061 break; 4062 case LA_8GHZ_LINK: 4063 *link_state |= FC_STATE_8GBIT_SPEED; 4064 break; 4065 case LA_10GHZ_LINK: 4066 *link_state |= FC_STATE_10GBIT_SPEED; 4067 break; 4068 case LA_1GHZ_LINK: 4069 default: 4070 *link_state |= FC_STATE_1GBIT_SPEED; 4071 break; 4072 } 4073 } else { 4074 *link_state = FC_STATE_OFFLINE; 4075 } 4076 4077 break; 4078 } 4079 4080 4081 case FC_PORT_ERR_STATS: 4082 case FC_PORT_RLS: 4083 { 4084 MAILBOX *mb; 4085 fc_rls_acc_t *bp; 4086 4087 if (!(hba->flag & FC_ONLINE_MODE)) { 4088 return (FC_OFFLINE); 4089 } 4090 4091 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4092 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS"); 4093 4094 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) { 4095 ret = FC_NOMEM; 4096 break; 4097 } 4098 4099 if ((mb = (MAILBOX *)emlxs_mem_get(hba, 4100 MEM_MBOX | MEM_PRI)) == 0) { 4101 ret = FC_NOMEM; 4102 break; 4103 } 4104 4105 emlxs_mb_read_lnk_stat(hba, mb); 4106 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) 4107 != MBX_SUCCESS) { 4108 ret = FC_PBUSY; 4109 } else { 4110 bp = (fc_rls_acc_t *)pm->pm_data_buf; 4111 4112 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt; 4113 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt; 4114 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt; 4115 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt; 4116 bp->rls_invalid_word = 4117 mb->un.varRdLnk.invalidXmitWord; 4118 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt; 4119 } 4120 4121 
(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb); 4122 break; 4123 } 4124 4125 case FC_PORT_DOWNLOAD_FW: 4126 if (!(hba->flag & FC_ONLINE_MODE)) { 4127 return (FC_OFFLINE); 4128 } 4129 4130 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4131 "fca_port_manage: FC_PORT_DOWNLOAD_FW"); 4132 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4133 pm->pm_data_len, 1); 4134 break; 4135 4136 case FC_PORT_DOWNLOAD_FCODE: 4137 if (!(hba->flag & FC_ONLINE_MODE)) { 4138 return (FC_OFFLINE); 4139 } 4140 4141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4142 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE"); 4143 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4144 pm->pm_data_len, 1); 4145 break; 4146 4147 case FC_PORT_DIAG: 4148 { 4149 uint32_t errno = 0; 4150 uint32_t did = 0; 4151 uint32_t pattern = 0; 4152 4153 switch (pm->pm_cmd_flags) { 4154 case EMLXS_DIAG_BIU: 4155 4156 if (!(hba->flag & FC_ONLINE_MODE)) { 4157 return (FC_OFFLINE); 4158 } 4159 4160 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4161 "fca_port_manage: EMLXS_DIAG_BIU"); 4162 4163 if (pm->pm_data_len) { 4164 pattern = *((uint32_t *)pm->pm_data_buf); 4165 } 4166 4167 errno = emlxs_diag_biu_run(hba, pattern); 4168 4169 if (pm->pm_stat_len == sizeof (errno)) { 4170 *(int *)pm->pm_stat_buf = errno; 4171 } 4172 4173 break; 4174 4175 4176 case EMLXS_DIAG_POST: 4177 4178 if (!(hba->flag & FC_ONLINE_MODE)) { 4179 return (FC_OFFLINE); 4180 } 4181 4182 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4183 "fca_port_manage: EMLXS_DIAG_POST"); 4184 4185 errno = emlxs_diag_post_run(hba); 4186 4187 if (pm->pm_stat_len == sizeof (errno)) { 4188 *(int *)pm->pm_stat_buf = errno; 4189 } 4190 4191 break; 4192 4193 4194 case EMLXS_DIAG_ECHO: 4195 4196 if (!(hba->flag & FC_ONLINE_MODE)) { 4197 return (FC_OFFLINE); 4198 } 4199 4200 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4201 "fca_port_manage: EMLXS_DIAG_ECHO"); 4202 4203 if (pm->pm_cmd_len != sizeof (uint32_t)) { 4204 ret = FC_INVALID_REQUEST; 4205 break; 4206 } 4207 4208 did 
= *((uint32_t *)pm->pm_cmd_buf); 4209 4210 if (pm->pm_data_len) { 4211 pattern = *((uint32_t *)pm->pm_data_buf); 4212 } 4213 4214 errno = emlxs_diag_echo_run(port, did, pattern); 4215 4216 if (pm->pm_stat_len == sizeof (errno)) { 4217 *(int *)pm->pm_stat_buf = errno; 4218 } 4219 4220 break; 4221 4222 4223 case EMLXS_PARM_GET_NUM: 4224 { 4225 uint32_t *num; 4226 emlxs_config_t *cfg; 4227 uint32_t i; 4228 uint32_t count; 4229 4230 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4231 "fca_port_manage: EMLXS_PARM_GET_NUM"); 4232 4233 if (pm->pm_stat_len < sizeof (uint32_t)) { 4234 ret = FC_NOMEM; 4235 break; 4236 } 4237 4238 num = (uint32_t *)pm->pm_stat_buf; 4239 count = 0; 4240 cfg = &CFG; 4241 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4242 if (!(cfg->flags & PARM_HIDDEN)) { 4243 count++; 4244 } 4245 4246 } 4247 4248 *num = count; 4249 4250 break; 4251 } 4252 4253 case EMLXS_PARM_GET_LIST: 4254 { 4255 emlxs_parm_t *parm; 4256 emlxs_config_t *cfg; 4257 uint32_t i; 4258 uint32_t max_count; 4259 4260 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4261 "fca_port_manage: EMLXS_PARM_GET_LIST"); 4262 4263 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4264 ret = FC_NOMEM; 4265 break; 4266 } 4267 4268 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t); 4269 4270 parm = (emlxs_parm_t *)pm->pm_stat_buf; 4271 cfg = &CFG; 4272 for (i = 0; i < NUM_CFG_PARAM && max_count; i++, 4273 cfg++) { 4274 if (!(cfg->flags & PARM_HIDDEN)) { 4275 (void) strcpy(parm->label, cfg->string); 4276 parm->min = cfg->low; 4277 parm->max = cfg->hi; 4278 parm->def = cfg->def; 4279 parm->current = cfg->current; 4280 parm->flags = cfg->flags; 4281 (void) strcpy(parm->help, cfg->help); 4282 parm++; 4283 max_count--; 4284 } 4285 } 4286 4287 break; 4288 } 4289 4290 case EMLXS_PARM_GET: 4291 { 4292 emlxs_parm_t *parm_in; 4293 emlxs_parm_t *parm_out; 4294 emlxs_config_t *cfg; 4295 uint32_t i; 4296 uint32_t len; 4297 4298 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4299 EMLXS_MSGF(EMLXS_CONTEXT, 4300 
&emlxs_sfs_debug_msg, 4301 "fca_port_manage: EMLXS_PARM_GET. " 4302 "inbuf too small."); 4303 4304 ret = FC_BADCMD; 4305 break; 4306 } 4307 4308 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4309 EMLXS_MSGF(EMLXS_CONTEXT, 4310 &emlxs_sfs_debug_msg, 4311 "fca_port_manage: EMLXS_PARM_GET. " 4312 "outbuf too small"); 4313 4314 ret = FC_BADCMD; 4315 break; 4316 } 4317 4318 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4319 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4320 len = strlen(parm_in->label); 4321 cfg = &CFG; 4322 ret = FC_BADOBJECT; 4323 4324 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4325 "fca_port_manage: EMLXS_PARM_GET: %s", 4326 parm_in->label); 4327 4328 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4329 if (len == strlen(cfg->string) && 4330 (strcmp(parm_in->label, 4331 cfg->string) == 0)) { 4332 (void) strcpy(parm_out->label, 4333 cfg->string); 4334 parm_out->min = cfg->low; 4335 parm_out->max = cfg->hi; 4336 parm_out->def = cfg->def; 4337 parm_out->current = cfg->current; 4338 parm_out->flags = cfg->flags; 4339 (void) strcpy(parm_out->help, 4340 cfg->help); 4341 4342 ret = FC_SUCCESS; 4343 break; 4344 } 4345 } 4346 4347 break; 4348 } 4349 4350 case EMLXS_PARM_SET: 4351 { 4352 emlxs_parm_t *parm_in; 4353 emlxs_parm_t *parm_out; 4354 emlxs_config_t *cfg; 4355 uint32_t i; 4356 uint32_t len; 4357 4358 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4359 EMLXS_MSGF(EMLXS_CONTEXT, 4360 &emlxs_sfs_debug_msg, 4361 "fca_port_manage: EMLXS_PARM_GET. " 4362 "inbuf too small."); 4363 4364 ret = FC_BADCMD; 4365 break; 4366 } 4367 4368 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4369 EMLXS_MSGF(EMLXS_CONTEXT, 4370 &emlxs_sfs_debug_msg, 4371 "fca_port_manage: EMLXS_PARM_GET. 
" 4372 "outbuf too small"); 4373 ret = FC_BADCMD; 4374 break; 4375 } 4376 4377 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4378 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4379 len = strlen(parm_in->label); 4380 cfg = &CFG; 4381 ret = FC_BADOBJECT; 4382 4383 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4384 "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d", 4385 parm_in->label, parm_in->current, 4386 parm_in->current); 4387 4388 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4389 /* Find matching parameter string */ 4390 if (len == strlen(cfg->string) && 4391 (strcmp(parm_in->label, 4392 cfg->string) == 0)) { 4393 /* Attempt to update parameter */ 4394 if (emlxs_set_parm(hba, i, 4395 parm_in->current) == FC_SUCCESS) { 4396 (void) strcpy(parm_out->label, 4397 cfg->string); 4398 parm_out->min = cfg->low; 4399 parm_out->max = cfg->hi; 4400 parm_out->def = cfg->def; 4401 parm_out->current = 4402 cfg->current; 4403 parm_out->flags = cfg->flags; 4404 (void) strcpy(parm_out->help, 4405 cfg->help); 4406 4407 ret = FC_SUCCESS; 4408 } 4409 4410 break; 4411 } 4412 } 4413 4414 break; 4415 } 4416 4417 case EMLXS_LOG_GET: 4418 { 4419 emlxs_log_req_t *req; 4420 emlxs_log_resp_t *resp; 4421 uint32_t len; 4422 4423 /* Check command size */ 4424 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) { 4425 ret = FC_BADCMD; 4426 break; 4427 } 4428 4429 /* Get the request */ 4430 req = (emlxs_log_req_t *)pm->pm_cmd_buf; 4431 4432 /* Calculate the response length from the request */ 4433 len = sizeof (emlxs_log_resp_t) + 4434 (req->count * MAX_LOG_MSG_LENGTH); 4435 4436 /* Check the response buffer length */ 4437 if (pm->pm_stat_len < len) { 4438 ret = FC_BADCMD; 4439 break; 4440 } 4441 4442 /* Get the response pointer */ 4443 resp = (emlxs_log_resp_t *)pm->pm_stat_buf; 4444 4445 /* Get the request log enties */ 4446 (void) emlxs_msg_log_get(hba, req, resp); 4447 4448 ret = FC_SUCCESS; 4449 break; 4450 } 4451 4452 case EMLXS_GET_BOOT_REV: 4453 { 4454 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 
4455 "fca_port_manage: EMLXS_GET_BOOT_REV"); 4456 4457 if (pm->pm_stat_len < strlen(vpd->boot_version)) { 4458 ret = FC_NOMEM; 4459 break; 4460 } 4461 4462 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4463 (void) sprintf(pm->pm_stat_buf, "%s %s", 4464 hba->model_info.model, vpd->boot_version); 4465 4466 break; 4467 } 4468 4469 case EMLXS_DOWNLOAD_BOOT: 4470 if (!(hba->flag & FC_ONLINE_MODE)) { 4471 return (FC_OFFLINE); 4472 } 4473 4474 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4475 "fca_port_manage: EMLXS_DOWNLOAD_BOOT"); 4476 4477 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4478 pm->pm_data_len, 1); 4479 break; 4480 4481 case EMLXS_DOWNLOAD_CFL: 4482 { 4483 uint32_t *buffer; 4484 uint32_t region; 4485 uint32_t length; 4486 4487 if (!(hba->flag & FC_ONLINE_MODE)) { 4488 return (FC_OFFLINE); 4489 } 4490 4491 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4492 "fca_port_manage: EMLXS_DOWNLOAD_CFL"); 4493 4494 /* Extract the region number from the first word. */ 4495 buffer = (uint32_t *)pm->pm_data_buf; 4496 region = *buffer++; 4497 4498 /* Adjust the image length for the header word */ 4499 length = pm->pm_data_len - 4; 4500 4501 ret = 4502 emlxs_cfl_download(hba, region, (caddr_t)buffer, 4503 length); 4504 break; 4505 } 4506 4507 case EMLXS_VPD_GET: 4508 { 4509 emlxs_vpd_desc_t *vpd_out; 4510 4511 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4512 "fca_port_manage: EMLXS_VPD_GET"); 4513 4514 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) { 4515 ret = FC_BADCMD; 4516 break; 4517 } 4518 4519 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf; 4520 bzero(vpd_out, sizeof (emlxs_vpd_desc_t)); 4521 4522 (void) strncpy(vpd_out->id, vpd->id, 4523 sizeof (vpd_out->id)); 4524 (void) strncpy(vpd_out->part_num, vpd->part_num, 4525 sizeof (vpd_out->part_num)); 4526 (void) strncpy(vpd_out->eng_change, vpd->eng_change, 4527 sizeof (vpd_out->eng_change)); 4528 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer, 4529 sizeof (vpd_out->manufacturer)); 4530 (void) 
strncpy(vpd_out->serial_num, vpd->serial_num, 4531 sizeof (vpd_out->serial_num)); 4532 (void) strncpy(vpd_out->model, vpd->model, 4533 sizeof (vpd_out->model)); 4534 (void) strncpy(vpd_out->model_desc, vpd->model_desc, 4535 sizeof (vpd_out->model_desc)); 4536 (void) strncpy(vpd_out->port_num, vpd->port_num, 4537 sizeof (vpd_out->port_num)); 4538 (void) strncpy(vpd_out->prog_types, vpd->prog_types, 4539 sizeof (vpd_out->prog_types)); 4540 4541 ret = FC_SUCCESS; 4542 4543 break; 4544 } 4545 4546 case EMLXS_GET_FCIO_REV: 4547 { 4548 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4549 "fca_port_manage: EMLXS_GET_FCIO_REV"); 4550 4551 if (pm->pm_stat_len < sizeof (uint32_t)) { 4552 ret = FC_NOMEM; 4553 break; 4554 } 4555 4556 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4557 *(uint32_t *)pm->pm_stat_buf = FCIO_REV; 4558 4559 break; 4560 } 4561 4562 case EMLXS_GET_DFC_REV: 4563 { 4564 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4565 "fca_port_manage: EMLXS_GET_DFC_REV"); 4566 4567 if (pm->pm_stat_len < sizeof (uint32_t)) { 4568 ret = FC_NOMEM; 4569 break; 4570 } 4571 4572 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4573 *(uint32_t *)pm->pm_stat_buf = DFC_REV; 4574 4575 break; 4576 } 4577 4578 case EMLXS_SET_BOOT_STATE: 4579 case EMLXS_SET_BOOT_STATE_old: 4580 { 4581 uint32_t state; 4582 4583 if (!(hba->flag & FC_ONLINE_MODE)) { 4584 return (FC_OFFLINE); 4585 } 4586 4587 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4588 EMLXS_MSGF(EMLXS_CONTEXT, 4589 &emlxs_sfs_debug_msg, 4590 "fca_port_manage: EMLXS_SET_BOOT_STATE"); 4591 ret = FC_BADCMD; 4592 break; 4593 } 4594 4595 state = *(uint32_t *)pm->pm_cmd_buf; 4596 4597 if (state == 0) { 4598 EMLXS_MSGF(EMLXS_CONTEXT, 4599 &emlxs_sfs_debug_msg, 4600 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4601 "Disable"); 4602 ret = emlxs_boot_code_disable(hba); 4603 } else { 4604 EMLXS_MSGF(EMLXS_CONTEXT, 4605 &emlxs_sfs_debug_msg, 4606 "fca_port_manage: EMLXS_SET_BOOT_STATE: " 4607 "Enable"); 4608 ret = emlxs_boot_code_enable(hba); 4609 } 4610 
4611 break; 4612 } 4613 4614 case EMLXS_GET_BOOT_STATE: 4615 case EMLXS_GET_BOOT_STATE_old: 4616 { 4617 if (!(hba->flag & FC_ONLINE_MODE)) { 4618 return (FC_OFFLINE); 4619 } 4620 4621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4622 "fca_port_manage: EMLXS_GET_BOOT_STATE"); 4623 4624 if (pm->pm_stat_len < sizeof (uint32_t)) { 4625 ret = FC_NOMEM; 4626 break; 4627 } 4628 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4629 4630 ret = emlxs_boot_code_state(hba); 4631 4632 if (ret == FC_SUCCESS) { 4633 *(uint32_t *)pm->pm_stat_buf = 1; 4634 ret = FC_SUCCESS; 4635 } else if (ret == FC_FAILURE) { 4636 ret = FC_SUCCESS; 4637 } 4638 4639 break; 4640 } 4641 4642 case EMLXS_HW_ERROR_TEST: 4643 { 4644 if (!(hba->flag & FC_ONLINE_MODE)) { 4645 return (FC_OFFLINE); 4646 } 4647 4648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4649 "fca_port_manage: EMLXS_HW_ERROR_TEST"); 4650 4651 /* Trigger a mailbox timeout */ 4652 hba->mbox_timer = hba->timer_tics; 4653 4654 break; 4655 } 4656 4657 case EMLXS_TEST_CODE: 4658 { 4659 uint32_t *cmd; 4660 4661 if (!(hba->flag & FC_ONLINE_MODE)) { 4662 return (FC_OFFLINE); 4663 } 4664 4665 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4666 "fca_port_manage: EMLXS_TEST_CODE"); 4667 4668 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4669 EMLXS_MSGF(EMLXS_CONTEXT, 4670 &emlxs_sfs_debug_msg, 4671 "fca_port_manage: EMLXS_TEST_CODE. 
" 4672 "inbuf to small."); 4673 4674 ret = FC_BADCMD; 4675 break; 4676 } 4677 4678 cmd = (uint32_t *)pm->pm_cmd_buf; 4679 4680 ret = emlxs_test(hba, cmd[0], 4681 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]); 4682 4683 break; 4684 } 4685 4686 default: 4687 4688 ret = FC_INVALID_REQUEST; 4689 break; 4690 } 4691 4692 break; 4693 4694 } 4695 4696 case FC_PORT_INITIALIZE: 4697 if (!(hba->flag & FC_ONLINE_MODE)) { 4698 return (FC_OFFLINE); 4699 } 4700 4701 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4702 "fca_port_manage: FC_PORT_INITIALIZE"); 4703 break; 4704 4705 case FC_PORT_LOOPBACK: 4706 if (!(hba->flag & FC_ONLINE_MODE)) { 4707 return (FC_OFFLINE); 4708 } 4709 4710 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4711 "fca_port_manage: FC_PORT_LOOPBACK"); 4712 break; 4713 4714 case FC_PORT_BYPASS: 4715 if (!(hba->flag & FC_ONLINE_MODE)) { 4716 return (FC_OFFLINE); 4717 } 4718 4719 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4720 "fca_port_manage: FC_PORT_BYPASS"); 4721 ret = FC_INVALID_REQUEST; 4722 break; 4723 4724 case FC_PORT_UNBYPASS: 4725 if (!(hba->flag & FC_ONLINE_MODE)) { 4726 return (FC_OFFLINE); 4727 } 4728 4729 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4730 "fca_port_manage: FC_PORT_UNBYPASS"); 4731 ret = FC_INVALID_REQUEST; 4732 break; 4733 4734 case FC_PORT_GET_NODE_ID: 4735 { 4736 fc_rnid_t *rnid; 4737 4738 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4739 "fca_port_manage: FC_PORT_GET_NODE_ID"); 4740 4741 bzero(pm->pm_data_buf, pm->pm_data_len); 4742 4743 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4744 ret = FC_NOMEM; 4745 break; 4746 } 4747 4748 rnid = (fc_rnid_t *)pm->pm_data_buf; 4749 4750 (void) sprintf((char *)rnid->global_id, 4751 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", 4752 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, 4753 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 4754 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 4755 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 4756 4757 rnid->unit_type = RNID_HBA; 4758 rnid->port_id = 
port->did; 4759 rnid->ip_version = RNID_IPV4; 4760 4761 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4762 "GET_NODE_ID: wwpn: %s", rnid->global_id); 4763 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4764 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4765 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4766 "GET_NODE_ID: port_id: 0x%x", rnid->port_id); 4767 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4768 "GET_NODE_ID: num_attach: %d", rnid->num_attached); 4769 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4770 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4771 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4772 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 4773 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4774 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr); 4775 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4776 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 4777 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4778 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 4779 4780 ret = FC_SUCCESS; 4781 break; 4782 } 4783 4784 case FC_PORT_SET_NODE_ID: 4785 { 4786 fc_rnid_t *rnid; 4787 4788 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4789 "fca_port_manage: FC_PORT_SET_NODE_ID"); 4790 4791 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4792 ret = FC_NOMEM; 4793 break; 4794 } 4795 4796 rnid = (fc_rnid_t *)pm->pm_data_buf; 4797 4798 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4799 "SET_NODE_ID: wwpn: %s", rnid->global_id); 4800 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4801 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 4802 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4803 "SET_NODE_ID: port_id: 0x%x", rnid->port_id); 4804 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4805 "SET_NODE_ID: num_attach: %d", rnid->num_attached); 4806 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4807 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 4808 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4809 "SET_NODE_ID: udp_port: 0x%x", 
    rnid->udp_port);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);

		ret = FC_SUCCESS;
		break;
	}

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: code=%x", pm->pm_cmd_code);
		ret = FC_INVALID_REQUEST;
		break;

	}

	return (ret);

} /* emlxs_port_manage() */


/*
 * emlxs_test
 *
 * Diagnostic test dispatcher, reached from emlxs_port_manage() with
 * test_code taken from the first word of pm_cmd_buf.  "args" is the
 * number of uint32_t arguments available in "arg".
 *
 * Returns 0 on success, or FC_INVALID_REQUEST for an unknown test code.
 */
/*ARGSUSED*/
static uint32_t
emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
	uint32_t *arg)
{
	uint32_t rval = 0;
	emlxs_port_t *port = &PPORT;

	switch (test_code) {
#ifdef TEST_SUPPORT
	case 1: /* SCSI underrun */
	{
		/* arg[0], if supplied, is the underrun count; default is 1 */
		hba->underrun_counter = (args)? arg[0]:1;
		break;
	}
#endif /* TEST_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "emlxs_test: Unsupported test code. (0x%x)", test_code);
		rval = FC_INVALID_REQUEST;
	}

	return (rval);

} /* emlxs_test() */


/*
 * Given the device number, return the devinfo pointer or the ddiinst number.
 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
 * before attach.
 *
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */
/*ARGSUSED*/
static int
emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	emlxs_hba_t *hba;
	int32_t ddiinst;

	/* The minor number encodes the driver instance number */
	ddiinst = getminor((dev_t)arg);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
		if (hba)
			*result = hba->dip;
		else
			*result = NULL;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		/* No soft-state lookup: must work even before attach */
		*result = (void *)((unsigned long)ddiinst);
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_info() */


/*
 * emlxs_power
 *
 * power(9E) entry point.  Raises or lowers the power level of the
 * adapter component (only EMLXS_PM_ADAPTER is supported).  Transitions
 * out of / into the suspended state are delegated to emlxs_hba_resume()
 * and emlxs_hba_suspend() respectively.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int32_t
emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int32_t ddiinst;
	int rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_power: comp=%x level=%x", comp, level);

	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
		return (DDI_FAILURE);
	}

	mutex_enter(&hba->pm_lock);

	/* If we are already at the proper level then return success */
	if (hba->pm_level == level) {
		mutex_exit(&hba->pm_lock);
		return (DDI_SUCCESS);
	}

	switch (level) {
	case EMLXS_PM_ADAPTER_UP:

		/*
		 * If we are already in emlxs_attach,
		 * let emlxs_hba_attach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;
			break;
		}

		/* Check if adapter is suspended */
		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;

			/* Try to resume the port */
			rval = emlxs_hba_resume(dip);

			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			}
			break;
		}

		/* Set adapter up */
hba->pm_level = EMLXS_PM_ADAPTER_UP;
		break;

	case EMLXS_PM_ADAPTER_DOWN:


		/*
		 * If we are already in emlxs_detach,
		 * let emlxs_hba_detach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			break;
		}

		/* Check if adapter is not suspended */
		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;

			/* Try to suspend the port */
			rval = emlxs_hba_suspend(dip);

			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_UP;
			}

			break;
		}

		/* Set adapter down */
		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
		break;

	default:
		rval = DDI_FAILURE;
		break;

	}

	mutex_exit(&hba->pm_lock);

	return (rval);

} /* emlxs_power() */


#ifdef EMLXS_I386
#ifdef S11
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-thread at high PIL
 * with preemption disabled. Therefore, this function must not be blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
emlxs_quiesce(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int32_t ddiinst;
	int rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	if (hba == NULL || port == NULL) {
		return (DDI_FAILURE);
	}

	/*
	 * Reset the adapter to stop all DMA/interrupt activity.
	 * NOTE(review): the (0, 0) arguments' meaning is not visible
	 * here -- confirm against emlxs_sli_hba_reset().
	 */
	if (emlxs_sli_hba_reset(hba, 0, 0) == 0) {
		return (rval);
	} else {
		return (DDI_FAILURE);
	}

} /* emlxs_quiesce */
#endif
#endif /* EMLXS_I386 */


/*
 * emlxs_open
 *
 * open(9E) entry point for the driver's character (ioctl) node.
 * Requires privileged credentials (drv_priv) and a character open
 * (OTYP_CHR).  Tracks open/exclusive-open state in hba->ioctl_flags
 * under EMLXS_IOCTL_LOCK; FEXCL opens fail with EBUSY if the device
 * is already open.
 */
static int
emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;

	ddiinst = getminor(*dev_p);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	port = &PPORT;

	/* No opens while the adapter is suspended */
	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "open failed: Driver suspended.");
		return (ENXIO);
	}

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	if (drv_priv(cred_p)) {
		return (EPERM);
	}

	mutex_enter(&EMLXS_IOCTL_LOCK);

	/* An existing exclusive open blocks everyone */
	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		/* Exclusive open requires that nobody else has it open */
		if (hba->ioctl_flags & EMLXS_OPEN) {
			mutex_exit(&EMLXS_IOCTL_LOCK);
			return (EBUSY);
		}

		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
	}

	hba->ioctl_flags |= EMLXS_OPEN;

	mutex_exit(&EMLXS_IOCTL_LOCK);

	return (0);

} /* emlxs_open() */


/*
 * emlxs_close
 *
 * close(9E) entry point; clears both the open and exclusive-open
 * state bits.
 */
/*ARGSUSED*/
static int
emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t *hba;
	int ddiinst;

	ddiinst = getminor(dev);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
return (ENXIO); 5104 } 5105 5106 if (otype != OTYP_CHR) { 5107 return (EINVAL); 5108 } 5109 5110 mutex_enter(&EMLXS_IOCTL_LOCK); 5111 5112 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5113 mutex_exit(&EMLXS_IOCTL_LOCK); 5114 return (ENODEV); 5115 } 5116 5117 hba->ioctl_flags &= ~EMLXS_OPEN; 5118 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE; 5119 5120 mutex_exit(&EMLXS_IOCTL_LOCK); 5121 5122 return (0); 5123 5124 } /* emlxs_close() */ 5125 5126 5127 /*ARGSUSED*/ 5128 static int 5129 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode, 5130 cred_t *cred_p, int32_t *rval_p) 5131 { 5132 emlxs_hba_t *hba; 5133 emlxs_port_t *port; 5134 int rval = 0; /* return code */ 5135 int ddiinst; 5136 5137 ddiinst = getminor(dev); 5138 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5139 5140 if (hba == NULL) { 5141 return (ENXIO); 5142 } 5143 5144 port = &PPORT; 5145 5146 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5147 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5148 "ioctl failed: Driver suspended."); 5149 5150 return (ENXIO); 5151 } 5152 5153 mutex_enter(&EMLXS_IOCTL_LOCK); 5154 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5155 mutex_exit(&EMLXS_IOCTL_LOCK); 5156 return (ENXIO); 5157 } 5158 mutex_exit(&EMLXS_IOCTL_LOCK); 5159 5160 #ifdef IDLE_TIMER 5161 emlxs_pm_busy_component(hba); 5162 #endif /* IDLE_TIMER */ 5163 5164 switch (cmd) { 5165 #ifdef DFC_SUPPORT 5166 case EMLXS_DFC_COMMAND: 5167 rval = emlxs_dfc_manage(hba, (void *)arg, mode); 5168 break; 5169 #endif /* DFC_SUPPORT */ 5170 5171 default: 5172 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5173 "ioctl: Invalid command received. 
cmd=%x", cmd); 5174 rval = EINVAL; 5175 } 5176 5177 done: 5178 return (rval); 5179 5180 } /* emlxs_ioctl() */ 5181 5182 5183 5184 /* 5185 * 5186 * Device Driver Common Routines 5187 * 5188 */ 5189 5190 /* emlxs_pm_lock must be held for this call */ 5191 static int 5192 emlxs_hba_resume(dev_info_t *dip) 5193 { 5194 emlxs_hba_t *hba; 5195 emlxs_port_t *port; 5196 int ddiinst; 5197 5198 ddiinst = ddi_get_instance(dip); 5199 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5200 port = &PPORT; 5201 5202 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL); 5203 5204 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5205 return (DDI_SUCCESS); 5206 } 5207 5208 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5209 5210 /* Take the adapter online */ 5211 if (emlxs_power_up(hba)) { 5212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg, 5213 "Unable to take adapter online."); 5214 5215 hba->pm_state |= EMLXS_PM_SUSPENDED; 5216 5217 return (DDI_FAILURE); 5218 } 5219 5220 return (DDI_SUCCESS); 5221 5222 } /* emlxs_hba_resume() */ 5223 5224 5225 /* emlxs_pm_lock must be held for this call */ 5226 static int 5227 emlxs_hba_suspend(dev_info_t *dip) 5228 { 5229 emlxs_hba_t *hba; 5230 emlxs_port_t *port; 5231 int ddiinst; 5232 5233 ddiinst = ddi_get_instance(dip); 5234 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5235 port = &PPORT; 5236 5237 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL); 5238 5239 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5240 return (DDI_SUCCESS); 5241 } 5242 5243 hba->pm_state |= EMLXS_PM_SUSPENDED; 5244 5245 /* Take the adapter offline */ 5246 if (emlxs_power_down(hba)) { 5247 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5248 5249 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg, 5250 "Unable to take adapter offline."); 5251 5252 return (DDI_FAILURE); 5253 } 5254 5255 return (DDI_SUCCESS); 5256 5257 } /* emlxs_hba_suspend() */ 5258 5259 5260 5261 static void 5262 emlxs_lock_init(emlxs_hba_t *hba) 5263 { 5264 emlxs_port_t *port = &PPORT; 5265 int32_t ddiinst; 5266 
char buf[64];
	uint32_t i;

	ddiinst = hba->ddiinst;

	/* Initialize the power management */
	(void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&hba->pm_lock, buf, MUTEX_DRIVER, (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
	cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);

	(void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_RINGTX_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	/* One command-ring lock and one FC table lock per ring */
	for (i = 0; i < MAX_RINGS; i++) {
		(void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
		    ddiinst, i);
		mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
		    (void *)hba->intr_arg);

		(void) sprintf(buf, "%s%d_fctab%d_lock mutex", DRIVER_NAME,
		    ddiinst, i);
		mutex_init(&EMLXS_FCTAB_LOCK(i), buf, MUTEX_DRIVER,
		    (void *)hba->intr_arg);
	}

	(void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

	(void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);

#ifdef DUMP_SUPPORT
	(void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
	mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
	    (void *)hba->intr_arg);
#endif /* DUMP_SUPPORT */

	/*
	 * Create per port locks.
	 * EMLXS_PKT_LOCK/EMLXS_PKT_CV/EMLXS_UB_LOCK are re-initialized on
	 * every iteration here, so they presumably resolve through the
	 * loop's "port" pointer -- confirm against their definitions.
	 * The physical port (i == 0) gets plain names, vports get
	 * "<drv><inst>.<vpi>" names.
	 */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);

		if (i == 0) {
			(void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
			    ddiinst);
			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);

			(void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
			    ddiinst);
			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);

			(void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
			    ddiinst);
			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);
		} else {
			(void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
			    DRIVER_NAME, ddiinst, port->vpi);
			mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);

			(void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
			    ddiinst, port->vpi);
			cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);

			(void) sprintf(buf, "%s%d.%d_ub_lock mutex",
			    DRIVER_NAME, ddiinst, port->vpi);
			mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
			    (void *)hba->intr_arg);
		}
	}

	return;

} /* emlxs_lock_init() */



/*
 * emlxs_lock_destroy
 *
 * Destroys every lock and condition variable created by
 * emlxs_lock_init(), including the per-ring and per-port locks.
 */
static void
emlxs_lock_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;

	mutex_destroy(&EMLXS_TIMER_LOCK);
	cv_destroy(&hba->timer_lock_cv);

mutex_destroy(&EMLXS_PORT_LOCK);

	cv_destroy(&EMLXS_MBOX_CV);
	cv_destroy(&EMLXS_LINKUP_CV);

	mutex_destroy(&EMLXS_LINKUP_LOCK);
	mutex_destroy(&EMLXS_MBOX_LOCK);

	mutex_destroy(&EMLXS_RINGTX_LOCK);

	for (i = 0; i < MAX_RINGS; i++) {
		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
		mutex_destroy(&EMLXS_FCTAB_LOCK(i));
	}

	mutex_destroy(&EMLXS_MEMGET_LOCK);
	mutex_destroy(&EMLXS_MEMPUT_LOCK);
	mutex_destroy(&EMLXS_IOCTL_LOCK);
	mutex_destroy(&hba->pm_lock);

#ifdef DUMP_SUPPORT
	mutex_destroy(&EMLXS_DUMP_LOCK);
#endif /* DUMP_SUPPORT */

	/* Destroy per port locks */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);
		rw_destroy(&port->node_rwlock);
		mutex_destroy(&EMLXS_PKT_LOCK);
		cv_destroy(&EMLXS_PKT_CV);
		mutex_destroy(&EMLXS_UB_LOCK);
	}

	return;

} /* emlxs_lock_destroy() */


/* init_flag values */
#define	ATTACH_SOFT_STATE	0x00000001
#define	ATTACH_FCA_TRAN		0x00000002
#define	ATTACH_HBA		0x00000004
#define	ATTACH_LOG		0x00000008
#define	ATTACH_MAP_BUS		0x00000010
#define	ATTACH_INTR_INIT	0x00000020
#define	ATTACH_PROP		0x00000040
#define	ATTACH_LOCK		0x00000080
#define	ATTACH_THREAD		0x00000100
#define	ATTACH_INTR_ADD		0x00000200
#define	ATTACH_ONLINE		0x00000400
#define	ATTACH_NODE		0x00000800
#define	ATTACH_FCT		0x00001000
#define	ATTACH_FCA		0x00002000
#define	ATTACH_KSTAT		0x00004000
#define	ATTACH_DHCHAP		0x00008000
#define	ATTACH_FM		0x00010000
#define	ATTACH_MAP_SLI		0x00020000
#define	ATTACH_SPAWN		0x00040000

/*
 * emlxs_driver_remove
 *
 * Tears down every resource whose ATTACH_* bit is set in init_flag,
 * in roughly the reverse of the order in which attach acquired them.
 * A non-zero "failed" stores -1 in the instance's hba slot, which
 * marks the instance as permanently failed for later attach attempts.
 */
static void
emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
{
	emlxs_hba_t *hba = NULL;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);

	if (init_flag & ATTACH_HBA) {
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

		if (init_flag & ATTACH_SPAWN) {
			emlxs_thread_spawn_destroy(hba);
		}

		if (init_flag & ATTACH_ONLINE) {
			(void) emlxs_offline(hba);
		}

		if (init_flag & ATTACH_INTR_ADD) {
			(void) EMLXS_INTR_REMOVE(hba);
		}
#ifdef SFCT_SUPPORT
		if (init_flag & ATTACH_FCT) {
			emlxs_fct_detach(hba);
		}
#endif /* SFCT_SUPPORT */

#ifdef DHCHAP_SUPPORT
		if (init_flag & ATTACH_DHCHAP) {
			emlxs_dhc_detach(hba);
		}
#endif /* DHCHAP_SUPPORT */

		if (init_flag & ATTACH_KSTAT) {
			kstat_delete(hba->kstat);
		}

		if (init_flag & ATTACH_FCA) {
			emlxs_fca_detach(hba);
		}

		if (init_flag & ATTACH_NODE) {
			(void) ddi_remove_minor_node(hba->dip, "devctl");
		}

		if (init_flag & ATTACH_THREAD) {
			emlxs_thread_destroy(&hba->iodone_thread);
		}

		if (init_flag & ATTACH_PROP) {
			(void) ddi_prop_remove_all(hba->dip);
		}

		if (init_flag & ATTACH_LOCK) {
			emlxs_lock_destroy(hba);
		}

		if (init_flag & ATTACH_INTR_INIT) {
			(void) EMLXS_INTR_UNINIT(hba);
		}

		if (init_flag & ATTACH_MAP_BUS) {
			emlxs_unmap_bus(hba);
		}

		if (init_flag & ATTACH_MAP_SLI) {
			emlxs_sli_unmap_hdw(hba);
		}

#ifdef FMA_SUPPORT
		if (init_flag & ATTACH_FM) {
			emlxs_fm_fini(hba);
		}
#endif /* FMA_SUPPORT */

		if (init_flag & ATTACH_LOG) {
			(void) emlxs_msg_log_destroy(hba);
		}

		if (init_flag & ATTACH_FCA_TRAN) {
			(void) ddi_set_driver_private(hba->dip, NULL);
			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
			hba->fca_tran = NULL;
		}

		if (init_flag & ATTACH_HBA) {
			emlxs_device.log[hba->emlxinst] = 0;
			/* -1 marks the slot "failed"; 0 frees it for reuse */
			emlxs_device.hba[hba->emlxinst] =
			    (emlxs_hba_t *)((unsigned long)((failed) ?
			    -1 : 0));

#ifdef DUMP_SUPPORT
			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
#endif /* DUMP_SUPPORT */

		}
	}

	if (init_flag & ATTACH_SOFT_STATE) {
		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
	}

	return;

} /* emlxs_driver_remove() */



/* This determines which ports will be initiator mode */
static void
emlxs_fca_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	uint32_t i;

	if (!hba->ini_mode) {
		return;
	}
#ifdef MODSYM_SUPPORT
	/* Open SFS */
	(void) emlxs_fca_modopen();
#endif /* MODSYM_SUPPORT */

	/* Check if SFS present */
	if (((void *)MODSYM(fc_fca_init) == NULL) ||
	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS not present. Initiator mode disabled.");
		goto failed;
	}

	/* Setup devops for SFS */
	MODSYM(fc_fca_init)(&emlxs_ops);

	/* Check if our SFS driver interface matches the current SFS stack */
	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS/FCA version mismatch. FCA=0x%x",
		    hba->fca_tran->fca_version);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS present. Initiator mode disabled.");

		goto failed;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "SFS present. Initiator mode enabled.");

	return;

failed:

	/* Disable initiator mode on the physical port and all vports */
	hba->ini_mode = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->ini_mode = 0;
	}

	return;

} /* emlxs_fca_init() */


/* This determines which ports will be initiator or target mode */
static void
emlxs_set_mode(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	uint32_t i;
	uint32_t tgt_mode = 0;

#ifdef SFCT_SUPPORT
	emlxs_config_t *cfg;

	cfg = &hba->config[CFG_TARGET_MODE];
	tgt_mode = cfg->current;

	port->fct_flags = 0;
#endif /* SFCT_SUPPORT */

	/* Initialize physical port */
	if (tgt_mode) {
		hba->tgt_mode = 1;
		hba->ini_mode = 0;

		port->tgt_mode = 1;
		port->ini_mode = 0;
	} else {
		hba->tgt_mode = 0;
		hba->ini_mode = 1;

		port->tgt_mode = 0;
		port->ini_mode = 1;
	}

	/* Initialize virtual ports */
	/* Virtual ports take on the mode of the parent physical port */
	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

#ifdef SFCT_SUPPORT
		vport->fct_flags = 0;
#endif /* SFCT_SUPPORT */

		vport->ini_mode = port->ini_mode;
		vport->tgt_mode = port->tgt_mode;
	}

	/* Check if initiator mode is requested */
	if (hba->ini_mode) {
		emlxs_fca_init(hba);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Initiator mode not enabled.");
	}

#ifdef SFCT_SUPPORT
	/* Check if target mode is requested */
	if (hba->tgt_mode) {
		emlxs_fct_init(hba);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode not enabled.");
	}
#endif /* SFCT_SUPPORT */

	return;

} /* emlxs_set_mode() */



static void
emlxs_fca_attach(emlxs_hba_t *hba)
{
	/* Update our transport structure */
hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
	hba->fca_tran->fca_cmd_max = hba->io_throttle;

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
	    sizeof (NAME_TYPE));
#endif /* >= EMLXS_MODREV5 */

	return;

} /* emlxs_fca_attach() */


/*
 * emlxs_fca_detach
 *
 * Detaches the port from the SFS stack (if initiator mode was active)
 * and clears initiator mode on the physical port and all vports.
 */
static void
emlxs_fca_detach(emlxs_hba_t *hba)
{
	uint32_t i;
	emlxs_port_t *vport;

	if (hba->ini_mode) {
		if ((void *)MODSYM(fc_fca_detach) != NULL) {
			MODSYM(fc_fca_detach)(hba->dip);
		}

		hba->ini_mode = 0;

		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);
			vport->ini_mode = 0;
		}
	}

	return;

} /* emlxs_fca_detach() */



/*
 * emlxs_drv_banner
 *
 * Logs the attach-time banner: driver label/revision, adapter model,
 * firmware/boot versions, interrupt mode, NPIV state and the WWPN/WWNN
 * of the physical port and every configured vport.  Ends with
 * ddi_report_dev() to announce the device.
 */
static void
emlxs_drv_banner(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;
	char msi_mode[16];
	char npiv_mode[16];
	emlxs_vpd_t *vpd = &VPD;
	emlxs_config_t *cfg = &CFG;
	uint8_t *wwpn;
	uint8_t *wwnn;

	/* Display firmware library one time */
	if (emlxs_instance_count == 1) {
		emlxs_fw_show(hba);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
	    emlxs_revision);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
	    hba->model_info.device_id, hba->model_info.ssdid,
	    hba->model_info.id);

#ifdef EMLXS_I386

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
	    vpd->boot_version);

#else	/* EMLXS_SPARC */

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);

#endif	/* EMLXS_I386 */

	/* Default to legacy INTX until MSI support says otherwise */
	(void) strcpy(msi_mode, " INTX:1");

#ifdef MSI_SUPPORT
	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
		switch (hba->intr_type) {
		case DDI_INTR_TYPE_FIXED:
			(void) strcpy(msi_mode, " MSI:0");
			break;

		case DDI_INTR_TYPE_MSI:
			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
			break;

		case DDI_INTR_TYPE_MSIX:
			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
			break;
		}
	}
#endif

	(void) strcpy(npiv_mode, "");

#ifdef SLI3_SUPPORT
	if (hba->flag & FC_NPIV_ENABLED) {
		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max);
	} else {
		(void) strcpy(npiv_mode, " NPIV:0");
	}
#endif /* SLI3_SUPPORT */


	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "SLI:%d%s%s%s%s",
	    hba->sli_mode, msi_mode, npiv_mode,
	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));

	wwpn = (uint8_t *)&hba->wwpn;
	wwnn = (uint8_t *)&hba->wwnn;
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
	    wwnn[6], wwnn[7]);

#ifdef SLI3_SUPPORT
	/* Log the world-wide names of every configured vport */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_CONFIG)) {
			continue;
		}

		wwpn = (uint8_t *)&port->wwpn;
		wwnn = (uint8_t *)&port->wwnn;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
	}
	port = &PPORT;

#ifdef NPIV_SUPPORT
	/*
	 * No dependency for Restricted login parameter.
5829 */ 5830 if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) { 5831 port->flag |= EMLXS_PORT_RESTRICTED; 5832 } else { 5833 port->flag &= ~EMLXS_PORT_RESTRICTED; 5834 } 5835 #endif /* NPIV_SUPPORT */ 5836 5837 #endif /* SLI3_SUPPORT */ 5838 5839 /* 5840 * Announce the device: ddi_report_dev() prints a banner at boot time, 5841 * announcing the device pointed to by dip. 5842 */ 5843 (void) ddi_report_dev(hba->dip); 5844 5845 return; 5846 5847 } /* emlxs_drv_banner() */ 5848 5849 5850 extern void 5851 emlxs_get_fcode_version(emlxs_hba_t *hba) 5852 { 5853 emlxs_vpd_t *vpd = &VPD; 5854 char *prop_str; 5855 int status; 5856 5857 /* Setup fcode version property */ 5858 prop_str = NULL; 5859 status = 5860 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0, 5861 "fcode-version", (char **)&prop_str); 5862 5863 if (status == DDI_PROP_SUCCESS) { 5864 bcopy(prop_str, vpd->fcode_version, strlen(prop_str)); 5865 (void) ddi_prop_free((void *)prop_str); 5866 } else { 5867 (void) strcpy(vpd->fcode_version, "none"); 5868 } 5869 5870 return; 5871 5872 } /* emlxs_get_fcode_version() */ 5873 5874 5875 static int 5876 emlxs_hba_attach(dev_info_t *dip) 5877 { 5878 emlxs_hba_t *hba; 5879 emlxs_port_t *port; 5880 emlxs_config_t *cfg; 5881 char *prop_str; 5882 int ddiinst; 5883 int32_t emlxinst; 5884 int status; 5885 uint32_t rval; 5886 uint32_t init_flag = 0; 5887 char local_pm_components[32]; 5888 #ifdef EMLXS_I386 5889 uint32_t i; 5890 #endif /* EMLXS_I386 */ 5891 5892 ddiinst = ddi_get_instance(dip); 5893 emlxinst = emlxs_add_instance(ddiinst); 5894 5895 if (emlxinst >= MAX_FC_BRDS) { 5896 cmn_err(CE_WARN, 5897 "?%s: fca_hba_attach failed. Too many driver ddiinsts. 
" 5898 "inst=%x", DRIVER_NAME, ddiinst); 5899 return (DDI_FAILURE); 5900 } 5901 5902 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) { 5903 return (DDI_FAILURE); 5904 } 5905 5906 if (emlxs_device.hba[emlxinst]) { 5907 return (DDI_SUCCESS); 5908 } 5909 5910 /* An adapter can accidentally be plugged into a slave-only PCI slot */ 5911 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 5912 cmn_err(CE_WARN, 5913 "?%s%d: fca_hba_attach failed. Device in slave-only slot.", 5914 DRIVER_NAME, ddiinst); 5915 return (DDI_FAILURE); 5916 } 5917 5918 /* Allocate emlxs_dev_ctl structure. */ 5919 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) { 5920 cmn_err(CE_WARN, 5921 "?%s%d: fca_hba_attach failed. Unable to allocate soft " 5922 "state.", DRIVER_NAME, ddiinst); 5923 return (DDI_FAILURE); 5924 } 5925 init_flag |= ATTACH_SOFT_STATE; 5926 5927 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state, 5928 ddiinst)) == NULL) { 5929 cmn_err(CE_WARN, 5930 "?%s%d: fca_hba_attach failed. 
Unable to get soft state.", 5931 DRIVER_NAME, ddiinst); 5932 goto failed; 5933 } 5934 bzero((char *)hba, sizeof (emlxs_hba_t)); 5935 5936 emlxs_device.hba[emlxinst] = hba; 5937 emlxs_device.log[emlxinst] = &hba->log; 5938 5939 #ifdef DUMP_SUPPORT 5940 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile; 5941 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile; 5942 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile; 5943 #endif /* DUMP_SUPPORT */ 5944 5945 hba->dip = dip; 5946 hba->emlxinst = emlxinst; 5947 hba->ddiinst = ddiinst; 5948 hba->ini_mode = 0; 5949 hba->tgt_mode = 0; 5950 hba->mem_bpl_size = MEM_BPL_SIZE; 5951 5952 init_flag |= ATTACH_HBA; 5953 5954 /* Enable the physical port on this HBA */ 5955 port = &PPORT; 5956 port->hba = hba; 5957 port->vpi = 0; 5958 port->flag |= EMLXS_PORT_ENABLE; 5959 5960 /* Allocate a transport structure */ 5961 hba->fca_tran = 5962 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP); 5963 if (hba->fca_tran == NULL) { 5964 cmn_err(CE_WARN, 5965 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran " 5966 "memory.", DRIVER_NAME, ddiinst); 5967 goto failed; 5968 } 5969 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran, 5970 sizeof (fc_fca_tran_t)); 5971 5972 /* Set the transport structure pointer in our dip */ 5973 /* SFS may panic if we are in target only mode */ 5974 /* We will update the transport structure later */ 5975 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran); 5976 init_flag |= ATTACH_FCA_TRAN; 5977 5978 /* Perform driver integrity check */ 5979 rval = emlxs_integrity_check(hba); 5980 if (rval) { 5981 cmn_err(CE_WARN, 5982 "?%s%d: fca_hba_attach failed. Driver integrity check " 5983 "failed. 
%d error(s) found.", DRIVER_NAME, ddiinst, rval); 5984 goto failed; 5985 } 5986 5987 cfg = &CFG; 5988 5989 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg)); 5990 #ifdef MSI_SUPPORT 5991 if ((void *)&ddi_intr_get_supported_types != NULL) { 5992 hba->intr_flags |= EMLXS_MSI_ENABLED; 5993 } 5994 #endif /* MSI_SUPPORT */ 5995 5996 5997 /* Create the msg log file */ 5998 if (emlxs_msg_log_create(hba) == 0) { 5999 cmn_err(CE_WARN, 6000 "?%s%d: fca_hba_attach failed. Unable to create message " 6001 "log", DRIVER_NAME, ddiinst); 6002 goto failed; 6003 6004 } 6005 init_flag |= ATTACH_LOG; 6006 6007 /* We can begin to use EMLXS_MSGF from this point on */ 6008 6009 /* 6010 * Find the I/O bus type If it is not a SBUS card, 6011 * then it is a PCI card. Default is PCI_FC (0). 6012 */ 6013 prop_str = NULL; 6014 status = ddi_prop_lookup_string(DDI_DEV_T_ANY, 6015 (dev_info_t *)dip, 0, "name", (char **)&prop_str); 6016 6017 if (status == DDI_PROP_SUCCESS) { 6018 if (strncmp(prop_str, "lpfs", 4) == 0) { 6019 hba->bus_type = SBUS_FC; 6020 } 6021 6022 (void) ddi_prop_free((void *)prop_str); 6023 } 6024 #ifdef EMLXS_I386 6025 /* Update BPL size based on max_xfer_size */ 6026 i = cfg[CFG_MAX_XFER_SIZE].current; 6027 if (i > 688128) { 6028 /* 688128 = (((2048 / 12) - 2) * 4096) */ 6029 hba->mem_bpl_size = 4096; 6030 } else if (i > 339968) { 6031 /* 339968 = (((1024 / 12) - 2) * 4096) */ 6032 hba->mem_bpl_size = 2048; 6033 } else { 6034 hba->mem_bpl_size = 1024; 6035 } 6036 6037 /* Update dma_attr_sgllen based on BPL size */ 6038 i = BPL_TO_SGLLEN(hba->mem_bpl_size); 6039 emlxs_dma_attr.dma_attr_sgllen = i; 6040 emlxs_dma_attr_ro.dma_attr_sgllen = i; 6041 emlxs_dma_attr_fcip_rsp.dma_attr_sgllen = i; 6042 #endif /* EMLXS_I386 */ 6043 6044 /* 6045 * Copy DDS from the config method and update configuration parameters 6046 */ 6047 (void) emlxs_get_props(hba); 6048 6049 #ifdef FMA_SUPPORT 6050 hba->fm_caps = cfg[CFG_FM_CAPS].current; 6051 6052 emlxs_fm_init(hba); 6053 6054 
init_flag |= ATTACH_FM; 6055 #endif /* FMA_SUPPORT */ 6056 6057 if (emlxs_map_bus(hba)) { 6058 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6059 "Unable to map memory"); 6060 goto failed; 6061 6062 } 6063 init_flag |= ATTACH_MAP_BUS; 6064 6065 /* Attempt to identify the adapter */ 6066 rval = emlxs_init_adapter_info(hba); 6067 6068 if (rval == 0) { 6069 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6070 "Unable to get adapter info. Id:%d Device id:0x%x " 6071 "Model:%s", hba->model_info.id, 6072 hba->model_info.device_id, hba->model_info.model); 6073 goto failed; 6074 } 6075 6076 /* Check if adapter is not supported */ 6077 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) { 6078 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6079 "Unsupported adapter found. Id:%d Device id:0x%x " 6080 "SSDID:0x%x Model:%s", hba->model_info.id, 6081 hba->model_info.device_id, 6082 hba->model_info.ssdid, hba->model_info.model); 6083 goto failed; 6084 } 6085 6086 if (emlxs_sli_map_hdw(hba)) { 6087 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6088 "Unable to map memory"); 6089 goto failed; 6090 6091 } 6092 init_flag |= ATTACH_MAP_SLI; 6093 6094 /* Initialize the interrupts. 
But don't add them yet */ 6095 status = EMLXS_INTR_INIT(hba, 0); 6096 if (status != DDI_SUCCESS) { 6097 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6098 "Unable to initalize interrupt(s)."); 6099 goto failed; 6100 6101 } 6102 init_flag |= ATTACH_INTR_INIT; 6103 6104 /* Initialize LOCKs */ 6105 emlxs_lock_init(hba); 6106 init_flag |= ATTACH_LOCK; 6107 6108 /* Initialize the power management */ 6109 mutex_enter(&hba->pm_lock); 6110 hba->pm_state = EMLXS_PM_IN_ATTACH; 6111 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 6112 hba->pm_busy = 0; 6113 #ifdef IDLE_TIMER 6114 hba->pm_active = 1; 6115 hba->pm_idle_timer = 0; 6116 #endif /* IDLE_TIMER */ 6117 mutex_exit(&hba->pm_lock); 6118 6119 /* Set the pm component name */ 6120 (void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME, 6121 ddiinst); 6122 emlxs_pm_components[0] = local_pm_components; 6123 6124 /* Check if power management support is enabled */ 6125 if (cfg[CFG_PM_SUPPORT].current) { 6126 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 6127 "pm-components", emlxs_pm_components, 6128 sizeof (emlxs_pm_components) / 6129 sizeof (emlxs_pm_components[0])) != 6130 DDI_PROP_SUCCESS) { 6131 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6132 "Unable to create pm components."); 6133 goto failed; 6134 } 6135 } 6136 6137 /* Needed for suspend and resume support */ 6138 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state", 6139 "needs-suspend-resume"); 6140 init_flag |= ATTACH_PROP; 6141 6142 emlxs_thread_spawn_create(hba); 6143 init_flag |= ATTACH_SPAWN; 6144 6145 emlxs_thread_create(hba, &hba->iodone_thread); 6146 init_flag |= ATTACH_THREAD; 6147 6148 /* Setup initiator / target ports */ 6149 emlxs_set_mode(hba); 6150 6151 /* If driver did not attach to either stack, */ 6152 /* then driver attach failed */ 6153 if (!hba->tgt_mode && !hba->ini_mode) { 6154 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6155 "Driver interfaces not enabled."); 6156 goto failed; 6157 } 6158 6159 /* 
6160 * Initialize HBA 6161 */ 6162 6163 /* Set initial state */ 6164 mutex_enter(&EMLXS_PORT_LOCK); 6165 emlxs_diag_state = DDI_OFFDI; 6166 hba->flag |= FC_OFFLINE_MODE; 6167 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE); 6168 mutex_exit(&EMLXS_PORT_LOCK); 6169 6170 if (status = emlxs_online(hba)) { 6171 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6172 "Unable to initialize adapter."); 6173 goto failed; 6174 } 6175 init_flag |= ATTACH_ONLINE; 6176 6177 /* This is to ensure that the model property is properly set */ 6178 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model", 6179 hba->model_info.model); 6180 6181 /* Create the device node. */ 6182 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) == 6183 DDI_FAILURE) { 6184 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 6185 "Unable to create device node."); 6186 goto failed; 6187 } 6188 init_flag |= ATTACH_NODE; 6189 6190 /* Attach initiator now */ 6191 /* This must come after emlxs_online() */ 6192 emlxs_fca_attach(hba); 6193 init_flag |= ATTACH_FCA; 6194 6195 /* Initialize kstat information */ 6196 hba->kstat = kstat_create(DRIVER_NAME, 6197 ddiinst, "statistics", "controller", 6198 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t), 6199 KSTAT_FLAG_VIRTUAL); 6200 6201 if (hba->kstat == NULL) { 6202 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 6203 "kstat_create failed."); 6204 } else { 6205 hba->kstat->ks_data = (void *)&hba->stats; 6206 kstat_install(hba->kstat); 6207 init_flag |= ATTACH_KSTAT; 6208 } 6209 6210 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 6211 /* Setup virtual port properties */ 6212 emlxs_read_vport_prop(hba); 6213 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 6214 6215 6216 #ifdef DHCHAP_SUPPORT 6217 emlxs_dhc_attach(hba); 6218 init_flag |= ATTACH_DHCHAP; 6219 #endif /* DHCHAP_SUPPORT */ 6220 6221 /* Display the driver banner now */ 6222 emlxs_drv_banner(hba); 6223 6224 /* Raise the power level */ 6225 6226 /* 6227 
	 * This will not execute emlxs_hba_resume because
	 * EMLXS_PM_IN_ATTACH is set
	 */
	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
		/* Set power up anyway. This should not happen! */
		mutex_enter(&hba->pm_lock);
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&hba->pm_lock);
	} else {
		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&hba->pm_lock);
	}

#ifdef SFCT_SUPPORT
	/* Do this last */
	emlxs_fct_attach(hba);
	init_flag |= ATTACH_FCT;
#endif /* SFCT_SUPPORT */

	return (DDI_SUCCESS);

failed:

	/* Tear down whatever stages of attach completed (init_flag bits) */
	emlxs_driver_remove(dip, init_flag, 1);

	return (DDI_FAILURE);

} /* emlxs_hba_attach() */


/*
 * Detach an HBA instance: block PM transitions, lower the power level,
 * take the adapter offline, then tear down everything emlxs_hba_attach()
 * built (init_flag = (uint32_t)-1 requests removal of all attach stages).
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
emlxs_hba_detach(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	uint32_t init_flag = (uint32_t)-1;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);

	/* Mark detach-in-progress so PM callbacks do not suspend/resume us */
	mutex_enter(&hba->pm_lock);
	hba->pm_state |= EMLXS_PM_IN_DETACH;
	mutex_exit(&hba->pm_lock);

	/* Lower the power level */
	/*
	 * This will not suspend the driver since the
	 * EMLXS_PM_IN_DETACH has been set
	 */
	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to lower power.");

		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
		mutex_exit(&hba->pm_lock);

		return (DDI_FAILURE);
	}

	/* Take the adapter offline first, if not already */
	if (emlxs_offline(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to take adapter offline.");

		/* Undo the detach state and restore power before failing */
		mutex_enter(&hba->pm_lock);
		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
		mutex_exit(&hba->pm_lock);

		(void) emlxs_pm_raise_power(dip);

		return (DDI_FAILURE);
	}
	init_flag &= ~ATTACH_ONLINE;

	/* Remove the driver instance */
	emlxs_driver_remove(dip, init_flag, 0);

	return (DDI_SUCCESS);

} /* emlxs_hba_detach() */


/*
 * Map the PCI (or SBUS) configuration space registers for this adapter.
 * Already-mapped registers (non-zero handles) are left untouched, so the
 * routine may be called more than once.  Returns 0 on success or ENOMEM
 * on failure, in which case all mappings are released via
 * emlxs_unmap_bus().
 */
extern int
emlxs_map_bus(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {
		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup PCI failed. "
				    "status=%x", status);
				goto failed;
			}
		}

		if (hba->sbus_pci_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_TITAN_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->sbus_pci_addr,
			    0, 0, &dev_attr, &hba->sbus_pci_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN PCI "
				    "failed. status=%x", status);
				goto failed;
			}
		}

	} else {	/* ****** PCI ****** */

		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup PCI failed. "
				    "status=%x", status);
				goto failed;
			}
		}
#ifdef EMLXS_I386
		/* Setting up PCI configure space */
		(void) ddi_put16(hba->pci_acc_handle,
		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
		    CMD_CFG_VALUE | CMD_IO_ENBL);
#endif /* EMLXS_I386 */

	}
	return (0);

failed:

	emlxs_unmap_bus(hba);
	return (ENOMEM);

} /* emlxs_map_bus() */


/*
 * Release the register mappings created by emlxs_map_bus().
 * Safe to call with partially-mapped (or already unmapped) handles.
 */
extern void
emlxs_unmap_bus(emlxs_hba_t *hba)
{
	if (hba->pci_acc_handle) {
		(void) ddi_regs_map_free(&hba->pci_acc_handle);
		hba->pci_acc_handle = 0;
	}

	if (hba->sbus_pci_handle) {
		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
		hba->sbus_pci_handle = 0;
	}

	return;

} /* emlxs_unmap_bus() */


/*
 * Load every configuration parameter for this instance.  For each entry
 * the precedence is: compiled-in default, then the global driver.conf
 * property (cfg->string), then the per-instance "<driver><inst>-<name>"
 * property.  The winning value is validated by emlxs_check_parm() before
 * being stored in cfg->current.  Always returns 0.
 */
static int
emlxs_get_props(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg;
	uint32_t i;
	char string[256];
	uint32_t new_value;

	/* Initialize each parameter */
	for (i = 0; i < NUM_CFG_PARAM; i++) {
		cfg = &hba->config[i];

		/* Ensure strings are terminated */
		cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
		cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0;

		/* Set the current value to the default value */
		new_value = cfg->def;

		/* First check for the global setting */
		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    (void *)hba->dip, DDI_PROP_DONTPASS,
		    cfg->string, new_value);

		/* Now check for the per adapter ddiinst setting */
		(void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
		    cfg->string);

		new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);

		/* Now check the parameter */
		cfg->current = emlxs_check_parm(hba, i, new_value);
	}

	return (0);

} /* emlxs_get_props() */


/*
 * Validate a configuration value for config table entry 'index'.
 * Out-of-range or unsupported values are replaced with a safe value.
 */
extern uint32_t
emlxs_check_parm(emlxs_hba_t *hba, uint32_t
index, uint32_t new_value) 6449 { 6450 emlxs_port_t *port = &PPORT; 6451 uint32_t i; 6452 emlxs_config_t *cfg; 6453 emlxs_vpd_t *vpd = &VPD; 6454 6455 if (index > NUM_CFG_PARAM) { 6456 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6457 "emlxs_check_parm failed. Invalid index = %d", index); 6458 6459 return (new_value); 6460 } 6461 6462 cfg = &hba->config[index]; 6463 6464 if (new_value > cfg->hi) { 6465 new_value = cfg->def; 6466 } else if (new_value < cfg->low) { 6467 new_value = cfg->def; 6468 } 6469 6470 /* Perform additional checks */ 6471 switch (index) { 6472 #ifdef NPIV_SUPPORT 6473 case CFG_NPIV_ENABLE: 6474 if (hba->tgt_mode) { 6475 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6476 "enable-npiv: Not supported in target mode. " 6477 "Disabling."); 6478 6479 new_value = 0; 6480 } 6481 break; 6482 #endif /* NPIV_SUPPORT */ 6483 6484 #ifdef DHCHAP_SUPPORT 6485 case CFG_AUTH_ENABLE: 6486 if (hba->tgt_mode) { 6487 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6488 "enable-auth: Not supported in target mode. " 6489 "Disabling."); 6490 6491 new_value = 0; 6492 } 6493 break; 6494 #endif /* DHCHAP_SUPPORT */ 6495 6496 case CFG_NUM_NODES: 6497 switch (new_value) { 6498 case 1: 6499 case 2: 6500 /* Must have at least 3 if not 0 */ 6501 return (3); 6502 6503 default: 6504 break; 6505 } 6506 break; 6507 6508 case CFG_LINK_SPEED: 6509 if (vpd->link_speed) { 6510 switch (new_value) { 6511 case 0: 6512 break; 6513 6514 case 1: 6515 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 6516 new_value = 0; 6517 6518 EMLXS_MSGF(EMLXS_CONTEXT, 6519 &emlxs_init_msg, 6520 "link-speed: 1Gb not supported " 6521 "by adapter. Switching to auto " 6522 "detect."); 6523 } 6524 break; 6525 6526 case 2: 6527 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 6528 new_value = 0; 6529 6530 EMLXS_MSGF(EMLXS_CONTEXT, 6531 &emlxs_init_msg, 6532 "link-speed: 2Gb not supported " 6533 "by adapter. 
Switching to auto " 6534 "detect."); 6535 } 6536 break; 6537 case 4: 6538 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 6539 new_value = 0; 6540 6541 EMLXS_MSGF(EMLXS_CONTEXT, 6542 &emlxs_init_msg, 6543 "link-speed: 4Gb not supported " 6544 "by adapter. Switching to auto " 6545 "detect."); 6546 } 6547 break; 6548 6549 case 8: 6550 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 6551 new_value = 0; 6552 6553 EMLXS_MSGF(EMLXS_CONTEXT, 6554 &emlxs_init_msg, 6555 "link-speed: 8Gb not supported " 6556 "by adapter. Switching to auto " 6557 "detect."); 6558 } 6559 break; 6560 6561 case 10: 6562 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) { 6563 new_value = 0; 6564 6565 EMLXS_MSGF(EMLXS_CONTEXT, 6566 &emlxs_init_msg, 6567 "link-speed: 10Gb not supported " 6568 "by adapter. Switching to auto " 6569 "detect."); 6570 } 6571 break; 6572 6573 default: 6574 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6575 "link-speed: Invalid value=%d provided. " 6576 "Switching to auto detect.", 6577 new_value); 6578 6579 new_value = 0; 6580 } 6581 } else { /* Perform basic validity check */ 6582 6583 /* Perform additional check on link speed */ 6584 switch (new_value) { 6585 case 0: 6586 case 1: 6587 case 2: 6588 case 4: 6589 case 8: 6590 case 10: 6591 /* link-speed is a valid choice */ 6592 break; 6593 6594 default: 6595 new_value = cfg->def; 6596 } 6597 } 6598 break; 6599 6600 case CFG_TOPOLOGY: 6601 /* Perform additional check on topology */ 6602 switch (new_value) { 6603 case 0: 6604 case 2: 6605 case 4: 6606 case 6: 6607 /* topology is a valid choice */ 6608 break; 6609 6610 default: 6611 return (cfg->def); 6612 } 6613 break; 6614 6615 #ifdef DHCHAP_SUPPORT 6616 case CFG_AUTH_TYPE: 6617 { 6618 uint32_t shift; 6619 uint32_t mask; 6620 6621 /* Perform additional check on auth type */ 6622 shift = 12; 6623 mask = 0xF000; 6624 for (i = 0; i < 4; i++) { 6625 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) { 6626 return (cfg->def); 6627 } 6628 6629 shift -= 4; 6630 mask >>= 4; 6631 } 6632 break; 
6633 } 6634 6635 case CFG_AUTH_HASH: 6636 { 6637 uint32_t shift; 6638 uint32_t mask; 6639 6640 /* Perform additional check on auth hash */ 6641 shift = 12; 6642 mask = 0xF000; 6643 for (i = 0; i < 4; i++) { 6644 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) { 6645 return (cfg->def); 6646 } 6647 6648 shift -= 4; 6649 mask >>= 4; 6650 } 6651 break; 6652 } 6653 6654 case CFG_AUTH_GROUP: 6655 { 6656 uint32_t shift; 6657 uint32_t mask; 6658 6659 /* Perform additional check on auth group */ 6660 shift = 28; 6661 mask = 0xF0000000; 6662 for (i = 0; i < 8; i++) { 6663 if (((new_value & mask) >> shift) > 6664 DFC_AUTH_GROUP_MAX) { 6665 return (cfg->def); 6666 } 6667 6668 shift -= 4; 6669 mask >>= 4; 6670 } 6671 break; 6672 } 6673 6674 case CFG_AUTH_INTERVAL: 6675 if (new_value < 10) { 6676 return (10); 6677 } 6678 break; 6679 6680 6681 #endif /* DHCHAP_SUPPORT */ 6682 6683 } /* switch */ 6684 6685 return (new_value); 6686 6687 } /* emlxs_check_parm() */ 6688 6689 6690 extern uint32_t 6691 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 6692 { 6693 emlxs_port_t *port = &PPORT; 6694 emlxs_port_t *vport; 6695 uint32_t vpi; 6696 emlxs_config_t *cfg; 6697 uint32_t old_value; 6698 6699 if (index > NUM_CFG_PARAM) { 6700 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6701 "emlxs_set_parm failed. Invalid index = %d", index); 6702 6703 return ((uint32_t)FC_FAILURE); 6704 } 6705 6706 cfg = &hba->config[index]; 6707 6708 if (!(cfg->flags & PARM_DYNAMIC)) { 6709 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6710 "emlxs_set_parm failed. %s is not dynamic.", cfg->string); 6711 6712 return ((uint32_t)FC_FAILURE); 6713 } 6714 6715 /* Check new value */ 6716 old_value = new_value; 6717 new_value = emlxs_check_parm(hba, index, new_value); 6718 6719 if (old_value != new_value) { 6720 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6721 "emlxs_set_parm: %s invalid. 
0x%x --> 0x%x", 6722 cfg->string, old_value, new_value); 6723 } 6724 6725 /* Return now if no actual change */ 6726 if (new_value == cfg->current) { 6727 return (FC_SUCCESS); 6728 } 6729 6730 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6731 "emlxs_set_parm: %s changing. 0x%x --> 0x%x", 6732 cfg->string, cfg->current, new_value); 6733 6734 old_value = cfg->current; 6735 cfg->current = new_value; 6736 6737 /* React to change if needed */ 6738 switch (index) { 6739 case CFG_PCI_MAX_READ: 6740 /* Update MXR */ 6741 emlxs_pcix_mxr_update(hba, 1); 6742 break; 6743 6744 #ifdef SLI3_SUPPORT 6745 case CFG_SLI_MODE: 6746 /* Check SLI mode */ 6747 if ((hba->sli_mode == 3) && (new_value == 2)) { 6748 /* All vports must be disabled first */ 6749 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 6750 vport = &VPORT(vpi); 6751 6752 if (vport->flag & EMLXS_PORT_ENABLE) { 6753 /* Reset current value */ 6754 cfg->current = old_value; 6755 6756 EMLXS_MSGF(EMLXS_CONTEXT, 6757 &emlxs_sfs_debug_msg, 6758 "emlxs_set_parm failed. %s: vpi=%d " 6759 "still enabled. Value restored to " 6760 "0x%x.", cfg->string, vpi, 6761 old_value); 6762 6763 return (2); 6764 } 6765 } 6766 } 6767 break; 6768 6769 #ifdef NPIV_SUPPORT 6770 case CFG_NPIV_ENABLE: 6771 /* Check if NPIV is being disabled */ 6772 if ((old_value == 1) && (new_value == 0)) { 6773 /* All vports must be disabled first */ 6774 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 6775 vport = &VPORT(vpi); 6776 6777 if (vport->flag & EMLXS_PORT_ENABLE) { 6778 /* Reset current value */ 6779 cfg->current = old_value; 6780 6781 EMLXS_MSGF(EMLXS_CONTEXT, 6782 &emlxs_sfs_debug_msg, 6783 "emlxs_set_parm failed. %s: vpi=%d " 6784 "still enabled. 
Value restored to " 6785 "0x%x.", cfg->string, vpi, 6786 old_value); 6787 6788 return (2); 6789 } 6790 } 6791 } 6792 6793 /* Trigger adapter reset */ 6794 /* (void) emlxs_reset(port, FC_FCA_RESET); */ 6795 6796 break; 6797 6798 6799 case CFG_VPORT_RESTRICTED: 6800 for (vpi = 0; vpi < MAX_VPORTS; vpi++) { 6801 vport = &VPORT(vpi); 6802 6803 if (!(vport->flag & EMLXS_PORT_CONFIG)) { 6804 continue; 6805 } 6806 6807 if (vport->options & EMLXS_OPT_RESTRICT_MASK) { 6808 continue; 6809 } 6810 6811 if (new_value) { 6812 vport->flag |= EMLXS_PORT_RESTRICTED; 6813 } else { 6814 vport->flag &= ~EMLXS_PORT_RESTRICTED; 6815 } 6816 } 6817 6818 break; 6819 #endif /* NPIV_SUPPORT */ 6820 #endif /* SLI3_SUPPORT */ 6821 6822 #ifdef DHCHAP_SUPPORT 6823 case CFG_AUTH_ENABLE: 6824 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 6825 break; 6826 6827 case CFG_AUTH_TMO: 6828 hba->auth_cfg.authentication_timeout = cfg->current; 6829 break; 6830 6831 case CFG_AUTH_MODE: 6832 hba->auth_cfg.authentication_mode = cfg->current; 6833 break; 6834 6835 case CFG_AUTH_BIDIR: 6836 hba->auth_cfg.bidirectional = cfg->current; 6837 break; 6838 6839 case CFG_AUTH_TYPE: 6840 hba->auth_cfg.authentication_type_priority[0] = 6841 (cfg->current & 0xF000) >> 12; 6842 hba->auth_cfg.authentication_type_priority[1] = 6843 (cfg->current & 0x0F00) >> 8; 6844 hba->auth_cfg.authentication_type_priority[2] = 6845 (cfg->current & 0x00F0) >> 4; 6846 hba->auth_cfg.authentication_type_priority[3] = 6847 (cfg->current & 0x000F); 6848 break; 6849 6850 case CFG_AUTH_HASH: 6851 hba->auth_cfg.hash_priority[0] = 6852 (cfg->current & 0xF000) >> 12; 6853 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8; 6854 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4; 6855 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F); 6856 break; 6857 6858 case CFG_AUTH_GROUP: 6859 hba->auth_cfg.dh_group_priority[0] = 6860 (cfg->current & 0xF0000000) >> 28; 6861 hba->auth_cfg.dh_group_priority[1] = 6862 (cfg->current & 
		    0x0F000000) >> 24;
		hba->auth_cfg.dh_group_priority[2] =
		    (cfg->current & 0x00F00000) >> 20;
		hba->auth_cfg.dh_group_priority[3] =
		    (cfg->current & 0x000F0000) >> 16;
		hba->auth_cfg.dh_group_priority[4] =
		    (cfg->current & 0x0000F000) >> 12;
		hba->auth_cfg.dh_group_priority[5] =
		    (cfg->current & 0x00000F00) >> 8;
		hba->auth_cfg.dh_group_priority[6] =
		    (cfg->current & 0x000000F0) >> 4;
		hba->auth_cfg.dh_group_priority[7] =
		    (cfg->current & 0x0000000F);
		break;

	case CFG_AUTH_INTERVAL:
		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
		break;
#endif /* DHCHAP_SUPPORT */

	}

	return (FC_SUCCESS);

} /* emlxs_set_parm() */


/*
 * emlxs_mem_alloc OS specific routine for memory allocation / mapping
 *
 * The buf_info->flags field describes the memory operation requested.
 *
 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
 * Virtual address is supplied in buf_info->virt
 * DMA mapping flag is in buf_info->align
 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
 * The mapped physical address is returned buf_info->phys
 *
 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
 * if FC_MBUF_DMA is set the memory is also mapped for DMA
 * The byte alignment of the memory request is supplied in buf_info->align
 * The byte size of the memory request is supplied in buf_info->size
 * The virtual address is returned buf_info->virt
 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
 *
 * Returns the virtual address of the allocation, or NULL on failure.
 */
extern uint8_t *
emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
{
	emlxs_port_t *port = &PPORT;
	ddi_dma_attr_t dma_attr;	/* local copy; may be modified below */
	ddi_device_acc_attr_t dev_attr;
	uint_t cookie_count;
	size_t dma_reallen;
	ddi_dma_cookie_t dma_cookie;
	uint_t dma_flag;
	int status;

	dma_attr =
emlxs_dma_attr_1sg; 6920 dev_attr = emlxs_data_acc_attr; 6921 6922 if (buf_info->flags & FC_MBUF_SNGLSG) { 6923 buf_info->flags &= ~FC_MBUF_SNGLSG; 6924 dma_attr.dma_attr_sgllen = 1; 6925 } 6926 6927 if (buf_info->flags & FC_MBUF_DMA32) { 6928 buf_info->flags &= ~FC_MBUF_DMA32; 6929 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff; 6930 } 6931 6932 buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL); 6933 6934 switch (buf_info->flags) { 6935 case 0: /* allocate host memory */ 6936 6937 buf_info->virt = 6938 (uint32_t *)kmem_zalloc((size_t)buf_info->size, 6939 KM_SLEEP); 6940 buf_info->phys = 0; 6941 buf_info->data_handle = 0; 6942 buf_info->dma_handle = 0; 6943 6944 if (buf_info->virt == (uint32_t *)0) { 6945 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6946 "size=%x align=%x flags=%x", buf_info->size, 6947 buf_info->align, buf_info->flags); 6948 } 6949 break; 6950 6951 case FC_MBUF_PHYSONLY: 6952 case FC_MBUF_DMA | FC_MBUF_PHYSONLY: /* fill in physical address */ 6953 6954 if (buf_info->virt == 0) 6955 break; 6956 6957 /* 6958 * Allocate the DMA handle for this DMA object 6959 */ 6960 status = ddi_dma_alloc_handle((void *)hba->dip, 6961 &dma_attr, DDI_DMA_SLEEP, 6962 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle); 6963 if (status != DDI_SUCCESS) { 6964 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6965 "ddi_dma_alloc_handle failed: size=%x align=%x " 6966 "flags=%x", buf_info->size, buf_info->align, 6967 buf_info->flags); 6968 6969 buf_info->phys = 0; 6970 buf_info->dma_handle = 0; 6971 break; 6972 } 6973 6974 switch (buf_info->align) { 6975 case DMA_READ_WRITE: 6976 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT); 6977 break; 6978 case DMA_READ_ONLY: 6979 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT); 6980 break; 6981 case DMA_WRITE_ONLY: 6982 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT); 6983 break; 6984 } 6985 6986 /* Map this page of memory */ 6987 status = ddi_dma_addr_bind_handle( 6988 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 
6989 (caddr_t)buf_info->virt, (size_t)buf_info->size, 6990 dma_flag, DDI_DMA_SLEEP, NULL, &dma_cookie, 6991 &cookie_count); 6992 6993 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 6994 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 6995 "ddi_dma_addr_bind_handle failed: status=%x " 6996 "count=%x flags=%x", status, cookie_count, 6997 buf_info->flags); 6998 6999 (void) ddi_dma_free_handle( 7000 (ddi_dma_handle_t *)&buf_info->dma_handle); 7001 buf_info->phys = 0; 7002 buf_info->dma_handle = 0; 7003 break; 7004 } 7005 7006 if (hba->bus_type == SBUS_FC) { 7007 7008 int32_t burstsizes_limit = 0xff; 7009 int32_t ret_burst; 7010 7011 ret_burst = ddi_dma_burstsizes( 7012 buf_info->dma_handle) & burstsizes_limit; 7013 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7014 ret_burst) == DDI_FAILURE) { 7015 EMLXS_MSGF(EMLXS_CONTEXT, 7016 &emlxs_mem_alloc_failed_msg, 7017 "ddi_dma_set_sbus64 failed."); 7018 } 7019 } 7020 7021 /* Save Physical address */ 7022 buf_info->phys = dma_cookie.dmac_laddress; 7023 7024 /* 7025 * Just to be sure, let's add this 7026 */ 7027 emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle, 7028 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7029 7030 break; 7031 7032 case FC_MBUF_DMA: /* allocate and map DMA mem */ 7033 7034 dma_attr.dma_attr_align = buf_info->align; 7035 7036 /* 7037 * Allocate the DMA handle for this DMA object 7038 */ 7039 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr, 7040 DDI_DMA_SLEEP, NULL, 7041 (ddi_dma_handle_t *)&buf_info->dma_handle); 7042 if (status != DDI_SUCCESS) { 7043 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7044 "ddi_dma_alloc_handle failed: size=%x align=%x " 7045 "flags=%x", buf_info->size, buf_info->align, 7046 buf_info->flags); 7047 7048 buf_info->virt = 0; 7049 buf_info->phys = 0; 7050 buf_info->data_handle = 0; 7051 buf_info->dma_handle = 0; 7052 break; 7053 } 7054 7055 status = ddi_dma_mem_alloc( 7056 (ddi_dma_handle_t)buf_info->dma_handle, 7057 
(size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT, 7058 DDI_DMA_SLEEP, NULL, (caddr_t *)&buf_info->virt, 7059 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle); 7060 7061 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) { 7062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7063 "ddi_dma_mem_alloc failed: size=%x align=%x " 7064 "flags=%x", buf_info->size, buf_info->align, 7065 buf_info->flags); 7066 7067 (void) ddi_dma_free_handle( 7068 (ddi_dma_handle_t *)&buf_info->dma_handle); 7069 7070 buf_info->virt = 0; 7071 buf_info->phys = 0; 7072 buf_info->data_handle = 0; 7073 buf_info->dma_handle = 0; 7074 break; 7075 } 7076 7077 /* Map this page of memory */ 7078 status = ddi_dma_addr_bind_handle( 7079 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7080 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7081 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 7082 &dma_cookie, &cookie_count); 7083 7084 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7085 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7086 "ddi_dma_addr_bind_handle failed: status=%x " 7087 "count=%d size=%x align=%x flags=%x", status, 7088 cookie_count, buf_info->size, buf_info->align, 7089 buf_info->flags); 7090 7091 (void) ddi_dma_mem_free( 7092 (ddi_acc_handle_t *)&buf_info->data_handle); 7093 (void) ddi_dma_free_handle( 7094 (ddi_dma_handle_t *)&buf_info->dma_handle); 7095 7096 buf_info->virt = 0; 7097 buf_info->phys = 0; 7098 buf_info->dma_handle = 0; 7099 buf_info->data_handle = 0; 7100 break; 7101 } 7102 7103 if (hba->bus_type == SBUS_FC) { 7104 int32_t burstsizes_limit = 0xff; 7105 int32_t ret_burst; 7106 7107 ret_burst = 7108 ddi_dma_burstsizes(buf_info-> 7109 dma_handle) & burstsizes_limit; 7110 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7111 ret_burst) == DDI_FAILURE) { 7112 EMLXS_MSGF(EMLXS_CONTEXT, 7113 &emlxs_mem_alloc_failed_msg, 7114 "ddi_dma_set_sbus64 failed."); 7115 } 7116 } 7117 7118 /* Save Physical address */ 7119 buf_info->phys = 
dma_cookie.dmac_laddress; 7120 7121 /* Just to be sure, let's add this */ 7122 emlxs_mpdata_sync((ddi_dma_handle_t)buf_info->dma_handle, 7123 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7124 7125 break; 7126 } /* End of switch */ 7127 7128 return ((uint8_t *)buf_info->virt); 7129 7130 } /* emlxs_mem_alloc() */ 7131 7132 7133 7134 /* 7135 * emlxs_mem_free: 7136 * 7137 * OS specific routine for memory de-allocation / unmapping 7138 * 7139 * The buf_info->flags field describes the memory operation requested. 7140 * 7141 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped 7142 * for DMA, but not freed. The mapped physical address to be unmapped is in 7143 * buf_info->phys 7144 * 7145 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only 7146 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in 7147 * buf_info->phys. The virtual address to be freed is in buf_info->virt 7148 */ 7149 /*ARGSUSED*/ 7150 extern void 7151 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info) 7152 { 7153 buf_info->flags &= ~(FC_MBUF_UNLOCK | FC_MBUF_IOCTL); 7154 7155 switch (buf_info->flags) { 7156 case 0: /* free host memory */ 7157 7158 if (buf_info->virt) { 7159 kmem_free(buf_info->virt, (size_t)buf_info->size); 7160 buf_info->virt = NULL; 7161 } 7162 7163 break; 7164 7165 case FC_MBUF_PHYSONLY: 7166 case FC_MBUF_DMA | FC_MBUF_PHYSONLY: /* nothing to do */ 7167 7168 if (buf_info->dma_handle) { 7169 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7170 (void) ddi_dma_free_handle( 7171 (ddi_dma_handle_t *)&buf_info->dma_handle); 7172 buf_info->dma_handle = NULL; 7173 } 7174 7175 break; 7176 7177 case FC_MBUF_DMA: /* unmap free DMA-able memory */ 7178 7179 7180 if (buf_info->dma_handle) { 7181 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7182 (void) ddi_dma_mem_free( 7183 (ddi_acc_handle_t *)&buf_info->data_handle); 7184 (void) ddi_dma_free_handle( 7185 (ddi_dma_handle_t *)&buf_info->dma_handle); 7186 
buf_info->dma_handle = NULL; 7187 buf_info->data_handle = NULL; 7188 } 7189 7190 break; 7191 } 7192 7193 } /* emlxs_mem_free() */ 7194 7195 7196 static int32_t 7197 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp) 7198 { 7199 emlxs_hba_t *hba = HBA; 7200 fc_packet_t *pkt; 7201 IOCBQ *iocbq; 7202 IOCB *iocb; 7203 RING *rp; 7204 NODELIST *ndlp; 7205 char *cmd; 7206 uint16_t lun; 7207 FCP_CMND *fcp_cmd; 7208 uint32_t did; 7209 7210 pkt = PRIV2PKT(sbp); 7211 fcp_cmd = (FCP_CMND *)pkt->pkt_cmd; 7212 rp = &hba->ring[FC_FCP_RING]; 7213 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 7214 7215 /* Find target node object */ 7216 ndlp = emlxs_node_find_did(port, did); 7217 7218 if (!ndlp || !ndlp->nlp_active) { 7219 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7220 "Node not found. did=%x", did); 7221 7222 return (FC_BADPACKET); 7223 } 7224 7225 /* If gate is closed */ 7226 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7227 return (FC_TRAN_BUSY); 7228 } 7229 7230 #ifdef SAN_DIAG_SUPPORT 7231 sbp->sd_start_time = gethrtime(); 7232 #endif /* SAN_DIAG_SUPPORT */ 7233 7234 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7235 emlxs_swap_fcp_pkt(sbp); 7236 #endif /* EMLXS_MODREV2X */ 7237 7238 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) { 7239 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE; 7240 } 7241 7242 iocbq = &sbp->iocbq; 7243 iocb = &iocbq->iocb; 7244 7245 iocbq->node = (void *)ndlp; 7246 if (emlxs_sli_prep_fcp_iocb(port, sbp) != FC_SUCCESS) { 7247 return (FC_TRAN_BUSY); 7248 } 7249 7250 /* Snoop for target or lun resets */ 7251 cmd = (char *)pkt->pkt_cmd; 7252 lun = *((uint16_t *)cmd); 7253 lun = SWAP_DATA16(lun); 7254 7255 /* Check for target reset */ 7256 if (cmd[10] & 0x20) { 7257 mutex_enter(&sbp->mtx); 7258 sbp->pkt_flags |= PACKET_FCP_TGT_RESET; 7259 sbp->pkt_flags |= PACKET_POLLED; 7260 mutex_exit(&sbp->mtx); 7261 7262 #ifdef SAN_DIAG_SUPPORT 7263 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET, 7264 (HBA_WWN *)&ndlp->nlp_portname, -1); 7265 #endif 7266 
7267 iocbq->flag |= IOCB_PRIORITY; 7268 7269 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7270 "Target Reset: did=%x", did); 7271 7272 /* Close the node for any further normal IO */ 7273 emlxs_node_close(port, ndlp, FC_FCP_RING, pkt->pkt_timeout); 7274 7275 /* Flush the IO's on the tx queues */ 7276 (void) emlxs_tx_node_flush(port, ndlp, rp, 0, sbp); 7277 } 7278 7279 /* Check for lun reset */ 7280 else if (cmd[10] & 0x10) { 7281 mutex_enter(&sbp->mtx); 7282 sbp->pkt_flags |= PACKET_FCP_LUN_RESET; 7283 sbp->pkt_flags |= PACKET_POLLED; 7284 mutex_exit(&sbp->mtx); 7285 7286 #ifdef SAN_DIAG_SUPPORT 7287 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET, 7288 (HBA_WWN *)&ndlp->nlp_portname, lun); 7289 #endif 7290 7291 iocbq->flag |= IOCB_PRIORITY; 7292 7293 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7294 "LUN Reset: did=%x LUN=%02x02x", did, cmd[0], cmd[1]); 7295 7296 /* Flush the IO's on the tx queues for this lun */ 7297 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp); 7298 } 7299 7300 /* Initalize sbp */ 7301 mutex_enter(&sbp->mtx); 7302 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7303 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 7304 sbp->node = (void *)ndlp; 7305 sbp->lun = lun; 7306 sbp->class = iocb->ulpClass; 7307 sbp->did = ndlp->nlp_DID; 7308 mutex_exit(&sbp->mtx); 7309 7310 if (pkt->pkt_cmdlen) { 7311 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7312 DDI_DMA_SYNC_FORDEV); 7313 } 7314 7315 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) { 7316 emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen, 7317 DDI_DMA_SYNC_FORDEV); 7318 } 7319 7320 HBASTATS.FcpIssued++; 7321 7322 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], iocbq); 7323 7324 return (FC_SUCCESS); 7325 7326 } /* emlxs_send_fcp_cmd() */ 7327 7328 7329 #ifdef SFCT_SUPPORT 7330 static int32_t 7331 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp) 7332 { 7333 emlxs_hba_t *hba = HBA; 7334 fc_packet_t *pkt; 7335 IOCBQ *iocbq; 7336 IOCB *iocb; 7337 NODELIST *ndlp; 7338 uint16_t iotag; 7339 uint32_t did; 7340 ddi_dma_cookie_t *cp_cmd; 7341 7342 pkt = PRIV2PKT(sbp); 7343 7344 did = sbp->did; 7345 ndlp = sbp->node; 7346 7347 iocbq = &sbp->iocbq; 7348 iocb = &iocbq->iocb; 7349 7350 /* Make sure node is still active */ 7351 if (!ndlp->nlp_active) { 7352 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7353 "*Node not found. did=%x", did); 7354 7355 return (FC_BADPACKET); 7356 } 7357 7358 /* If gate is closed */ 7359 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7360 return (FC_TRAN_BUSY); 7361 } 7362 7363 /* Get the iotag by registering the packet */ 7364 iotag = emlxs_register_pkt(sbp->ring, sbp); 7365 7366 if (!iotag) { 7367 /* No more command slots available, retry later */ 7368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7369 "*Adapter Busy. 
Unable to allocate iotag: did=0x%x", did); 7370 7371 return (FC_TRAN_BUSY); 7372 } 7373 7374 /* Point of no return */ 7375 7376 #if (EMLXS_MODREV >= EMLXS_MODREV3) 7377 cp_cmd = pkt->pkt_cmd_cookie; 7378 #else 7379 cp_cmd = &pkt->pkt_cmd_cookie; 7380 #endif /* >= EMLXS_MODREV3 */ 7381 7382 iocb->un.fcpt64.bdl.addrHigh = putPaddrHigh(cp_cmd->dmac_laddress); 7383 iocb->un.fcpt64.bdl.addrLow = putPaddrLow(cp_cmd->dmac_laddress); 7384 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen; 7385 iocb->un.fcpt64.bdl.bdeFlags = 0; 7386 7387 if (hba->sli_mode < 3) { 7388 iocb->ulpBdeCount = 1; 7389 iocb->ulpLe = 1; 7390 } else { /* SLI3 */ 7391 7392 iocb->ulpBdeCount = 0; 7393 iocb->ulpLe = 0; 7394 iocb->unsli3.ext_iocb.ebde_count = 0; 7395 } 7396 7397 /* Initalize iocbq */ 7398 iocbq->port = (void *)port; 7399 iocbq->node = (void *)ndlp; 7400 iocbq->ring = (void *)sbp->ring; 7401 7402 /* Initalize iocb */ 7403 iocb->ulpContext = (uint16_t)pkt->pkt_cmd_fhdr.rx_id; 7404 iocb->ulpIoTag = iotag; 7405 iocb->ulpRsvdByte = 7406 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 7407 iocb->ulpOwner = OWN_CHIP; 7408 iocb->ulpClass = sbp->class; 7409 iocb->ulpCommand = CMD_FCP_TRSP64_CX; 7410 7411 /* Set the pkt timer */ 7412 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7413 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 7414 7415 if (pkt->pkt_cmdlen) { 7416 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7417 DDI_DMA_SYNC_FORDEV); 7418 } 7419 7420 HBASTATS.FcpIssued++; 7421 7422 emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq); 7423 7424 return (FC_SUCCESS); 7425 7426 } /* emlxs_send_fct_status() */ 7427 7428 7429 static int32_t 7430 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp) 7431 { 7432 emlxs_hba_t *hba = HBA; 7433 fc_packet_t *pkt; 7434 IOCBQ *iocbq; 7435 IOCB *iocb; 7436 NODELIST *ndlp; 7437 uint16_t iotag; 7438 uint32_t did; 7439 7440 pkt = PRIV2PKT(sbp); 7441 7442 did = sbp->did; 7443 ndlp = sbp->node; 7444 7445 7446 iocbq = &sbp->iocbq; 7447 iocb = &iocbq->iocb; 7448 7449 /* Make sure node is still active */ 7450 if ((ndlp == NULL) || (!ndlp->nlp_active)) { 7451 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7452 "*Node not found. did=%x", did); 7453 7454 return (FC_BADPACKET); 7455 } 7456 7457 /* If gate is closed */ 7458 if (ndlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED) { 7459 return (FC_TRAN_BUSY); 7460 } 7461 7462 /* Get the iotag by registering the packet */ 7463 iotag = emlxs_register_pkt(sbp->ring, sbp); 7464 7465 if (!iotag) { 7466 /* No more command slots available, retry later */ 7467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7468 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did); 7469 7470 return (FC_TRAN_BUSY); 7471 } 7472 7473 /* Point of no return */ 7474 iocbq->port = (void *)port; 7475 iocbq->node = (void *)ndlp; 7476 iocbq->ring = (void *)sbp->ring; 7477 /* 7478 * Don't give the abort priority, we want the IOCB 7479 * we are aborting to be processed first. 
7480 */ 7481 iocbq->flag |= IOCB_SPECIAL; 7482 7483 iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id; 7484 iocb->ulpIoTag = iotag; 7485 iocb->ulpLe = 1; 7486 iocb->ulpClass = sbp->class; 7487 iocb->ulpOwner = OWN_CHIP; 7488 7489 if (hba->state >= FC_LINK_UP) { 7490 /* Create the abort IOCB */ 7491 iocb->un.acxri.abortType = ABORT_TYPE_ABTS; 7492 iocb->ulpCommand = CMD_ABORT_XRI_CX; 7493 7494 } else { 7495 /* Create the close IOCB */ 7496 iocb->ulpCommand = CMD_CLOSE_XRI_CX; 7497 7498 } 7499 7500 iocb->ulpRsvdByte = 7501 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout); 7502 /* Set the pkt timer */ 7503 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7504 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 7505 7506 emlxs_sli_issue_iocb_cmd(hba, sbp->ring, iocbq); 7507 7508 return (FC_SUCCESS); 7509 7510 } /* emlxs_send_fct_abort() */ 7511 7512 #endif /* SFCT_SUPPORT */ 7513 7514 7515 static int32_t 7516 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp) 7517 { 7518 emlxs_hba_t *hba = HBA; 7519 fc_packet_t *pkt; 7520 IOCBQ *iocbq; 7521 IOCB *iocb; 7522 RING *rp; 7523 uint32_t i; 7524 NODELIST *ndlp; 7525 uint32_t did; 7526 7527 pkt = PRIV2PKT(sbp); 7528 rp = &hba->ring[FC_IP_RING]; 7529 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 7530 7531 /* Check if node exists */ 7532 /* Broadcast did is always a success */ 7533 ndlp = emlxs_node_find_did(port, did); 7534 7535 if (!ndlp || !ndlp->nlp_active) { 7536 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7537 "Node not found. did=0x%x", did); 7538 7539 return (FC_BADPACKET); 7540 } 7541 7542 /* Check if gate is temporarily closed */ 7543 if (ndlp->nlp_flag[FC_IP_RING] & NLP_CLOSED) { 7544 return (FC_TRAN_BUSY); 7545 } 7546 7547 /* Check if an exchange has been created */ 7548 if ((ndlp->nlp_Xri == 0) && (did != Bcast_DID)) { 7549 /* No exchange. Try creating one */ 7550 (void) emlxs_create_xri(port, rp, ndlp); 7551 7552 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7553 "Adapter Busy. Exchange not found. 
/*
 * emlxs_send_els:
 *
 * Build and transmit an outbound ELS command on the ELS ring.
 * Handles special casing for FLOGI/FDISC (NPIV), PLOGI (self-login
 * rejection, point-to-point first-login fixups), and ADISC with no
 * node.  Returns FC_SUCCESS, FC_FAILURE (self PLOGI), FC_BADPACKET,
 * or FC_TRAN_BUSY.
 */
static int32_t
emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_port_t *vport;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t cmd;
	int i;
	ELS_PKT *els_pkt;
	NODELIST *ndlp;
	uint32_t did;
	char fcsp_msg[32];

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Point of no return, except for ADISC & PLOGI */

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
		/* On a virtual port a fabric login becomes FDISC */
		if (port->vpi > 0) {
			cmd = ELS_CMD_FDISC;
			*((uint32_t *)pkt->pkt_cmd) = cmd;
		}
		ndlp = NULL;

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	case ELS_CMD_PLOGI:
		/* Make sure we don't log into ourself */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (!(vport->flag & EMLXS_PORT_BOUND)) {
				continue;
			}

			if (did == vport->did) {
				pkt->pkt_state = FC_PKT_NPORT_RJT;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
				emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

				return (FC_FAILURE);
			}
		}

		ndlp = NULL;

		/* Check if this is the first PLOGI */
		/* after a PT_TO_PT connection */
		if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
			MAILBOXQ *mbox;

			/* ULP bug fix */
			if (pkt->pkt_cmd_fhdr.s_id == 0) {
				/* Derive our sid from the default P2P pair */
				pkt->pkt_cmd_fhdr.s_id =
				    pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
				    FP_DEFAULT_SID;
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
				    pkt->pkt_cmd_fhdr.s_id,
				    pkt->pkt_cmd_fhdr.d_id);
			}

			mutex_enter(&EMLXS_PORT_LOCK);
			port->did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.s_id);
			mutex_exit(&EMLXS_PORT_LOCK);

			/* Update our service parms */
			if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
			    MEM_MBOX | MEM_PRI))) {
				emlxs_mb_config_link(hba, (MAILBOX *) mbox);

				/* Mailbox is returned to the pool unless */
				/* the SLI layer kept it (MBX_BUSY) */
				if (emlxs_sli_issue_mbox_cmd(hba,
				    (MAILBOX *)mbox, MBX_NOWAIT, 0)
				    != MBX_BUSY) {
					(void) emlxs_mem_put(hba, MEM_MBOX,
					    (uint8_t *)mbox);
				}

			}
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	default:
		ndlp = emlxs_node_find_did(port, did);

		/* If an ADISC is being sent and we have no node, */
		/* then we must fail the ADISC now */
		if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {

			/* Build the LS_RJT response */
			els_pkt = (ELS_PKT *)pkt->pkt_resp;
			els_pkt->elsCode = 0x01;
			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
			    LSRJT_LOGICAL_ERR;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
			    LSEXP_NOTHING_MORE;
			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "ADISC Rejected. Node not found. did=0x%x", did);

			/* Return this as rejected by the target */
			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);

			return (FC_SUCCESS);
		}
	}

	/* DID == Bcast_DID is special case to indicate that */
	/* RPI is being passed in seq_id field */
	/* This is used by emlxs_send_logo() for target mode */

	/* Initalize iocbq */
	iocbq->node = (void *)ndlp;
	if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Check cmd */
	switch (cmd) {
	case ELS_CMD_PRLI:
	{
		/*
		 * if our firmware version is 3.20 or later,
		 * set the following bits for FC-TAPE support.
		 */

		if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
			els_pkt->un.prli.ConfmComplAllowed = 1;
			els_pkt->un.prli.Retry = 1;
			els_pkt->un.prli.TaskRetryIdReq = 1;
		} else {
			els_pkt->un.prli.ConfmComplAllowed = 0;
			els_pkt->un.prli.Retry = 0;
			els_pkt->un.prli.TaskRetryIdReq = 0;
		}

		break;
	}

	/* This is a patch for the ULP stack. */

	/*
	 * ULP only reads our service paramters once during bind_port,
	 * but the service parameters change due to topology.
	 */
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PLOGI:
	case ELS_CMD_PDISC:
	{
		/* Copy latest service parameters to payload */
		bcopy((void *) &port->sparam,
		    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));

#ifdef NPIV_SUPPORT
		/* Advertise the vendor-version (NPIV) field on PLOGI */
		if ((hba->flag & FC_NPIV_ENABLED) &&
		    (hba->flag & FC_NPIV_SUPPORTED) &&
		    (cmd == ELS_CMD_PLOGI)) {
			SERV_PARM *sp;
			emlxs_vvl_fmt_t *vvl;

			sp = (SERV_PARM *)&els_pkt->un.logi;
			sp->valid_vendor_version = 1;
			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
			vvl->un0.w0.oui = 0x0000C9;
			vvl->un0.word0 = SWAP_DATA32(vvl->un0.word0);
			vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
			vvl->un1.word1 = SWAP_DATA32(vvl->un1.word1);
		}
#endif /* NPIV_SUPPORT */

#ifdef DHCHAP_SUPPORT
		emlxs_dhc_init_sp(port, did,
		    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
#endif /* DHCHAP_SUPPORT */

		break;
	}

	}

	/* Initialize the sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
		if (port->ini_mode) {
			/* Make sure fabric node is destroyed */
			/* It should already have been destroyed at link down */
			/* Unregister the fabric did and attempt a deferred */
			/* iocb send */
			if (emlxs_mb_unreg_did(port, Fabric_DID, NULL, NULL,
			    iocbq) == 0) {
				/* Deferring iocb tx until */
				/* completion of unreg */
				return (FC_SUCCESS);
			}
		}
		break;

	case ELS_CMD_PLOGI:

		ndlp = emlxs_node_find_did(port, did);

		if (ndlp && ndlp->nlp_active) {
			/* Close the node for any further normal IO */
			emlxs_node_close(port, ndlp, FC_FCP_RING,
			    pkt->pkt_timeout + 10);
			emlxs_node_close(port, ndlp, FC_IP_RING,
			    pkt->pkt_timeout + 10);

			/* Flush the IO's on the tx queues */
			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

			/* Flush the IO's on the chip queues */
			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
		}

		break;

	case ELS_CMD_PRLI:

		ndlp = emlxs_node_find_did(port, did);

		if (ndlp && ndlp->nlp_active) {
			/* Close the node for any further FCP IO */
			emlxs_node_close(port, ndlp, FC_FCP_RING,
			    pkt->pkt_timeout + 10);

			/* Flush tx queues */
			(void) emlxs_tx_node_flush(port, ndlp,
			    &hba->ring[FC_FCP_RING], 0, 0);

			/* Flush chip queues */
			(void) emlxs_chipq_node_flush(port,
			    &hba->ring[FC_FCP_RING], ndlp, 0);
		}

		break;

	}

	HBASTATS.ElsCmdIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_els() */
/*
 * emlxs_send_els_rsp:
 *
 * Build and transmit an ELS response on the ELS ring.  The response is
 * correlated to the unsolicited command it answers via ox_id (either a
 * driver-internal auto-reply token or an unsolicited-buffer token).
 * Driver-generated exchanges (LOGO/PRLI/PRLO/RSCN/PLOGI intercepts) are
 * completed locally without being sent.  Returns FC_SUCCESS,
 * FC_BADPACKET, or FC_TRAN_BUSY.
 */
static int32_t
emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	int i;
	uint32_t cmd;
	uint32_t ucmd;
	ELS_PKT *els_pkt;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t did;
	char fcsp_msg[32];
	uint8_t *ub_buffer;

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Acquire the unsolicited command this pkt is replying to */
	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
		/* This is for auto replies when no ub's are used */
		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
		ubp = NULL;
		ub_priv = NULL;
		ub_buffer = NULL;

#ifdef SFCT_SUPPORT
		if (sbp->fct_cmd) {
			fct_els_t *els =
			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
			ub_buffer = (uint8_t *)els->els_req_payload;
		}
#endif /* SFCT_SUPPORT */

	} else {
		/* Find the ub buffer that goes with this reply */
		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
			    "ELS reply: Invalid oxid=%x",
			    pkt->pkt_cmd_fhdr.ox_id);
			return (FC_BADPACKET);
		}

		ub_buffer = (uint8_t *)ubp->ub_buffer;
		ub_priv = ubp->ub_fca_private;
		ucmd = ub_priv->cmd;

		ub_priv->flags |= EMLXS_UB_REPLY;

		/* Reset oxid to ELS command */
		/* We do this because the ub is only valid */
		/* until we return from this thread */
		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
	}

	/* Save the result */
	sbp->ucmd = ucmd;

	/* Check for interceptions */
	switch (ucmd) {

#ifdef ULP_PATCH2
	case ELS_CMD_LOGO:
	{
		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
	}
#endif

#ifdef ULP_PATCH3
	case ELS_CMD_PRLI:
	{
		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
	}
#endif


#ifdef ULP_PATCH4
	case ELS_CMD_PRLO:
	{
		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
	}
#endif

#ifdef ULP_PATCH6
	case ELS_CMD_RSCN:
	{
		/* Check if this RSCN was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);
			cmd &= ELS_CMD_MASK;

			/*
			 * If ULP is accepting this,
			 * then close affected node
			 */
			if (port->ini_mode && ub_buffer && cmd
			    == ELS_CMD_ACC) {
				fc_rscn_t *rscn;
				uint32_t count;
				uint32_t *lp;

				/*
				 * Only the Leadville code path will
				 * come thru here. The RSCN data is NOT
				 * swapped properly for the Comstar code
				 * path.
				 */
				lp = (uint32_t *)ub_buffer;
				rscn = (fc_rscn_t *)lp++;
				count =
				    ((rscn->rscn_payload_len - 4) / 4);

				/* Close affected ports */
				for (i = 0; i < count; i++, lp++) {
					(void) emlxs_port_offline(port,
					    *lp);
				}
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "RSCN %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this RSCN,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
	}
#endif

	case ELS_CMD_PLOGI:
	{
		/* Check if this PLOGI was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);
			cmd &= ELS_CMD_MASK;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this PLOGI,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
	}

	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif /* EMLXS_MODREV2X */


	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Check if modifications are needed */
	switch (ucmd) {
	case (ELS_CMD_PRLI):

		if (cmd == ELS_CMD_ACC) {
			/* This is a patch for the ULP stack. */
			/* ULP does not keep track of FCP2 support */

			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
				els_pkt->un.prli.ConfmComplAllowed = 1;
				els_pkt->un.prli.Retry = 1;
				els_pkt->un.prli.TaskRetryIdReq = 1;
			} else {
				els_pkt->un.prli.ConfmComplAllowed = 0;
				els_pkt->un.prli.Retry = 0;
				els_pkt->un.prli.TaskRetryIdReq = 0;
			}
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_PLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PDISC:

		if (cmd == ELS_CMD_ACC) {
			/* This is a patch for the ULP stack. */

			/*
			 * ULP only reads our service parameters
			 * once during bind_port, but the service
			 * parameters change due to topology.
			 */

			/* Copy latest service parameters to payload */
			bcopy((void *)&port->sparam,
			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));

#ifdef DHCHAP_SUPPORT
			emlxs_dhc_init_sp(port, did,
			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
#endif /* DHCHAP_SUPPORT */

		}

		break;

	}

	/* Initalize iocbq */
	iocbq->node = (void *)NULL;
	if (emlxs_sli_prep_els_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *) NULL;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);

	/* Process nodes */
	switch (ucmd) {
	case ELS_CMD_RSCN:
	{
		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
			fc_rscn_t *rscn;
			uint32_t count;
			uint32_t *lp = NULL;

			/*
			 * Only the Leadville code path will come thru
			 * here. The RSCN data is NOT swapped properly
			 * for the Comstar code path.
			 */
			lp = (uint32_t *)ub_buffer;
			rscn = (fc_rscn_t *)lp++;
			count = ((rscn->rscn_payload_len - 4) / 4);

			/* Close affected ports */
			for (i = 0; i < count; i++, lp++) {
				(void) emlxs_port_offline(port, *lp);
			}
		}
		break;
	}
	case ELS_CMD_PLOGI:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING,
				    pkt->pkt_timeout + 10);
				emlxs_node_close(port, ndlp, FC_IP_RING,
				    pkt->pkt_timeout + 10);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}

		break;

	case ELS_CMD_PRLI:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING,
				    pkt->pkt_timeout + 10);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->ring[FC_FCP_RING], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_FCP_RING], ndlp, 0);
			}
		}

		break;

	case ELS_CMD_PRLO:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->ring[FC_FCP_RING], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->ring[FC_FCP_RING], ndlp, 0);
			}
		}

		break;

	case ELS_CMD_LOGO:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, FC_FCP_RING, 60);
				emlxs_node_close(port, ndlp, FC_IP_RING, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}

		break;
	}

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.ElsRspIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_els_rsp() */
if (ndlp && ndlp->nlp_active) { 8284 /* Close the node for any further normal IO */ 8285 emlxs_node_close(port, ndlp, FC_FCP_RING, 60); 8286 emlxs_node_close(port, ndlp, FC_IP_RING, 60); 8287 8288 /* Flush tx queues */ 8289 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8290 8291 /* Flush chip queues */ 8292 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8293 } 8294 } 8295 8296 break; 8297 } 8298 8299 if (pkt->pkt_cmdlen) { 8300 emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8301 DDI_DMA_SYNC_FORDEV); 8302 } 8303 8304 HBASTATS.ElsRspIssued++; 8305 8306 emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_ELS_RING], iocbq); 8307 8308 return (FC_SUCCESS); 8309 8310 } /* emlxs_send_els_rsp() */ 8311 8312 8313 #ifdef MENLO_SUPPORT 8314 static int32_t 8315 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp) 8316 { 8317 emlxs_hba_t *hba = HBA; 8318 fc_packet_t *pkt; 8319 IOCBQ *iocbq; 8320 IOCB *iocb; 8321 NODELIST *ndlp; 8322 uint32_t did; 8323 uint32_t *lp; 8324 8325 pkt = PRIV2PKT(sbp); 8326 did = EMLXS_MENLO_DID; 8327 lp = (uint32_t *)pkt->pkt_cmd; 8328 8329 iocbq = &sbp->iocbq; 8330 iocb = &iocbq->iocb; 8331 8332 ndlp = emlxs_node_find_did(port, did); 8333 8334 if (!ndlp || !ndlp->nlp_active) { 8335 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 8336 "Node not found. 
did=0x%x", did); 8337 8338 return (FC_BADPACKET); 8339 } 8340 8341 iocbq->node = (void *) ndlp; 8342 if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) { 8343 return (FC_TRAN_BUSY); 8344 } 8345 8346 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) { 8347 /* Cmd phase */ 8348 8349 /* Initalize iocb */ 8350 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id; 8351 iocb->ulpContext = 0; 8352 iocb->ulpPU = 3; 8353 8354 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8355 "%s: [%08x,%08x,%08x,%08x]", 8356 emlxs_menlo_cmd_xlate(SWAP_LONG(lp[0])), SWAP_LONG(lp[1]), 8357 SWAP_LONG(lp[2]), SWAP_LONG(lp[3]), SWAP_LONG(lp[4])); 8358 8359 } else { /* FC_PKT_OUTBOUND */ 8360 8361 /* MENLO_CMD_FW_DOWNLOAD Data Phase */ 8362 iocb->ulpCommand = CMD_GEN_REQUEST64_CX; 8363 8364 /* Initalize iocb */ 8365 iocb->un.genreq64.param = 0; 8366 iocb->ulpContext = pkt->pkt_cmd_fhdr.rx_id; 8367 iocb->ulpPU = 1; 8368 8369 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 8370 "%s: Data: rxid=0x%x size=%d", 8371 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD), 8372 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen); 8373 } 8374 8375 /* Initalize sbp */ 8376 mutex_enter(&sbp->mtx); 8377 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8378 ((pkt->pkt_timeout > 0xff) ? 
	    0 : 10);
	sbp->node = (void *) ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	/* Sync the command buffer to the device before issuing the IOCB */
	emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
	    DDI_DMA_SYNC_FORDEV);

	HBASTATS.CtCmdIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_menlo() */
#endif /* MENLO_SUPPORT */


/*
 * emlxs_send_ct()
 *
 * Issue an outbound CT (Common Transport) command contained in 'sbp'
 * on the CT ring.  The destination node must already be known and
 * active.
 *
 * Returns FC_SUCCESS on issue, FC_BADPACKET if the destination node
 * is unknown or inactive, or FC_TRAN_BUSY if an IOCB could not be
 * prepared.
 */
static int32_t
emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	uint32_t did;	/* destination port id, native byte order */

	pkt = PRIV2PKT(sbp);
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* The destination node must be logged in */
	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	/* Little-endian hosts: swap the CT payload for the hardware */
	emlxs_swap_ct_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	iocbq->node = (void *)ndlp;
	if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Initialize sbp */
	mutex_enter(&sbp->mtx);
	/* Short timeouts (<= 0xff) get a 10 tick grace period */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	/* Trace the command; the decode table used depends on the */
	/* well-known destination address */
	if (did == NameServer_DID) {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_ctcmd_xlate(
		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

	} else if (did == FDMI_DID) {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_mscmd_xlate(
		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
	} else {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_rmcmd_xlate(
		    SWAP_DATA16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, SWAP_DATA32(lp0[4]), SWAP_DATA32(lp0[5]));
	}

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.CtCmdIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ct() */


/*
 * emlxs_send_ct_rsp()
 *
 * Issue a CT response frame on the CT ring.  Returns FC_SUCCESS or
 * FC_TRAN_BUSY if an IOCB could not be prepared.
 */
static int32_t
emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t did;
	uint32_t *cmd;
	SLI_CT_REQUEST *CtCmd;

	pkt = PRIV2PKT(sbp);
	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
	cmd = (uint32_t *)pkt->pkt_cmd;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	/* Little-endian hosts: swap the CT payload for the hardware */
	emlxs_swap_ct_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	/* A CT response is not tied to a node */
	iocbq->node = (void *)NULL;
	if (emlxs_sli_prep_ct_iocb(port, sbp) != FC_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	/* Initialize sbp */
	mutex_enter(&sbp->mtx);
	/* Short timeouts (<= 0xff) get a 10 tick grace period */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = NULL;
	sbp->lun = 0;
	sbp->class = iocb->ulpClass;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
	    emlxs_rmcmd_xlate(SWAP_DATA16(
	    CtCmd->CommandResponse.bits.CmdRsp)),
	    CtCmd->ReasonCode, CtCmd->Explanation,
	    SWAP_DATA32(cmd[4]), SWAP_DATA32(cmd[5]),
	    pkt->pkt_cmd_fhdr.rx_id);

	if (pkt->pkt_cmdlen) {
		emlxs_mpdata_sync(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.CtRspIssued++;

	emlxs_sli_issue_iocb_cmd(hba, &hba->ring[FC_CT_RING], iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ct_rsp() */


/*
 * emlxs_get_instance()
 * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst.
 * Returns MAX_FC_BRDS if the ddi instance is not registered.
 */
extern uint32_t
emlxs_get_instance(int32_t ddiinst)
{
	uint32_t i;
	uint32_t inst;

	mutex_enter(&emlxs_device.lock);

	/* MAX_FC_BRDS acts as the "not found" sentinel */
	inst = MAX_FC_BRDS;
	for (i = 0; i < emlxs_instance_count; i++) {
		if (emlxs_instance[i] == ddiinst) {
			inst = i;
			break;
		}
	}

	mutex_exit(&emlxs_device.lock);

	return (inst);

} /* emlxs_get_instance() */


/*
 * emlxs_add_instance()
 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst.
 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0.
 */
static uint32_t
emlxs_add_instance(int32_t ddiinst)
{
	uint32_t i;

	mutex_enter(&emlxs_device.lock);

	/* First see if the ddiinst already exists */
	for (i = 0; i < emlxs_instance_count; i++) {
		if (emlxs_instance[i] == ddiinst) {
			break;
		}
	}

	/* If it doesn't already exist, add it */
	if (i >= emlxs_instance_count) {
		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
			emlxs_instance[i] = ddiinst;
			emlxs_instance_count++;
			emlxs_device.hba_count = emlxs_instance_count;
		}
	}

	mutex_exit(&emlxs_device.lock);

	return (i);

} /* emlxs_add_instance() */


/*
 * emlxs_pkt_complete()
 *
 * Central I/O completion routine.  Validates that the packet is in a
 * completable state, records the final state, adjusts any parent
 * flush packet, and then either wakes a polling thread, completes the
 * packet inline (driver-internal packets), or queues it on the done
 * queue for callback in the iodone server thread.
 */
/*ARGSUSED*/
extern void
emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t doneq)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_buf_t *fpkt;

	port = sbp->port;

	if (!port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);

		return;
	}

	hba = HBA;

	mutex_enter(&sbp->mtx);

	/* Check for error conditions */
	if (sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
		if (sbp->pkt_flags & PACKET_RETURNED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already returned. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_COMPLETED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already completed. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already in completion. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on chip queue. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		mutex_exit(&sbp->mtx);
		return;
	}

	/* Packet is now in completion */
	sbp->pkt_flags |= PACKET_IN_COMPLETION;

	/* Set the state if not already set */
	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
	}

	/* Check for parent flush packet */
	/* If pkt has a parent flush packet then adjust its count now */
	fpkt = sbp->fpkt;
	if (fpkt) {
		/*
		 * We will try to NULL sbp->fpkt inside the
		 * fpkt's mutex if possible
		 */

		if (!(fpkt->pkt_flags & PACKET_RETURNED)) {
			mutex_enter(&fpkt->mtx);
			if (fpkt->flush_count) {
				fpkt->flush_count--;
			}
			sbp->fpkt = NULL;
			mutex_exit(&fpkt->mtx);
		} else {	/* fpkt has been returned already */

			sbp->fpkt = NULL;
		}
	}

	/* If pkt is polled, then wake up sleeping thread */
	if (sbp->pkt_flags & PACKET_POLLED) {
		/* Don't set the PACKET_RETURNED flag here */
		/* because the polling thread will do it */
		sbp->pkt_flags |= PACKET_COMPLETED;
		mutex_exit(&sbp->mtx);

		/* Wake up sleeping thread */
		mutex_enter(&EMLXS_PKT_LOCK);
		cv_broadcast(&EMLXS_PKT_CV);
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* If packet was generated by our driver, */
	/* then complete it immediately */
	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);

		emlxs_iodone(sbp);
	}

	/* Put the pkt on the done queue for callback */
	/* completion in another thread */
	else {
		sbp->pkt_flags |= PACKET_IN_DONEQ;
		sbp->next = NULL;
		mutex_exit(&sbp->mtx);

		/* Put pkt on doneq, so I/O's will be completed in order */
		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->iodone_tail == NULL) {
			hba->iodone_list = sbp;
			hba->iodone_count = 1;
		} else {
			hba->iodone_tail->next = sbp;
			hba->iodone_count++;
		}
		hba->iodone_tail = sbp;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Trigger a thread to service the doneq */
		emlxs_thread_trigger1(&hba->iodone_thread,
		    emlxs_iodone_server);
	}

	return;

} /* emlxs_pkt_complete() */


#ifdef SAN_DIAG_SUPPORT
/*
 * This routine is called with EMLXS_PORT_LOCK held so we can just increment
 * normally. Don't have to use atomic operations.
8777 */ 8778 extern void 8779 emlxs_update_sd_bucket(emlxs_buf_t *sbp) 8780 { 8781 emlxs_port_t *vport; 8782 fc_packet_t *pkt; 8783 uint32_t did; 8784 hrtime_t t; 8785 hrtime_t delta_time; 8786 int i; 8787 NODELIST *ndlp; 8788 8789 vport = sbp->port; 8790 8791 if ((sd_bucket.search_type == 0) || 8792 (vport->sd_io_latency_state != SD_COLLECTING)) 8793 return; 8794 8795 /* Compute the iolatency time in microseconds */ 8796 t = gethrtime(); 8797 delta_time = t - sbp->sd_start_time; 8798 pkt = PRIV2PKT(sbp); 8799 did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id); 8800 ndlp = emlxs_node_find_did(vport, did); 8801 8802 if (ndlp) { 8803 if (delta_time >= 8804 sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) 8805 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1]. 8806 count++; 8807 else if (delta_time <= sd_bucket.values[0]) 8808 ndlp->sd_dev_bucket[0].count++; 8809 else { 8810 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) { 8811 if ((delta_time > sd_bucket.values[i-1]) && 8812 (delta_time <= sd_bucket.values[i])) { 8813 ndlp->sd_dev_bucket[i].count++; 8814 break; 8815 } 8816 } 8817 } 8818 } 8819 } 8820 #endif /* SAN_DIAG_SUPPORT */ 8821 8822 /*ARGSUSED*/ 8823 static void 8824 emlxs_iodone_server(void *arg1, void *arg2, void *arg3) 8825 { 8826 emlxs_hba_t *hba = (emlxs_hba_t *)arg1; 8827 emlxs_buf_t *sbp; 8828 8829 mutex_enter(&EMLXS_PORT_LOCK); 8830 8831 /* Remove one pkt from the doneq head and complete it */ 8832 while ((sbp = hba->iodone_list) != NULL) { 8833 if ((hba->iodone_list = sbp->next) == NULL) { 8834 hba->iodone_tail = NULL; 8835 hba->iodone_count = 0; 8836 } else { 8837 hba->iodone_count--; 8838 } 8839 8840 mutex_exit(&EMLXS_PORT_LOCK); 8841 8842 /* Prepare the pkt for completion */ 8843 mutex_enter(&sbp->mtx); 8844 sbp->next = NULL; 8845 sbp->pkt_flags &= ~PACKET_IN_DONEQ; 8846 mutex_exit(&sbp->mtx); 8847 8848 /* Complete the IO now */ 8849 emlxs_iodone(sbp); 8850 8851 /* Reacquire lock and check if more work is to be done */ 8852 
mutex_enter(&EMLXS_PORT_LOCK); 8853 } 8854 8855 mutex_exit(&EMLXS_PORT_LOCK); 8856 8857 return; 8858 8859 } /* End emlxs_iodone_server */ 8860 8861 8862 static void 8863 emlxs_iodone(emlxs_buf_t *sbp) 8864 { 8865 fc_packet_t *pkt; 8866 8867 pkt = PRIV2PKT(sbp); 8868 8869 /* Check one more time that the pkt has not already been returned */ 8870 if (sbp->pkt_flags & PACKET_RETURNED) { 8871 return; 8872 } 8873 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8874 emlxs_unswap_pkt(sbp); 8875 #endif /* EMLXS_MODREV2X */ 8876 8877 mutex_enter(&sbp->mtx); 8878 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_RETURNED); 8879 mutex_exit(&sbp->mtx); 8880 8881 if (pkt->pkt_comp) { 8882 (*pkt->pkt_comp) (pkt); 8883 } 8884 8885 return; 8886 8887 } /* emlxs_iodone() */ 8888 8889 8890 8891 extern fc_unsol_buf_t * 8892 emlxs_ub_find(emlxs_port_t *port, uint32_t token) 8893 { 8894 emlxs_unsol_buf_t *pool; 8895 fc_unsol_buf_t *ubp; 8896 emlxs_ub_priv_t *ub_priv; 8897 8898 /* Check if this is a valid ub token */ 8899 if (token < EMLXS_UB_TOKEN_OFFSET) { 8900 return (NULL); 8901 } 8902 8903 mutex_enter(&EMLXS_UB_LOCK); 8904 8905 pool = port->ub_pool; 8906 while (pool) { 8907 /* Find a pool with the proper token range */ 8908 if (token >= pool->pool_first_token && 8909 token <= pool->pool_last_token) { 8910 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token - 8911 pool->pool_first_token)]; 8912 ub_priv = ubp->ub_fca_private; 8913 8914 if (ub_priv->token != token) { 8915 EMLXS_MSGF(EMLXS_CONTEXT, 8916 &emlxs_sfs_debug_msg, 8917 "ub_find: Invalid token=%x", ubp, token, 8918 ub_priv->token); 8919 8920 ubp = NULL; 8921 } 8922 8923 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) { 8924 EMLXS_MSGF(EMLXS_CONTEXT, 8925 &emlxs_sfs_debug_msg, 8926 "ub_find: Buffer not in use. 
buffer=%p " 8927 "token=%x", ubp, token); 8928 8929 ubp = NULL; 8930 } 8931 8932 mutex_exit(&EMLXS_UB_LOCK); 8933 8934 return (ubp); 8935 } 8936 8937 pool = pool->pool_next; 8938 } 8939 8940 mutex_exit(&EMLXS_UB_LOCK); 8941 8942 return (NULL); 8943 8944 } /* emlxs_ub_find() */ 8945 8946 8947 8948 extern fc_unsol_buf_t * 8949 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, 8950 uint32_t reserve) 8951 { 8952 emlxs_hba_t *hba = HBA; 8953 emlxs_unsol_buf_t *pool; 8954 fc_unsol_buf_t *ubp; 8955 emlxs_ub_priv_t *ub_priv; 8956 uint32_t i; 8957 uint32_t resv_flag; 8958 uint32_t pool_free; 8959 uint32_t pool_free_resv; 8960 8961 mutex_enter(&EMLXS_UB_LOCK); 8962 8963 pool = port->ub_pool; 8964 while (pool) { 8965 /* Find a pool of the appropriate type and size */ 8966 if ((pool->pool_available == 0) || 8967 (pool->pool_type != type) || 8968 (pool->pool_buf_size < size)) { 8969 goto next_pool; 8970 } 8971 8972 8973 /* Adjust free counts based on availablity */ 8974 /* The free reserve count gets first priority */ 8975 pool_free_resv = 8976 min(pool->pool_free_resv, pool->pool_available); 8977 pool_free = 8978 min(pool->pool_free, 8979 (pool->pool_available - pool_free_resv)); 8980 8981 /* Initialize reserve flag */ 8982 resv_flag = reserve; 8983 8984 if (resv_flag) { 8985 if (pool_free_resv == 0) { 8986 if (pool_free == 0) { 8987 goto next_pool; 8988 } 8989 resv_flag = 0; 8990 } 8991 } else if (pool_free == 0) { 8992 goto next_pool; 8993 } 8994 8995 /* Find next available free buffer in this pool */ 8996 for (i = 0; i < pool->pool_nentries; i++) { 8997 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 8998 ub_priv = ubp->ub_fca_private; 8999 9000 if (!ub_priv->available || 9001 ub_priv->flags != EMLXS_UB_FREE) { 9002 continue; 9003 } 9004 9005 ub_priv->time = hba->timer_tics; 9006 9007 /* Timeout in 5 minutes */ 9008 ub_priv->timeout = (5 * 60); 9009 9010 ub_priv->flags = EMLXS_UB_IN_USE; 9011 9012 /* Alloc the buffer from the pool */ 9013 if (resv_flag) { 9014 
				ub_priv->flags |= EMLXS_UB_RESV;
				pool->pool_free_resv--;
			} else {
				pool->pool_free--;
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
			    ub_priv->token, pool->pool_nentries,
			    pool->pool_available, pool->pool_free,
			    pool->pool_free_resv);

			mutex_exit(&EMLXS_UB_LOCK);

			return (ubp);
		}
next_pool:

		pool = pool->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (NULL);

} /* emlxs_ub_get() */



/*
 * emlxs_set_pkt_state()
 *
 * Translate the adapter iostat/localstat completion codes into ULP
 * fc_packet state/reason/explanation/action fields, and synthesize
 * residual counts and an FCP response frame when the chip did not
 * return a response.  'lock' selects whether sbp->mtx is taken here.
 */
extern void
emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t lock)
{
	fc_packet_t *pkt;
	fcp_rsp_t *fcp_rsp;
	uint32_t i;
	emlxs_xlat_err_t *tptr;
	emlxs_xlat_err_t *entry;


	pkt = PRIV2PKT(sbp);

	if (lock) {
		mutex_enter(&sbp->mtx);
	}

	/* Only the first caller sets the state */
	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		sbp->pkt_flags |= PACKET_STATE_VALID;

		/* Perform table lookup */
		entry = NULL;
		if (iostat != IOSTAT_LOCAL_REJECT) {
			tptr = emlxs_iostat_tbl;
			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
				if (iostat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		} else {	/* iostate == IOSTAT_LOCAL_REJECT */

			/* Local rejects are refined by the localstat code */
			tptr = emlxs_ioerr_tbl;
			for (i = 0; i < IOERR_MAX; i++, tptr++) {
				if (localstat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		}

		if (entry) {
			pkt->pkt_state = entry->pkt_state;
			pkt->pkt_reason = entry->pkt_reason;
			pkt->pkt_expln = entry->pkt_expln;
			pkt->pkt_action = entry->pkt_action;
		} else {
			/* Set defaults */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_ABORTED;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;
		}


		/* Set the residual counts and response frame */
		/* Check if response frame was received from the chip */
		/* If so, then the residual counts will already be set */
		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
			/* We have to create the response frame */
			if (iostat == IOSTAT_SUCCESS) {
				pkt->pkt_resp_resid = 0;
				pkt->pkt_data_resid = 0;

				if ((pkt->pkt_cmd_fhdr.type ==
				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
				    pkt->pkt_resp) {
					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;

					fcp_rsp->fcp_u.fcp_status.
					    rsp_len_set = 1;
					fcp_rsp->fcp_response_len = 8;
				}
			} else {
				/* Otherwise assume no data */
				/* and no response received */
				pkt->pkt_data_resid = pkt->pkt_datalen;
				pkt->pkt_resp_resid = pkt->pkt_rsplen;
			}
		}
	}

	if (lock) {
		mutex_exit(&sbp->mtx);
	}

	return;

} /* emlxs_set_pkt_state() */


#if (EMLXS_MODREVX == EMLXS_MODREV2X)

/*
 * emlxs_swap_service_params()
 *
 * Byte-swap a login service parameter block in place: the common
 * service parameters (16-bit fields plus the 32-bit e_d_tov) and the
 * class 1-4 parameter blocks (16-bit fields).
 */
extern void
emlxs_swap_service_params(SERV_PARM *sp)
{
	uint16_t *p;
	int size;
	int i;

	/* Last 4 bytes of CSP (e_d_tov) are handled separately below */
	size = (sizeof (CSP) - 4) / 2;
	p = (uint16_t *)&sp->cmn;
	for (i = 0; i < size; i++) {
		p[i] = SWAP_DATA16(p[i]);
	}
	sp->cmn.e_d_tov = SWAP_DATA32(sp->cmn.e_d_tov);

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls1;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls2;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls3;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls4;
	for (i = 0; i < size; i++, p++) {
		*p = SWAP_DATA16(*p);
	}

	return;

} /* emlxs_swap_service_params() */

/*
 * emlxs_unswap_pkt()
 * Restore a previously swapped packet to host byte order, based on
 * which PACKET_*_SWAPPED flag is set.
 */
extern void
emlxs_unswap_pkt(emlxs_buf_t *sbp)
{
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		emlxs_swap_fcp_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		emlxs_swap_els_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		emlxs_swap_ct_pkt(sbp);
	}

} /* emlxs_unswap_pkt() */


/*
 * emlxs_swap_fcp_pkt()
 *
 * Byte-swap the FCP command (and response, when valid) payload in
 * place.  Calling it a second time undoes the swap; the
 * PACKET_FCP_SWAPPED flag is toggled to track the current state.
 */
extern void
emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	FCP_CMND *cmd;
	fcp_rsp_t *rsp;
	uint16_t *lunp;
	uint32_t i;

	mutex_enter(&sbp->mtx);

	/* Driver-allocated packets are never swapped */
	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swapped flag: this routine swaps and unswaps */
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
	} else {
		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (FCP_CMND *)pkt->pkt_cmd;
	/* Only touch the response if the chip actually returned one */
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
	    (fcp_rsp_t *)pkt->pkt_resp : NULL;

	/* The size of data buffer needs to be swapped. */
	cmd->fcpDl = SWAP_DATA32(cmd->fcpDl);

	/*
	 * Swap first 2 words of FCP CMND payload.
	 */
	lunp = (uint16_t *)&cmd->fcpLunMsl;
	for (i = 0; i < 4; i++) {
		lunp[i] = SWAP_DATA16(lunp[i]);
	}

	if (rsp) {
		rsp->fcp_resid = SWAP_DATA32(rsp->fcp_resid);
		rsp->fcp_sense_len = SWAP_DATA32(rsp->fcp_sense_len);
		rsp->fcp_response_len = SWAP_DATA32(rsp->fcp_response_len);
	}

	return;

} /* emlxs_swap_fcp_pkt() */


/*
 * emlxs_swap_els_pkt()
 *
 * Byte-swap an ELS command (and response, when valid) payload in
 * place.  The set of words to swap depends on the ELS command, so the
 * command word must be read in host order: before swapping when the
 * buffer is big-endian, after when it has already been swapped.
 */
extern void
emlxs_swap_els_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint16_t *c;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	/* Driver-allocated packets are never swapped */
	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swapped flag: this routine swaps and unswaps */
	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	/* Capture the command code while word 0 is in host order */
	if (!swapped) {
		cmd[0] = SWAP_DATA32(cmd[0]);
		command = cmd[0] & ELS_CMD_MASK;
	} else {
		command = cmd[0] & ELS_CMD_MASK;
		cmd[0] = SWAP_DATA32(cmd[0]);
	}

	if (rsp) {
		rsp[0] = SWAP_DATA32(rsp[0]);
	}

	/* Swap the command-specific payload words */
	switch (command) {
	case ELS_CMD_ACC:
		if (sbp->ucmd == ELS_CMD_ADISC) {
			/* Hard address of originator */
			cmd[1] = SWAP_DATA32(cmd[1]);

			/* N_Port ID of originator */
			cmd[6] = SWAP_DATA32(cmd[6]);
		}
		break;

	case ELS_CMD_PLOGI:
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (rsp) {
			emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
		}
		break;

	case ELS_CMD_RLS:
		cmd[1] = SWAP_DATA32(cmd[1]);

		if (rsp) {
			/* Link error status block: 6 counters */
			for (i = 0; i < 6; i++) {
				rsp[1 + i] = SWAP_DATA32(rsp[1 + i]);
			}
		}
		break;

	case ELS_CMD_ADISC:
		cmd[1] = SWAP_DATA32(cmd[1]);	/* Hard address of originator */
		cmd[6] = SWAP_DATA32(cmd[6]);	/* N_Port ID of originator */
		break;

	case ELS_CMD_PRLI:
		c = (uint16_t *)&cmd[1];
		c[1] = SWAP_DATA16(c[1]);

		cmd[4] = SWAP_DATA32(cmd[4]);

		if (rsp) {
			rsp[4] = SWAP_DATA32(rsp[4]);
		}
		break;

	case ELS_CMD_SCR:
		cmd[1] = SWAP_DATA32(cmd[1]);
		break;

	case ELS_CMD_LINIT:
		if (rsp) {
			rsp[1] = SWAP_DATA32(rsp[1]);
		}
		break;

	default:
		break;
	}

	return;

} /* emlxs_swap_els_pkt() */


/*
 * emlxs_swap_ct_pkt()
 *
 * Byte-swap a CT command (and response, when valid) payload in place.
 * As with the ELS variant, the command code must be sampled while in
 * host order.  On the initial swap, word 0 is also (re)initialized to
 * the CT revision value 0x01000000.
 */
extern void
emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	/* Driver-allocated packets are never swapped */
	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swapped flag: this routine swaps and unswaps */
	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_CT_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	if (!swapped) {
		cmd[0] = 0x01000000;
		command = cmd[2];
	}

	cmd[0] = SWAP_DATA32(cmd[0]);
	cmd[1] = SWAP_DATA32(cmd[1]);
	cmd[2] = SWAP_DATA32(cmd[2]);
	cmd[3] = SWAP_DATA32(cmd[3]);

	/* After swapping, word 2 is now in host order */
	if (swapped) {
		command = cmd[2];
	}

	switch ((command >> 16)) {
	case SLI_CTNS_GA_NXT:
		cmd[4] = SWAP_DATA32(cmd[4]);
		break;

	case SLI_CTNS_GPN_ID:
	case SLI_CTNS_GNN_ID:
	case SLI_CTNS_RPN_ID:
	case SLI_CTNS_RNN_ID:
		cmd[4] = SWAP_DATA32(cmd[4]);
		break;

	case SLI_CTNS_RCS_ID:
	case SLI_CTNS_RPT_ID:
		cmd[4] = SWAP_DATA32(cmd[4]);
		cmd[5] = SWAP_DATA32(cmd[5]);
		break;

	case SLI_CTNS_RFT_ID:
		cmd[4] = SWAP_DATA32(cmd[4]);

		/* Swap FC4 types */
		for (i = 0; i < 8; i++) {
			cmd[5 + i] = SWAP_DATA32(cmd[5 + i]);
		}
		break;

	case SLI_CTNS_GFT_ID:
		if (rsp) {
			/* Swap FC4 types */
			for (i = 0; i < 8; i++) {
				rsp[4 + i] = SWAP_DATA32(rsp[4 + i]);
			}
		}
		break;

	case SLI_CTNS_GCS_ID:
	case SLI_CTNS_GSPN_ID:
	case SLI_CTNS_GSNN_NN:
	case SLI_CTNS_GIP_NN:
	case SLI_CTNS_GIPA_NN:

	case SLI_CTNS_GPT_ID:
	case SLI_CTNS_GID_NN:
	case SLI_CTNS_GNN_IP:
	case SLI_CTNS_GIPA_IP:
	case SLI_CTNS_GID_FT:
	case SLI_CTNS_GID_PT:
	case SLI_CTNS_GID_PN:
	case SLI_CTNS_RSPN_ID:
	case SLI_CTNS_RIP_NN:
	case SLI_CTNS_RIPA_NN:
	case SLI_CTNS_RSNN_NN:
	case SLI_CTNS_DA_ID:
	case SLI_CT_RESPONSE_FS_RJT:
	case SLI_CT_RESPONSE_FS_ACC:

	default:
		break;
	}
	return;

} /* emlxs_swap_ct_pkt() */
9472 9473 9474 extern void 9475 emlxs_swap_els_ub(fc_unsol_buf_t *ubp) 9476 { 9477 emlxs_ub_priv_t *ub_priv; 9478 fc_rscn_t *rscn; 9479 uint32_t count; 9480 uint32_t i; 9481 uint32_t *lp; 9482 la_els_logi_t *logi; 9483 9484 ub_priv = ubp->ub_fca_private; 9485 9486 switch (ub_priv->cmd) { 9487 case ELS_CMD_RSCN: 9488 rscn = (fc_rscn_t *)ubp->ub_buffer; 9489 9490 rscn->rscn_payload_len = SWAP_DATA16(rscn->rscn_payload_len); 9491 9492 count = ((rscn->rscn_payload_len - 4) / 4); 9493 lp = (uint32_t *)ubp->ub_buffer + 1; 9494 for (i = 0; i < count; i++, lp++) { 9495 *lp = SWAP_DATA32(*lp); 9496 } 9497 9498 break; 9499 9500 case ELS_CMD_FLOGI: 9501 case ELS_CMD_PLOGI: 9502 case ELS_CMD_FDISC: 9503 case ELS_CMD_PDISC: 9504 logi = (la_els_logi_t *)ubp->ub_buffer; 9505 emlxs_swap_service_params( 9506 (SERV_PARM *)&logi->common_service); 9507 break; 9508 9509 /* ULP handles this */ 9510 case ELS_CMD_LOGO: 9511 case ELS_CMD_PRLI: 9512 case ELS_CMD_PRLO: 9513 case ELS_CMD_ADISC: 9514 default: 9515 break; 9516 } 9517 9518 return; 9519 9520 } /* emlxs_swap_els_ub() */ 9521 9522 9523 #endif /* EMLXS_MODREV2X */ 9524 9525 9526 extern char * 9527 emlxs_elscmd_xlate(uint32_t elscmd) 9528 { 9529 static char buffer[32]; 9530 uint32_t i; 9531 uint32_t count; 9532 9533 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t); 9534 for (i = 0; i < count; i++) { 9535 if (elscmd == emlxs_elscmd_table[i].code) { 9536 return (emlxs_elscmd_table[i].string); 9537 } 9538 } 9539 9540 (void) sprintf(buffer, "ELS=0x%x", elscmd); 9541 return (buffer); 9542 9543 } /* emlxs_elscmd_xlate() */ 9544 9545 9546 extern char * 9547 emlxs_ctcmd_xlate(uint32_t ctcmd) 9548 { 9549 static char buffer[32]; 9550 uint32_t i; 9551 uint32_t count; 9552 9553 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t); 9554 for (i = 0; i < count; i++) { 9555 if (ctcmd == emlxs_ctcmd_table[i].code) { 9556 return (emlxs_ctcmd_table[i].string); 9557 } 9558 } 9559 9560 (void) sprintf(buffer, "cmd=0x%x", ctcmd); 9561 
return (buffer); 9562 9563 } /* emlxs_ctcmd_xlate() */ 9564 9565 9566 #ifdef MENLO_SUPPORT 9567 extern char * 9568 emlxs_menlo_cmd_xlate(uint32_t cmd) 9569 { 9570 static char buffer[32]; 9571 uint32_t i; 9572 uint32_t count; 9573 9574 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t); 9575 for (i = 0; i < count; i++) { 9576 if (cmd == emlxs_menlo_cmd_table[i].code) { 9577 return (emlxs_menlo_cmd_table[i].string); 9578 } 9579 } 9580 9581 (void) sprintf(buffer, "Cmd=0x%x", cmd); 9582 return (buffer); 9583 9584 } /* emlxs_menlo_cmd_xlate() */ 9585 9586 extern char * 9587 emlxs_menlo_rsp_xlate(uint32_t rsp) 9588 { 9589 static char buffer[32]; 9590 uint32_t i; 9591 uint32_t count; 9592 9593 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t); 9594 for (i = 0; i < count; i++) { 9595 if (rsp == emlxs_menlo_rsp_table[i].code) { 9596 return (emlxs_menlo_rsp_table[i].string); 9597 } 9598 } 9599 9600 (void) sprintf(buffer, "Rsp=0x%x", rsp); 9601 return (buffer); 9602 9603 } /* emlxs_menlo_rsp_xlate() */ 9604 9605 #endif /* MENLO_SUPPORT */ 9606 9607 9608 extern char * 9609 emlxs_rmcmd_xlate(uint32_t rmcmd) 9610 { 9611 static char buffer[32]; 9612 uint32_t i; 9613 uint32_t count; 9614 9615 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t); 9616 for (i = 0; i < count; i++) { 9617 if (rmcmd == emlxs_rmcmd_table[i].code) { 9618 return (emlxs_rmcmd_table[i].string); 9619 } 9620 } 9621 9622 (void) sprintf(buffer, "RM=0x%x", rmcmd); 9623 return (buffer); 9624 9625 } /* emlxs_rmcmd_xlate() */ 9626 9627 9628 9629 extern char * 9630 emlxs_mscmd_xlate(uint16_t mscmd) 9631 { 9632 static char buffer[32]; 9633 uint32_t i; 9634 uint32_t count; 9635 9636 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t); 9637 for (i = 0; i < count; i++) { 9638 if (mscmd == emlxs_mscmd_table[i].code) { 9639 return (emlxs_mscmd_table[i].string); 9640 } 9641 } 9642 9643 (void) sprintf(buffer, "Cmd=0x%x", mscmd); 9644 return (buffer); 9645 9646 } /* 
emlxs_mscmd_xlate() */


/*
 * Translate a driver state code into a printable string.
 * Returns a pointer into emlxs_state_table when the code is known;
 * otherwise formats the raw value into a static buffer.
 *
 * NOTE(review): the static fallback buffer is shared by all callers,
 * so the unknown-code path is not reentrant — confirm callers only
 * use the result transiently (e.g. inside a single log message).
 */
extern char *
emlxs_state_xlate(uint8_t state)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	/* Scan the code -> string table for a match */
	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (state == emlxs_state_table[i].code) {
			return (emlxs_state_table[i].string);
		}
	}

	/* Unknown code: report it numerically */
	(void) sprintf(buffer, "State=0x%x", state);
	return (buffer);

} /* emlxs_state_xlate() */


/*
 * Translate a driver error code into a printable string.
 * Same table-lookup-with-static-fallback pattern (and the same
 * reentrancy caveat) as emlxs_state_xlate() above.
 */
extern char *
emlxs_error_xlate(uint8_t errno)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (errno == emlxs_error_table[i].code) {
			return (emlxs_error_table[i].string);
		}
	}

	(void) sprintf(buffer, "Errno=0x%x", errno);
	return (buffer);

} /* emlxs_error_xlate() */


/*
 * Lower the adapter power level to EMLXS_PM_ADAPTER_DOWN.
 * If kernel power management is enabled in the driver config this
 * goes through the DDI pm framework; otherwise the driver's own
 * emlxs_power() entry point is called directly.
 * Returns DDI_SUCCESS or the failure code from the PM call.
 */
static int
emlxs_pm_lower_power(dev_info_t *dip)
{
	int ddiinst;
	int emlxinst;
	emlxs_config_t *cfg;
	int32_t rval;
	emlxs_hba_t *hba;

	/* Map the dev_info instance back to our per-HBA soft state */
	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];
	cfg = &CFG;

	rval = DDI_SUCCESS;

	/* Lower the power level */
	if (cfg[CFG_PM_SUPPORT].current) {
		rval =
		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
		    EMLXS_PM_ADAPTER_DOWN);
	} else {
		/* We do not have kernel support of power management enabled */
		/* therefore, call our power management routine directly */
		rval =
		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
	}

	return (rval);

} /* emlxs_pm_lower_power() */


/*
 * Raise the adapter power level to EMLXS_PM_ADAPTER_UP.
 * Mirror image of emlxs_pm_lower_power() above: routed through the
 * DDI pm framework when configured, otherwise directly through
 * emlxs_power().
 */
static int
emlxs_pm_raise_power(dev_info_t *dip)
{
	int ddiinst;
	int emlxinst;
	emlxs_config_t *cfg;
	int32_t rval;
	emlxs_hba_t *hba;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];
	cfg = &CFG;

	/* Raise the power level */
	if (cfg[CFG_PM_SUPPORT].current) {
		rval =
		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
		    EMLXS_PM_ADAPTER_UP);
	} else {
		/* We do not have kernel support of power management enabled */
		/* therefore, call our power management routine directly */
		rval =
		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
	}

	return (rval);

} /* emlxs_pm_raise_power() */


#ifdef IDLE_TIMER

/*
 * Mark the adapter busy for PM purposes.
 * pm_active is set unconditionally (idle-timer bookkeeping); pm_busy
 * is promoted 0 -> 1 under pm_lock using a double-checked pattern so
 * the common already-busy case avoids the mutex.  If the kernel
 * pm_busy_component() call fails, pm_busy is rolled back so a later
 * call retries.
 *
 * NOTE(review): 'dip' is used here with no local definition — this
 * IDLE_TIMER code presumably relies on a macro or should reference
 * hba->dip; confirm before enabling IDLE_TIMER builds.
 */
extern int
emlxs_pm_busy_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	hba->pm_active = 1;

	/* Fast path: already marked busy */
	if (hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&hba->pm_lock);

	/* Re-check under the lock in case we raced another thread */
	if (hba->pm_busy) {
		mutex_exit(&hba->pm_lock);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 1;

	mutex_exit(&hba->pm_lock);

	/* Attempt to notify system that we are busy */
	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_busy_component.");

		rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_busy_component failed. ret=%d", rval);

			/* If this attempt failed then clear our flags */
			mutex_enter(&hba->pm_lock);
			hba->pm_busy = 0;
			mutex_exit(&hba->pm_lock);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_busy_component() */


/*
 * Mark the adapter idle for PM purposes.
 * Demotes pm_busy 1 -> 0 with the same double-checked locking pattern
 * as emlxs_pm_busy_component(); on pm_idle_component() failure the
 * busy flag is restored so the idle timer can retry later.
 */
extern int
emlxs_pm_idle_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	if (!hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&hba->pm_lock);

	if (!hba->pm_busy) {
		mutex_exit(&hba->pm_lock);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 0;

	mutex_exit(&hba->pm_lock);

	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_idle_component.");

		rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_idle_component failed. ret=%d", rval);

			/* If this attempt failed then */
			/* reset our flags for another attempt */
			mutex_enter(&hba->pm_lock);
			hba->pm_busy = 1;
			mutex_exit(&hba->pm_lock);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_idle_component() */


/*
 * Periodic idle-timer tick (driven by hba->timer_tics).
 * Recent activity (pm_active) pushes the idle deadline out by
 * CFG_PM_IDLE ticks; once the deadline passes with no activity the
 * adapter is declared idle via emlxs_pm_idle_component().
 */
extern void
emlxs_pm_idle_timer(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;

	if (hba->pm_active) {
		/* Clear active flag and reset idle timer */
		mutex_enter(&hba->pm_lock);
		hba->pm_active = 0;
		hba->pm_idle_timer =
		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
		mutex_exit(&hba->pm_lock);
	}

	/* Check for idle timeout */
	else if (hba->timer_tics >= hba->pm_idle_timer) {
		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
			mutex_enter(&hba->pm_lock);
			hba->pm_idle_timer =
			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
			mutex_exit(&hba->pm_lock);
		}
	}

	return;

} /* emlxs_pm_idle_timer() */

#endif /* IDLE_TIMER */


#ifdef SLI3_SUPPORT
/*
 * Read and parse the "vport" driver property to pre-configure NPIV
 * virtual ports at attach time.
 *
 * Each property string has the form (all-hex WWNs, decimal vpi):
 *	<pwwpn>:<wwnn>:<wwpn>:<vpi>
 * where <pwwpn> is the physical port WWPN the entry applies to.
 * Entries that fail to parse, do not match this HBA's WWPN, have an
 * out-of-range vpi, or name an already-configured port are skipped.
 * Matching entries populate hba->port[vpi] and set the CONFIG/ENABLE
 * flags (plus RESTRICTED when configured, under NPIV_SUPPORT).
 */
static void
emlxs_read_vport_prop(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	char **arrayp;
	uint8_t *s;
	uint8_t *np;
	NAME_TYPE pwwpn;
	NAME_TYPE wwnn;
	NAME_TYPE wwpn;
	uint32_t vpi;
	uint32_t cnt;
	uint32_t rval;
	uint32_t i;
	uint32_t j;
	uint32_t c1;
	uint32_t sum;
	uint32_t errors;
	char buffer[64];

	/* Check for the per adapter vport setting */
	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
	cnt = 0;
	arrayp = NULL;
	rval =
	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		/* Check for the global vport setting */
		cnt = 0;
		arrayp = NULL;
		rval =
		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
	}

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		return;
	}

	for (i = 0; i < cnt; i++) {
		errors = 0;
		s = (uint8_t *)arrayp[i];

		if (!s) {
			break;
		}

		/* Parse the physical-port WWPN: two hex nibbles per byte */
		np = (uint8_t *)&pwwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after PWWPN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport's node WWN */
		np = (uint8_t *)&wwnn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWNN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport's port WWN */
		np = (uint8_t *)&wwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);

				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);

				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWPN. "
			    "entry=%d", i);

			goto out;
		}

		/* Parse the decimal vpi; any non-digit aborts all parsing */
		sum = 0;
		do {
			c1 = *s++;
			if ((c1 < '0') || (c1 > '9')) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid VPI found. "
				    "entry=%d c=%c vpi=%d", i, c1, sum);

				goto out;
			}

			sum = (sum * 10) + (c1 - '0');

		} while (*s != 0);

		vpi = sum;

		/* Bad hex nibbles only skip this entry, not the whole list */
		if (errors) {
			continue;
		}

		/* Entry has been read */

		/* Check if the physical port wwpn */
		/* matches our physical port wwpn */
		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
			continue;
		}

		/* Check vpi range */
		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
			continue;
		}

		/* Check if port has already been configured */
		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
			continue;
		}

		/* Set the highest configured vpi */
		if (vpi >= hba->vpi_high) {
			hba->vpi_high = vpi;
		}

		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
		    sizeof (NAME_TYPE));

		/* Inherit the physical port's symbolic names if unset */
		if (hba->port[vpi].snn[0] == 0) {
			(void) strncpy((caddr_t)hba->port[vpi].snn,
			    (caddr_t)hba->snn, 256);
		}

		if (hba->port[vpi].spn[0] == 0) {
			(void) sprintf((caddr_t)hba->port[vpi].spn,
			    "%s VPort-%d",
			    (caddr_t)hba->spn, vpi);
		}

		hba->port[vpi].flag |=
		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);

#ifdef NPIV_SUPPORT
		if (cfg[CFG_VPORT_RESTRICTED].current) {
			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
		}
#endif /* NPIV_SUPPORT */
	}

out:

	(void) ddi_prop_free((void *) arrayp);
	return;

} /* emlxs_read_vport_prop() */

#endif /* SLI3_SUPPORT */



/*
 * Format an 8-byte WWN into the caller-supplied buffer as 16 lowercase
 * hex digits (no separators).  Returns the caller's buffer, which must
 * hold at least 17 bytes including the terminating NUL.
 */
extern char *
emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
{
	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);

	return (buffer);

} /* emlxs_wwn_xlate() */


/* This is called at port online and offline */
/*
 * Drain the port's queue of unsolicited buffers that were parked while
 * the ULP was offline.  The whole wait list is detached under
 * EMLXS_PORT_LOCK, then each buffer is either delivered to the ULP's
 * unsolicited callback (if the ULP is now online) or released.
 */
extern void
emlxs_ub_flush(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	emlxs_ub_priv_t *next;

	/* Return if nothing to do */
	if (!port->ub_wait_head) {
		return;
	}

	/* Detach the entire wait list while holding the port lock */
	mutex_enter(&EMLXS_PORT_LOCK);
	ub_priv = port->ub_wait_head;
	port->ub_wait_head = NULL;
	port->ub_wait_tail = NULL;
	mutex_exit(&EMLXS_PORT_LOCK);

	while (ub_priv) {
		next = ub_priv->next;
		ubp = ub_priv->ubp;

		/* Check if ULP is online and we have a callback function */
		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
		    port->ulp_unsol_cb) {
			/* Send ULP the ub buffer */
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {	/* Drop the buffer */

			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		ub_priv = next;

	}	/* while() */

	return;

} /* emlxs_ub_flush() */


/*
 * Deliver one unsolicited buffer to the ULP.  If the ULP is offline
 * but the link is up, the buffer is queued on the port's wait list for
 * emlxs_ub_flush() to deliver later; otherwise it is released.
 */
extern void
emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;

	ub_priv = ubp->ub_fca_private;

	/* Check if ULP is online */
	if (port->ulp_statec != FC_STATE_OFFLINE) {
		if (port->ulp_unsol_cb) {
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		return;
	} else {	/* ULP offline */

		if (hba->state >= FC_LINK_UP) {
			/* Add buffer to queue tail */
			mutex_enter(&EMLXS_PORT_LOCK);

			if (port->ub_wait_tail) {
				port->ub_wait_tail->next = ub_priv;
			}
			port->ub_wait_tail = ub_priv;

			if (!port->ub_wait_head) {
				port->ub_wait_head = ub_priv;
			}

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}
	}

	return;

} /* emlxs_ub_callback() */


/*
 * Compile-environment sanity check: verify that the sizes of the
 * hardware-shared SLI structures match what the adapter firmware
 * expects.  Any mismatch (e.g. from compiler padding) is reported via
 * cmn_err and counted; returns the number of mismatches (0 == OK).
 */
static uint32_t
emlxs_integrity_check(emlxs_hba_t *hba)
{
	uint32_t size;
	uint32_t errors = 0;
	int ddiinst = hba->ddiinst;

	size = 16;
	if (sizeof (ULP_BDL) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));

		errors++;
	}
	size = 8;
	if (sizeof (ULP_BDE) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));

		errors++;
	}
	size = 12;
	if (sizeof (ULP_BDE64) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));

		errors++;
	}
	size = 16;
	if (sizeof (HBQE_t) != size) {
		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));

		errors++;
	}
	size = 8;
	if (sizeof (HGP) != size) {
		cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));

		errors++;
	}
	/* PGP is expected to be the same size (8) as HGP above */
	if (sizeof (PGP) != size) {
		cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));

		errors++;
	}
	size = 4;
	if (sizeof (WORD5) != size) {
		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));

		errors++;
	}
	size = 124;
	if (sizeof (MAILVARIANTS) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
		    "%d != 124", DRIVER_NAME, ddiinst,
		    (int)sizeof (MAILVARIANTS));

		errors++;
	}
	size = 128;
	if (sizeof (SLI1_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));

		errors++;
	}
	/* SLI2_DESC is expected to be the same size (128) as SLI1_DESC */
	if (sizeof (SLI2_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));

		errors++;
	}
	size = MBOX_SIZE;
	if (sizeof (MAILBOX) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);

		errors++;
	}
	size = PCB_SIZE;
	if (sizeof (PCB) != size) {
		cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);

		errors++;
	}
	size = 260;
	if (sizeof (ATTRIBUTE_ENTRY) != size) {
		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
		    "%d != 260", DRIVER_NAME, ddiinst,
		    (int)sizeof (ATTRIBUTE_ENTRY));

		errors++;
	}
	size = SLI_SLIM1_SIZE;
	if (sizeof (SLIM1) != size) {
		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);

		errors++;
	}
#ifdef SLI3_SUPPORT
	size = SLI3_IOCB_CMD_SIZE;
	if (sizeof (IOCB) != size) {
		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
		    SLI3_IOCB_CMD_SIZE);

		errors++;
	}
#else
	size = SLI2_IOCB_CMD_SIZE;
	if (sizeof (IOCB) != size) {
		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
		    SLI2_IOCB_CMD_SIZE);

		errors++;
	}
#endif /* SLI3_SUPPORT */

	size = SLI_SLIM2_SIZE;
	if (sizeof (SLIM2) != size) {
		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
		    SLI_SLIM2_SIZE);

		errors++;
	}
	return (errors);

} /* emlxs_integrity_check() */


#ifdef FMA_SUPPORT
/*
 * FMA support
 */

/*
 * Initialize Fault Management (FMA) for this HBA according to
 * hba->fm_caps: adjust the shared access/DMA attribute templates for
 * flagged-error mode, register with the DDI FM framework, set up PCI
 * ereport support, and install the error-handling callback.
 */
extern void
emlxs_fm_init(emlxs_hba_t *hba)
{
	ddi_iblock_cookie_t iblk;

	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	/* Select flagged vs. default access error reporting */
	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		emlxs_dev_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		emlxs_data_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	/* Select flagged vs. default DMA error reporting */
	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
		emlxs_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		emlxs_dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
		emlxs_dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
		emlxs_dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		emlxs_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		emlxs_dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		emlxs_dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		emlxs_dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}

	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);

	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
		pci_ereport_setup(hba->dip);
	}

	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
		    (void *)hba);
	}

} /* emlxs_fm_init() */


/*
 * Tear down FMA support for this HBA: unwind, in reverse order,
 * everything emlxs_fm_init() set up.
 */
extern void
emlxs_fm_fini(emlxs_hba_t *hba)
{
	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
		pci_ereport_teardown(hba->dip);
	}

	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
		ddi_fm_handler_unregister(hba->dip);
	}

	(void) ddi_fm_fini(hba->dip);

} /* emlxs_fm_fini() */


/*
 * Fetch (and clear, where supported) the FM error status of a register
 * access handle.  Returns DDI_FM_OK when FM access checking is not
 * enabled or no error is pending; otherwise the handle's fme_status.
 */
extern int
emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
{
	ddi_fm_error_t err;

	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		return (DDI_FM_OK);
	}

	/* Some S10 versions do not define the ahi_err structure */
	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
		return (DDI_FM_OK);
	}

	err.fme_status = DDI_FM_OK;
	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);

	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
	if ((void *)&ddi_fm_acc_err_clear != NULL) {
		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	}

	return (err.fme_status);

} /* emlxs_fm_check_acc_handle() */


/*
 * Fetch the FM error status of a DMA handle.  Returns DDI_FM_OK when
 * FM error checking is not enabled; otherwise the handle's fme_status.
 * NOTE(review): this gates on the ACC capability bit rather than a DMA
 * capability check — confirm that is intentional.
 */
extern int
emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
{
	ddi_fm_error_t err;

	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		return (DDI_FM_OK);
	}

	err.fme_status = DDI_FM_OK;
	(void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);

	return (err.fme_status);

} /* emlxs_fm_check_dma_handle() */


/*
 * Post an FM ereport of class DDI_FM_DEVICE.<detail> for this HBA.
 * No-op when ereports are not enabled or detail is NULL.
 */
extern void
emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
		return;
	}

	if (detail == NULL) {
		return;
	}

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);

	ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);

} /* emlxs_fm_ereport() */


/*
 * Report a service impact level to the FM framework.  No-op when
 * ereports are not enabled or impact is zero (the NULL comparison
 * below is against an int — it means "no impact given").
 */
extern void
emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
{
	if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
		return;
	}

	if (impact == NULL) {
		return;
	}

	ddi_fm_service_impact(hba->dip, impact);

} /* emlxs_fm_service_impact() */


/*
 * The I/O fault service error handling callback function
 */
/*ARGSUSED*/
extern int
emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
{
	/*
	 * as the driver can always deal with an error
	 * in any dma or access handle, we can just return
	 * the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);

} /* emlxs_fm_error_cb() */
#endif /* FMA_SUPPORT */