/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/pcie.h>

uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * Until MSI-X is supported, assume MSI; use 2 for MSI-X.
 */
uint32_t	nxge_msi_enable = 1;		/* debug: turn msi off */

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
uint32_t	nxge_no_msg = 0;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t	nxge_jumbo_enable = B_FALSE;
uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;

/*
 * Debugging flags:
 *		nxge_no_tx_lb	 : transmit load balancing
 *		nxge_tx_lb_policy: 0 - TCP port (default)
 *				   3 - DEST MAC
 */
uint32_t	nxge_no_tx_lb = 0;
uint32_t	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

rtrace_t npi_rtracebuf;
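
/*
 * Example (hypothetical values) of overriding the tunables above from
 * /etc/system; any of the nxge_* globals in this section can be set
 * the same way:
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_jumbo_enable = 1
 */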

#if	defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab
};

void
nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level = 0;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void			*nxge_list = NULL;

void			*nxge_hw_list = NULL;
nxge_os_mutex_t		nxge_common_lock;

nxge_os_mutex_t		nxge_mii_lock;
static uint32_t		nxge_mii_lock_init = 0;
nxge_os_mutex_t		nxge_mdio_lock;
static uint32_t		nxge_mdio_lock_init = 0;

extern uint64_t		npi_debug_level;

extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
extern void		nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void		nxge_fm_fini(p_nxge_t);
extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks have to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000, 0x1000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	nxge_status_t	nxge_status = NXGE_OK;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			nxge_status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		nxge_status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		goto nxge_attach_fail;
	}

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
		&nxge_rx_dma_attr);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail;
	}

	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_init_common_dev failed"));
		goto nxge_attach_fail;
	}

	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
	nxgep->mac.portnum = portn;
	if ((portn == 0) || (portn == 1))
		nxgep->mac.porttype = PORT_TYPE_XMAC;
	else
		nxgep->mac.porttype = PORT_TYPE_BMAC;
	/*
	 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
	 * internally, the other 2 ports use BMAC (1G "Big" MAC).
	 * The two types of MACs have different characteristics.
	 */
	mmac_info = &nxgep->nxge_mmac_info;
	if (nxgep->function_num < 2) {
		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
	} else {
		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
	}
	/*
	 * Set up the ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Set up the register tracing buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);
	status = nxge_get_xcvr_type(nxgep);

	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_attach: "
			" Couldn't determine card type"
			" .... exit "));
		goto nxge_attach_fail;
	}
exit ")); 500 goto nxge_attach_fail; 501 } 502 503 if ((nxgep->niu_type == NEPTUNE) && 504 (nxgep->mac.portmode == PORT_10G_FIBER)) { 505 nxgep->niu_type = NEPTUNE_2; 506 } 507 508 status = nxge_get_config_properties(nxgep); 509 510 if (status != NXGE_OK) { 511 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed")); 512 goto nxge_attach_fail; 513 } 514 515 nxge_get_xcvr_properties(nxgep); 516 517 /* 518 * Setup the Kstats for the driver. 519 */ 520 nxge_setup_kstats(nxgep); 521 522 nxge_setup_param(nxgep); 523 524 status = nxge_setup_system_dma_pages(nxgep); 525 if (status != NXGE_OK) { 526 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 527 goto nxge_attach_fail; 528 } 529 530 #if defined(sun4v) 531 if (nxgep->niu_type == N2_NIU) { 532 nxgep->niu_hsvc_available = B_FALSE; 533 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 534 if ((status = 535 hsvc_register(&nxgep->niu_hsvc, 536 &nxgep->niu_min_ver)) != 0) { 537 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 538 "nxge_attach: " 539 "%s: cannot negotiate " 540 "hypervisor services " 541 "revision %d " 542 "group: 0x%lx " 543 "major: 0x%lx minor: 0x%lx " 544 "errno: %d", 545 niu_hsvc.hsvc_modname, 546 niu_hsvc.hsvc_rev, 547 niu_hsvc.hsvc_group, 548 niu_hsvc.hsvc_major, 549 niu_hsvc.hsvc_minor, 550 status)); 551 status = DDI_FAILURE; 552 goto nxge_attach_fail; 553 } 554 555 nxgep->niu_hsvc_available = B_TRUE; 556 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 557 "NIU Hypervisor service enabled")); 558 } 559 #endif 560 561 nxge_hw_id_init(nxgep); 562 nxge_hw_init_niu_common(nxgep); 563 564 status = nxge_setup_mutexes(nxgep); 565 if (status != NXGE_OK) { 566 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 567 goto nxge_attach_fail; 568 } 569 570 status = nxge_setup_dev(nxgep); 571 if (status != DDI_SUCCESS) { 572 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 573 goto nxge_attach_fail; 574 } 575 576 status = nxge_add_intrs(nxgep); 577 if (status != DDI_SUCCESS) { 578 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 579 goto nxge_attach_fail; 580 } 581 status = nxge_add_soft_intrs(nxgep); 582 if (status != DDI_SUCCESS) { 583 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed")); 584 goto nxge_attach_fail; 585 } 586 587 /* 588 * Enable interrupts. 
	nxge_intrs_enable(nxgep);

	if ((status = nxge_mac_register(nxgep)) != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
		instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	if (nxge_status != NXGE_OK)
		nxge_status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
		status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
		status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if	defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	nxge_destroy_dev(nxgep);

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
			" nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t		pci_offset;
	uint16_t	pcie_devctl;
#endif

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NEPTUNE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
			(devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: N2/NIU function number %d",
			nxgep->function_num));
	} else {
		int		*prop_val;
		uint_t		prop_len;
		uint8_t		func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
				0, "reg",
				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
				"Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				"Reg property found: fun # %d",
				func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_reg: PCI config addr 0x%0llx "
			" handle 0x%0llx", dev_regs->nxge_pciregp,
			dev_regs->nxge_pciregh));
		/*
		 * IMP IMP
		 * workaround for a bit swapping bug in HW
		 * which ends up in no-snoop = yes,
		 * resulting in DMA not being synched properly
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
			pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
			(npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
			(npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				"==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				"==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				"==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				"==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
				"==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;
	nxge_classify_t	*classify_ptr;
	int		partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so that the mutexes can be
	 * initialized.
	 */
	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
			&nxgep->interrupt_cookie);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
		goto nxge_setup_mutexes_exit;
	}

	/* Initialize the global mutexes. */

	if (nxge_mdio_lock_init == 0) {
		MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
	}
	atomic_add_32(&nxge_mdio_lock_init, 1);

	if (nxge_mii_lock_init == 0) {
		MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
	}
	atomic_add_32(&nxge_mii_lock_init, 1);

	nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
	nxgep->drv_state |= STATE_MII_LOCK_INIT;

	/*
	 * Initialize the mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
		RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * The FFLP mutexes are never used in interrupt context,
	 * as an FFLP operation can take a very long time to
	 * complete and is therefore not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
		NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (nxgep->niu_type == NEPTUNE) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
			NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
				NXGE_MUTEX_DRIVER,
				(void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int		partition;
	nxge_classify_t	*classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* free data structures, based on HW type */
	if (nxgep->niu_type == NEPTUNE) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}
	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
		if (nxge_mdio_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mdio_lock);
		}
		atomic_add_32(&nxge_mdio_lock_init, -1);
	}
	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
		if (nxge_mii_lock_init == 1) {
			MUTEX_DESTROY(&nxge_mii_lock);
		}
		atomic_add_32(&nxge_mii_lock_init, -1);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	/*
	 * Initialize and enable TXC registers
	 * (Globally enable TX controller,
	 * enable a port, configure dma channel bitmap,
	 * configure the max burst size).
	 */
	status = nxge_txc_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
		goto nxge_init_fail2;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	(void) nxge_txc_uninit(nxgep);
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
		status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) ||
		(nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
			drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
		"nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t	reg;
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
	uint64_t	reg;
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
	reg = buf[0];

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}


nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char		msg_buffer[1048];
	char		prefix_buffer[32];
	int		instance;
	uint64_t	debug_level;
	int		cmn_level = CE_CONT;
	va_list		ap;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
		nxgep->nxge_debug_level;

	if ((level & debug_level) ||
		(level == NXGE_NOTE) ||
		(level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
				"%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
			prefix_buffer, msg_buffer);

	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t		*ap = (uchar_t *)addr;
	int		i;
	static char	etherbuf[1024];
	char		*cp = etherbuf;
	char		digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}

#ifdef	NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t	cfg_handle;
	p_pci_cfg_t		cfg_ptr;
	ddi_acc_handle_t	dev_handle;
	char			*dev_ptr;
	ddi_acc_handle_t	pci_config_handle;
	uint32_t		regval;
	int			i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (nxgep->niu_type == NEPTUNE) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"Neptune PCI cfg_ptr vendor id ptr 0x%llx",
			&cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"\tvendorid 0x%x devid 0x%x",
			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
			NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
			"bar1c 0x%x",
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"\nNeptune PCI BAR: base20 0x%x base24 0x%x "
			"base 28 0x%x bar2c 0x%x\n",
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"\nNeptune PCI BAR: base30 0x%x\n",
			NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			"first 0x%llx second 0x%llx third 0x%llx "
			"last 0x%llx ",
			NXGE_PIO_READ64(dev_handle,
				(uint64_t *)(dev_ptr + 0), 0),
			NXGE_PIO_READ64(dev_handle,
				(uint64_t *)(dev_ptr + 8), 0),
			NXGE_PIO_READ64(dev_handle,
				(uint64_t *)(dev_ptr + 16), 0),
			NXGE_PIO_READ64(dev_handle,
				(uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
	nxgep->suspended = DDI_RESUME;

	nxge_global_reset(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
		nxgep->mac.portnum));

	status = nxge_xcvr_find(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			" nxge_setup_dev status "
			" (xcvr find 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			" nxge_setup_dev status "
			"(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"<== nxge_setup_dev port %d status = 0x%08x",
		nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
			" default_block_size %d iommu_pagesize %d",
			nxgep->sys_page_sz,
			ddi_ptob(nxgep->dip, (ulong_t)1),
			nxgep->rx_default_block_size,
			iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		"default_block_size %d page mask %d",
		nxgep->sys_page_sz,
		ddi_ptob(nxgep->dip, (ulong_t)1),
		nxgep->rx_default_block_size,
		nxgep->sys_page_mask));


	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
			DDI_DMA_DONTWAIT, 0,
			&nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"ddi_dma_alloc_handle: failed "
			" status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
				(caddr_t)nxgep->dmasparehandle,
				sizeof (nxgep->dmasparehandle),
				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
				DDI_DMA_DONTWAIT, 0,
				&cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"Binding spare handle to find system"
			" burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

static nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	p_nxge_dma_pool_t	dma_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_cntl_alloc_size;
	uint32_t		*num_chunks; /* per dma */
	nxge_status_t		status = NXGE_OK;

	uint32_t		nxge_port_rbr_size;
	uint32_t		nxge_port_rbr_spare_size;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
			KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
		KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
			sizeof (uint32_t) * ndmas, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the default
	 * block size.
	 * The rbr block count is rounded up to a multiple of the post
	 * batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	/*
	 * The N2/NIU has limitations on the descriptor sizes: data buffers
	 * must use contiguous memory allocation (contig_mem_alloc), which is
	 * limited to 4M, and control buffers must be little endian (they must
	 * use the ddi/dki mem alloc functions).
	 */
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
				(!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
				(!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	rx_buf_alloc_size = (nxgep->rx_default_block_size *
		(nxge_port_rbr_size + nxge_port_rbr_spare_size));

	/*
	 * Addresses of the receive block ring, receive completion ring
	 * and the mailbox must all be cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
		"nxge_port_rcr_size = %d "
		"rx_cntl_alloc_size = %d",
		nxge_port_rbr_size, nxge_port_rbr_spare_size,
		nxge_port_rcr_size,
		rx_cntl_alloc_size));

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(rx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"==> nxge_alloc_rx_mem_pool: "
				" must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"==> nxge_alloc_rx_mem_pool: "
				" limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_cntl_alloc_size < 0x2000) {
			rx_cntl_alloc_size = 0x2000;
		}
	}
#endif
	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;

	/*
	 * Allocate memory for receive buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager when it is available.
	 */
	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			" nxge_alloc_rx_mem_pool to alloc mem: "
			" dma %d dma_buf_p %llx &dma_buf_p %llx",
			i, dma_buf_p[i], &dma_buf_p[i]));
		num_chunks[i] = 0;
		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
			rx_buf_alloc_size,
			nxgep->rx_default_block_size, &num_chunks[i]);
		if (status != NXGE_OK) {
			break;
		}
		st_rdc++;
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			" nxge_alloc_rx_mem_pool DONE alloc mem: "
			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
			dma_buf_p[i], &dma_buf_p[i]));
	}
	if (i < ndmas) {
		goto nxge_alloc_rx_mem_fail1;
	}
	/*
	 * Allocate memory for the descriptor rings and mailbox.
	 */
1967 */ 1968 st_rdc = p_cfgp->start_rdc; 1969 for (j = 0; j < ndmas; j++) { 1970 status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j], 1971 rx_cntl_alloc_size); 1972 if (status != NXGE_OK) { 1973 break; 1974 } 1975 st_rdc++; 1976 } 1977 if (j < ndmas) { 1978 goto nxge_alloc_rx_mem_fail2; 1979 } 1980
1981 dma_poolp->ndmas = ndmas; 1982 dma_poolp->num_chunks = num_chunks; 1983 dma_poolp->buf_allocated = B_TRUE; 1984 nxgep->rx_buf_pool_p = dma_poolp; 1985 dma_poolp->dma_buf_pool_p = dma_buf_p; 1986
1987 dma_cntl_poolp->ndmas = ndmas; 1988 dma_cntl_poolp->buf_allocated = B_TRUE; 1989 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 1990 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 1991
1992 goto nxge_alloc_rx_mem_pool_exit; 1993
1994 nxge_alloc_rx_mem_fail2: 1995 /* Free control buffers */ 1996 j--; 1997 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1998 "==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 1999 for (; j >= 0; j--) { 2000 nxge_free_rx_cntl_dma(nxgep, 2001 (p_nxge_dma_common_t)dma_cntl_p[j]); 2002 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2003 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", 2004 j)); 2005 } 2006 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2007 "==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 2008
2009 nxge_alloc_rx_mem_fail1: 2010 /* Free data buffers */ 2011 i--; 2012 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2013 "==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 2014 for (; i >= 0; i--) { 2015 nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2016 num_chunks[i]); 2017 } 2018 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2019 "==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 2020
2021 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2022 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2023 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2024 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2025 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2026
2027 nxge_alloc_rx_mem_pool_exit: 2028 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2029 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2030
2031 return (status); 2032 } 2033
2034 static void 2035 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2036 { 2037 uint32_t i, ndmas; 2038 p_nxge_dma_pool_t dma_poolp; 2039 p_nxge_dma_common_t *dma_buf_p; 2040 p_nxge_dma_pool_t dma_cntl_poolp; 2041 p_nxge_dma_common_t *dma_cntl_p; 2042 uint32_t *num_chunks; 2043
2044 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2045
2046 dma_poolp = nxgep->rx_buf_pool_p; 2047 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2048 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2049 "<== nxge_free_rx_mem_pool " 2050 "(null rx buf pool or buf not allocated)")); 2051 return; 2052 } 2053
2054 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2055 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2056 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2057 "<== nxge_free_rx_mem_pool " 2058 "(null rx cntl buf pool or cntl buf not allocated)")); 2059 return; 2060 } 2061
2062 dma_buf_p = dma_poolp->dma_buf_pool_p; 2063 num_chunks = dma_poolp->num_chunks; 2064
2065 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2066 ndmas = dma_cntl_poolp->ndmas; 2067
2068 for (i = 0; i < ndmas; i++) { 2069 nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2070 } 2071
2072 for (i = 0; i < ndmas; i++) { 2073 nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]); 2074 } 2075
2076 for (i = 0; i < ndmas; i++) { 2077 KMEM_FREE(dma_buf_p[i], 2078 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2079 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2080 } 2081
2082
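/*
 * Per-channel chunks have been freed above; release the chunk-count
 * array and the pool bookkeeping structures themselves.
 */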
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2083 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2084 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2085 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2086 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2087 2088 nxgep->rx_buf_pool_p = NULL; 2089 nxgep->rx_cntl_pool_p = NULL; 2090 2091 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2092 } 2093 2094 2095 static nxge_status_t 2096 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2097 p_nxge_dma_common_t *dmap, 2098 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2099 { 2100 p_nxge_dma_common_t rx_dmap; 2101 nxge_status_t status = NXGE_OK; 2102 size_t total_alloc_size; 2103 size_t allocated = 0; 2104 int i, size_index, array_size; 2105 2106 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2107 2108 rx_dmap = (p_nxge_dma_common_t) 2109 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2110 KM_SLEEP); 2111 2112 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2113 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2114 dma_channel, alloc_size, block_size, dmap)); 2115 2116 total_alloc_size = alloc_size; 2117 2118 #if defined(RX_USE_RECLAIM_POST) 2119 total_alloc_size = alloc_size + alloc_size/4; 2120 #endif 2121 2122 i = 0; 2123 size_index = 0; 2124 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2125 while ((alloc_sizes[size_index] < alloc_size) && 2126 (size_index < array_size)) 2127 size_index++; 2128 if (size_index >= array_size) { 2129 size_index = array_size - 1; 2130 } 2131 2132 while ((allocated < total_alloc_size) && 2133 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2134 rx_dmap[i].dma_chunk_index = i; 2135 rx_dmap[i].block_size = block_size; 2136 rx_dmap[i].alength = alloc_sizes[size_index]; 2137 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2138 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2139 rx_dmap[i].dma_channel = dma_channel; 2140 rx_dmap[i].contig_alloc_type = B_FALSE; 2141 2142 /* 2143 * N2/NIU: data buffers must be contiguous as the driver 2144 * needs to call Hypervisor api to set up 2145 * logical pages. 
2146 */ 2147 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2148 rx_dmap[i].contig_alloc_type = B_TRUE; 2149 } 2150 2151 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2152 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2153 "i %d nblocks %d alength %d", 2154 dma_channel, i, &rx_dmap[i], block_size, 2155 i, rx_dmap[i].nblocks, 2156 rx_dmap[i].alength)); 2157 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2158 &nxge_rx_dma_attr, 2159 rx_dmap[i].alength, 2160 &nxge_dev_buf_dma_acc_attr, 2161 DDI_DMA_READ | DDI_DMA_STREAMING, 2162 (p_nxge_dma_common_t)(&rx_dmap[i])); 2163 if (status != NXGE_OK) { 2164 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2165 " nxge_alloc_rx_buf_dma: Alloc Failed ")); 2166 size_index--; 2167 } else { 2168 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2169 " alloc_rx_buf_dma allocated rdc %d " 2170 "chunk %d size %x dvma %x bufp %llx ", 2171 dma_channel, i, rx_dmap[i].alength, 2172 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 2173 i++; 2174 allocated += alloc_sizes[size_index]; 2175 } 2176 } 2177 2178 2179 if (allocated < total_alloc_size) { 2180 goto nxge_alloc_rx_mem_fail1; 2181 } 2182 2183 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2184 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2185 dma_channel, i)); 2186 *num_chunks = i; 2187 *dmap = rx_dmap; 2188 2189 goto nxge_alloc_rx_mem_exit; 2190 2191 nxge_alloc_rx_mem_fail1: 2192 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2193 2194 nxge_alloc_rx_mem_exit: 2195 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2196 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2197 2198 return (status); 2199 } 2200 2201 /*ARGSUSED*/ 2202 static void 2203 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2204 uint32_t num_chunks) 2205 { 2206 int i; 2207 2208 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2209 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2210 2211 for (i = 0; i < num_chunks; i++) { 2212 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2213 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2214 i, dmap)); 2215 nxge_dma_mem_free(dmap++); 2216 } 2217 2218 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2219 } 2220 2221 /*ARGSUSED*/ 2222 static nxge_status_t 2223 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2224 p_nxge_dma_common_t *dmap, size_t size) 2225 { 2226 p_nxge_dma_common_t rx_dmap; 2227 nxge_status_t status = NXGE_OK; 2228 2229 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2230 2231 rx_dmap = (p_nxge_dma_common_t) 2232 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2233 2234 rx_dmap->contig_alloc_type = B_FALSE; 2235 2236 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2237 &nxge_desc_dma_attr, 2238 size, 2239 &nxge_dev_desc_dma_acc_attr, 2240 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2241 rx_dmap); 2242 if (status != NXGE_OK) { 2243 goto nxge_alloc_rx_cntl_dma_fail1; 2244 } 2245 2246 *dmap = rx_dmap; 2247 goto nxge_alloc_rx_cntl_dma_exit; 2248 2249 nxge_alloc_rx_cntl_dma_fail1: 2250 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2251 2252 nxge_alloc_rx_cntl_dma_exit: 2253 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2254 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2255 2256 return (status); 2257 } 2258 2259 /*ARGSUSED*/ 2260 static void 2261 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2262 { 2263 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2264 2265 nxge_dma_mem_free(dmap); 2266 2267 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2268 } 2269 2270 static nxge_status_t 2271 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2272 { 2273 nxge_status_t 
status = NXGE_OK; 2274 int i, j; 2275 uint32_t ndmas, st_tdc; 2276 p_nxge_dma_pt_cfg_t p_all_cfgp; 2277 p_nxge_hw_pt_cfg_t p_cfgp; 2278 p_nxge_dma_pool_t dma_poolp; 2279 p_nxge_dma_common_t *dma_buf_p; 2280 p_nxge_dma_pool_t dma_cntl_poolp; 2281 p_nxge_dma_common_t *dma_cntl_p; 2282 size_t tx_buf_alloc_size; 2283 size_t tx_cntl_alloc_size; 2284 uint32_t *num_chunks; /* per dma */ 2285 2286 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2287 2288 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2289 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2290 st_tdc = p_cfgp->start_tdc; 2291 ndmas = p_cfgp->max_tdcs; 2292 2293 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: " 2294 "p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d", 2295 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs)); 2296 /* 2297 * Allocate memory for each transmit DMA channel. 2298 */ 2299 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2300 KM_SLEEP); 2301 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2302 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2303 2304 dma_cntl_poolp = (p_nxge_dma_pool_t) 2305 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2306 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2307 sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP); 2308 2309 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2310 /* 2311 * N2/NIU has limitation on the descriptor sizes (contiguous 2312 * memory allocation on data buffers to 4M (contig_mem_alloc) 2313 * and little endian for control buffers (must use the ddi/dki mem alloc 2314 * function). The transmit ring is limited to 8K (includes the 2315 * mailbox). 2316 */ 2317 if (nxgep->niu_type == N2_NIU) { 2318 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2319 (!ISP2(nxge_tx_ring_size))) { 2320 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2321 } 2322 } 2323 #endif 2324 2325 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2326 2327 /* 2328 * Assume that each DMA channel will be configured with default 2329 * transmit bufer size for copying transmit data. 2330 * (For packet payload over this limit, packets will not be 2331 * copied.) 2332 */ 2333 tx_buf_alloc_size = (nxge_bcopy_thresh * nxge_tx_ring_size); 2334 2335 /* 2336 * Addresses of transmit descriptor ring and the 2337 * mailbox must be all cache-aligned (64 bytes). 2338 */ 2339 tx_cntl_alloc_size = nxge_tx_ring_size; 2340 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2341 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2342 2343 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2344 if (nxgep->niu_type == N2_NIU) { 2345 if (!ISP2(tx_buf_alloc_size)) { 2346 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2347 "==> nxge_alloc_tx_mem_pool: " 2348 " must be power of 2")); 2349 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2350 goto nxge_alloc_tx_mem_pool_exit; 2351 } 2352 2353 if (tx_buf_alloc_size > (1 << 22)) { 2354 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2355 "==> nxge_alloc_tx_mem_pool: " 2356 " limit size to 4M")); 2357 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2358 goto nxge_alloc_tx_mem_pool_exit; 2359 } 2360 2361 if (tx_cntl_alloc_size < 0x2000) { 2362 tx_cntl_alloc_size = 0x2000; 2363 } 2364 } 2365 #endif 2366 2367 num_chunks = (uint32_t *)KMEM_ZALLOC( 2368 sizeof (uint32_t) * ndmas, KM_SLEEP); 2369 2370 /* 2371 * Allocate memory for transmit buffers and descriptor rings. 2372 * Replace allocation functions with interface functions provided 2373 * by the partition manager when it is available. 
2374 * 2375 * Allocate memory for the transmit buffer pool. 2376 */ 2377 for (i = 0; i < ndmas; i++) { 2378 num_chunks[i] = 0; 2379 status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i], 2380 tx_buf_alloc_size, 2381 nxge_bcopy_thresh, &num_chunks[i]); 2382 if (status != NXGE_OK) { 2383 break; 2384 } 2385 st_tdc++; 2386 } 2387 if (i < ndmas) { 2388 goto nxge_alloc_tx_mem_pool_fail1; 2389 } 2390
2391 st_tdc = p_cfgp->start_tdc; 2392 /* 2393 * Allocate memory for descriptor rings and mailbox. 2394 */ 2395 for (j = 0; j < ndmas; j++) { 2396 status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j], 2397 tx_cntl_alloc_size); 2398 if (status != NXGE_OK) { 2399 break; 2400 } 2401 st_tdc++; 2402 } 2403 if (j < ndmas) { 2404 goto nxge_alloc_tx_mem_pool_fail2; 2405 } 2406
2407 dma_poolp->ndmas = ndmas; 2408 dma_poolp->num_chunks = num_chunks; 2409 dma_poolp->buf_allocated = B_TRUE; 2410 dma_poolp->dma_buf_pool_p = dma_buf_p; 2411 nxgep->tx_buf_pool_p = dma_poolp; 2412
2413 dma_cntl_poolp->ndmas = ndmas; 2414 dma_cntl_poolp->buf_allocated = B_TRUE; 2415 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2416 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2417
2418 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2419 "==> nxge_alloc_tx_mem_pool: start_tdc %d " 2420 "ndmas %d poolp->ndmas %d", 2421 st_tdc, ndmas, dma_poolp->ndmas)); 2422
2423 goto nxge_alloc_tx_mem_pool_exit; 2424
2425 nxge_alloc_tx_mem_pool_fail2: 2426 /* Free control buffers */ 2427 j--; 2428 for (; j >= 0; j--) { 2429 nxge_free_tx_cntl_dma(nxgep, 2430 (p_nxge_dma_common_t)dma_cntl_p[j]); 2431 } 2432
2433 nxge_alloc_tx_mem_pool_fail1: 2434 /* Free data buffers */ 2435 i--; 2436 for (; i >= 0; i--) { 2437 nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i], 2438 num_chunks[i]); 2439 } 2440
2441 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2442 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2443 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2444 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2445 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2446
2447 nxge_alloc_tx_mem_pool_exit: 2448 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2449 "<== nxge_alloc_tx_mem_pool:status 0x%08x", status)); 2450
2451 return (status); 2452 } 2453
2454 static nxge_status_t 2455 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2456 p_nxge_dma_common_t *dmap, size_t alloc_size, 2457 size_t block_size, uint32_t *num_chunks) 2458 { 2459 p_nxge_dma_common_t tx_dmap; 2460 nxge_status_t status = NXGE_OK; 2461 size_t total_alloc_size; 2462 size_t allocated = 0; 2463 int i, size_index, array_size; 2464
2465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2466
2467 tx_dmap = (p_nxge_dma_common_t) 2468 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2469 KM_SLEEP); 2470
2471 total_alloc_size = alloc_size; 2472 i = 0; 2473 size_index = 0; 2474 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2475 while ((alloc_sizes[size_index] < alloc_size) && 2476 (size_index < array_size)) 2477 size_index++; 2478 if (size_index >= array_size) { 2479 size_index = array_size - 1; 2480 } 2481
2482 while ((allocated < total_alloc_size) && 2483 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2484
2485 tx_dmap[i].dma_chunk_index = i; 2486 tx_dmap[i].block_size = block_size; 2487 tx_dmap[i].alength = alloc_sizes[size_index]; 2488 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2489 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2490 tx_dmap[i].dma_channel = dma_channel; 2491 tx_dmap[i].contig_alloc_type = B_FALSE; 2492
2493 /*
2494 * N2/NIU: data buffers must be contiguous as the driver 2495 * needs to call Hypervisor api to set up 2496 * logical pages. 2497 */ 2498 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2499 tx_dmap[i].contig_alloc_type = B_TRUE; 2500 } 2501 2502 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2503 &nxge_tx_dma_attr, 2504 tx_dmap[i].alength, 2505 &nxge_dev_buf_dma_acc_attr, 2506 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2507 (p_nxge_dma_common_t)(&tx_dmap[i])); 2508 if (status != NXGE_OK) { 2509 size_index--; 2510 } else { 2511 i++; 2512 allocated += alloc_sizes[size_index]; 2513 } 2514 } 2515 2516 if (allocated < total_alloc_size) { 2517 goto nxge_alloc_tx_mem_fail1; 2518 } 2519 2520 *num_chunks = i; 2521 *dmap = tx_dmap; 2522 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2523 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2524 *dmap, i)); 2525 goto nxge_alloc_tx_mem_exit; 2526 2527 nxge_alloc_tx_mem_fail1: 2528 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2529 2530 nxge_alloc_tx_mem_exit: 2531 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2532 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 2533 2534 return (status); 2535 } 2536 2537 /*ARGSUSED*/ 2538 static void 2539 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2540 uint32_t num_chunks) 2541 { 2542 int i; 2543 2544 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 2545 2546 for (i = 0; i < num_chunks; i++) { 2547 nxge_dma_mem_free(dmap++); 2548 } 2549 2550 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 2551 } 2552 2553 /*ARGSUSED*/ 2554 static nxge_status_t 2555 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2556 p_nxge_dma_common_t *dmap, size_t size) 2557 { 2558 p_nxge_dma_common_t tx_dmap; 2559 nxge_status_t status = NXGE_OK; 2560 2561 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 2562 tx_dmap = (p_nxge_dma_common_t) 2563 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2564 2565 tx_dmap->contig_alloc_type = B_FALSE; 2566 2567 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2568 &nxge_desc_dma_attr, 2569 size, 2570 &nxge_dev_desc_dma_acc_attr, 2571 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2572 tx_dmap); 2573 if (status != NXGE_OK) { 2574 goto nxge_alloc_tx_cntl_dma_fail1; 2575 } 2576 2577 *dmap = tx_dmap; 2578 goto nxge_alloc_tx_cntl_dma_exit; 2579 2580 nxge_alloc_tx_cntl_dma_fail1: 2581 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 2582 2583 nxge_alloc_tx_cntl_dma_exit: 2584 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2585 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 2586 2587 return (status); 2588 } 2589 2590 /*ARGSUSED*/ 2591 static void 2592 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2593 { 2594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 2595 2596 nxge_dma_mem_free(dmap); 2597 2598 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 2599 } 2600 2601 static void 2602 nxge_free_tx_mem_pool(p_nxge_t nxgep) 2603 { 2604 uint32_t i, ndmas; 2605 p_nxge_dma_pool_t dma_poolp; 2606 p_nxge_dma_common_t *dma_buf_p; 2607 p_nxge_dma_pool_t dma_cntl_poolp; 2608 p_nxge_dma_common_t *dma_cntl_p; 2609 uint32_t *num_chunks; 2610 2611 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool")); 2612 2613 dma_poolp = nxgep->tx_buf_pool_p; 2614 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2615 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2616 "<== nxge_free_tx_mem_pool " 2617 "(null rx buf pool or buf not allocated")); 2618 return; 2619 } 2620 2621 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 2622 if 
(dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2624 "<== nxge_free_tx_mem_pool " 2625 "(null tx cntl buf pool or cntl buf not allocated")); 2626 return; 2627 } 2628 2629 dma_buf_p = dma_poolp->dma_buf_pool_p; 2630 num_chunks = dma_poolp->num_chunks; 2631 2632 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2633 ndmas = dma_cntl_poolp->ndmas; 2634 2635 for (i = 0; i < ndmas; i++) { 2636 nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]); 2637 } 2638 2639 for (i = 0; i < ndmas; i++) { 2640 nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]); 2641 } 2642 2643 for (i = 0; i < ndmas; i++) { 2644 KMEM_FREE(dma_buf_p[i], 2645 sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2646 KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t)); 2647 } 2648 2649 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2650 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t)); 2651 KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t)); 2652 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t)); 2653 KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t)); 2654 2655 nxgep->tx_buf_pool_p = NULL; 2656 nxgep->tx_cntl_pool_p = NULL; 2657 2658 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool")); 2659 } 2660 2661 /*ARGSUSED*/ 2662 static nxge_status_t 2663 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 2664 struct ddi_dma_attr *dma_attrp, 2665 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2666 p_nxge_dma_common_t dma_p) 2667 { 2668 caddr_t kaddrp; 2669 int ddi_status = DDI_SUCCESS; 2670 boolean_t contig_alloc_type; 2671 2672 contig_alloc_type = dma_p->contig_alloc_type; 2673 2674 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 2675 /* 2676 * contig_alloc_type for contiguous memory only allowed 2677 * for N2/NIU. 
2678 */ 2679 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2680 "nxge_dma_mem_alloc: alloc type not allows (%d)", 2681 dma_p->contig_alloc_type)); 2682 return (NXGE_ERROR | NXGE_DDI_FAILED); 2683 } 2684 2685 dma_p->dma_handle = NULL; 2686 dma_p->acc_handle = NULL; 2687 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 2688 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 2689 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 2690 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2691 if (ddi_status != DDI_SUCCESS) { 2692 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2693 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2694 return (NXGE_ERROR | NXGE_DDI_FAILED); 2695 } 2696 2697 switch (contig_alloc_type) { 2698 case B_FALSE: 2699 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, 2700 acc_attr_p, 2701 xfer_flags, 2702 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2703 &dma_p->acc_handle); 2704 if (ddi_status != DDI_SUCCESS) { 2705 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2706 "nxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2707 ddi_dma_free_handle(&dma_p->dma_handle); 2708 dma_p->dma_handle = NULL; 2709 return (NXGE_ERROR | NXGE_DDI_FAILED); 2710 } 2711 if (dma_p->alength < length) { 2712 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2713 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 2714 "< length.")); 2715 ddi_dma_mem_free(&dma_p->acc_handle); 2716 ddi_dma_free_handle(&dma_p->dma_handle); 2717 dma_p->acc_handle = NULL; 2718 dma_p->dma_handle = NULL; 2719 return (NXGE_ERROR); 2720 } 2721 2722 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2723 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2724 &dma_p->dma_cookie, &dma_p->ncookies); 2725 if (ddi_status != DDI_DMA_MAPPED) { 2726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2727 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2728 "(staus 0x%x ncookies %d.)", ddi_status, 2729 dma_p->ncookies)); 2730 if (dma_p->acc_handle) { 2731 ddi_dma_mem_free(&dma_p->acc_handle); 2732 dma_p->acc_handle = NULL; 2733 } 2734 ddi_dma_free_handle(&dma_p->dma_handle); 2735 dma_p->dma_handle = NULL; 2736 return (NXGE_ERROR | NXGE_DDI_FAILED); 2737 } 2738 2739 if (dma_p->ncookies != 1) { 2740 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2741 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 2742 "> 1 cookie" 2743 "(staus 0x%x ncookies %d.)", ddi_status, 2744 dma_p->ncookies)); 2745 if (dma_p->acc_handle) { 2746 ddi_dma_mem_free(&dma_p->acc_handle); 2747 dma_p->acc_handle = NULL; 2748 } 2749 ddi_dma_free_handle(&dma_p->dma_handle); 2750 dma_p->dma_handle = NULL; 2751 return (NXGE_ERROR); 2752 } 2753 break; 2754 2755 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2756 case B_TRUE: 2757 kaddrp = (caddr_t)contig_mem_alloc(length); 2758 if (kaddrp == NULL) { 2759 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2760 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 2761 ddi_dma_free_handle(&dma_p->dma_handle); 2762 return (NXGE_ERROR | NXGE_DDI_FAILED); 2763 } 2764 2765 dma_p->alength = length; 2766 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2767 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2768 &dma_p->dma_cookie, &dma_p->ncookies); 2769 if (ddi_status != DDI_DMA_MAPPED) { 2770 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2771 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 2772 "(status 0x%x ncookies %d.)", ddi_status, 2773 dma_p->ncookies)); 2774 2775 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2776 "==> nxge_dma_mem_alloc: (not mapped)" 2777 "length %lu (0x%x) " 2778 "free contig kaddrp $%p " 2779 "va_to_pa $%p", 2780 length, length, 2781 kaddrp, 2782 va_to_pa(kaddrp))); 2783 2784 2785 
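/*
 * The bind failed: undo the contiguous allocation and release the
 * DMA handle before reporting the failure back to the caller.
 */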
contig_mem_free((void *)kaddrp, length); 2786 ddi_dma_free_handle(&dma_p->dma_handle); 2787 2788 dma_p->dma_handle = NULL; 2789 dma_p->acc_handle = NULL; 2790 dma_p->alength = NULL; 2791 dma_p->kaddrp = NULL; 2792 2793 return (NXGE_ERROR | NXGE_DDI_FAILED); 2794 } 2795 2796 if (dma_p->ncookies != 1 || 2797 (dma_p->dma_cookie.dmac_laddress == NULL)) { 2798 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2799 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 2800 "cookie or " 2801 "dmac_laddress is NULL $%p size %d " 2802 " (status 0x%x ncookies %d.)", 2803 ddi_status, 2804 dma_p->dma_cookie.dmac_laddress, 2805 dma_p->dma_cookie.dmac_size, 2806 dma_p->ncookies)); 2807 2808 contig_mem_free((void *)kaddrp, length); 2809 ddi_dma_free_handle(&dma_p->dma_handle); 2810 2811 dma_p->alength = 0; 2812 dma_p->dma_handle = NULL; 2813 dma_p->acc_handle = NULL; 2814 dma_p->kaddrp = NULL; 2815 2816 return (NXGE_ERROR | NXGE_DDI_FAILED); 2817 } 2818 break; 2819 2820 #else 2821 case B_TRUE: 2822 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2823 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 2824 return (NXGE_ERROR | NXGE_DDI_FAILED); 2825 #endif 2826 } 2827 2828 dma_p->kaddrp = kaddrp; 2829 dma_p->last_kaddrp = (unsigned char *)kaddrp + 2830 dma_p->alength - RXBUF_64B_ALIGNED; 2831 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2832 dma_p->last_ioaddr_pp = 2833 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 2834 dma_p->alength - RXBUF_64B_ALIGNED; 2835 2836 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2837 2838 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2839 dma_p->orig_ioaddr_pp = 2840 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 2841 dma_p->orig_alength = length; 2842 dma_p->orig_kaddrp = kaddrp; 2843 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 2844 #endif 2845 2846 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 2847 "dma buffer allocated: dma_p $%p " 2848 "return dmac_ladress from cookie $%p cookie dmac_size %d " 2849 "dma_p->ioaddr_p $%p " 2850 "dma_p->orig_ioaddr_p $%p " 2851 "orig_vatopa $%p " 2852 "alength %d (0x%x) " 2853 "kaddrp $%p " 2854 "length %d (0x%x)", 2855 dma_p, 2856 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 2857 dma_p->ioaddr_pp, 2858 dma_p->orig_ioaddr_pp, 2859 dma_p->orig_vatopa, 2860 dma_p->alength, dma_p->alength, 2861 kaddrp, 2862 length, length)); 2863 2864 return (NXGE_OK); 2865 } 2866 2867 static void 2868 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 2869 { 2870 if (dma_p->dma_handle != NULL) { 2871 if (dma_p->ncookies) { 2872 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2873 dma_p->ncookies = 0; 2874 } 2875 ddi_dma_free_handle(&dma_p->dma_handle); 2876 dma_p->dma_handle = NULL; 2877 } 2878 2879 if (dma_p->acc_handle != NULL) { 2880 ddi_dma_mem_free(&dma_p->acc_handle); 2881 dma_p->acc_handle = NULL; 2882 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2883 } 2884 2885 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2886 if (dma_p->contig_alloc_type && 2887 dma_p->orig_kaddrp && dma_p->orig_alength) { 2888 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 2889 "kaddrp $%p (orig_kaddrp $%p)" 2890 "mem type %d ", 2891 "orig_alength %d " 2892 "alength 0x%x (%d)", 2893 dma_p->kaddrp, 2894 dma_p->orig_kaddrp, 2895 dma_p->contig_alloc_type, 2896 dma_p->orig_alength, 2897 dma_p->alength, dma_p->alength)); 2898 2899 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 2900 dma_p->orig_alength = NULL; 2901 dma_p->orig_kaddrp = NULL; 2902 dma_p->contig_alloc_type = B_FALSE; 2903 } 2904 #endif 2905 dma_p->kaddrp = NULL; 2906 
dma_p->alength = NULL; 2907 } 2908 2909 /* 2910 * nxge_m_start() -- start transmitting and receiving. 2911 * 2912 * This function is called by the MAC layer when the first 2913 * stream is open to prepare the hardware ready for sending 2914 * and transmitting packets. 2915 */ 2916 static int 2917 nxge_m_start(void *arg) 2918 { 2919 p_nxge_t nxgep = (p_nxge_t)arg; 2920 2921 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 2922 2923 MUTEX_ENTER(nxgep->genlock); 2924 if (nxge_init(nxgep) != NXGE_OK) { 2925 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2926 "<== nxge_m_start: initialization failed")); 2927 MUTEX_EXIT(nxgep->genlock); 2928 return (EIO); 2929 } 2930 2931 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 2932 goto nxge_m_start_exit; 2933 /* 2934 * Start timer to check the system error and tx hangs 2935 */ 2936 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state, 2937 NXGE_CHECK_TIMER); 2938 2939 nxgep->link_notify = B_TRUE; 2940 2941 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 2942 2943 nxge_m_start_exit: 2944 MUTEX_EXIT(nxgep->genlock); 2945 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 2946 return (0); 2947 } 2948 2949 /* 2950 * nxge_m_stop(): stop transmitting and receiving. 2951 */ 2952 static void 2953 nxge_m_stop(void *arg) 2954 { 2955 p_nxge_t nxgep = (p_nxge_t)arg; 2956 2957 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 2958 2959 nxge_intrs_disable(nxgep); 2960 2961 if (nxgep->nxge_timerid) { 2962 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 2963 nxgep->nxge_timerid = 0; 2964 } 2965 2966 MUTEX_ENTER(nxgep->genlock); 2967 nxge_uninit(nxgep); 2968 2969 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 2970 2971 MUTEX_EXIT(nxgep->genlock); 2972 2973 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 2974 } 2975 2976 static int 2977 nxge_m_unicst(void *arg, const uint8_t *macaddr) 2978 { 2979 p_nxge_t nxgep = (p_nxge_t)arg; 2980 struct ether_addr addrp; 2981 2982 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 2983 2984 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 2985 if (nxge_set_mac_addr(nxgep, &addrp)) { 2986 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2987 "<== nxge_m_unicst: set unitcast failed")); 2988 return (EINVAL); 2989 } 2990 2991 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 2992 2993 return (0); 2994 } 2995 2996 static int 2997 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 2998 { 2999 p_nxge_t nxgep = (p_nxge_t)arg; 3000 struct ether_addr addrp; 3001 3002 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3003 "==> nxge_m_multicst: add %d", add)); 3004 3005 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3006 if (add) { 3007 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3008 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3009 "<== nxge_m_multicst: add multicast failed")); 3010 return (EINVAL); 3011 } 3012 } else { 3013 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3014 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3015 "<== nxge_m_multicst: del multicast failed")); 3016 return (EINVAL); 3017 } 3018 } 3019 3020 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3021 3022 return (0); 3023 } 3024 3025 static int 3026 nxge_m_promisc(void *arg, boolean_t on) 3027 { 3028 p_nxge_t nxgep = (p_nxge_t)arg; 3029 3030 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3031 "==> nxge_m_promisc: on %d", on)); 3032 3033 if (nxge_set_promisc(nxgep, on)) { 3034 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3035 "<== nxge_m_promisc: set promisc failed")); 3036 return (EINVAL); 3037 } 3038 3039 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3040 "<== nxge_m_promisc: on %d", on)); 3041 3042 return (0); 3043 } 3044 3045 
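/*
 * nxge_m_ioctl() -- process loopback (LB_*), NDD (ND_GET/ND_SET) and
 * nxge-private ioctls. Only LB_SET_MODE and ND_SET require network
 * configuration privilege (secpolicy_net_config() or drv_priv());
 * the remaining commands are dispatched directly.
 */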
static void 3046 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3047 { 3048 p_nxge_t nxgep = (p_nxge_t)arg; 3049 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 3050 boolean_t need_privilege; 3051 int err; 3052 int cmd; 3053 3054 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3055 3056 iocp = (struct iocblk *)mp->b_rptr; 3057 iocp->ioc_error = 0; 3058 need_privilege = B_TRUE; 3059 cmd = iocp->ioc_cmd; 3060 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3061 switch (cmd) { 3062 default: 3063 miocnak(wq, mp, 0, EINVAL); 3064 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3065 return; 3066 3067 case LB_GET_INFO_SIZE: 3068 case LB_GET_INFO: 3069 case LB_GET_MODE: 3070 need_privilege = B_FALSE; 3071 break; 3072 case LB_SET_MODE: 3073 break; 3074 3075 case ND_GET: 3076 need_privilege = B_FALSE; 3077 break; 3078 case ND_SET: 3079 break; 3080 3081 case NXGE_GET_MII: 3082 case NXGE_PUT_MII: 3083 case NXGE_GET64: 3084 case NXGE_PUT64: 3085 case NXGE_GET_TX_RING_SZ: 3086 case NXGE_GET_TX_DESC: 3087 case NXGE_TX_SIDE_RESET: 3088 case NXGE_RX_SIDE_RESET: 3089 case NXGE_GLOBAL_RESET: 3090 case NXGE_RESET_MAC: 3091 case NXGE_TX_REGS_DUMP: 3092 case NXGE_RX_REGS_DUMP: 3093 case NXGE_INT_REGS_DUMP: 3094 case NXGE_VIR_INT_REGS_DUMP: 3095 case NXGE_PUT_TCAM: 3096 case NXGE_GET_TCAM: 3097 case NXGE_RTRACE: 3098 case NXGE_RDUMP: 3099 3100 need_privilege = B_FALSE; 3101 break; 3102 case NXGE_INJECT_ERR: 3103 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3104 nxge_err_inject(nxgep, wq, mp); 3105 break; 3106 } 3107 3108 if (need_privilege) { 3109 if (secpolicy_net_config != NULL) 3110 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3111 else 3112 err = drv_priv(iocp->ioc_cr); 3113 if (err != 0) { 3114 miocnak(wq, mp, 0, err); 3115 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3116 "<== nxge_m_ioctl: no priv")); 3117 return; 3118 } 3119 } 3120 3121 switch (cmd) { 3122 case ND_GET: 3123 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command")); 3124 case ND_SET: 3125 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command")); 3126 nxge_param_ioctl(nxgep, wq, mp, iocp); 3127 break; 3128 3129 case LB_GET_MODE: 3130 case LB_SET_MODE: 3131 case LB_GET_INFO_SIZE: 3132 case LB_GET_INFO: 3133 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3134 break; 3135 3136 case NXGE_GET_MII: 3137 case NXGE_PUT_MII: 3138 case NXGE_PUT_TCAM: 3139 case NXGE_GET_TCAM: 3140 case NXGE_GET64: 3141 case NXGE_PUT64: 3142 case NXGE_GET_TX_RING_SZ: 3143 case NXGE_GET_TX_DESC: 3144 case NXGE_TX_SIDE_RESET: 3145 case NXGE_RX_SIDE_RESET: 3146 case NXGE_GLOBAL_RESET: 3147 case NXGE_RESET_MAC: 3148 case NXGE_TX_REGS_DUMP: 3149 case NXGE_RX_REGS_DUMP: 3150 case NXGE_INT_REGS_DUMP: 3151 case NXGE_VIR_INT_REGS_DUMP: 3152 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3153 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3154 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3155 break; 3156 } 3157 3158 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3159 } 3160 3161 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3162 3163 static void 3164 nxge_m_resources(void *arg) 3165 { 3166 p_nxge_t nxgep = arg; 3167 mac_rx_fifo_t mrf; 3168 p_rx_rcr_rings_t rcr_rings; 3169 p_rx_rcr_ring_t *rcr_p; 3170 uint32_t i, ndmas; 3171 nxge_status_t status; 3172 3173 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3174 3175 MUTEX_ENTER(nxgep->genlock); 3176 3177 /* 3178 * CR 6492541 Check to see if the drv_state has been initialized, 3179 * if not * call nxge_init(). 
3180 */ 3181 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3182 status = nxge_init(nxgep); 3183 if (status != NXGE_OK) 3184 goto nxge_m_resources_exit; 3185 } 3186 3187 mrf.mrf_type = MAC_RX_FIFO; 3188 mrf.mrf_blank = nxge_rx_hw_blank; 3189 mrf.mrf_arg = (void *)nxgep; 3190 3191 mrf.mrf_normal_blank_time = 128; 3192 mrf.mrf_normal_pkt_count = 8; 3193 rcr_rings = nxgep->rx_rcr_rings; 3194 rcr_p = rcr_rings->rcr_rings; 3195 ndmas = rcr_rings->ndmas; 3196 3197 /* 3198 * Export our receive resources to the MAC layer. 3199 */ 3200 for (i = 0; i < ndmas; i++) { 3201 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle = 3202 mac_resource_add(nxgep->mach, 3203 (mac_resource_t *)&mrf); 3204 3205 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3206 "==> nxge_m_resources: vdma %d dma %d " 3207 "rcrptr 0x%016llx mac_handle 0x%016llx", 3208 i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc, 3209 rcr_p[i], 3210 ((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle)); 3211 } 3212 3213 nxge_m_resources_exit: 3214 MUTEX_EXIT(nxgep->genlock); 3215 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3216 } 3217 3218 static void 3219 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3220 { 3221 p_nxge_mmac_stats_t mmac_stats; 3222 int i; 3223 nxge_mmac_t *mmac_info; 3224 3225 mmac_info = &nxgep->nxge_mmac_info; 3226 3227 mmac_stats = &nxgep->statsp->mmac_stats; 3228 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 3229 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 3230 3231 for (i = 0; i < ETHERADDRL; i++) { 3232 if (factory) { 3233 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3234 = mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i]; 3235 } else { 3236 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 3237 = mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 3238 } 3239 } 3240 } 3241 3242 /* 3243 * nxge_altmac_set() -- Set an alternate MAC address 3244 */ 3245 static int 3246 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 3247 { 3248 uint8_t addrn; 3249 uint8_t portn; 3250 npi_mac_addr_t altmac; 3251 3252 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 3253 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 3254 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 3255 3256 portn = nxgep->mac.portnum; 3257 addrn = (uint8_t)slot - 1; 3258 3259 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 3260 addrn, &altmac) != NPI_SUCCESS) 3261 return (EIO); 3262 /* 3263 * Enable comparison with the alternate MAC address. 3264 * While the first alternate addr is enabled by bit 1 of register 3265 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 3266 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 3267 * accordingly before calling npi_mac_altaddr_entry. 3268 */ 3269 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3270 addrn = (uint8_t)slot - 1; 3271 else 3272 addrn = (uint8_t)slot; 3273 3274 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 3275 != NPI_SUCCESS) 3276 return (EIO); 3277 3278 return (0); 3279 } 3280 3281 /* 3282 * nxeg_m_mmac_add() - find an unused address slot, set the address 3283 * value to the one specified, enable the port to start filtering on 3284 * the new MAC address. Returns 0 on success. 
3285 */ 3286 static int 3287 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 3288 { 3289 p_nxge_t nxgep = arg; 3290 mac_addr_slot_t slot; 3291 nxge_mmac_t *mmac_info; 3292 int err; 3293 nxge_status_t status; 3294 3295 mutex_enter(nxgep->genlock); 3296 3297 /* 3298 * Make sure that nxge is initialized, if _start() has 3299 * not been called. 3300 */ 3301 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3302 status = nxge_init(nxgep); 3303 if (status != NXGE_OK) { 3304 mutex_exit(nxgep->genlock); 3305 return (ENXIO); 3306 } 3307 } 3308 3309 mmac_info = &nxgep->nxge_mmac_info; 3310 if (mmac_info->naddrfree == 0) { 3311 mutex_exit(nxgep->genlock); 3312 return (ENOSPC); 3313 } 3314 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3315 maddr->mma_addrlen)) { 3316 mutex_exit(nxgep->genlock); 3317 return (EINVAL); 3318 } 3319 /* 3320 * Search for the first available slot. Because naddrfree 3321 * is not zero, we are guaranteed to find one. 3322 * Slot 0 is for unique (primary) MAC. The first alternate 3323 * MAC slot is slot 1. 3324 * Each of the first two ports of Neptune has 16 alternate 3325 * MAC slots but only the first 7 (of 15) slots have assigned factory 3326 * MAC addresses. We first search among the slots without bundled 3327 * factory MACs. If we fail to find one in that range, then we 3328 * search the slots with bundled factory MACs. A factory MAC 3329 * will be wasted while the slot is used with a user MAC address. 3330 * But the slot could be used by factory MAC again after calling 3331 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 3332 */ 3333 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 3334 for (slot = mmac_info->num_factory_mmac + 1; 3335 slot <= mmac_info->num_mmac; slot++) { 3336 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3337 break; 3338 } 3339 if (slot > mmac_info->num_mmac) { 3340 for (slot = 1; slot <= mmac_info->num_factory_mmac; 3341 slot++) { 3342 if (!(mmac_info->mac_pool[slot].flags 3343 & MMAC_SLOT_USED)) 3344 break; 3345 } 3346 } 3347 } else { 3348 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 3349 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3350 break; 3351 } 3352 } 3353 ASSERT(slot <= mmac_info->num_mmac); 3354 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 3355 mutex_exit(nxgep->genlock); 3356 return (err); 3357 } 3358 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 3359 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 3360 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3361 mmac_info->naddrfree--; 3362 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3363 3364 maddr->mma_slot = slot; 3365 3366 mutex_exit(nxgep->genlock); 3367 return (0); 3368 } 3369 3370 /* 3371 * This function reserves an unused slot and programs the slot and the HW 3372 * with a factory mac address. 3373 */ 3374 static int 3375 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 3376 { 3377 p_nxge_t nxgep = arg; 3378 mac_addr_slot_t slot; 3379 nxge_mmac_t *mmac_info; 3380 int err; 3381 nxge_status_t status; 3382 3383 mutex_enter(nxgep->genlock); 3384 3385 /* 3386 * Make sure that nxge is initialized, if _start() has 3387 * not been called. 
3388 */ 3389 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3390 status = nxge_init(nxgep); 3391 if (status != NXGE_OK) { 3392 mutex_exit(nxgep->genlock); 3393 return (ENXIO); 3394 } 3395 } 3396 3397 mmac_info = &nxgep->nxge_mmac_info; 3398 if (mmac_info->naddrfree == 0) { 3399 mutex_exit(nxgep->genlock); 3400 return (ENOSPC); 3401 } 3402 3403 slot = maddr->mma_slot; 3404 if (slot == -1) { /* -1: Take the first available slot */ 3405 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 3406 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 3407 break; 3408 } 3409 if (slot > mmac_info->num_factory_mmac) { 3410 mutex_exit(nxgep->genlock); 3411 return (ENOSPC); 3412 } 3413 } 3414 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 3415 /* 3416 * Do not support factory MAC at a slot greater than 3417 * num_factory_mmac even when there are available factory 3418 * MAC addresses because the alternate MACs are bundled with 3419 * slot[1] through slot[num_factory_mmac] 3420 */ 3421 mutex_exit(nxgep->genlock); 3422 return (EINVAL); 3423 } 3424 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3425 mutex_exit(nxgep->genlock); 3426 return (EBUSY); 3427 } 3428 /* Verify the address to be reserved */ 3429 if (!mac_unicst_verify(nxgep->mach, 3430 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 3431 mutex_exit(nxgep->genlock); 3432 return (EINVAL); 3433 } 3434 if (err = nxge_altmac_set(nxgep, 3435 mmac_info->factory_mac_pool[slot], slot)) { 3436 mutex_exit(nxgep->genlock); 3437 return (err); 3438 } 3439 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 3440 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3441 mmac_info->naddrfree--; 3442 3443 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 3444 mutex_exit(nxgep->genlock); 3445 3446 /* Pass info back to the caller */ 3447 maddr->mma_slot = slot; 3448 maddr->mma_addrlen = ETHERADDRL; 3449 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 3450 3451 return (0); 3452 } 3453 3454 /* 3455 * Remove the specified mac address and update the HW not to filter 3456 * the mac address anymore. 3457 */ 3458 static int 3459 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 3460 { 3461 p_nxge_t nxgep = arg; 3462 nxge_mmac_t *mmac_info; 3463 uint8_t addrn; 3464 uint8_t portn; 3465 int err = 0; 3466 nxge_status_t status; 3467 3468 mutex_enter(nxgep->genlock); 3469 3470 /* 3471 * Make sure that nxge is initialized, if _start() has 3472 * not been called. 3473 */ 3474 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3475 status = nxge_init(nxgep); 3476 if (status != NXGE_OK) { 3477 mutex_exit(nxgep->genlock); 3478 return (ENXIO); 3479 } 3480 } 3481 3482 mmac_info = &nxgep->nxge_mmac_info; 3483 if (slot < 1 || slot > mmac_info->num_mmac) { 3484 mutex_exit(nxgep->genlock); 3485 return (EINVAL); 3486 } 3487 3488 portn = nxgep->mac.portnum; 3489 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 3490 addrn = (uint8_t)slot - 1; 3491 else 3492 addrn = (uint8_t)slot; 3493 3494 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3495 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 3496 == NPI_SUCCESS) { 3497 mmac_info->naddrfree++; 3498 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 3499 /* 3500 * Regardless if the MAC we just stopped filtering 3501 * is a user addr or a facory addr, we must set 3502 * the MMAC_VENDOR_ADDR flag if this slot has an 3503 * associated factory MAC to indicate that a factory 3504 * MAC is available. 
3505 */ 3506 if (slot <= mmac_info->num_factory_mmac) { 3507 mmac_info->mac_pool[slot].flags 3508 |= MMAC_VENDOR_ADDR; 3509 } 3510 /* 3511 * Clear mac_pool[slot].addr so that kstat shows 0 3512 * alternate MAC address if the slot is not used. 3513 * (But nxge_m_mmac_get returns the factory MAC even 3514 * when the slot is not used!) 3515 */ 3516 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 3517 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3518 } else { 3519 err = EIO; 3520 } 3521 } else { 3522 err = EINVAL; 3523 } 3524 3525 mutex_exit(nxgep->genlock); 3526 return (err); 3527 } 3528 3529 3530 /* 3531 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 3532 */ 3533 static int 3534 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 3535 { 3536 p_nxge_t nxgep = arg; 3537 mac_addr_slot_t slot; 3538 nxge_mmac_t *mmac_info; 3539 int err = 0; 3540 nxge_status_t status; 3541 3542 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 3543 maddr->mma_addrlen)) 3544 return (EINVAL); 3545 3546 slot = maddr->mma_slot; 3547 3548 mutex_enter(nxgep->genlock); 3549 3550 /* 3551 * Make sure that nxge is initialized, if _start() has 3552 * not been called. 3553 */ 3554 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3555 status = nxge_init(nxgep); 3556 if (status != NXGE_OK) { 3557 mutex_exit(nxgep->genlock); 3558 return (ENXIO); 3559 } 3560 } 3561 3562 mmac_info = &nxgep->nxge_mmac_info; 3563 if (slot < 1 || slot > mmac_info->num_mmac) { 3564 mutex_exit(nxgep->genlock); 3565 return (EINVAL); 3566 } 3567 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3568 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 3569 != 0) { 3570 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 3571 ETHERADDRL); 3572 /* 3573 * Assume that the MAC passed down from the caller 3574 * is not a factory MAC address (The user should 3575 * call mmac_remove followed by mmac_reserve if 3576 * he wants to use the factory MAC for this slot). 3577 */ 3578 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 3579 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 3580 } 3581 } else { 3582 err = EINVAL; 3583 } 3584 mutex_exit(nxgep->genlock); 3585 return (err); 3586 } 3587 3588 /* 3589 * nxge_m_mmac_get() - Get the MAC address and other information 3590 * related to the slot. mma_flags should be set to 0 in the call. 3591 * Note: although kstat shows MAC address as zero when a slot is 3592 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 3593 * to the caller as long as the slot is not using a user MAC address. 3594 * The following table shows the rules, 3595 * 3596 * USED VENDOR mma_addr 3597 * ------------------------------------------------------------ 3598 * (1) Slot uses a user MAC: yes no user MAC 3599 * (2) Slot uses a factory MAC: yes yes factory MAC 3600 * (3) Slot is not used but is 3601 * factory MAC capable: no yes factory MAC 3602 * (4) Slot is not used and is 3603 * not factory MAC capable: no no 0 3604 * ------------------------------------------------------------ 3605 */ 3606 static int 3607 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 3608 { 3609 nxge_t *nxgep = arg; 3610 mac_addr_slot_t slot; 3611 nxge_mmac_t *mmac_info; 3612 nxge_status_t status; 3613 3614 slot = maddr->mma_slot; 3615 3616 mutex_enter(nxgep->genlock); 3617 3618 /* 3619 * Make sure that nxge is initialized, if _start() has 3620 * not been called. 
3621 */ 3622 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3623 status = nxge_init(nxgep); 3624 if (status != NXGE_OK) { 3625 mutex_exit(nxgep->genlock); 3626 return (ENXIO); 3627 } 3628 } 3629 3630 mmac_info = &nxgep->nxge_mmac_info; 3631 3632 if (slot < 1 || slot > mmac_info->num_mmac) { 3633 mutex_exit(nxgep->genlock); 3634 return (EINVAL); 3635 } 3636 maddr->mma_flags = 0; 3637 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 3638 maddr->mma_flags |= MMAC_SLOT_USED; 3639 3640 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 3641 maddr->mma_flags |= MMAC_VENDOR_ADDR; 3642 bcopy(mmac_info->factory_mac_pool[slot], 3643 maddr->mma_addr, ETHERADDRL); 3644 maddr->mma_addrlen = ETHERADDRL; 3645 } else { 3646 if (maddr->mma_flags & MMAC_SLOT_USED) { 3647 bcopy(mmac_info->mac_pool[slot].addr, 3648 maddr->mma_addr, ETHERADDRL); 3649 maddr->mma_addrlen = ETHERADDRL; 3650 } else { 3651 bzero(maddr->mma_addr, ETHERADDRL); 3652 maddr->mma_addrlen = 0; 3653 } 3654 } 3655 mutex_exit(nxgep->genlock); 3656 return (0); 3657 } 3658 3659 3660 static boolean_t 3661 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3662 { 3663 nxge_t *nxgep = arg; 3664 uint32_t *txflags = cap_data; 3665 multiaddress_capab_t *mmacp = cap_data; 3666 3667 switch (cap) { 3668 case MAC_CAPAB_HCKSUM: 3669 *txflags = HCKSUM_INET_PARTIAL; 3670 break; 3671 case MAC_CAPAB_POLL: 3672 /* 3673 * There's nothing for us to fill in, simply returning 3674 * B_TRUE stating that we support polling is sufficient. 3675 */ 3676 break; 3677 3678 case MAC_CAPAB_MULTIADDRESS: 3679 mutex_enter(nxgep->genlock); 3680 3681 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 3682 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 3683 mmacp->maddr_flag = 0; /* 0 is requried by PSARC2006/265 */ 3684 /* 3685 * maddr_handle is driver's private data, passed back to 3686 * entry point functions as arg. 3687 */ 3688 mmacp->maddr_handle = nxgep; 3689 mmacp->maddr_add = nxge_m_mmac_add; 3690 mmacp->maddr_remove = nxge_m_mmac_remove; 3691 mmacp->maddr_modify = nxge_m_mmac_modify; 3692 mmacp->maddr_get = nxge_m_mmac_get; 3693 mmacp->maddr_reserve = nxge_m_mmac_reserve; 3694 3695 mutex_exit(nxgep->genlock); 3696 break; 3697 default: 3698 return (B_FALSE); 3699 } 3700 return (B_TRUE); 3701 } 3702 3703 /* 3704 * Module loading and removing entry points. 3705 */ 3706 3707 static struct cb_ops nxge_cb_ops = { 3708 nodev, /* cb_open */ 3709 nodev, /* cb_close */ 3710 nodev, /* cb_strategy */ 3711 nodev, /* cb_print */ 3712 nodev, /* cb_dump */ 3713 nodev, /* cb_read */ 3714 nodev, /* cb_write */ 3715 nodev, /* cb_ioctl */ 3716 nodev, /* cb_devmap */ 3717 nodev, /* cb_mmap */ 3718 nodev, /* cb_segmap */ 3719 nochpoll, /* cb_chpoll */ 3720 ddi_prop_op, /* cb_prop_op */ 3721 NULL, 3722 D_MP, /* cb_flag */ 3723 CB_REV, /* rev */ 3724 nodev, /* int (*cb_aread)() */ 3725 nodev /* int (*cb_awrite)() */ 3726 }; 3727 3728 static struct dev_ops nxge_dev_ops = { 3729 DEVO_REV, /* devo_rev */ 3730 0, /* devo_refcnt */ 3731 nulldev, 3732 nulldev, /* devo_identify */ 3733 nulldev, /* devo_probe */ 3734 nxge_attach, /* devo_attach */ 3735 nxge_detach, /* devo_detach */ 3736 nodev, /* devo_reset */ 3737 &nxge_cb_ops, /* devo_cb_ops */ 3738 (struct bus_ops *)NULL, /* devo_bus_ops */ 3739 ddi_power /* devo_power */ 3740 }; 3741 3742 extern struct mod_ops mod_driverops; 3743 3744 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet %I%" 3745 3746 /* 3747 * Module linkage information for the kernel. 
3748 */ 3749 static struct modldrv nxge_modldrv = { 3750 &mod_driverops, 3751 NXGE_DESC_VER, 3752 &nxge_dev_ops 3753 }; 3754 3755 static struct modlinkage modlinkage = { 3756 MODREV_1, (void *) &nxge_modldrv, NULL 3757 }; 3758 3759 int 3760 _init(void) 3761 { 3762 int status; 3763 3764 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3765 mac_init_ops(&nxge_dev_ops, "nxge"); 3766 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 3767 if (status != 0) { 3768 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3769 "failed to init device soft state")); 3770 goto _init_exit; 3771 } 3772 3773 status = mod_install(&modlinkage); 3774 if (status != 0) { 3775 ddi_soft_state_fini(&nxge_list); 3776 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 3777 goto _init_exit; 3778 } 3779 3780 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3781 3782 _init_exit: 3783 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3784 3785 return (status); 3786 } 3787 3788 int 3789 _fini(void) 3790 { 3791 int status; 3792 3793 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3794 3795 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3796 3797 if (nxge_mblks_pending) 3798 return (EBUSY); 3799 3800 status = mod_remove(&modlinkage); 3801 if (status != DDI_SUCCESS) { 3802 NXGE_DEBUG_MSG((NULL, MOD_CTL, 3803 "Module removal failed 0x%08x", 3804 status)); 3805 goto _fini_exit; 3806 } 3807 3808 mac_fini_ops(&nxge_dev_ops); 3809 3810 ddi_soft_state_fini(&nxge_list); 3811 3812 MUTEX_DESTROY(&nxge_common_lock); 3813 _fini_exit: 3814 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3815 3816 return (status); 3817 } 3818 3819 int 3820 _info(struct modinfo *modinfop) 3821 { 3822 int status; 3823 3824 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3825 status = mod_info(&modlinkage, modinfop); 3826 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3827 3828 return (status); 3829 } 3830 3831 /*ARGSUSED*/ 3832 static nxge_status_t 3833 nxge_add_intrs(p_nxge_t nxgep) 3834 { 3835 3836 int intr_types; 3837 int type = 0; 3838 int ddi_status = DDI_SUCCESS; 3839 nxge_status_t status = NXGE_OK; 3840 3841 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 3842 3843 nxgep->nxge_intr_type.intr_registered = B_FALSE; 3844 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 3845 nxgep->nxge_intr_type.msi_intx_cnt = 0; 3846 nxgep->nxge_intr_type.intr_added = 0; 3847 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 3848 nxgep->nxge_intr_type.intr_type = 0; 3849 3850 if (nxgep->niu_type == N2_NIU) { 3851 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3852 } else if (nxge_msi_enable) { 3853 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 3854 } 3855 3856 /* Get the supported interrupt types */ 3857 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 3858 != DDI_SUCCESS) { 3859 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 3860 "ddi_intr_get_supported_types failed: status 0x%08x", 3861 ddi_status)); 3862 return (NXGE_ERROR | NXGE_DDI_FAILED); 3863 } 3864 nxgep->nxge_intr_type.intr_types = intr_types; 3865 3866 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3867 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3868 3869 /* 3870 * Solaris MSIX is not supported yet. use MSI for now. 
3871 * nxge_msi_enable (1): 3872 * 1 - MSI 2 - MSI-X others - FIXED 3873 */ 3874 switch (nxge_msi_enable) { 3875 default: 3876 type = DDI_INTR_TYPE_FIXED; 3877 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3878 "use fixed (intx emulation) type %08x", 3879 type)); 3880 break; 3881 3882 case 2: 3883 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3884 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3885 if (intr_types & DDI_INTR_TYPE_MSIX) { 3886 type = DDI_INTR_TYPE_MSIX; 3887 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3888 "ddi_intr_get_supported_types: MSIX 0x%08x", 3889 type)); 3890 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3891 type = DDI_INTR_TYPE_MSI; 3892 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3893 "ddi_intr_get_supported_types: MSI 0x%08x", 3894 type)); 3895 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3896 type = DDI_INTR_TYPE_FIXED; 3897 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3898 "ddi_intr_get_supported_types: MSXED0x%08x", 3899 type)); 3900 } 3901 break; 3902 3903 case 1: 3904 if (intr_types & DDI_INTR_TYPE_MSI) { 3905 type = DDI_INTR_TYPE_MSI; 3906 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 3907 "ddi_intr_get_supported_types: MSI 0x%08x", 3908 type)); 3909 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3910 type = DDI_INTR_TYPE_MSIX; 3911 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3912 "ddi_intr_get_supported_types: MSIX 0x%08x", 3913 type)); 3914 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3915 type = DDI_INTR_TYPE_FIXED; 3916 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3917 "ddi_intr_get_supported_types: MSXED0x%08x", 3918 type)); 3919 } 3920 } 3921 3922 nxgep->nxge_intr_type.intr_type = type; 3923 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3924 type == DDI_INTR_TYPE_FIXED) && 3925 nxgep->nxge_intr_type.niu_msi_enable) { 3926 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 3927 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3928 " nxge_add_intrs: " 3929 " nxge_add_intrs_adv failed: status 0x%08x", 3930 status)); 3931 return (status); 3932 } else { 3933 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 3934 "interrupts registered : type %d", type)); 3935 nxgep->nxge_intr_type.intr_registered = B_TRUE; 3936 3937 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 3938 "\nAdded advanced nxge add_intr_adv " 3939 "intr type 0x%x\n", type)); 3940 3941 return (status); 3942 } 3943 } 3944 3945 if (!nxgep->nxge_intr_type.intr_registered) { 3946 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 3947 "failed to register interrupts")); 3948 return (NXGE_ERROR | NXGE_DDI_FAILED); 3949 } 3950 3951 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 3952 return (status); 3953 } 3954 3955 /*ARGSUSED*/ 3956 static nxge_status_t 3957 nxge_add_soft_intrs(p_nxge_t nxgep) 3958 { 3959 3960 int ddi_status = DDI_SUCCESS; 3961 nxge_status_t status = NXGE_OK; 3962 3963 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 3964 3965 nxgep->resched_id = NULL; 3966 nxgep->resched_running = B_FALSE; 3967 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 3968 &nxgep->resched_id, 3969 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 3970 if (ddi_status != DDI_SUCCESS) { 3971 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 3972 "ddi_add_softintrs failed: status 0x%08x", 3973 ddi_status)); 3974 return (NXGE_ERROR | NXGE_DDI_FAILED); 3975 } 3976 3977 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 3978 3979 return (status); 3980 } 3981 
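/*
 * nxge_add_intrs_adv() -- register interrupts through the advanced DDI
 * interrupt framework: dispatch to the MSI/MSI-X registration path or
 * to the fixed (INTx) path based on the interrupt type selected in
 * nxge_add_intrs().
 */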
static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}


/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI vector counts must be a power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
	    DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0],
	    &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	case NEPTUNE:
	case NEPTUNE_2:
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs: advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
	if (nxgep->resched_id) {
		ddi_remove_softintr(nxgep->resched_id);
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_remove_soft_intrs: removed"));
		nxgep->resched_id = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t *macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = nxgep->mac.maxframesize -
	    sizeof (struct ether_header) - ETHERFCSL - 4;

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n",
	    err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

/*
 * Attach this instance to the per-Neptune (per parent dip) hardware
 * list, creating and initializing the list entry on first use.
 */
static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev: func # %d",
	    nxgep->function_num));
	/*
	 * Loop through the existing per-Neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device: func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device: func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			nxgep->nxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device: "
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}