1 /* 2 * Copyright (C) 2007-2014 VMware, Inc. All rights reserved. 3 * 4 * The contents of this file are subject to the terms of the Common 5 * Development and Distribution License (the "License") version 1.0 6 * and no later version. You may not use this file except in 7 * compliance with the License. 8 * 9 * You can obtain a copy of the License at 10 * http://www.opensource.org/licenses/cddl1.php 11 * 12 * See the License for the specific language governing permissions 13 * and limitations under the License. 14 */ 15 16 /* 17 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 18 */ 19 20 #include <vmxnet3.h> 21 22 /* 23 * This driver is based on VMware's version 3227872, and contains additional 24 * enhancements (see README.txt). 25 */ 26 #define BUILD_NUMBER_NUMERIC 3227872 27 28 /* 29 * TODO: 30 * - Tx data ring 31 * - MAC_CAPAB_POLL support 32 * - Dynamic RX pool 33 */ 34 35 static int vmxnet3_getstat(void *, uint_t, uint64_t *); 36 static int vmxnet3_start(void *); 37 static void vmxnet3_stop(void *); 38 static int vmxnet3_setpromisc(void *, boolean_t); 39 static void vmxnet3_ioctl(void *arg, queue_t *wq, mblk_t *mp); 40 static int vmxnet3_multicst(void *, boolean_t, const uint8_t *); 41 static int vmxnet3_unicst(void *, const uint8_t *); 42 static boolean_t vmxnet3_getcapab(void *, mac_capab_t, void *); 43 static int vmxnet3_get_prop(void *, const char *, mac_prop_id_t, uint_t, 44 void *); 45 static int vmxnet3_set_prop(void *, const char *, mac_prop_id_t, uint_t, 46 const void *); 47 static void vmxnet3_prop_info(void *, const char *, mac_prop_id_t, 48 mac_prop_info_handle_t); 49 50 int vmxnet3s_debug = 0; 51 52 /* MAC callbacks */ 53 static mac_callbacks_t vmxnet3_mac_callbacks = { 54 .mc_callbacks = MC_GETCAPAB | MC_IOCTL | MC_SETPROP | MC_PROPINFO, 55 .mc_getstat = vmxnet3_getstat, 56 .mc_start = vmxnet3_start, 57 .mc_stop = vmxnet3_stop, 58 .mc_setpromisc = vmxnet3_setpromisc, 59 .mc_multicst = vmxnet3_multicst, 60 .mc_unicst = vmxnet3_unicst, 
	.mc_tx = vmxnet3_tx,
	.mc_ioctl = vmxnet3_ioctl,
	.mc_getcapab = vmxnet3_getcapab,
	.mc_getprop = vmxnet3_get_prop,
	.mc_setprop = vmxnet3_set_prop,
	.mc_propinfo = vmxnet3_prop_info
};

/* Tx DMA engine description */
static ddi_dma_attr_t vmxnet3_dma_attrs_tx = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0000000000000000ull,
	.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max = 0xFFFFFFFFFFFFFFFFull,
	.dma_attr_align = 0x0000000000000001ull,
	.dma_attr_burstsizes = 0x0000000000000001ull,
	.dma_attr_minxfer = 0x00000001,
	.dma_attr_maxxfer = 0x000000000000FFFFull,
	.dma_attr_seg = 0xFFFFFFFFFFFFFFFFull,
	.dma_attr_sgllen = -1,
	.dma_attr_granular = 0x00000001,
	.dma_attr_flags = 0
};

/* --- */

/*
 * Fetch the statistics of a vmxnet3 device.
 *
 * Most counters live in the shared queue descriptors and are only
 * refreshed by the device when the GET_STATS command is issued, hence
 * the two-phase switch below: first poke the device (if needed for the
 * requested stat), then read the now-current value.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_getstat(void *data, uint_t stat, uint64_t *val)
{
	vmxnet3_softc_t *dp = data;
	UPT1_TxStats *txStats;
	UPT1_RxStats *rxStats;

	VMXNET3_DEBUG(dp, 3, "getstat(%u)\n", stat);

	/* The shared stats area only exists while the device is started */
	if (!dp->devEnabled) {
		return (DDI_FAILURE);
	}

	txStats = &VMXNET3_TQDESC(dp)->stats;
	rxStats = &VMXNET3_RQDESC(dp)->stats;

	/*
	 * First touch the related register
	 */
	switch (stat) {
	case MAC_STAT_MULTIRCV:
	case MAC_STAT_BRDCSTRCV:
	case MAC_STAT_MULTIXMT:
	case MAC_STAT_BRDCSTXMT:
	case MAC_STAT_NORCVBUF:
	case MAC_STAT_IERRORS:
	case MAC_STAT_NOXMTBUF:
	case MAC_STAT_OERRORS:
	case MAC_STAT_RBYTES:
	case MAC_STAT_IPACKETS:
	case MAC_STAT_OBYTES:
	case MAC_STAT_OPACKETS:
		/* Ask the device to dump fresh stats into the queue descs */
		VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
		break;
	case MAC_STAT_IFSPEED:
	case MAC_STAT_COLLISIONS:
	case ETHER_STAT_LINK_DUPLEX:
		/* nothing: served from cached soft state or constants */
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Then fetch the corresponding stat
	 */
	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = dp->linkSpeed;
		break;
	case MAC_STAT_MULTIRCV:
		*val = rxStats->mcastPktsRxOK;
		break;
	case MAC_STAT_BRDCSTRCV:
		*val = rxStats->bcastPktsRxOK;
		break;
	case MAC_STAT_MULTIXMT:
		*val = txStats->mcastPktsTxOK;
		break;
	case MAC_STAT_BRDCSTXMT:
		*val = txStats->bcastPktsTxOK;
		break;
	case MAC_STAT_NORCVBUF:
		/* device-side drops plus driver-side rx buffer alloc failures */
		*val = rxStats->pktsRxOutOfBuf + dp->rx_alloc_failed;
		break;
	case MAC_STAT_IERRORS:
		*val = rxStats->pktsRxError;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = txStats->pktsTxDiscard + dp->tx_pullup_failed;
		break;
	case MAC_STAT_OERRORS:
		*val = txStats->pktsTxError + dp->tx_error;
		break;
	case MAC_STAT_COLLISIONS:
		/* virtual link: collisions cannot occur */
		*val = 0;
		break;
	case MAC_STAT_RBYTES:
		*val = rxStats->ucastBytesRxOK + rxStats->mcastBytesRxOK +
		    rxStats->bcastBytesRxOK;
		break;
	case MAC_STAT_IPACKETS:
		*val = rxStats->ucastPktsRxOK + rxStats->mcastPktsRxOK +
		    rxStats->bcastPktsRxOK;
		break;
	case MAC_STAT_OBYTES:
		*val = txStats->ucastBytesTxOK + txStats->mcastBytesTxOK +
		    txStats->bcastBytesTxOK;
		break;
	case MAC_STAT_OPACKETS:
		*val = txStats->ucastPktsTxOK + txStats->mcastPktsTxOK +
		    txStats->bcastPktsTxOK;
		break;
	case ETHER_STAT_LINK_DUPLEX:
		/* virtual link is always full duplex */
		*val = LINK_DUPLEX_FULL;
		break;
	default:
		/* unreachable: filtered by the first switch above */
		ASSERT(B_FALSE);
	}

	return (DDI_SUCCESS);
}

/*
 * Allocate and initialize the shared data structures of a vmxnet3 device.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_prepare_drivershared(vmxnet3_softc_t *dp)
{
	Vmxnet3_DriverShared *ds;
	size_t allocSize = sizeof (Vmxnet3_DriverShared);

	/* DriverShared must be contiguous DMA memory, 1-byte aligned */
	if (vmxnet3_alloc_dma_mem_1(dp, &dp->sharedData, allocSize,
	    B_TRUE) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	ds = VMXNET3_DS(dp);
	(void) memset(ds, 0, allocSize);

	/* One Tx and one Rx queue descriptor, 128-byte aligned per spec */
	allocSize = sizeof (Vmxnet3_TxQueueDesc) + sizeof (Vmxnet3_RxQueueDesc);
	if (vmxnet3_alloc_dma_mem_128(dp, &dp->queueDescs, allocSize,
	    B_TRUE) != DDI_SUCCESS) {
		vmxnet3_free_dma_mem(&dp->sharedData);
		return (DDI_FAILURE);
	}
	(void) memset(dp->queueDescs.buf, 0, allocSize);

	ds->magic = VMXNET3_REV1_MAGIC;

	/* Take care of most of devRead */
	ds->devRead.misc.driverInfo.version = BUILD_NUMBER_NUMERIC;
#ifdef _LP64
	ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_64;
#else
	ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_32;
#endif
	ds->devRead.misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_SOLARIS;
	ds->devRead.misc.driverInfo.gos.gosVer = 10;
	ds->devRead.misc.driverInfo.vmxnet3RevSpt = 1;
	ds->devRead.misc.driverInfo.uptVerSpt = 1;

	ds->devRead.misc.uptFeatures = UPT1_F_RXCSUM;
	ds->devRead.misc.mtu = dp->cur_mtu;

	/* XXX: ds->devRead.misc.maxNumRxSG */
	ds->devRead.misc.numTxQueues = 1;
	ds->devRead.misc.numRxQueues = 1;
	ds->devRead.misc.queueDescPA = dp->queueDescs.bufPA;
	ds->devRead.misc.queueDescLen = allocSize;

	/* TxQueue and RxQueue information is filled in other functions */
	ds->devRead.intrConf.autoMask = (dp->intrMaskMode == VMXNET3_IMM_AUTO);
	ds->devRead.intrConf.numIntrs = 1;
	/* XXX: ds->intr.modLevels */
	ds->devRead.intrConf.eventIntrIdx = 0;

	/* Advertise the DriverShared PA to the device, low half then high */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL,
	    VMXNET3_ADDR_LO(dp->sharedData.bufPA));
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH,
	    VMXNET3_ADDR_HI(dp->sharedData.bufPA));

	return (DDI_SUCCESS);
}

/*
 * Destroy the shared data structures of a vmxnet3 device.
 */
static void
vmxnet3_destroy_drivershared(vmxnet3_softc_t *dp)
{
	/* Clear the shared-area PA first so the device stops using it */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL, 0);
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH, 0);

	vmxnet3_free_dma_mem(&dp->queueDescs);
	vmxnet3_free_dma_mem(&dp->sharedData);
}

/*
 * Allocate and initialize the command ring of a queue.
 *
 * cmdRing->size must be set by the caller before this is invoked.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_alloc_cmdring(vmxnet3_softc_t *dp, vmxnet3_cmdring_t *cmdRing)
{
	/*
	 * NOTE(review): sizeof (Vmxnet3_TxDesc) is used for both tx and rx
	 * command rings -- presumably Tx and Rx descriptors are the same
	 * size; confirm against the descriptor definitions in vmxnet3.h.
	 */
	size_t ringSize = cmdRing->size * sizeof (Vmxnet3_TxDesc);

	/* rings require 512-byte alignment per the device spec */
	if (vmxnet3_alloc_dma_mem_512(dp, &cmdRing->dma, ringSize,
	    B_TRUE) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	(void) memset(cmdRing->dma.buf, 0, ringSize);
	cmdRing->avail = cmdRing->size;
	cmdRing->next2fill = 0;
	cmdRing->gen = VMXNET3_INIT_GEN;

	return (DDI_SUCCESS);
}

/*
 * Allocate and initialize the completion ring of a queue.
 *
 * compRing->size must be set by the caller before this is invoked.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_alloc_compring(vmxnet3_softc_t *dp, vmxnet3_compring_t *compRing)
{
	size_t ringSize = compRing->size * sizeof (Vmxnet3_TxCompDesc);

	if (vmxnet3_alloc_dma_mem_512(dp, &compRing->dma, ringSize,
	    B_TRUE) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	(void) memset(compRing->dma.buf, 0, ringSize);
	compRing->next2comp = 0;
	compRing->gen = VMXNET3_INIT_GEN;

	return (DDI_SUCCESS);
}

/*
 * Initialize the tx queue of a vmxnet3 device.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_prepare_txqueue(vmxnet3_softc_t *dp)
{
	Vmxnet3_TxQueueDesc *tqdesc = VMXNET3_TQDESC(dp);
	vmxnet3_txqueue_t *txq = &dp->txQueue;

	/* ring sizes were validated by the caller, queue must be clean */
	ASSERT(!(txq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!(txq->compRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!txq->cmdRing.dma.buf && !txq->compRing.dma.buf);

	if (vmxnet3_alloc_cmdring(dp, &txq->cmdRing) != DDI_SUCCESS) {
		goto error;
	}
	/* publish the ring PA/size in the shared queue descriptor */
	tqdesc->conf.txRingBasePA = txq->cmdRing.dma.bufPA;
	tqdesc->conf.txRingSize = txq->cmdRing.size;
	/* Tx data ring not implemented yet (see TODO at top of file) */
	tqdesc->conf.dataRingBasePA = 0;
	tqdesc->conf.dataRingSize = 0;

	if (vmxnet3_alloc_compring(dp, &txq->compRing) != DDI_SUCCESS) {
		goto error_cmdring;
	}
	tqdesc->conf.compRingBasePA = txq->compRing.dma.bufPA;
	tqdesc->conf.compRingSize = txq->compRing.size;

	/* per-descriptor driver metadata, parallel to the command ring */
	txq->metaRing = kmem_zalloc(txq->cmdRing.size *
	    sizeof (vmxnet3_metatx_t), KM_SLEEP);
	ASSERT(txq->metaRing);

	if (vmxnet3_txqueue_init(dp, txq) != DDI_SUCCESS) {
		goto error_mpring;
	}

	return (DDI_SUCCESS);

error_mpring:
	kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));
	vmxnet3_free_dma_mem(&txq->compRing.dma);
error_cmdring:
	vmxnet3_free_dma_mem(&txq->cmdRing.dma);
error:
	return (DDI_FAILURE);
}

/*
 * Initialize the rx queue of a vmxnet3 device.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_prepare_rxqueue(vmxnet3_softc_t *dp)
{
	Vmxnet3_RxQueueDesc *rqdesc = VMXNET3_RQDESC(dp);
	vmxnet3_rxqueue_t *rxq = &dp->rxQueue;

	/* ring sizes were validated by the caller, queue must be clean */
	ASSERT(!(rxq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!(rxq->compRing.size & VMXNET3_RING_SIZE_MASK));
	ASSERT(!rxq->cmdRing.dma.buf && !rxq->compRing.dma.buf);

	if (vmxnet3_alloc_cmdring(dp, &rxq->cmdRing) != DDI_SUCCESS) {
		goto error;
	}
	/* only ring 0 is used; the second rx ring is left disabled */
	rqdesc->conf.rxRingBasePA[0] = rxq->cmdRing.dma.bufPA;
	rqdesc->conf.rxRingSize[0] = rxq->cmdRing.size;
	rqdesc->conf.rxRingBasePA[1] = 0;
	rqdesc->conf.rxRingSize[1] = 0;

	if (vmxnet3_alloc_compring(dp, &rxq->compRing) != DDI_SUCCESS) {
		goto error_cmdring;
	}
	rqdesc->conf.compRingBasePA = rxq->compRing.dma.bufPA;
	rqdesc->conf.compRingSize = rxq->compRing.size;

	/* per-descriptor buffer bookkeeping, parallel to the command ring */
	rxq->bufRing = kmem_zalloc(rxq->cmdRing.size *
	    sizeof (vmxnet3_bufdesc_t), KM_SLEEP);
	ASSERT(rxq->bufRing);

	if (vmxnet3_rxqueue_init(dp, rxq) != DDI_SUCCESS) {
		goto error_bufring;
	}

	return (DDI_SUCCESS);

error_bufring:
	kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));
	vmxnet3_free_dma_mem(&rxq->compRing.dma);
error_cmdring:
	vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
error:
	return (DDI_FAILURE);
}

/*
 * Destroy the tx queue of a vmxnet3 device.
 */
static void
vmxnet3_destroy_txqueue(vmxnet3_softc_t *dp)
{
	vmxnet3_txqueue_t *txq = &dp->txQueue;

	ASSERT(txq->metaRing);
	ASSERT(txq->cmdRing.dma.buf && txq->compRing.dma.buf);

	vmxnet3_txqueue_fini(dp, txq);

	kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));

	vmxnet3_free_dma_mem(&txq->cmdRing.dma);
	vmxnet3_free_dma_mem(&txq->compRing.dma);
}

/*
 * Destroy the rx queue of a vmxnet3 device.
 */
static void
vmxnet3_destroy_rxqueue(vmxnet3_softc_t *dp)
{
	vmxnet3_rxqueue_t *rxq = &dp->rxQueue;

	ASSERT(rxq->bufRing);
	ASSERT(rxq->cmdRing.dma.buf && rxq->compRing.dma.buf);

	vmxnet3_rxqueue_fini(dp, rxq);

	kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));

	vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
	vmxnet3_free_dma_mem(&rxq->compRing.dma);
}

/*
 * Apply new RX filters settings to a vmxnet3 device.
 *
 * Pushes dp->rxMode into the shared area and notifies the device.
 */
static void
vmxnet3_refresh_rxfilter(vmxnet3_softc_t *dp)
{
	Vmxnet3_DriverShared *ds = VMXNET3_DS(dp);

	ds->devRead.rxFilterConf.rxMode = dp->rxMode;
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}

/*
 * Fetch the link state of a vmxnet3 device.
 *
 * Updates dp->linkState and dp->linkSpeed (in bits per second).
 */
static void
vmxnet3_refresh_linkstate(vmxnet3_softc_t *dp)
{
	uint32_t ret32;

	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
	if (ret32 & 1) {
		/* bit 0 = link up; upper 16 bits = speed in Mbps */
		dp->linkState = LINK_STATE_UP;
		dp->linkSpeed = (ret32 >> 16) * 1000000ULL;
	} else {
		dp->linkState = LINK_STATE_DOWN;
		dp->linkSpeed = 0;
	}
}

/*
 * Start a vmxnet3 device: allocate and initialize the shared data
 * structures and send a start command to the device.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
491 */ 492 static int 493 vmxnet3_start(void *data) 494 { 495 vmxnet3_softc_t *dp = data; 496 Vmxnet3_TxQueueDesc *tqdesc; 497 Vmxnet3_RxQueueDesc *rqdesc; 498 int txQueueSize, rxQueueSize; 499 uint32_t ret32; 500 501 VMXNET3_DEBUG(dp, 1, "start()\n"); 502 503 /* 504 * Allocate vmxnet3's shared data and advertise its PA 505 */ 506 if (vmxnet3_prepare_drivershared(dp) != DDI_SUCCESS) { 507 VMXNET3_WARN(dp, "vmxnet3_prepare_drivershared() failed\n"); 508 goto error; 509 } 510 tqdesc = VMXNET3_TQDESC(dp); 511 rqdesc = VMXNET3_RQDESC(dp); 512 513 /* 514 * Create and initialize the tx queue 515 */ 516 txQueueSize = vmxnet3_getprop(dp, "TxRingSize", 32, 4096, 517 VMXNET3_DEF_TX_RING_SIZE); 518 if (!(txQueueSize & VMXNET3_RING_SIZE_MASK)) { 519 dp->txQueue.cmdRing.size = txQueueSize; 520 dp->txQueue.compRing.size = txQueueSize; 521 dp->txQueue.sharedCtrl = &tqdesc->ctrl; 522 if (vmxnet3_prepare_txqueue(dp) != DDI_SUCCESS) { 523 VMXNET3_WARN(dp, "vmxnet3_prepare_txqueue() failed\n"); 524 goto error_shared_data; 525 } 526 } else { 527 VMXNET3_WARN(dp, "invalid tx ring size (%d)\n", txQueueSize); 528 goto error_shared_data; 529 } 530 531 /* 532 * Create and initialize the rx queue 533 */ 534 rxQueueSize = vmxnet3_getprop(dp, "RxRingSize", 32, 4096, 535 VMXNET3_DEF_RX_RING_SIZE); 536 if (!(rxQueueSize & VMXNET3_RING_SIZE_MASK)) { 537 dp->rxQueue.cmdRing.size = rxQueueSize; 538 dp->rxQueue.compRing.size = rxQueueSize; 539 dp->rxQueue.sharedCtrl = &rqdesc->ctrl; 540 if (vmxnet3_prepare_rxqueue(dp) != DDI_SUCCESS) { 541 VMXNET3_WARN(dp, "vmxnet3_prepare_rxqueue() failed\n"); 542 goto error_tx_queue; 543 } 544 } else { 545 VMXNET3_WARN(dp, "invalid rx ring size (%d)\n", rxQueueSize); 546 goto error_tx_queue; 547 } 548 549 /* 550 * Allocate the Tx DMA handle 551 */ 552 if (ddi_dma_alloc_handle(dp->dip, &vmxnet3_dma_attrs_tx, DDI_DMA_SLEEP, 553 NULL, &dp->txDmaHandle) != DDI_SUCCESS) { 554 VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed\n"); 555 goto error_rx_queue; 556 } 557 558 
/* 559 * Activate the device 560 */ 561 VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV); 562 ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD); 563 if (ret32) { 564 VMXNET3_WARN(dp, "ACTIVATE_DEV failed: 0x%x\n", ret32); 565 goto error_txhandle; 566 } 567 dp->devEnabled = B_TRUE; 568 569 VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_RXPROD, 570 dp->txQueue.cmdRing.size - 1); 571 572 /* 573 * Update the RX filters, must be done after ACTIVATE_DEV 574 */ 575 dp->rxMode = VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST; 576 vmxnet3_refresh_rxfilter(dp); 577 578 /* 579 * Get the link state now because no events will be generated 580 */ 581 vmxnet3_refresh_linkstate(dp); 582 mac_link_update(dp->mac, dp->linkState); 583 584 /* 585 * Finally, unmask the interrupt 586 */ 587 VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 0); 588 589 return (DDI_SUCCESS); 590 591 error_txhandle: 592 ddi_dma_free_handle(&dp->txDmaHandle); 593 error_rx_queue: 594 vmxnet3_destroy_rxqueue(dp); 595 error_tx_queue: 596 vmxnet3_destroy_txqueue(dp); 597 error_shared_data: 598 vmxnet3_destroy_drivershared(dp); 599 error: 600 return (DDI_FAILURE); 601 } 602 603 /* 604 * Stop a vmxnet3 device: send a stop command to the device and 605 * de-allocate the shared data structures. 606 */ 607 static void 608 vmxnet3_stop(void *data) 609 { 610 vmxnet3_softc_t *dp = data; 611 612 VMXNET3_DEBUG(dp, 1, "stop()\n"); 613 614 /* 615 * Take the 2 locks related to asynchronous events. 616 * These events should always check dp->devEnabled before poking dp. 
 */
	mutex_enter(&dp->intrLock);
	mutex_enter(&dp->rxPoolLock);
	/* mask interrupts before quiescing so no handler races the teardown */
	VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 1);
	dp->devEnabled = B_FALSE;
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	mutex_exit(&dp->rxPoolLock);
	mutex_exit(&dp->intrLock);

	ddi_dma_free_handle(&dp->txDmaHandle);

	vmxnet3_destroy_rxqueue(dp);
	vmxnet3_destroy_txqueue(dp);

	vmxnet3_destroy_drivershared(dp);
}

/*
 * Set or unset promiscuous mode on a vmxnet3 device.
 *
 * Returns:
 *	DDI_SUCCESS.
 */
static int
vmxnet3_setpromisc(void *data, boolean_t promisc)
{
	vmxnet3_softc_t *dp = data;

	VMXNET3_DEBUG(dp, 2, "setpromisc(%s)\n", promisc ? "TRUE" : "FALSE");

	if (promisc) {
		dp->rxMode |= VMXNET3_RXM_PROMISC;
	} else {
		dp->rxMode &= ~VMXNET3_RXM_PROMISC;
	}

	vmxnet3_refresh_rxfilter(dp);

	return (DDI_SUCCESS);
}

/*
 * Add or remove a multicast address from/to a vmxnet3 device.
 *
 * The device consumes the multicast filter (MF) table as a flat DMA
 * buffer of 6-byte MAC addresses; a whole new table is built and then
 * swapped in atomically from the device's point of view.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_multicst(void *data, boolean_t add, const uint8_t *macaddr)
{
	vmxnet3_softc_t *dp = data;
	vmxnet3_dmabuf_t newMfTable;
	int ret = DDI_SUCCESS;
	uint16_t macIdx;
	size_t allocSize;

	VMXNET3_DEBUG(dp, 2, "multicst(%s, "MACADDR_FMT")\n",
	    add ? "add" : "remove", MACADDR_FMT_ARGS(macaddr));

	/*
	 * First lookup the position of the given MAC to check if it is
	 * present in the existing MF table.
	 */
	for (macIdx = 0; macIdx < dp->mfTable.bufLen; macIdx += 6) {
		if (memcmp(&dp->mfTable.buf[macIdx], macaddr, 6) == 0) {
			break;
		}
	}

	/*
	 * Check for 2 situations we can handle gracefully by bailing out:
	 * Adding an already existing filter or removing a non-existing one.
	 */
	if (add && macIdx < dp->mfTable.bufLen) {
		VMXNET3_WARN(dp, MACADDR_FMT " already in MC filter list "
		    "@ %u\n", MACADDR_FMT_ARGS(macaddr), macIdx / 6);
		ASSERT(B_FALSE);
		goto done;
	}
	if (!add && macIdx == dp->mfTable.bufLen) {
		VMXNET3_WARN(dp, MACADDR_FMT " not in MC filter list @ %u\n",
		    MACADDR_FMT_ARGS(macaddr), macIdx / 6);
		ASSERT(B_FALSE);
		goto done;
	}

	/*
	 * Create the new MF table
	 */
	allocSize = dp->mfTable.bufLen + (add ? 6 : -6);
	if (allocSize) {
		/*
		 * NOTE(review): on non-DEBUG kernels an allocation failure
		 * here is only caught by the ASSERT and newMfTable would be
		 * used uninitialized -- consider checking ret and bailing
		 * out; verify against the DMA helper's failure behavior.
		 */
		ret = vmxnet3_alloc_dma_mem_1(dp, &newMfTable, allocSize,
		    B_TRUE);
		ASSERT(ret == DDI_SUCCESS);
		if (add) {
			/* old table plus the new MAC appended at the end */
			(void) memcpy(newMfTable.buf, dp->mfTable.buf,
			    dp->mfTable.bufLen);
			(void) memcpy(newMfTable.buf + dp->mfTable.bufLen,
			    macaddr, 6);
		} else {
			/* old table with the 6-byte entry at macIdx removed */
			(void) memcpy(newMfTable.buf, dp->mfTable.buf,
			    macIdx);
			(void) memcpy(newMfTable.buf + macIdx,
			    dp->mfTable.buf + macIdx + 6,
			    dp->mfTable.bufLen - macIdx - 6);
		}
	} else {
		/* removing the last entry: no table at all */
		newMfTable.buf = NULL;
		newMfTable.bufPA = 0;
		newMfTable.bufLen = 0;
	}

	/*
	 * Now handle 2 corner cases: if we're creating the first filter or
	 * removing the last one, we have to update rxMode accordingly.
	 */
	if (add && newMfTable.bufLen == 6) {
		ASSERT(!(dp->rxMode & VMXNET3_RXM_MCAST));
		dp->rxMode |= VMXNET3_RXM_MCAST;
		vmxnet3_refresh_rxfilter(dp);
	}
	if (!add && dp->mfTable.bufLen == 6) {
		ASSERT(newMfTable.buf == NULL);
		ASSERT(dp->rxMode & VMXNET3_RXM_MCAST);
		dp->rxMode &= ~VMXNET3_RXM_MCAST;
		vmxnet3_refresh_rxfilter(dp);
	}

	/*
	 * Now replace the old MF table with the new one
	 */
	if (dp->mfTable.buf) {
		vmxnet3_free_dma_mem(&dp->mfTable);
	}
	dp->mfTable = newMfTable;
	VMXNET3_DS(dp)->devRead.rxFilterConf.mfTablePA = newMfTable.bufPA;
	VMXNET3_DS(dp)->devRead.rxFilterConf.mfTableLen = newMfTable.bufLen;

done:
	/* Always update the filters */
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_MAC_FILTERS);

	return (ret);
}

/*
 * Set the mac address of a vmxnet3 device.
 *
 * Returns:
 *	DDI_SUCCESS.
 */
static int
vmxnet3_unicst(void *data, const uint8_t *macaddr)
{
	vmxnet3_softc_t *dp = data;
	uint32_t val32;

	VMXNET3_DEBUG(dp, 2, "unicst("MACADDR_FMT")\n",
	    MACADDR_FMT_ARGS(macaddr));

	/*
	 * NOTE(review): the casts below read 4-/2-byte words from macaddr
	 * regardless of its alignment -- presumably fine on x86; confirm if
	 * this driver is ever built for an alignment-strict target.
	 */
	val32 = *((uint32_t *)(macaddr + 0));
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_MACL, val32);
	val32 = *((uint16_t *)(macaddr + 4));
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_MACH, val32);

	(void) memcpy(dp->macaddr, macaddr, 6);

	return (DDI_SUCCESS);
}

/*
 * Change the MTU as seen by the driver. This is only supported when
 * the mac is stopped.
 *
 * Returns:
 *	EBUSY if the device is enabled.
 *	EINVAL for invalid MTU values.
 *	0 on success.
 */
static int
vmxnet3_change_mtu(vmxnet3_softc_t *dp, uint32_t new_mtu)
{
	int ret;

	if (dp->devEnabled)
		return (EBUSY);

	if (new_mtu == dp->cur_mtu) {
		VMXNET3_WARN(dp, "New MTU is same as old mtu : %d.\n", new_mtu);
		return (0);
	}

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) {
		VMXNET3_WARN(dp, "New MTU not in valid range [%d, %d].\n",
		    VMXNET3_MIN_MTU, VMXNET3_MAX_MTU);
		return (EINVAL);
	} else if (new_mtu > ETHERMTU && !dp->allow_jumbo) {
		/* jumbo frames must be explicitly enabled via accept-jumbo */
		VMXNET3_WARN(dp, "MTU cannot be greater than %d because "
		    "accept-jumbo is not enabled.\n", ETHERMTU);
		return (EINVAL);
	}

	dp->cur_mtu = new_mtu;

	/* propagate the new max SDU to the MAC layer */
	if ((ret = mac_maxsdu_update(dp->mac, new_mtu)) != 0)
		VMXNET3_WARN(dp, "Unable to update mac with %d mtu: %d",
		    new_mtu, ret);

	return (ret);
}

/*
 * MAC property getter. Only MAC_PROP_MTU is supported.
 *
 * Returns:
 *	0 or ENOTSUP.
 */
/* ARGSUSED */
static int
vmxnet3_get_prop(void *data, const char *prop_name, mac_prop_id_t prop_id,
    uint_t prop_val_size, void *prop_val)
{
	vmxnet3_softc_t *dp = data;
	int ret = 0;

	switch (prop_id) {
	case MAC_PROP_MTU:
		ASSERT(prop_val_size >= sizeof (uint32_t));
		bcopy(&dp->cur_mtu, prop_val, sizeof (uint32_t));
		break;
	default:
		VMXNET3_WARN(dp, "vmxnet3_get_prop property %d not supported",
		    prop_id);
		ret = ENOTSUP;
	}
	return (ret);
}

/*
 * MAC property setter. Only MAC_PROP_MTU is supported; delegates the
 * validation and update to vmxnet3_change_mtu().
 *
 * Returns:
 *	0, EBUSY, EINVAL or ENOTSUP.
 */
/* ARGSUSED */
static int
vmxnet3_set_prop(void *data, const char *prop_name, mac_prop_id_t prop_id,
    uint_t prop_val_size, const void *prop_val)
{
	vmxnet3_softc_t *dp = data;
	int ret;

	switch (prop_id) {
	case MAC_PROP_MTU: {
		uint32_t new_mtu;
		ASSERT(prop_val_size >= sizeof (uint32_t));
		bcopy(prop_val, &new_mtu, sizeof (new_mtu));
		ret = vmxnet3_change_mtu(dp, new_mtu);
		break;
	}
	default:
		VMXNET3_WARN(dp, "vmxnet3_set_prop property %d not supported",
		    prop_id);
		ret = ENOTSUP;
	}

	return (ret);
}

/*
 ARGSUSED */
static void
vmxnet3_prop_info(void *data, const char *prop_name, mac_prop_id_t prop_id,
    mac_prop_info_handle_t prop_handle)
{
	vmxnet3_softc_t *dp = data;

	switch (prop_id) {
	case MAC_PROP_MTU:
		/* advertise the valid MTU range to the MAC framework */
		mac_prop_info_set_range_uint32(prop_handle, VMXNET3_MIN_MTU,
		    VMXNET3_MAX_MTU);
		break;
	default:
		VMXNET3_WARN(dp, "vmxnet3_prop_info: property %d not supported",
		    prop_id);
	}
}

/*
 * DDI/DDK callback to handle IOCTL in driver. Currently it only handles
 * ND_SET ioctl. Rest all are ignored. The ND_SET is used to set/reset
 * accept-jumbo ndd parameted for the interface.
 *
 * Side effects:
 *	MTU can be changed and device can be reset. An ACK or NACK is conveyed
 *	to the calling function from the mblk which was used to call this
 *	function.
 */
static void
vmxnet3_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	vmxnet3_softc_t *dp = arg;
	int ret = EINVAL;
	IOCP iocp;
	mblk_t *mp1;
	char *valp, *param;
	int data;

	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;

	switch (iocp->ioc_cmd) {
	case ND_SET:
		/*
		 * The mblk in continuation would contain the ndd parameter name
		 * and data value to be set
		 */
		mp1 = mp->b_cont;
		if (!mp1) {
			VMXNET3_WARN(dp, "Error locating parameter name.\n");
			ret = EINVAL;
			break;
		}

		/* Force null termination */
		mp1->b_datap->db_lim[-1] = '\0';

		/*
		 * From /usr/src/uts/common/inet/nd.c : nd_getset()
		 * "logic throughout nd_xxx assumes single data block for ioctl.
		 *  However, existing code sends in some big buffers."
		 */
		if (mp1->b_cont) {
			freemsg(mp1->b_cont);
			mp1->b_cont = NULL;
		}

		valp = (char *)mp1->b_rptr;	/* Points to param name */
		ASSERT(valp);
		param = valp;
		VMXNET3_DEBUG(dp, 3, "ND Set ioctl for %s\n", param);

		/*
		 * Go past the end of this null terminated string to get the
		 * data value.
		 */
		while (*valp && valp <= (char *)mp1->b_wptr)
			valp++;

		if (valp > (char *)mp1->b_wptr) {
			/*
			 * We are already beyond the readable area of mblk and
			 * still haven't found the end of param string.
			 */
			VMXNET3_WARN(dp,
			    "No data value found to be set to param\n");
			data = -1;
		} else {
			/* Now this points to data string */
			valp++;
			/* Get numeric value of first letter */
			data = (int)*valp - (int)'0';
		}

		if (strcmp("accept-jumbo", param) == 0) {
			if (data == 1) {
				VMXNET3_DEBUG(dp, 2,
				    "Accepting jumbo frames\n");
				dp->allow_jumbo = B_TRUE;
				ret = vmxnet3_change_mtu(dp, VMXNET3_MAX_MTU);
			} else if (data == 0) {
				VMXNET3_DEBUG(dp, 2,
				    "Rejecting jumbo frames\n");
				dp->allow_jumbo = B_FALSE;
				ret = vmxnet3_change_mtu(dp, ETHERMTU);
			} else {
				VMXNET3_WARN(dp, "Invalid data value to be set,"
				    " use 0 or 1\n");
				ret = -1;
			}
		}
		freemsg(mp1);
		mp->b_cont = NULL;
		break;

	default:
		if (mp->b_cont) {
			freemsg(mp->b_cont);
			mp->b_cont = NULL;
		}
		ret = -1;
		break;
	}

	/* ACK on success, NACK (EINVAL) on any failure */
	if (ret == 0)
		miocack(wq, mp, 0, 0);
	else
		miocnak(wq, mp, 0, EINVAL);
}

/*
 * Get the capabilities of a vmxnet3 device.
 *
 * Returns:
 *	B_TRUE if the capability is supported, B_FALSE otherwise.
 */
static boolean_t
vmxnet3_getcapab(void *data, mac_capab_t capab, void *arg)
{
	vmxnet3_softc_t *dp = data;
	boolean_t ret;

	switch (capab) {
	case MAC_CAPAB_HCKSUM: {
		/* partial (pseudo-header) checksum offload on transmit */
		uint32_t *txflags = arg;
		*txflags = HCKSUM_INET_PARTIAL;
		ret = B_TRUE;
		break;
	}
	case MAC_CAPAB_LSO: {
		/* TCP/IPv4 LSO, gated by the EnableLSO property (default on) */
		mac_capab_lso_t *lso = arg;
		lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
		lso->lso_basic_tcp_ipv4.lso_max = IP_MAXPACKET;
		ret = vmxnet3_getprop(dp, "EnableLSO", 0, 1, 1);
		break;
	}
	default:
		ret = B_FALSE;
	}

	VMXNET3_DEBUG(dp, 2, "getcapab(0x%x) -> %s\n", capab,
	    ret ? "yes" : "no");

	return (ret);
}

/*
 * Reset a vmxnet3 device. Only to be used when the device is wedged.
 *
 * Runs from the reset taskq (see vmxnet3_intr_events()), hence the
 * (void *) signature.
 *
 * Side effects:
 *	The device is reset.
 */
static void
vmxnet3_reset(void *data)
{
	int ret;

	vmxnet3_softc_t *dp = data;

	VMXNET3_DEBUG(dp, 1, "vmxnet3_reset()\n");

	atomic_inc_32(&dp->reset_count);
	vmxnet3_stop(dp);
	VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	if ((ret = vmxnet3_start(dp)) != DDI_SUCCESS)
		VMXNET3_WARN(dp, "failed to reset the device: %d", ret);
}

/*
 * Process pending events on a vmxnet3 device.
 *
 * Returns:
 *	B_TRUE if the link state changed, B_FALSE otherwise.
 */
static boolean_t
vmxnet3_intr_events(vmxnet3_softc_t *dp)
{
	Vmxnet3_DriverShared *ds = VMXNET3_DS(dp);
	boolean_t linkStateChanged = B_FALSE;
	uint32_t events = ds->ecr;

	if (events) {
		VMXNET3_DEBUG(dp, 2, "events(0x%x)\n", events);
		if (events & (VMXNET3_ECR_RQERR | VMXNET3_ECR_TQERR)) {
			Vmxnet3_TxQueueDesc *tqdesc = VMXNET3_TQDESC(dp);
			Vmxnet3_RxQueueDesc *rqdesc = VMXNET3_RQDESC(dp);

			/* refresh the queue status fields before reading them */
			VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD,
			    VMXNET3_CMD_GET_QUEUE_STATUS);
			if (tqdesc->status.stopped) {
				VMXNET3_WARN(dp, "tq error 0x%x\n",
				    tqdesc->status.error);
			}
			if (rqdesc->status.stopped) {
				VMXNET3_WARN(dp, "rq error 0x%x\n",
				    rqdesc->status.error);
			}

			/* a queue error wedges the device: schedule a reset */
			if (ddi_taskq_dispatch(dp->resetTask, vmxnet3_reset,
			    dp, DDI_NOSLEEP) == DDI_SUCCESS) {
				VMXNET3_WARN(dp, "reset scheduled\n");
			} else {
				VMXNET3_WARN(dp,
				    "ddi_taskq_dispatch() failed\n");
			}
		}
		if (events & VMXNET3_ECR_LINK) {
			vmxnet3_refresh_linkstate(dp);
			linkStateChanged = B_TRUE;
		}
		if (events & VMXNET3_ECR_DIC) {
			VMXNET3_DEBUG(dp, 1, "device implementation change\n");
		}
		/* acknowledge the processed events back to the device */
		VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_ECR, events);
	}

	return (linkStateChanged);
}

/*
 * Interrupt handler of a vmxnet3 device.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
/* ARGSUSED1 */
static uint_t
vmxnet3_intr(caddr_t data1, caddr_t data2)
{
	vmxnet3_softc_t *dp = (void *) data1;

	VMXNET3_DEBUG(dp, 3, "intr()\n");

	mutex_enter(&dp->intrLock);

	if (dp->devEnabled) {
		boolean_t linkStateChanged;
		boolean_t mustUpdateTx;
		mblk_t *mps;

		/*
		 * For fixed (shared) interrupts, reading ICR tells us whether
		 * this interrupt was really ours; MSI/MSI-X are always ours.
		 */
		if (dp->intrType == DDI_INTR_TYPE_FIXED &&
		    !VMXNET3_BAR1_GET32(dp, VMXNET3_REG_ICR)) {
			goto intr_unclaimed;
		}

		/* in ACTIVE mask mode the handler must mask manually */
		if (dp->intrMaskMode == VMXNET3_IMM_ACTIVE) {
			VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 1);
		}

		linkStateChanged = vmxnet3_intr_events(dp);
		mustUpdateTx = vmxnet3_tx_complete(dp, &dp->txQueue);
		mps = vmxnet3_rx_intr(dp, &dp->rxQueue);

		mutex_exit(&dp->intrLock);
		/* re-enable the interrupt before handing work to the stack */
		VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 0);

		/* notify the MAC layer outside of intrLock */
		if (linkStateChanged) {
			mac_link_update(dp->mac, dp->linkState);
		}
		if (mustUpdateTx) {
			mac_tx_update(dp->mac);
		}
		if (mps) {
			mac_rx(dp->mac, NULL, mps);
		}

		return (DDI_INTR_CLAIMED);
	}

intr_unclaimed:
	mutex_exit(&dp->intrLock);
	return (DDI_INTR_UNCLAIMED);
}

/*
 * kstat update callback: copy the soft-state counters into the named
 * kstat records. Read-only.
 */
static int
vmxnet3_kstat_update(kstat_t *ksp, int rw)
{
	vmxnet3_softc_t *dp = ksp->ks_private;
	vmxnet3_kstats_t *statp = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	statp->reset_count.value.ul = dp->reset_count;
	statp->tx_pullup_needed.value.ul = dp->tx_pullup_needed;
	statp->tx_ring_full.value.ul = dp->tx_ring_full;
	statp->rx_alloc_buf.value.ul = dp->rx_alloc_buf;

	return (0);
}

/*
 * Create and install the per-instance "statistics" kstat.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 */
static int
vmxnet3_kstat_init(vmxnet3_softc_t *dp)
{
	vmxnet3_kstats_t *statp;

	dp->devKstats = kstat_create(VMXNET3_MODNAME, dp->instance,
	    "statistics", "dev", KSTAT_TYPE_NAMED,
	    sizeof (vmxnet3_kstats_t) / sizeof (kstat_named_t), 0);
	if (dp->devKstats == NULL)
		return (DDI_FAILURE);

1196 dp->devKstats->ks_update = vmxnet3_kstat_update; 1197 dp->devKstats->ks_private = dp; 1198 1199 statp = dp->devKstats->ks_data; 1200 1201 kstat_named_init(&statp->reset_count, "reset_count", KSTAT_DATA_ULONG); 1202 kstat_named_init(&statp->tx_pullup_needed, "tx_pullup_needed", 1203 KSTAT_DATA_ULONG); 1204 kstat_named_init(&statp->tx_ring_full, "tx_ring_full", 1205 KSTAT_DATA_ULONG); 1206 kstat_named_init(&statp->rx_alloc_buf, "rx_alloc_buf", 1207 KSTAT_DATA_ULONG); 1208 1209 kstat_install(dp->devKstats); 1210 1211 return (DDI_SUCCESS); 1212 } 1213 1214 /* 1215 * Probe and attach a vmxnet3 instance to the stack. 1216 * 1217 * Returns: 1218 * DDI_SUCCESS or DDI_FAILURE. 1219 */ 1220 static int 1221 vmxnet3_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 1222 { 1223 vmxnet3_softc_t *dp; 1224 mac_register_t *macr; 1225 uint16_t vendorId, devId, ret16; 1226 uint32_t ret32; 1227 int ret, err; 1228 uint_t uret; 1229 1230 if (cmd != DDI_ATTACH) { 1231 goto error; 1232 } 1233 1234 /* 1235 * Allocate the soft state 1236 */ 1237 dp = kmem_zalloc(sizeof (vmxnet3_softc_t), KM_SLEEP); 1238 ASSERT(dp); 1239 1240 dp->dip = dip; 1241 dp->instance = ddi_get_instance(dip); 1242 dp->cur_mtu = ETHERMTU; 1243 dp->allow_jumbo = B_TRUE; 1244 1245 VMXNET3_DEBUG(dp, 1, "attach()\n"); 1246 1247 ddi_set_driver_private(dip, dp); 1248 1249 /* 1250 * Get access to the PCI bus configuration space 1251 */ 1252 if (pci_config_setup(dip, &dp->pciHandle) != DDI_SUCCESS) { 1253 VMXNET3_WARN(dp, "pci_config_setup() failed\n"); 1254 goto error_soft_state; 1255 } 1256 1257 /* 1258 * Make sure the chip is a vmxnet3 device 1259 */ 1260 vendorId = pci_config_get16(dp->pciHandle, PCI_CONF_VENID); 1261 devId = pci_config_get16(dp->pciHandle, PCI_CONF_DEVID); 1262 if (vendorId != PCI_VENDOR_ID_VMWARE || 1263 devId != PCI_DEVICE_ID_VMWARE_VMXNET3) { 1264 VMXNET3_WARN(dp, "wrong PCI venid/devid (0x%x, 0x%x)\n", 1265 vendorId, devId); 1266 goto error_pci_config; 1267 } 1268 1269 /* 1270 * Make sure we can 
access the registers through the I/O space 1271 */ 1272 ret16 = pci_config_get16(dp->pciHandle, PCI_CONF_COMM); 1273 ret16 |= PCI_COMM_IO | PCI_COMM_ME; 1274 pci_config_put16(dp->pciHandle, PCI_CONF_COMM, ret16); 1275 1276 /* 1277 * Map the I/O space in memory 1278 */ 1279 if (ddi_regs_map_setup(dip, 1, &dp->bar0, 0, 0, &vmxnet3_dev_attr, 1280 &dp->bar0Handle) != DDI_SUCCESS) { 1281 VMXNET3_WARN(dp, "ddi_regs_map_setup() for BAR0 failed\n"); 1282 goto error_pci_config; 1283 } 1284 1285 if (ddi_regs_map_setup(dip, 2, &dp->bar1, 0, 0, &vmxnet3_dev_attr, 1286 &dp->bar1Handle) != DDI_SUCCESS) { 1287 VMXNET3_WARN(dp, "ddi_regs_map_setup() for BAR1 failed\n"); 1288 goto error_regs_map_0; 1289 } 1290 1291 /* 1292 * Check the version number of the virtual device 1293 */ 1294 if (VMXNET3_BAR1_GET32(dp, VMXNET3_REG_VRRS) & 1) { 1295 VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_VRRS, 1); 1296 } else { 1297 VMXNET3_WARN(dp, "incompatible h/w version\n"); 1298 goto error_regs_map_1; 1299 } 1300 1301 if (VMXNET3_BAR1_GET32(dp, VMXNET3_REG_UVRS) & 1) { 1302 VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_UVRS, 1); 1303 } else { 1304 VMXNET3_WARN(dp, "incompatible upt version\n"); 1305 goto error_regs_map_1; 1306 } 1307 1308 if (vmxnet3_kstat_init(dp) != DDI_SUCCESS) { 1309 VMXNET3_WARN(dp, "unable to initialize kstats"); 1310 goto error_regs_map_1; 1311 } 1312 1313 /* 1314 * Read the MAC address from the device 1315 */ 1316 ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_MACL); 1317 *((uint32_t *)(dp->macaddr + 0)) = ret32; 1318 ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_MACH); 1319 *((uint16_t *)(dp->macaddr + 4)) = ret32; 1320 1321 /* 1322 * Register with the MAC framework 1323 */ 1324 if (!(macr = mac_alloc(MAC_VERSION))) { 1325 VMXNET3_WARN(dp, "mac_alloc() failed\n"); 1326 goto error_kstat; 1327 } 1328 1329 macr->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 1330 macr->m_driver = dp; 1331 macr->m_dip = dip; 1332 macr->m_instance = 0; 1333 macr->m_src_addr = dp->macaddr; 1334 macr->m_dst_addr = NULL; 1335 
macr->m_callbacks = &vmxnet3_mac_callbacks; 1336 macr->m_min_sdu = 0; 1337 macr->m_max_sdu = ETHERMTU; 1338 macr->m_margin = VLAN_TAGSZ; 1339 macr->m_pdata = NULL; 1340 macr->m_pdata_size = 0; 1341 1342 ret = mac_register(macr, &dp->mac); 1343 mac_free(macr); 1344 if (ret != DDI_SUCCESS) { 1345 VMXNET3_WARN(dp, "mac_register() failed\n"); 1346 goto error_kstat; 1347 } 1348 1349 /* 1350 * Register the interrupt(s) in this order of preference: 1351 * MSI-X, MSI, INTx 1352 */ 1353 VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR); 1354 ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD); 1355 switch (ret32 & 0x3) { 1356 case VMXNET3_IT_AUTO: 1357 case VMXNET3_IT_MSIX: 1358 dp->intrType = DDI_INTR_TYPE_MSIX; 1359 err = ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1, 1360 &ret, DDI_INTR_ALLOC_STRICT); 1361 if (err == DDI_SUCCESS) 1362 break; 1363 VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_MSIX failed, err:%d\n", 1364 err); 1365 /* FALLTHROUGH */ 1366 case VMXNET3_IT_MSI: 1367 dp->intrType = DDI_INTR_TYPE_MSI; 1368 if (ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1, 1369 &ret, DDI_INTR_ALLOC_STRICT) == DDI_SUCCESS) 1370 break; 1371 VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_MSI failed\n"); 1372 /* FALLTHROUGH */ 1373 case VMXNET3_IT_INTX: 1374 dp->intrType = DDI_INTR_TYPE_FIXED; 1375 if (ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1, 1376 &ret, DDI_INTR_ALLOC_STRICT) == DDI_SUCCESS) { 1377 break; 1378 } 1379 VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_INTX failed\n"); 1380 /* FALLTHROUGH */ 1381 default: 1382 VMXNET3_WARN(dp, "ddi_intr_alloc() failed\n"); 1383 goto error_mac; 1384 } 1385 dp->intrMaskMode = (ret32 >> 2) & 0x3; 1386 if (dp->intrMaskMode == VMXNET3_IMM_LAZY) { 1387 VMXNET3_WARN(dp, "Lazy masking is not supported\n"); 1388 goto error_intr; 1389 } 1390 1391 if (ddi_intr_get_pri(dp->intrHandle, &uret) != DDI_SUCCESS) { 1392 VMXNET3_WARN(dp, "ddi_intr_get_pri() failed\n"); 1393 goto error_intr; 1394 } 1395 1396 VMXNET3_DEBUG(dp, 2, 
"intrType=0x%x, intrMaskMode=0x%x, intrPrio=%u\n", 1397 dp->intrType, dp->intrMaskMode, uret); 1398 1399 /* 1400 * Create a task queue to reset the device if it wedges. 1401 */ 1402 dp->resetTask = ddi_taskq_create(dip, "vmxnet3_reset_task", 1, 1403 TASKQ_DEFAULTPRI, 0); 1404 if (!dp->resetTask) { 1405 VMXNET3_WARN(dp, "ddi_taskq_create() failed()\n"); 1406 goto error_intr; 1407 } 1408 1409 /* 1410 * Initialize our mutexes now that we know the interrupt priority 1411 * This _must_ be done before ddi_intr_enable() 1412 */ 1413 mutex_init(&dp->intrLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret)); 1414 mutex_init(&dp->txLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret)); 1415 mutex_init(&dp->rxPoolLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret)); 1416 1417 if (ddi_intr_add_handler(dp->intrHandle, vmxnet3_intr, 1418 dp, NULL) != DDI_SUCCESS) { 1419 VMXNET3_WARN(dp, "ddi_intr_add_handler() failed\n"); 1420 goto error_mutexes; 1421 } 1422 1423 err = ddi_intr_get_cap(dp->intrHandle, &dp->intrCap); 1424 if (err != DDI_SUCCESS) { 1425 VMXNET3_WARN(dp, "ddi_intr_get_cap() failed %d", err); 1426 goto error_intr_handler; 1427 } 1428 1429 if (dp->intrCap & DDI_INTR_FLAG_BLOCK) { 1430 err = ddi_intr_block_enable(&dp->intrHandle, 1); 1431 if (err != DDI_SUCCESS) { 1432 VMXNET3_WARN(dp, "ddi_intr_block_enable() failed, " 1433 "err:%d\n", err); 1434 goto error_intr_handler; 1435 } 1436 } else { 1437 err = ddi_intr_enable(dp->intrHandle); 1438 if ((err != DDI_SUCCESS)) { 1439 VMXNET3_WARN(dp, "ddi_intr_enable() failed, err:%d\n", 1440 err); 1441 goto error_intr_handler; 1442 } 1443 } 1444 1445 return (DDI_SUCCESS); 1446 1447 error_intr_handler: 1448 (void) ddi_intr_remove_handler(dp->intrHandle); 1449 error_mutexes: 1450 mutex_destroy(&dp->rxPoolLock); 1451 mutex_destroy(&dp->txLock); 1452 mutex_destroy(&dp->intrLock); 1453 ddi_taskq_destroy(dp->resetTask); 1454 error_intr: 1455 (void) ddi_intr_free(dp->intrHandle); 1456 error_mac: 1457 (void) mac_unregister(dp->mac); 1458 error_kstat: 1459 
kstat_delete(dp->devKstats); 1460 error_regs_map_1: 1461 ddi_regs_map_free(&dp->bar1Handle); 1462 error_regs_map_0: 1463 ddi_regs_map_free(&dp->bar0Handle); 1464 error_pci_config: 1465 pci_config_teardown(&dp->pciHandle); 1466 error_soft_state: 1467 kmem_free(dp, sizeof (vmxnet3_softc_t)); 1468 error: 1469 return (DDI_FAILURE); 1470 } 1471 1472 /* 1473 * Detach a vmxnet3 instance from the stack. 1474 * 1475 * Returns: 1476 * DDI_SUCCESS or DDI_FAILURE. 1477 */ 1478 static int 1479 vmxnet3_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1480 { 1481 vmxnet3_softc_t *dp = ddi_get_driver_private(dip); 1482 unsigned int retries = 0; 1483 int ret; 1484 1485 VMXNET3_DEBUG(dp, 1, "detach()\n"); 1486 1487 if (cmd != DDI_DETACH) { 1488 return (DDI_FAILURE); 1489 } 1490 1491 while (dp->rxNumBufs) { 1492 if (retries++ < 10) { 1493 VMXNET3_WARN(dp, "rx pending (%u), waiting 1 second\n", 1494 dp->rxNumBufs); 1495 delay(drv_usectohz(1000000)); 1496 } else { 1497 VMXNET3_WARN(dp, "giving up\n"); 1498 return (DDI_FAILURE); 1499 } 1500 } 1501 1502 if (dp->intrCap & DDI_INTR_FLAG_BLOCK) { 1503 ret = ddi_intr_block_disable(&dp->intrHandle, 1); 1504 } else { 1505 ret = ddi_intr_disable(dp->intrHandle); 1506 } 1507 if (ret != DDI_SUCCESS) { 1508 VMXNET3_WARN(dp, "unable to disable interrupts"); 1509 return (DDI_FAILURE); 1510 } 1511 if (ddi_intr_remove_handler(dp->intrHandle) != DDI_SUCCESS) { 1512 VMXNET3_WARN(dp, "unable to remove interrupt handler"); 1513 return (DDI_FAILURE); 1514 } 1515 (void) ddi_intr_free(dp->intrHandle); 1516 1517 VERIFY(mac_unregister(dp->mac) == 0); 1518 1519 kstat_delete(dp->devKstats); 1520 1521 if (dp->mfTable.buf) { 1522 vmxnet3_free_dma_mem(&dp->mfTable); 1523 } 1524 1525 mutex_destroy(&dp->rxPoolLock); 1526 mutex_destroy(&dp->txLock); 1527 mutex_destroy(&dp->intrLock); 1528 ddi_taskq_destroy(dp->resetTask); 1529 1530 ddi_regs_map_free(&dp->bar1Handle); 1531 ddi_regs_map_free(&dp->bar0Handle); 1532 pci_config_teardown(&dp->pciHandle); 1533 1534 
kmem_free(dp, sizeof (vmxnet3_softc_t)); 1535 1536 return (DDI_SUCCESS); 1537 } 1538 1539 /* 1540 * Structures used by the module loader 1541 */ 1542 1543 #define VMXNET3_IDENT "VMware Ethernet v3 " VMXNET3_DRIVER_VERSION_STRING 1544 1545 DDI_DEFINE_STREAM_OPS( 1546 vmxnet3_dev_ops, 1547 nulldev, 1548 nulldev, 1549 vmxnet3_attach, 1550 vmxnet3_detach, 1551 nodev, 1552 NULL, 1553 D_NEW | D_MP, 1554 NULL, 1555 ddi_quiesce_not_supported); 1556 1557 static struct modldrv vmxnet3_modldrv = { 1558 &mod_driverops, /* drv_modops */ 1559 VMXNET3_IDENT, /* drv_linkinfo */ 1560 &vmxnet3_dev_ops /* drv_dev_ops */ 1561 }; 1562 1563 static struct modlinkage vmxnet3_modlinkage = { 1564 MODREV_1, /* ml_rev */ 1565 { &vmxnet3_modldrv, NULL } /* ml_linkage */ 1566 }; 1567 1568 /* Module load entry point */ 1569 int 1570 _init(void) 1571 { 1572 int ret; 1573 1574 mac_init_ops(&vmxnet3_dev_ops, VMXNET3_MODNAME); 1575 ret = mod_install(&vmxnet3_modlinkage); 1576 if (ret != DDI_SUCCESS) { 1577 mac_fini_ops(&vmxnet3_dev_ops); 1578 } 1579 1580 return (ret); 1581 } 1582 1583 /* Module unload entry point */ 1584 int 1585 _fini(void) 1586 { 1587 int ret; 1588 1589 ret = mod_remove(&vmxnet3_modlinkage); 1590 if (ret == DDI_SUCCESS) { 1591 mac_fini_ops(&vmxnet3_dev_ops); 1592 } 1593 1594 return (ret); 1595 } 1596 1597 /* Module info entry point */ 1598 int 1599 _info(struct modinfo *modinfop) 1600 { 1601 return (mod_info(&vmxnet3_modlinkage, modinfop)); 1602 } 1603 1604 void 1605 vmxnet3_log(int level, vmxnet3_softc_t *dp, char *fmt, ...) 1606 { 1607 dev_err(dp->dip, level, fmt); 1608 } 1609