1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved. 14 * Copyright 2019 Joyent, Inc. 15 * Copyright 2017 Tegile Systems, Inc. All rights reserved. 16 * Copyright 2020 RackTop Systems, Inc. 17 * Copyright 2020 Ryan Zezeski 18 */ 19 20 /* 21 * i40e - Intel 10/40 Gb Ethernet driver 22 * 23 * The i40e driver is the main software device driver for the Intel 40 Gb family 24 * of devices. Note that these devices come in many flavors with both 40 GbE 25 * ports and 10 GbE ports. This device is the successor to the 82599 family of 26 * devices (ixgbe). 27 * 28 * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE 29 * devices defined in the XL710 controller (previously known as Fortville) are a 30 * rather different beast and have a small switch embedded inside of them. In 31 * addition, the way that most of the programming is done has been overhauled. 32 * As opposed to just using PCIe memory mapped registers, it also has an 33 * administrative queue which is used to communicate with firmware running on 34 * the chip. 35 * 36 * Each physical function in the hardware shows up as a device that this driver 37 * will bind to. The hardware splits many resources evenly across all of the 38 * physical functions present on the device, while other resources are instead 39 * shared across the entire card and its up to the device driver to 40 * intelligently partition them. 41 * 42 * ------------ 43 * Organization 44 * ------------ 45 * 46 * This driver is made up of several files which have their own theory 47 * statements spread across them. We'll touch on the high level purpose of each 48 * file here, and then we'll get into more discussion on how the device is 49 * generally modelled with respect to the interfaces in illumos. 50 * 51 * i40e_gld.c: This file contains all of the bindings to MAC and the networking 52 * stack. 53 * 54 * i40e_intr.c: This file contains all of the interrupt service routines and 55 * contains logic to enable and disable interrupts on the hardware. 56 * It also contains the logic to map hardware resources such as the 57 * rings to and from interrupts and controls their ability to fire. 58 * 59 * There is a big theory statement on interrupts present there. 60 * 61 * i40e_main.c: The file that you're currently in. It interfaces with the 62 * traditional OS DDI interfaces and is in charge of configuring 63 * the device. 64 * 65 * i40e_osdep.[ch]: These files contain interfaces and definitions needed to 66 * work with Intel's common code for the device. 67 * 68 * i40e_stats.c: This file contains the general work and logic around our 69 * kstats. A theory statement on their organization and use of the 70 * hardware exists there. 71 * 72 * i40e_sw.h: This header file contains all of the primary structure definitions 73 * and constants that are used across the entire driver. 74 * 75 * i40e_transceiver.c: This file contains all of the logic for sending and 76 * receiving data. It contains all of the ring and DMA 77 * allocation logic, as well as, the actual interfaces to 78 * send and receive data. 79 * 80 * A big theory statement on ring management, descriptors, 81 * and how it ties into the OS is present there. 82 * 83 * -------------- 84 * General Design 85 * -------------- 86 * 87 * Before we go too far into the general way we've laid out data structures and 88 * the like, it's worth taking some time to explain how the hardware is 89 * organized. This organization informs a lot of how we do things at this time 90 * in the driver. 91 * 92 * Each physical device consists of a number of one or more ports, which are 93 * considered physical functions in the PCI sense and thus each get enumerated 94 * by the system, resulting in an instance being created and attached to. While 95 * there are many resources that are unique to each physical function eg. 96 * instance of the device, there are many that are shared across all of them. 97 * Several resources have an amount reserved for each Virtual Station Interface 98 * (VSI) and then a static pool of resources, available for all functions on the 99 * card. 100 * 101 * The most important resource in hardware are its transmit and receive queue 102 * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3 103 * parlance. There are a set number of these on each device; however, they are 104 * statically partitioned among all of the different physical functions. 105 * 106 * 'Fortville' (the code name for this device family) is basically a switch. To 107 * map MAC addresses and other things to queues, we end up having to create 108 * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct 109 * traffic to a queue. A VSI owns a collection of queues and has a series of 110 * forwarding rules that point to it. One way to think of this is to treat it 111 * like MAC does a VNIC. When MAC refers to a group, a collection of rings and 112 * classification resources, that is a VSI in i40e. 113 * 114 * The sets of VSIs is shared across the entire device, though there may be some 115 * amount that are reserved to each PF. Because the GLDv3 does not let us change 116 * the number of groups dynamically, we instead statically divide this amount 117 * evenly between all the functions that exist. In addition, we have the same 118 * problem with the mac address forwarding rules. There are a static number that 119 * exist shared across all the functions. 120 * 121 * To handle both of these resources, what we end up doing is going through and 122 * determining which functions belong to the same device. Nominally one might do 123 * this by having a nexus driver; however, a prime requirement for a nexus 124 * driver is identifying the various children and activating them. While it is 125 * possible to get this information from NVRAM, we would end up duplicating a 126 * lot of the PCI enumeration logic. Really, at the end of the day, the device 127 * doesn't give us the traditional identification properties we want from a 128 * nexus driver. 129 * 130 * Instead, we rely on some properties that are guaranteed to be unique. While 131 * it might be tempting to leverage the PBA or serial number of the device from 132 * NVRAM, there is nothing that says that two devices can't be mis-programmed to 133 * have the same values in NVRAM. Instead, we uniquely identify a group of 134 * functions based on their parent in the /devices tree, their PCI bus and PCI 135 * function identifiers. Using either on their own may not be sufficient. 136 * 137 * For each unique PCI device that we encounter, we'll create a i40e_device_t. 138 * From there, because we don't have a good way to tell the GLDv3 about sharing 139 * resources between everything, we'll end up just dividing the resources 140 * evenly between all of the functions. Longer term, if we don't have to declare 141 * to the GLDv3 that these resources are shared, then we'll maintain a pool and 142 * have each PF allocate from the pool in the device, thus if only two of four 143 * ports are being used, for example, then all of the resources can still be 144 * used. 145 * 146 * ------------------------------------------- 147 * Transmit and Receive Queue Pair Allocations 148 * ------------------------------------------- 149 * 150 * NVRAM ends up assigning each PF its own share of the transmit and receive LAN 151 * queue pairs, we have no way of modifying it, only observing it. From there, 152 * it's up to us to map these queues to VSIs and VFs. Since we don't support any 153 * VFs at this time, we only focus on assignments to VSIs. 154 * 155 * At the moment, we used a static mapping of transmit/receive queue pairs to a 156 * given VSI (eg. rings to a group). Though in the fullness of time, we want to 157 * make this something which is fully dynamic and take advantage of documented, 158 * but not yet available functionality for adding filters based on VXLAN and 159 * other encapsulation technologies. 160 * 161 * ------------------------------------- 162 * Broadcast, Multicast, and Promiscuous 163 * ------------------------------------- 164 * 165 * As part of the GLDv3, we need to make sure that we can handle receiving 166 * broadcast and multicast traffic. As well as enabling promiscuous mode when 167 * requested. GLDv3 requires that all broadcast and multicast traffic be 168 * retrieved by the default group, eg. the first one. This is the same thing as 169 * the default VSI. 170 * 171 * To receieve broadcast traffic, we enable it through the admin queue, rather 172 * than use one of our filters for it. For multicast traffic, we reserve a 173 * certain number of the hash filters and assign them to a given PF. When we 174 * exceed those, we then switch to using promiscuous mode for multicast traffic. 175 * 176 * More specifically, once we exceed the number of filters (indicated because 177 * the i40e_t`i40e_resources.ifr_nmcastfilt == 178 * i40e_t`i40e_resources.ifr_nmcastfilt_used), we then instead need to toggle 179 * promiscuous mode. If promiscuous mode is toggled then we keep track of the 180 * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count. 181 * That will stay enabled until that count reaches zero indicating that we have 182 * only added multicast addresses that we have a corresponding entry for. 183 * 184 * Because MAC itself wants to toggle promiscuous mode, which includes both 185 * unicast and multicast traffic, we go through and keep track of that 186 * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on 187 * member. 188 * 189 * -------------- 190 * VSI Management 191 * -------------- 192 * 193 * The PFs share 384 VSIs. The firmware creates one VSI per PF by default. 194 * During chip start we retrieve the SEID of this VSI and assign it as the 195 * default VSI for our VEB (one VEB per PF). We then add additional VSIs to 196 * the VEB up to the determined number of rx groups: i40e_t`i40e_num_rx_groups. 197 * We currently cap this number to I40E_GROUP_MAX to a) make sure all PFs can 198 * allocate the same number of VSIs, and b) to keep the interrupt multiplexing 199 * under control. In the future, when we improve the interrupt allocation, we 200 * may want to revisit this cap to make better use of the available VSIs. The 201 * VSI allocation and configuration can be found in i40e_chip_start(). 202 * 203 * ---------------- 204 * Structure Layout 205 * ---------------- 206 * 207 * The following images relates the core data structures together. The primary 208 * structure in the system is the i40e_t. It itself contains multiple rings, 209 * i40e_trqpair_t's which contain the various transmit and receive data. The 210 * receive data is stored outside of the i40e_trqpair_t and instead in the 211 * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps 212 * track of per-physical device state. Finally, for every active descriptor, 213 * there is a corresponding control block, which is where the 214 * i40e_rx_control_block_t and the i40e_tx_control_block_t come from. 215 * 216 * +-----------------------+ +-----------------------+ 217 * | Global i40e_t list | | Global Device list | 218 * | | +--| | 219 * | i40e_glist | | | i40e_dlist | 220 * +-----------------------+ | +-----------------------+ 221 * | v 222 * | +------------------------+ +-----------------------+ 223 * | | Device-wide Structure |----->| Device-wide Structure |--> ... 224 * | | i40e_device_t | | i40e_device_t | 225 * | | | +-----------------------+ 226 * | | dev_info_t * ------+--> Parent in devices tree. 227 * | | uint_t ------+--> PCI bus number 228 * | | uint_t ------+--> PCI device number 229 * | | uint_t ------+--> Number of functions 230 * | | i40e_switch_rsrcs_t ---+--> Captured total switch resources 231 * | | list_t ------+-------------+ 232 * | +------------------------+ | 233 * | ^ | 234 * | +--------+ | 235 * | | v 236 * | +---------------------------+ | +-------------------+ 237 * +->| GLDv3 Device, per PF |-----|-->| GLDv3 Device (PF) |--> ... 238 * | i40e_t | | | i40e_t | 239 * | **Primary Structure** | | +-------------------+ 240 * | | | 241 * | i40e_device_t * --+-----+ 242 * | i40e_state_t --+---> Device State 243 * | i40e_hw_t --+---> Intel common code structure 244 * | mac_handle_t --+---> GLDv3 handle to MAC 245 * | ddi_periodic_t --+---> Link activity timer 246 * | i40e_vsi_t * --+---> Array of VSIs 247 * | i40e_func_rsrc_t --+---> Available hardware resources 248 * | i40e_switch_rsrc_t * --+---> Switch resource snapshot 249 * | i40e_sdu --+---> Current MTU 250 * | i40e_frame_max --+---> Current HW frame size 251 * | i40e_uaddr_t * --+---> Array of assigned unicast MACs 252 * | i40e_maddr_t * --+---> Array of assigned multicast MACs 253 * | i40e_mcast_promisccount --+---> Active multicast state 254 * | i40e_promisc_on --+---> Current promiscuous mode state 255 * | uint_t --+---> Number of transmit/receive pairs 256 * | i40e_rx_group_t * --+---> Array of Rx groups 257 * | kstat_t * --+---> PF kstats 258 * | i40e_pf_stats_t --+---> PF kstat backing data 259 * | i40e_trqpair_t * --+---------+ 260 * +---------------------------+ | 261 * | 262 * v 263 * +-------------------------------+ +-----------------------------+ 264 * | Transmit/Receive Queue Pair |-------| Transmit/Receive Queue Pair |->... 265 * | i40e_trqpair_t | | i40e_trqpair_t | 266 * + Ring Data Structure | +-----------------------------+ 267 * | | 268 * | mac_ring_handle_t +--> MAC RX ring handle 269 * | mac_ring_handle_t +--> MAC TX ring handle 270 * | i40e_rxq_stat_t --+--> RX Queue stats 271 * | i40e_txq_stat_t --+--> TX Queue stats 272 * | uint32_t (tx ring size) +--> TX Ring Size 273 * | uint32_t (tx free list size) +--> TX Free List Size 274 * | i40e_dma_buffer_t --------+--> TX Descriptor ring DMA 275 * | i40e_tx_desc_t * --------+--> TX descriptor ring 276 * | volatile unt32_t * +--> TX Write back head 277 * | uint32_t -------+--> TX ring head 278 * | uint32_t -------+--> TX ring tail 279 * | uint32_t -------+--> Num TX desc free 280 * | i40e_tx_control_block_t * --+--> TX control block array ---+ 281 * | i40e_tx_control_block_t ** --+--> TCB work list ----+ 282 * | i40e_tx_control_block_t ** --+--> TCB free list ---+ 283 * | uint32_t -------+--> Free TCB count | 284 * | i40e_rx_data_t * -------+--+ v 285 * +-------------------------------+ | +---------------------------+ 286 * | | Per-TX Frame Metadata | 287 * | | i40e_tx_control_block_t | 288 * +--------------------+ | | 289 * | mblk to transmit <--+--- mblk_t * | 290 * | type of transmit <--+--- i40e_tx_type_t | 291 * | TX DMA handle <--+--- ddi_dma_handle_t | 292 * v TX DMA buffer <--+--- i40e_dma_buffer_t | 293 * +------------------------------+ +---------------------------+ 294 * | Core Receive Data | 295 * | i40e_rx_data_t | 296 * | | 297 * | i40e_dma_buffer_t --+--> RX descriptor DMA Data 298 * | i40e_rx_desc_t --+--> RX descriptor ring 299 * | uint32_t --+--> Next free desc. 300 * | i40e_rx_control_block_t * --+--> RX Control Block Array ---+ 301 * | i40e_rx_control_block_t ** --+--> RCB work list ---+ 302 * | i40e_rx_control_block_t ** --+--> RCB free list ---+ 303 * +------------------------------+ | 304 * ^ | 305 * | +---------------------------+ | 306 * | | Per-RX Frame Metadata |<---------------+ 307 * | | i40e_rx_control_block_t | 308 * | | | 309 * | | mblk_t * ----+--> Received mblk_t data 310 * | | uint32_t ----+--> Reference count 311 * | | i40e_dma_buffer_t ----+--> Receive data DMA info 312 * | | frtn_t ----+--> mblk free function info 313 * +-----+-- i40e_rx_data_t * | 314 * +---------------------------+ 315 * 316 * ------------- 317 * Lock Ordering 318 * ------------- 319 * 320 * In order to ensure that we don't deadlock, the following represents the 321 * lock order being used. When grabbing locks, follow the following order. Lower 322 * numbers are more important. Thus, the i40e_glock which is number 0, must be 323 * taken before any other locks in the driver. On the other hand, the 324 * i40e_t`i40e_stat_lock, has the highest number because it's the least 325 * important lock. Note, that just because one lock is higher than another does 326 * not mean that all intermediary locks are required. 327 * 328 * 0) i40e_glock 329 * 1) i40e_t`i40e_general_lock 330 * 331 * 2) i40e_trqpair_t`itrq_rx_lock 332 * 3) i40e_trqpair_t`itrq_tx_lock 333 * 4) i40e_t`i40e_rx_pending_lock 334 * 5) i40e_trqpair_t`itrq_tcb_lock 335 * 336 * 6) i40e_t`i40e_stat_lock 337 * 338 * Rules and expectations: 339 * 340 * 1) A thread holding locks belong to one PF should not hold locks belonging to 341 * a second. If for some reason this becomes necessary, locks should be grabbed 342 * based on the list order in the i40e_device_t, which implies that the 343 * i40e_glock is held. 344 * 345 * 2) When grabbing locks between multiple transmit and receive queues, the 346 * locks for the lowest number transmit/receive queue should be grabbed first. 347 * 348 * 3) When grabbing both the transmit and receive lock for a given queue, always 349 * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock. 350 * 351 * 4) The following pairs of locks are not expected to be held at the same time: 352 * 353 * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock 354 * 355 * ----------- 356 * Future Work 357 * ----------- 358 * 359 * At the moment the i40e_t driver is rather bare bones, allowing us to start 360 * getting data flowing and folks using it while we develop additional features. 361 * While bugs have been filed to cover this future work, the following gives an 362 * overview of expected work: 363 * 364 * o DMA binding and breaking up the locking in ring recycling. 365 * o Enhanced detection of device errors 366 * o Participation in IRM 367 * o FMA device reset 368 * o Stall detection, temperature error detection, etc. 369 * o More dynamic resource pools 370 */ 371 372 #include "i40e_sw.h" 373 374 static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.3"; 375 376 /* 377 * The i40e_glock primarily protects the lists below and the i40e_device_t 378 * structures. 379 */ 380 static kmutex_t i40e_glock; 381 static list_t i40e_glist; 382 static list_t i40e_dlist; 383 384 /* 385 * Access attributes for register mapping. 386 */ 387 static ddi_device_acc_attr_t i40e_regs_acc_attr = { 388 DDI_DEVICE_ATTR_V1, 389 DDI_STRUCTURE_LE_ACC, 390 DDI_STRICTORDER_ACC, 391 DDI_FLAGERR_ACC 392 }; 393 394 /* 395 * Logging function for this driver. 396 */ 397 static void 398 i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt, 399 va_list ap) 400 { 401 char buf[1024]; 402 403 (void) vsnprintf(buf, sizeof (buf), fmt, ap); 404 405 if (i40e == NULL) { 406 cmn_err(level, (console) ? "%s: %s" : "!%s: %s", 407 I40E_MODULE_NAME, buf); 408 } else { 409 dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s", 410 buf); 411 } 412 } 413 414 /* 415 * Because there's the stupid trailing-comma problem with the C preprocessor 416 * and variable arguments, I need to instantiate these. Pardon the redundant 417 * code. 418 */ 419 /*PRINTFLIKE2*/ 420 void 421 i40e_error(i40e_t *i40e, const char *fmt, ...) 422 { 423 va_list ap; 424 425 va_start(ap, fmt); 426 i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap); 427 va_end(ap); 428 } 429 430 /*PRINTFLIKE2*/ 431 void 432 i40e_log(i40e_t *i40e, const char *fmt, ...) 433 { 434 va_list ap; 435 436 va_start(ap, fmt); 437 i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap); 438 va_end(ap); 439 } 440 441 /*PRINTFLIKE2*/ 442 void 443 i40e_notice(i40e_t *i40e, const char *fmt, ...) 444 { 445 va_list ap; 446 447 va_start(ap, fmt); 448 i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap); 449 va_end(ap); 450 } 451 452 /* 453 * Various parts of the driver need to know if the controller is from the X722 454 * family, which has a few additional capabilities and different programming 455 * means. We don't consider virtual functions as part of this as they are quite 456 * different and will require substantially more work. 457 */ 458 static boolean_t 459 i40e_is_x722(i40e_t *i40e) 460 { 461 return (i40e->i40e_hw_space.mac.type == I40E_MAC_X722); 462 } 463 464 static void 465 i40e_device_rele(i40e_t *i40e) 466 { 467 i40e_device_t *idp = i40e->i40e_device; 468 469 if (idp == NULL) 470 return; 471 472 mutex_enter(&i40e_glock); 473 VERIFY(idp->id_nreg > 0); 474 list_remove(&idp->id_i40e_list, i40e); 475 idp->id_nreg--; 476 if (idp->id_nreg == 0) { 477 list_remove(&i40e_dlist, idp); 478 list_destroy(&idp->id_i40e_list); 479 kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) * 480 idp->id_rsrcs_alloc); 481 kmem_free(idp, sizeof (i40e_device_t)); 482 } 483 i40e->i40e_device = NULL; 484 mutex_exit(&i40e_glock); 485 } 486 487 static i40e_device_t * 488 i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device) 489 { 490 i40e_device_t *idp; 491 mutex_enter(&i40e_glock); 492 for (idp = list_head(&i40e_dlist); idp != NULL; 493 idp = list_next(&i40e_dlist, idp)) { 494 if (idp->id_parent == parent && idp->id_pci_bus == bus && 495 idp->id_pci_device == device) { 496 break; 497 } 498 } 499 500 if (idp != NULL) { 501 VERIFY(idp->id_nreg < idp->id_nfuncs); 502 idp->id_nreg++; 503 } else { 504 i40e_hw_t *hw = &i40e->i40e_hw_space; 505 ASSERT(hw->num_ports > 0); 506 ASSERT(hw->num_partitions > 0); 507 508 /* 509 * The Intel common code doesn't exactly keep the number of PCI 510 * functions. But it calculates it during discovery of 511 * partitions and ports. So what we do is undo the calculation 512 * that it does originally, as functions are evenly spread 513 * across ports in the rare case of partitions. 514 */ 515 idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP); 516 idp->id_parent = parent; 517 idp->id_pci_bus = bus; 518 idp->id_pci_device = device; 519 idp->id_nfuncs = hw->num_ports * hw->num_partitions; 520 idp->id_nreg = 1; 521 idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc; 522 idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual; 523 idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) * 524 idp->id_rsrcs_alloc, KM_SLEEP); 525 bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs, 526 sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc); 527 list_create(&idp->id_i40e_list, sizeof (i40e_t), 528 offsetof(i40e_t, i40e_dlink)); 529 530 list_insert_tail(&i40e_dlist, idp); 531 } 532 533 list_insert_tail(&idp->id_i40e_list, i40e); 534 mutex_exit(&i40e_glock); 535 536 return (idp); 537 } 538 539 static void 540 i40e_link_state_set(i40e_t *i40e, link_state_t state) 541 { 542 if (i40e->i40e_link_state == state) 543 return; 544 545 i40e->i40e_link_state = state; 546 mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state); 547 } 548 549 /* 550 * This is a basic link check routine. Mostly we're using this just to see 551 * if we can get any accurate information about the state of the link being 552 * up or down, as well as updating the link state, speed, etc. information. 553 */ 554 void 555 i40e_link_check(i40e_t *i40e) 556 { 557 i40e_hw_t *hw = &i40e->i40e_hw_space; 558 boolean_t ls; 559 int ret; 560 561 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 562 563 hw->phy.get_link_info = B_TRUE; 564 if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) { 565 i40e->i40e_s_link_status_errs++; 566 i40e->i40e_s_link_status_lasterr = ret; 567 return; 568 } 569 570 /* 571 * Firmware abstracts all of the mac and phy information for us, so we 572 * can use i40e_get_link_status to determine the current state. 573 */ 574 if (ls == B_TRUE) { 575 enum i40e_aq_link_speed speed; 576 577 speed = i40e_get_link_speed(hw); 578 579 /* 580 * Translate from an i40e value to a value in Mbits/s. 581 */ 582 switch (speed) { 583 case I40E_LINK_SPEED_100MB: 584 i40e->i40e_link_speed = 100; 585 break; 586 case I40E_LINK_SPEED_1GB: 587 i40e->i40e_link_speed = 1000; 588 break; 589 case I40E_LINK_SPEED_10GB: 590 i40e->i40e_link_speed = 10000; 591 break; 592 case I40E_LINK_SPEED_20GB: 593 i40e->i40e_link_speed = 20000; 594 break; 595 case I40E_LINK_SPEED_40GB: 596 i40e->i40e_link_speed = 40000; 597 break; 598 case I40E_LINK_SPEED_25GB: 599 i40e->i40e_link_speed = 25000; 600 break; 601 default: 602 i40e->i40e_link_speed = 0; 603 break; 604 } 605 606 /* 607 * At this time, hardware does not support half-duplex 608 * operation, hence why we don't ask the hardware about our 609 * current speed. 610 */ 611 i40e->i40e_link_duplex = LINK_DUPLEX_FULL; 612 i40e_link_state_set(i40e, LINK_STATE_UP); 613 } else { 614 i40e->i40e_link_speed = 0; 615 i40e->i40e_link_duplex = 0; 616 i40e_link_state_set(i40e, LINK_STATE_DOWN); 617 } 618 } 619 620 static void 621 i40e_rem_intrs(i40e_t *i40e) 622 { 623 int i, rc; 624 625 for (i = 0; i < i40e->i40e_intr_count; i++) { 626 rc = ddi_intr_free(i40e->i40e_intr_handles[i]); 627 if (rc != DDI_SUCCESS) { 628 i40e_log(i40e, "failed to free interrupt %d: %d", 629 i, rc); 630 } 631 } 632 633 kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size); 634 i40e->i40e_intr_handles = NULL; 635 } 636 637 static void 638 i40e_rem_intr_handlers(i40e_t *i40e) 639 { 640 int i, rc; 641 642 for (i = 0; i < i40e->i40e_intr_count; i++) { 643 rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]); 644 if (rc != DDI_SUCCESS) { 645 i40e_log(i40e, "failed to remove interrupt %d: %d", 646 i, rc); 647 } 648 } 649 } 650 651 /* 652 * illumos Fault Management Architecture (FMA) support. 653 */ 654 655 int 656 i40e_check_acc_handle(ddi_acc_handle_t handle) 657 { 658 ddi_fm_error_t de; 659 660 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 661 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 662 return (de.fme_status); 663 } 664 665 int 666 i40e_check_dma_handle(ddi_dma_handle_t handle) 667 { 668 ddi_fm_error_t de; 669 670 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 671 return (de.fme_status); 672 } 673 674 /* 675 * Fault service error handling callback function. 676 */ 677 /* ARGSUSED */ 678 static int 679 i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 680 { 681 pci_ereport_post(dip, err, NULL); 682 return (err->fme_status); 683 } 684 685 static void 686 i40e_fm_init(i40e_t *i40e) 687 { 688 ddi_iblock_cookie_t iblk; 689 690 i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, 691 i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable", 692 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 693 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 694 695 if (i40e->i40e_fm_capabilities < 0) { 696 i40e->i40e_fm_capabilities = 0; 697 } else if (i40e->i40e_fm_capabilities > 0xf) { 698 i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE | 699 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE | 700 DDI_FM_ERRCB_CAPABLE; 701 } 702 703 /* 704 * Only register with IO Fault Services if we have some capability 705 */ 706 if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 707 i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 708 } else { 709 i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 710 } 711 712 if (i40e->i40e_fm_capabilities) { 713 ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk); 714 715 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) || 716 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) { 717 pci_ereport_setup(i40e->i40e_dip); 718 } 719 720 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) { 721 ddi_fm_handler_register(i40e->i40e_dip, 722 i40e_fm_error_cb, (void*)i40e); 723 } 724 } 725 726 if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 727 i40e_init_dma_attrs(i40e, B_TRUE); 728 } else { 729 i40e_init_dma_attrs(i40e, B_FALSE); 730 } 731 } 732 733 static void 734 i40e_fm_fini(i40e_t *i40e) 735 { 736 if (i40e->i40e_fm_capabilities) { 737 738 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) || 739 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) 740 pci_ereport_teardown(i40e->i40e_dip); 741 742 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) 743 ddi_fm_handler_unregister(i40e->i40e_dip); 744 745 ddi_fm_fini(i40e->i40e_dip); 746 } 747 } 748 749 void 750 i40e_fm_ereport(i40e_t *i40e, char *detail) 751 { 752 uint64_t ena; 753 char buf[FM_MAX_CLASS]; 754 755 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 756 ena = fm_ena_generate(0, FM_ENA_FMT1); 757 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) { 758 ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP, 759 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 760 } 761 } 762 763 /* 764 * Here we're trying to set the SEID of the default VSI. In general, 765 * when we come through and look at this shortly after attach, we 766 * expect there to only be a single element present, which is the 767 * default VSI. Importantly, each PF seems to not see any other 768 * devices, in part because of the simple switch mode that we're 769 * using. If for some reason, we see more artifacts, we'll need to 770 * revisit what we're doing here. 771 */ 772 static boolean_t 773 i40e_set_def_vsi_seid(i40e_t *i40e) 774 { 775 i40e_hw_t *hw = &i40e->i40e_hw_space; 776 struct i40e_aqc_get_switch_config_resp *sw_config; 777 uint8_t aq_buf[I40E_AQ_LARGE_BUF]; 778 uint16_t next = 0; 779 int rc; 780 781 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 782 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 783 rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next, 784 NULL); 785 if (rc != I40E_SUCCESS) { 786 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d", 787 rc, hw->aq.asq_last_status); 788 return (B_FALSE); 789 } 790 791 if (LE_16(sw_config->header.num_reported) != 1) { 792 i40e_error(i40e, "encountered multiple (%d) switching units " 793 "during attach, not proceeding", 794 LE_16(sw_config->header.num_reported)); 795 return (B_FALSE); 796 } 797 798 I40E_DEF_VSI_SEID(i40e) = sw_config->element[0].seid; 799 return (B_TRUE); 800 } 801 802 /* 803 * Get the SEID of the uplink MAC. 804 */ 805 static int 806 i40e_get_mac_seid(i40e_t *i40e) 807 { 808 i40e_hw_t *hw = &i40e->i40e_hw_space; 809 struct i40e_aqc_get_switch_config_resp *sw_config; 810 uint8_t aq_buf[I40E_AQ_LARGE_BUF]; 811 uint16_t next = 0; 812 int rc; 813 814 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 815 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 816 rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next, 817 NULL); 818 if (rc != I40E_SUCCESS) { 819 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d", 820 rc, hw->aq.asq_last_status); 821 return (-1); 822 } 823 824 return (LE_16(sw_config->element[0].uplink_seid)); 825 } 826 827 /* 828 * We need to fill the i40e_hw_t structure with the capabilities of this PF. We 829 * must also provide the memory for it; however, we don't need to keep it around 830 * to the call to the common code. It takes it and parses it into an internal 831 * structure. 832 */ 833 static boolean_t 834 i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw) 835 { 836 struct i40e_aqc_list_capabilities_element_resp *buf; 837 int rc; 838 size_t len; 839 uint16_t needed; 840 int nelems = I40E_HW_CAP_DEFAULT; 841 842 len = nelems * sizeof (*buf); 843 844 for (;;) { 845 ASSERT(len > 0); 846 buf = kmem_alloc(len, KM_SLEEP); 847 rc = i40e_aq_discover_capabilities(hw, buf, len, 848 &needed, i40e_aqc_opc_list_func_capabilities, NULL); 849 kmem_free(buf, len); 850 851 if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM && 852 nelems == I40E_HW_CAP_DEFAULT) { 853 if (nelems == needed) { 854 i40e_error(i40e, "Capability discovery failed " 855 "due to byzantine common code"); 856 return (B_FALSE); 857 } 858 len = needed; 859 continue; 860 } else if (rc != I40E_SUCCESS || 861 hw->aq.asq_last_status != I40E_AQ_RC_OK) { 862 i40e_error(i40e, "Capability discovery failed: %d", rc); 863 return (B_FALSE); 864 } 865 866 break; 867 } 868 869 return (B_TRUE); 870 } 871 872 /* 873 * Obtain the switch's capabilities as seen by this PF and keep it around for 874 * our later use. 875 */ 876 static boolean_t 877 i40e_get_switch_resources(i40e_t *i40e) 878 { 879 i40e_hw_t *hw = &i40e->i40e_hw_space; 880 uint8_t cnt = 2; 881 uint8_t act; 882 size_t size; 883 i40e_switch_rsrc_t *buf; 884 885 for (;;) { 886 enum i40e_status_code ret; 887 size = cnt * sizeof (i40e_switch_rsrc_t); 888 ASSERT(size > 0); 889 if (size > UINT16_MAX) 890 return (B_FALSE); 891 buf = kmem_alloc(size, KM_SLEEP); 892 893 ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf, 894 cnt, NULL); 895 if (ret == I40E_ERR_ADMIN_QUEUE_ERROR && 896 hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) { 897 kmem_free(buf, size); 898 cnt += I40E_SWITCH_CAP_DEFAULT; 899 continue; 900 } else if (ret != I40E_SUCCESS) { 901 kmem_free(buf, size); 902 i40e_error(i40e, 903 "failed to retrieve switch statistics: %d", ret); 904 return (B_FALSE); 905 } 906 907 break; 908 } 909 910 i40e->i40e_switch_rsrc_alloc = cnt; 911 i40e->i40e_switch_rsrc_actual = act; 912 i40e->i40e_switch_rsrcs = buf; 913 914 return (B_TRUE); 915 } 916 917 static void 918 i40e_cleanup_resources(i40e_t *i40e) 919 { 920 if (i40e->i40e_uaddrs != NULL) { 921 kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) * 922 i40e->i40e_resources.ifr_nmacfilt); 923 i40e->i40e_uaddrs = NULL; 924 } 925 926 if (i40e->i40e_maddrs != NULL) { 927 kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) * 928 i40e->i40e_resources.ifr_nmcastfilt); 929 i40e->i40e_maddrs = NULL; 930 } 931 932 if (i40e->i40e_switch_rsrcs != NULL) { 933 size_t sz = sizeof (i40e_switch_rsrc_t) * 934 i40e->i40e_switch_rsrc_alloc; 935 ASSERT(sz > 0); 936 kmem_free(i40e->i40e_switch_rsrcs, sz); 937 i40e->i40e_switch_rsrcs = NULL; 938 } 939 940 if (i40e->i40e_device != NULL) 941 i40e_device_rele(i40e); 942 } 943 944 static boolean_t 945 i40e_get_available_resources(i40e_t *i40e) 946 { 947 dev_info_t *parent; 948 uint16_t bus, device, func; 949 uint_t nregs; 950 int *regs, i; 951 i40e_device_t *idp; 952 i40e_hw_t *hw = &i40e->i40e_hw_space; 953 954 parent = ddi_get_parent(i40e->i40e_dip); 955 956 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg", 957 ®s, &nregs) != DDI_PROP_SUCCESS) { 958 return (B_FALSE); 959 } 960 961 if (nregs < 1) { 962 ddi_prop_free(regs); 963 return (B_FALSE); 964 } 965 966 bus = PCI_REG_BUS_G(regs[0]); 967 device = PCI_REG_DEV_G(regs[0]); 968 func = PCI_REG_FUNC_G(regs[0]); 969 ddi_prop_free(regs); 970 971 i40e->i40e_hw_space.bus.func = func; 972 i40e->i40e_hw_space.bus.device = device; 973 974 if (i40e_get_switch_resources(i40e) == B_FALSE) { 975 return (B_FALSE); 976 } 977 978 /* 979 * To calculate the total amount of a resource we have available, we 980 * need to add how many our i40e_t thinks it has guaranteed, if any, and 981 * then we need to go through and divide the number of available on the 982 * device, which was snapshotted before anyone should have allocated 983 * anything, and use that to derive how many are available from the 984 * pool. Longer term, we may want to turn this into something that's 985 * more of a pool-like resource that everything can share (though that 986 * may require some more assistance from MAC). 987 * 988 * Though for transmit and receive queue pairs, we just have to ask 989 * firmware instead. 990 */ 991 idp = i40e_device_find(i40e, parent, bus, device); 992 i40e->i40e_device = idp; 993 i40e->i40e_resources.ifr_nvsis = 0; 994 i40e->i40e_resources.ifr_nvsis_used = 0; 995 i40e->i40e_resources.ifr_nmacfilt = 0; 996 i40e->i40e_resources.ifr_nmacfilt_used = 0; 997 i40e->i40e_resources.ifr_nmcastfilt = 0; 998 i40e->i40e_resources.ifr_nmcastfilt_used = 0; 999 1000 for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) { 1001 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i]; 1002 1003 switch (srp->resource_type) { 1004 case I40E_AQ_RESOURCE_TYPE_VSI: 1005 i40e->i40e_resources.ifr_nvsis += 1006 LE_16(srp->guaranteed); 1007 i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used); 1008 break; 1009 case I40E_AQ_RESOURCE_TYPE_MACADDR: 1010 i40e->i40e_resources.ifr_nmacfilt += 1011 LE_16(srp->guaranteed); 1012 i40e->i40e_resources.ifr_nmacfilt_used = 1013 LE_16(srp->used); 1014 break; 1015 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH: 1016 i40e->i40e_resources.ifr_nmcastfilt += 1017 LE_16(srp->guaranteed); 1018 i40e->i40e_resources.ifr_nmcastfilt_used = 1019 LE_16(srp->used); 1020 break; 1021 default: 1022 break; 1023 } 1024 } 1025 1026 for (i = 0; i < idp->id_rsrcs_act; i++) { 1027 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i]; 1028 switch (srp->resource_type) { 1029 case I40E_AQ_RESOURCE_TYPE_VSI: 1030 i40e->i40e_resources.ifr_nvsis += 1031 LE_16(srp->total_unalloced) / idp->id_nfuncs; 1032 break; 1033 case I40E_AQ_RESOURCE_TYPE_MACADDR: 1034 i40e->i40e_resources.ifr_nmacfilt += 1035 LE_16(srp->total_unalloced) / idp->id_nfuncs; 1036 break; 1037 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH: 1038 i40e->i40e_resources.ifr_nmcastfilt += 1039 LE_16(srp->total_unalloced) / idp->id_nfuncs; 1040 default: 1041 break; 1042 } 1043 } 1044 1045 i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp; 1046 i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp; 1047 1048 i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) * 1049 i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP); 1050 i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) * 1051 i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP); 1052 1053 /* 1054 * Initialize these as multicast addresses to indicate it's invalid for 1055 * sanity purposes. Think of it like 0xdeadbeef. 1056 */ 1057 for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++) 1058 i40e->i40e_uaddrs[i].iua_mac[0] = 0x01; 1059 1060 return (B_TRUE); 1061 } 1062 1063 static boolean_t 1064 i40e_enable_interrupts(i40e_t *i40e) 1065 { 1066 int i, rc; 1067 1068 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) { 1069 rc = ddi_intr_block_enable(i40e->i40e_intr_handles, 1070 i40e->i40e_intr_count); 1071 if (rc != DDI_SUCCESS) { 1072 i40e_error(i40e, "Interrupt block-enable failed: %d", 1073 rc); 1074 return (B_FALSE); 1075 } 1076 } else { 1077 for (i = 0; i < i40e->i40e_intr_count; i++) { 1078 rc = ddi_intr_enable(i40e->i40e_intr_handles[i]); 1079 if (rc != DDI_SUCCESS) { 1080 i40e_error(i40e, 1081 "Failed to enable interrupt %d: %d", i, rc); 1082 while (--i >= 0) { 1083 (void) ddi_intr_disable( 1084 i40e->i40e_intr_handles[i]); 1085 } 1086 return (B_FALSE); 1087 } 1088 } 1089 } 1090 1091 return (B_TRUE); 1092 } 1093 1094 static boolean_t 1095 i40e_disable_interrupts(i40e_t *i40e) 1096 { 1097 int i, rc; 1098 1099 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) { 1100 rc = ddi_intr_block_disable(i40e->i40e_intr_handles, 1101 i40e->i40e_intr_count); 1102 if (rc != DDI_SUCCESS) { 1103 i40e_error(i40e, 1104 "Interrupt block-disabled failed: %d", rc); 1105 return (B_FALSE); 1106 } 1107 } else { 1108 for (i = 0; i < i40e->i40e_intr_count; i++) { 1109 rc = ddi_intr_disable(i40e->i40e_intr_handles[i]); 1110 if (rc != DDI_SUCCESS) { 1111 i40e_error(i40e, 1112 "Failed to disable interrupt %d: %d", 1113 i, rc); 1114 return (B_FALSE); 1115 } 1116 } 1117 } 1118 1119 return (B_TRUE); 1120 } 1121 1122 /* 1123 * Free receive & transmit rings. 1124 */ 1125 static void 1126 i40e_free_trqpairs(i40e_t *i40e) 1127 { 1128 i40e_trqpair_t *itrq; 1129 1130 if (i40e->i40e_rx_groups != NULL) { 1131 kmem_free(i40e->i40e_rx_groups, 1132 sizeof (i40e_rx_group_t) * i40e->i40e_num_rx_groups); 1133 i40e->i40e_rx_groups = NULL; 1134 } 1135 1136 if (i40e->i40e_trqpairs != NULL) { 1137 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) { 1138 itrq = &i40e->i40e_trqpairs[i]; 1139 mutex_destroy(&itrq->itrq_rx_lock); 1140 mutex_destroy(&itrq->itrq_tx_lock); 1141 mutex_destroy(&itrq->itrq_tcb_lock); 1142 1143 /* 1144 * Should have already been cleaned up by start/stop, 1145 * etc. 1146 */ 1147 ASSERT(itrq->itrq_txkstat == NULL); 1148 ASSERT(itrq->itrq_rxkstat == NULL); 1149 } 1150 1151 kmem_free(i40e->i40e_trqpairs, 1152 sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs); 1153 i40e->i40e_trqpairs = NULL; 1154 } 1155 1156 cv_destroy(&i40e->i40e_rx_pending_cv); 1157 mutex_destroy(&i40e->i40e_rx_pending_lock); 1158 mutex_destroy(&i40e->i40e_general_lock); 1159 } 1160 1161 /* 1162 * Allocate transmit and receive rings, as well as other data structures that we 1163 * need. 1164 */ 1165 static boolean_t 1166 i40e_alloc_trqpairs(i40e_t *i40e) 1167 { 1168 void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri); 1169 1170 /* 1171 * Now that we have the priority for the interrupts, initialize 1172 * all relevant locks. 1173 */ 1174 mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri); 1175 mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri); 1176 cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL); 1177 1178 i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) * 1179 i40e->i40e_num_trqpairs, KM_SLEEP); 1180 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) { 1181 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i]; 1182 1183 itrq->itrq_i40e = i40e; 1184 mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri); 1185 mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri); 1186 mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri); 1187 itrq->itrq_index = i; 1188 } 1189 1190 i40e->i40e_rx_groups = kmem_zalloc(sizeof (i40e_rx_group_t) * 1191 i40e->i40e_num_rx_groups, KM_SLEEP); 1192 1193 for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) { 1194 i40e_rx_group_t *rxg = &i40e->i40e_rx_groups[i]; 1195 1196 rxg->irg_index = i; 1197 rxg->irg_i40e = i40e; 1198 } 1199 1200 return (B_TRUE); 1201 } 1202 1203 1204 1205 /* 1206 * Unless a .conf file already overrode i40e_t structure values, they will 1207 * be 0, and need to be set in conjunction with the now-available HW report. 1208 */ 1209 /* ARGSUSED */ 1210 static void 1211 i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw) 1212 { 1213 if (i40e->i40e_num_trqpairs_per_vsi == 0) { 1214 if (i40e_is_x722(i40e)) { 1215 i40e->i40e_num_trqpairs_per_vsi = 1216 I40E_722_MAX_TC_QUEUES; 1217 } else { 1218 i40e->i40e_num_trqpairs_per_vsi = 1219 I40E_710_MAX_TC_QUEUES; 1220 } 1221 } 1222 1223 if (i40e->i40e_num_rx_groups == 0) { 1224 i40e->i40e_num_rx_groups = I40E_DEF_NUM_RX_GROUPS; 1225 } 1226 } 1227 1228 /* 1229 * Free any resources required by, or setup by, the Intel common code. 1230 */ 1231 static void 1232 i40e_common_code_fini(i40e_t *i40e) 1233 { 1234 i40e_hw_t *hw = &i40e->i40e_hw_space; 1235 int rc; 1236 1237 rc = i40e_shutdown_lan_hmc(hw); 1238 if (rc != I40E_SUCCESS) 1239 i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc); 1240 1241 rc = i40e_shutdown_adminq(hw); 1242 if (rc != I40E_SUCCESS) 1243 i40e_error(i40e, "failed to shutdown admin queue: %d", rc); 1244 } 1245 1246 /* 1247 * Initialize and call Intel common-code routines, includes some setup 1248 * the common code expects from the driver. Also prints on failure, so 1249 * the caller doesn't have to. 1250 */ 1251 static boolean_t 1252 i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw) 1253 { 1254 int rc; 1255 1256 i40e_clear_hw(hw); 1257 rc = i40e_pf_reset(hw); 1258 if (rc != 0) { 1259 i40e_error(i40e, "failed to reset hardware: %d", rc); 1260 i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE); 1261 return (B_FALSE); 1262 } 1263 1264 rc = i40e_init_shared_code(hw); 1265 if (rc != 0) { 1266 i40e_error(i40e, "failed to initialize i40e core: %d", rc); 1267 return (B_FALSE); 1268 } 1269 1270 hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE; 1271 hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE; 1272 hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ; 1273 hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ; 1274 1275 rc = i40e_init_adminq(hw); 1276 if (rc != 0) { 1277 i40e_error(i40e, "failed to initialize firmware admin queue: " 1278 "%d, potential firmware version mismatch", rc); 1279 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE); 1280 return (B_FALSE); 1281 } 1282 1283 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 1284 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) { 1285 i40e_log(i40e, "The driver for the device detected a newer " 1286 "version of the NVM image (%d.%d) than expected (%d.%d).\n" 1287 "Please install the most recent version of the network " 1288 "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, 1289 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR); 1290 } else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 1291 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) { 1292 i40e_log(i40e, "The driver for the device detected an older" 1293 " version of the NVM image (%d.%d) than expected (%d.%d)." 1294 "\nPlease update the NVM image.\n", 1295 hw->aq.api_maj_ver, hw->aq.api_min_ver, 1296 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR - 1); 1297 } 1298 1299 i40e_clear_pxe_mode(hw); 1300 1301 /* 1302 * We need to call this so that the common code can discover 1303 * capabilities of the hardware, which it uses throughout the rest. 1304 */ 1305 if (!i40e_get_hw_capabilities(i40e, hw)) { 1306 i40e_error(i40e, "failed to obtain hardware capabilities"); 1307 return (B_FALSE); 1308 } 1309 1310 if (i40e_get_available_resources(i40e) == B_FALSE) { 1311 i40e_error(i40e, "failed to obtain hardware resources"); 1312 return (B_FALSE); 1313 } 1314 1315 i40e_hw_to_instance(i40e, hw); 1316 1317 rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 1318 hw->func_caps.num_rx_qp, 0, 0); 1319 if (rc != 0) { 1320 i40e_error(i40e, "failed to initialize hardware memory cache: " 1321 "%d", rc); 1322 return (B_FALSE); 1323 } 1324 1325 rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 1326 if (rc != 0) { 1327 i40e_error(i40e, "failed to configure hardware memory cache: " 1328 "%d", rc); 1329 return (B_FALSE); 1330 } 1331 1332 (void) i40e_aq_stop_lldp(hw, TRUE, NULL); 1333 1334 rc = i40e_get_mac_addr(hw, hw->mac.addr); 1335 if (rc != I40E_SUCCESS) { 1336 i40e_error(i40e, "failed to retrieve hardware mac address: %d", 1337 rc); 1338 return (B_FALSE); 1339 } 1340 1341 rc = i40e_validate_mac_addr(hw->mac.addr); 1342 if (rc != 0) { 1343 i40e_error(i40e, "failed to validate internal mac address: " 1344 "%d", rc); 1345 return (B_FALSE); 1346 } 1347 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 1348 if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) != 1349 I40E_SUCCESS) { 1350 i40e_error(i40e, "failed to retrieve port mac address: %d", 1351 rc); 1352 return (B_FALSE); 1353 } 1354 1355 /* 1356 * We need to obtain the Default Virtual Station SEID (VSI) 1357 * before we can perform other operations on the device. 1358 */ 1359 if (!i40e_set_def_vsi_seid(i40e)) { 1360 i40e_error(i40e, "failed to obtain Default VSI SEID"); 1361 return (B_FALSE); 1362 } 1363 1364 return (B_TRUE); 1365 } 1366 1367 static void 1368 i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e) 1369 { 1370 int rc; 1371 1372 if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR) 1373 (void) i40e_disable_interrupts(i40e); 1374 1375 if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) && 1376 i40e->i40e_periodic_id != 0) { 1377 ddi_periodic_delete(i40e->i40e_periodic_id); 1378 i40e->i40e_periodic_id = 0; 1379 } 1380 1381 if (i40e->i40e_attach_progress & I40E_ATTACH_UFM_INIT) 1382 ddi_ufm_fini(i40e->i40e_ufmh); 1383 1384 if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) { 1385 rc = mac_unregister(i40e->i40e_mac_hdl); 1386 if (rc != 0) { 1387 i40e_error(i40e, "failed to unregister from mac: %d", 1388 rc); 1389 } 1390 } 1391 1392 if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) { 1393 i40e_stats_fini(i40e); 1394 } 1395 1396 if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR) 1397 i40e_rem_intr_handlers(i40e); 1398 1399 if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS) 1400 i40e_free_trqpairs(i40e); 1401 1402 if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR) 1403 i40e_rem_intrs(i40e); 1404 1405 if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE) 1406 i40e_common_code_fini(i40e); 1407 1408 i40e_cleanup_resources(i40e); 1409 1410 if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS) 1411 (void) ddi_prop_remove_all(devinfo); 1412 1413 if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP && 1414 i40e->i40e_osdep_space.ios_reg_handle != NULL) { 1415 ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle); 1416 i40e->i40e_osdep_space.ios_reg_handle = NULL; 1417 } 1418 1419 if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) && 1420 i40e->i40e_osdep_space.ios_cfg_handle != NULL) { 1421 pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle); 1422 i40e->i40e_osdep_space.ios_cfg_handle = NULL; 1423 } 1424 1425 if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT) 1426 i40e_fm_fini(i40e); 1427 1428 kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ); 1429 kmem_free(i40e, sizeof (i40e_t)); 1430 1431 ddi_set_driver_private(devinfo, NULL); 1432 } 1433 1434 static boolean_t 1435 i40e_final_init(i40e_t *i40e) 1436 { 1437 i40e_hw_t *hw = &i40e->i40e_hw_space; 1438 struct i40e_osdep *osdep = OS_DEP(hw); 1439 uint8_t pbanum[I40E_PBANUM_STRLEN]; 1440 enum i40e_status_code irc; 1441 char buf[I40E_DDI_PROP_LEN]; 1442 1443 pbanum[0] = '\0'; 1444 irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum)); 1445 if (irc != I40E_SUCCESS) { 1446 i40e_log(i40e, "failed to read PBA string: %d", irc); 1447 } else { 1448 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1449 "printed-board-assembly", (char *)pbanum); 1450 } 1451 1452 #ifdef DEBUG 1453 ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver, 1454 hw->aq.fw_min_ver) < sizeof (buf)); 1455 ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf)); 1456 ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver, 1457 hw->aq.api_min_ver) < sizeof (buf)); 1458 #endif 1459 1460 (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver, 1461 hw->aq.fw_min_ver); 1462 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1463 "firmware-version", buf); 1464 (void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build); 1465 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1466 "firmware-build", buf); 1467 (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver, 1468 hw->aq.api_min_ver); 1469 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1470 "api-version", buf); 1471 1472 if (!i40e_set_hw_bus_info(hw)) 1473 return (B_FALSE); 1474 1475 if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) { 1476 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 1477 return (B_FALSE); 1478 } 1479 1480 return (B_TRUE); 1481 } 1482 1483 static void 1484 i40e_identify_hardware(i40e_t *i40e) 1485 { 1486 i40e_hw_t *hw = &i40e->i40e_hw_space; 1487 struct i40e_osdep *osdep = &i40e->i40e_osdep_space; 1488 1489 hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID); 1490 hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID); 1491 hw->revision_id = pci_config_get8(osdep->ios_cfg_handle, 1492 PCI_CONF_REVID); 1493 hw->subsystem_device_id = 1494 pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID); 1495 hw->subsystem_vendor_id = 1496 pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID); 1497 1498 /* 1499 * Note that we set the hardware's bus information later on, in 1500 * i40e_get_available_resources(). The common code doesn't seem to 1501 * require that it be set in any ways, it seems to be mostly for 1502 * book-keeping. 1503 */ 1504 } 1505 1506 static boolean_t 1507 i40e_regs_map(i40e_t *i40e) 1508 { 1509 dev_info_t *devinfo = i40e->i40e_dip; 1510 i40e_hw_t *hw = &i40e->i40e_hw_space; 1511 struct i40e_osdep *osdep = &i40e->i40e_osdep_space; 1512 off_t memsize; 1513 int ret; 1514 1515 if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) != 1516 DDI_SUCCESS) { 1517 i40e_error(i40e, "Used invalid register set to map PCIe regs"); 1518 return (B_FALSE); 1519 } 1520 1521 if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET, 1522 (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr, 1523 &osdep->ios_reg_handle)) != DDI_SUCCESS) { 1524 i40e_error(i40e, "failed to map device registers: %d", ret); 1525 return (B_FALSE); 1526 } 1527 1528 osdep->ios_reg_size = memsize; 1529 return (B_TRUE); 1530 } 1531 1532 /* 1533 * Update parameters required when a new MTU has been configured. Calculate the 1534 * maximum frame size, as well as, size our DMA buffers which we size in 1535 * increments of 1K. 1536 */ 1537 void 1538 i40e_update_mtu(i40e_t *i40e) 1539 { 1540 uint32_t rx, tx; 1541 1542 i40e->i40e_frame_max = i40e->i40e_sdu + 1543 sizeof (struct ether_vlan_header) + ETHERFCSL; 1544 1545 rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT; 1546 i40e->i40e_rx_buf_size = ((rx >> 10) + 1547 ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10; 1548 1549 tx = i40e->i40e_frame_max; 1550 i40e->i40e_tx_buf_size = ((tx >> 10) + 1551 ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10; 1552 } 1553 1554 static int 1555 i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def) 1556 { 1557 int val; 1558 1559 val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS, 1560 prop, def); 1561 if (val > max) 1562 val = max; 1563 if (val < min) 1564 val = min; 1565 return (val); 1566 } 1567 1568 static void 1569 i40e_init_properties(i40e_t *i40e) 1570 { 1571 i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu", 1572 I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU); 1573 1574 i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force", 1575 I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE); 1576 1577 i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable", 1578 B_FALSE, B_TRUE, B_TRUE); 1579 1580 i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size", 1581 I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE, 1582 I40E_DEF_TX_RING_SIZE); 1583 if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) { 1584 i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size, 1585 I40E_DESC_ALIGN); 1586 } 1587 1588 i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold", 1589 I40E_MIN_TX_BLOCK_THRESH, 1590 i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE, 1591 I40E_DEF_TX_BLOCK_THRESH); 1592 1593 i40e->i40e_num_rx_groups = i40e_get_prop(i40e, "rx_num_groups", 1594 I40E_MIN_NUM_RX_GROUPS, I40E_MAX_NUM_RX_GROUPS, 1595 I40E_DEF_NUM_RX_GROUPS); 1596 1597 i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size", 1598 I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE, 1599 I40E_DEF_RX_RING_SIZE); 1600 if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) { 1601 i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size, 1602 I40E_DESC_ALIGN); 1603 } 1604 1605 i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr", 1606 I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR, 1607 I40E_DEF_RX_LIMIT_PER_INTR); 1608 1609 i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable", 1610 B_FALSE, B_TRUE, B_TRUE); 1611 1612 i40e->i40e_tx_lso_enable = i40e_get_prop(i40e, "tx_lso_enable", 1613 B_FALSE, B_TRUE, B_TRUE); 1614 1615 i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable", 1616 B_FALSE, B_TRUE, B_TRUE); 1617 1618 i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold", 1619 I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH, 1620 I40E_DEF_RX_DMA_THRESH); 1621 1622 i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold", 1623 I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH, 1624 I40E_DEF_TX_DMA_THRESH); 1625 1626 i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle", 1627 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR); 1628 1629 i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle", 1630 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR); 1631 1632 i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle", 1633 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR); 1634 1635 if (!i40e->i40e_mr_enable) { 1636 i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX; 1637 i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX; 1638 } 1639 1640 i40e_update_mtu(i40e); 1641 } 1642 1643 /* 1644 * There are a few constraints on interrupts that we're currently imposing, some 1645 * of which are restrictions from hardware. For a fuller treatment, see 1646 * i40e_intr.c. 1647 * 1648 * Currently, to use MSI-X we require two interrupts be available though in 1649 * theory we should participate in IRM and happily use more interrupts. 1650 * 1651 * Hardware only supports a single MSI being programmed and therefore if we 1652 * don't have MSI-X interrupts available at this time, then we ratchet down the 1653 * number of rings and groups available. Obviously, we only bother with a single 1654 * fixed interrupt. 1655 */ 1656 static boolean_t 1657 i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type) 1658 { 1659 i40e_hw_t *hw = &i40e->i40e_hw_space; 1660 ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle; 1661 int request, count, actual, rc, min; 1662 uint32_t reg; 1663 1664 switch (intr_type) { 1665 case DDI_INTR_TYPE_FIXED: 1666 case DDI_INTR_TYPE_MSI: 1667 request = 1; 1668 min = 1; 1669 break; 1670 case DDI_INTR_TYPE_MSIX: 1671 min = 2; 1672 if (!i40e->i40e_mr_enable) { 1673 request = 2; 1674 break; 1675 } 1676 reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2); 1677 /* 1678 * Should this read fail, we will drop back to using 1679 * MSI or fixed interrupts. 1680 */ 1681 if (i40e_check_acc_handle(rh) != DDI_FM_OK) { 1682 ddi_fm_service_impact(i40e->i40e_dip, 1683 DDI_SERVICE_DEGRADED); 1684 return (B_FALSE); 1685 } 1686 request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 1687 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 1688 request++; /* the register value is n - 1 */ 1689 break; 1690 default: 1691 panic("bad interrupt type passed to i40e_alloc_intr_handles: " 1692 "%d", intr_type); 1693 } 1694 1695 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 1696 if (rc != DDI_SUCCESS || count < min) { 1697 i40e_log(i40e, "Get interrupt number failed, " 1698 "returned %d, count %d", rc, count); 1699 return (B_FALSE); 1700 } 1701 1702 rc = ddi_intr_get_navail(devinfo, intr_type, &count); 1703 if (rc != DDI_SUCCESS || count < min) { 1704 i40e_log(i40e, "Get AVAILABLE interrupt number failed, " 1705 "returned %d, count %d", rc, count); 1706 return (B_FALSE); 1707 } 1708 1709 actual = 0; 1710 i40e->i40e_intr_count = 0; 1711 i40e->i40e_intr_count_max = 0; 1712 i40e->i40e_intr_count_min = 0; 1713 1714 i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t); 1715 ASSERT(i40e->i40e_intr_size != 0); 1716 i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP); 1717 1718 rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0, 1719 min(request, count), &actual, DDI_INTR_ALLOC_NORMAL); 1720 if (rc != DDI_SUCCESS) { 1721 i40e_log(i40e, "Interrupt allocation failed with %d.", rc); 1722 goto alloc_handle_fail; 1723 } 1724 1725 i40e->i40e_intr_count = actual; 1726 i40e->i40e_intr_count_max = request; 1727 i40e->i40e_intr_count_min = min; 1728 1729 if (actual < min) { 1730 i40e_log(i40e, "actual (%d) is less than minimum (%d).", 1731 actual, min); 1732 goto alloc_handle_fail; 1733 } 1734 1735 /* 1736 * Record the priority and capabilities for our first vector. Once 1737 * we have it, that's our priority until detach time. Even if we 1738 * eventually participate in IRM, our priority shouldn't change. 1739 */ 1740 rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri); 1741 if (rc != DDI_SUCCESS) { 1742 i40e_log(i40e, 1743 "Getting interrupt priority failed with %d.", rc); 1744 goto alloc_handle_fail; 1745 } 1746 1747 rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap); 1748 if (rc != DDI_SUCCESS) { 1749 i40e_log(i40e, 1750 "Getting interrupt capabilities failed with %d.", rc); 1751 goto alloc_handle_fail; 1752 } 1753 1754 i40e->i40e_intr_type = intr_type; 1755 return (B_TRUE); 1756 1757 alloc_handle_fail: 1758 1759 i40e_rem_intrs(i40e); 1760 return (B_FALSE); 1761 } 1762 1763 static boolean_t 1764 i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo) 1765 { 1766 i40e_hw_t *hw = &i40e->i40e_hw_space; 1767 int intr_types, rc; 1768 uint_t max_trqpairs; 1769 1770 if (i40e_is_x722(i40e)) { 1771 max_trqpairs = I40E_722_MAX_TC_QUEUES; 1772 } else { 1773 max_trqpairs = I40E_710_MAX_TC_QUEUES; 1774 } 1775 1776 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 1777 if (rc != DDI_SUCCESS) { 1778 i40e_error(i40e, "failed to get supported interrupt types: %d", 1779 rc); 1780 return (B_FALSE); 1781 } 1782 1783 i40e->i40e_intr_type = 0; 1784 1785 /* 1786 * We need to determine the number of queue pairs per traffic 1787 * class. We only have one traffic class (TC0), so we'll base 1788 * this off the number of interrupts provided. Furthermore, 1789 * since we only use one traffic class, the number of queues 1790 * per traffic class and per VSI are the same. 1791 */ 1792 if ((intr_types & DDI_INTR_TYPE_MSIX) && 1793 (i40e->i40e_intr_force <= I40E_INTR_MSIX) && 1794 (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))) { 1795 uint32_t n, qp_cap, num_trqpairs; 1796 1797 /* 1798 * While we want the number of queue pairs to match 1799 * the number of interrupts, we must keep stay in 1800 * bounds of the maximum number of queues per traffic 1801 * class. We subtract one from i40e_intr_count to 1802 * account for interrupt zero; which is currently 1803 * restricted to admin queue commands and other 1804 * interrupt causes. 1805 */ 1806 n = MIN(i40e->i40e_intr_count - 1, max_trqpairs); 1807 ASSERT3U(n, >, 0); 1808 1809 /* 1810 * Round up to the nearest power of two to ensure that 1811 * the QBASE aligns with the TC size which must be 1812 * programmed as a power of two. See the queue mapping 1813 * description in section 7.4.9.5.5.1. 1814 * 1815 * If i40e_intr_count - 1 is not a power of two then 1816 * some queue pairs on the same VSI will have to share 1817 * an interrupt. 1818 * 1819 * We may want to revisit this logic in a future where 1820 * we have more interrupts and more VSIs. Otherwise, 1821 * each VSI will use as many interrupts as possible. 1822 * Using more QPs per VSI means better RSS for each 1823 * group, but at the same time may require more 1824 * sharing of interrupts across VSIs. This may be a 1825 * good candidate for a .conf tunable. 1826 */ 1827 n = 0x1 << ddi_fls(n); 1828 i40e->i40e_num_trqpairs_per_vsi = n; 1829 1830 /* 1831 * Make sure the number of tx/rx qpairs does not exceed 1832 * the device's capabilities. 1833 */ 1834 ASSERT3U(i40e->i40e_num_rx_groups, >, 0); 1835 qp_cap = MIN(hw->func_caps.num_rx_qp, hw->func_caps.num_tx_qp); 1836 num_trqpairs = i40e->i40e_num_trqpairs_per_vsi * 1837 i40e->i40e_num_rx_groups; 1838 if (num_trqpairs > qp_cap) { 1839 i40e->i40e_num_rx_groups = MAX(1, qp_cap / 1840 i40e->i40e_num_trqpairs_per_vsi); 1841 num_trqpairs = i40e->i40e_num_trqpairs_per_vsi * 1842 i40e->i40e_num_rx_groups; 1843 i40e_log(i40e, "Rx groups restricted to %u", 1844 i40e->i40e_num_rx_groups); 1845 } 1846 ASSERT3U(num_trqpairs, >, 0); 1847 i40e->i40e_num_trqpairs = num_trqpairs; 1848 return (B_TRUE); 1849 } 1850 1851 /* 1852 * We only use multiple transmit/receive pairs when MSI-X interrupts are 1853 * available due to the fact that the device basically only supports a 1854 * single MSI interrupt. 1855 */ 1856 i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX; 1857 i40e->i40e_num_trqpairs_per_vsi = i40e->i40e_num_trqpairs; 1858 i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX; 1859 1860 if ((intr_types & DDI_INTR_TYPE_MSI) && 1861 (i40e->i40e_intr_force <= I40E_INTR_MSI)) { 1862 if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI)) 1863 return (B_TRUE); 1864 } 1865 1866 if (intr_types & DDI_INTR_TYPE_FIXED) { 1867 if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED)) 1868 return (B_TRUE); 1869 } 1870 1871 return (B_FALSE); 1872 } 1873 1874 /* 1875 * Map different interrupts to MSI-X vectors. 1876 */ 1877 static boolean_t 1878 i40e_map_intrs_to_vectors(i40e_t *i40e) 1879 { 1880 if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) { 1881 return (B_TRUE); 1882 } 1883 1884 /* 1885 * Each queue pair is mapped to a single interrupt, so 1886 * transmit and receive interrupts for a given queue share the 1887 * same vector. Vector zero is reserved for the admin queue. 1888 */ 1889 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) { 1890 uint_t vector = i % (i40e->i40e_intr_count - 1); 1891 1892 i40e->i40e_trqpairs[i].itrq_rx_intrvec = vector + 1; 1893 i40e->i40e_trqpairs[i].itrq_tx_intrvec = vector + 1; 1894 } 1895 1896 return (B_TRUE); 1897 } 1898 1899 static boolean_t 1900 i40e_add_intr_handlers(i40e_t *i40e) 1901 { 1902 int rc, vector; 1903 1904 switch (i40e->i40e_intr_type) { 1905 case DDI_INTR_TYPE_MSIX: 1906 for (vector = 0; vector < i40e->i40e_intr_count; vector++) { 1907 rc = ddi_intr_add_handler( 1908 i40e->i40e_intr_handles[vector], 1909 (ddi_intr_handler_t *)i40e_intr_msix, i40e, 1910 (void *)(uintptr_t)vector); 1911 if (rc != DDI_SUCCESS) { 1912 i40e_log(i40e, "Add interrupt handler (MSI-X) " 1913 "failed: return %d, vector %d", rc, vector); 1914 for (vector--; vector >= 0; vector--) { 1915 (void) ddi_intr_remove_handler( 1916 i40e->i40e_intr_handles[vector]); 1917 } 1918 return (B_FALSE); 1919 } 1920 } 1921 break; 1922 case DDI_INTR_TYPE_MSI: 1923 rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0], 1924 (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL); 1925 if (rc != DDI_SUCCESS) { 1926 i40e_log(i40e, "Add interrupt handler (MSI) failed: " 1927 "return %d", rc); 1928 return (B_FALSE); 1929 } 1930 break; 1931 case DDI_INTR_TYPE_FIXED: 1932 rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0], 1933 (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL); 1934 if (rc != DDI_SUCCESS) { 1935 i40e_log(i40e, "Add interrupt handler (legacy) failed:" 1936 " return %d", rc); 1937 return (B_FALSE); 1938 } 1939 break; 1940 default: 1941 /* Cast to pacify lint */ 1942 panic("i40e_intr_type %p contains an unknown type: %d", 1943 (void *)i40e, i40e->i40e_intr_type); 1944 } 1945 1946 return (B_TRUE); 1947 } 1948 1949 /* 1950 * Perform periodic checks. Longer term, we should be thinking about additional 1951 * things here: 1952 * 1953 * o Stall Detection 1954 * o Temperature sensor detection 1955 * o Device resetting 1956 * o Statistics updating to avoid wraparound 1957 */ 1958 static void 1959 i40e_timer(void *arg) 1960 { 1961 i40e_t *i40e = arg; 1962 1963 mutex_enter(&i40e->i40e_general_lock); 1964 i40e_link_check(i40e); 1965 mutex_exit(&i40e->i40e_general_lock); 1966 } 1967 1968 /* 1969 * Get the hardware state, and scribble away anything that needs scribbling. 1970 */ 1971 static void 1972 i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw) 1973 { 1974 int rc; 1975 1976 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 1977 1978 (void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL); 1979 i40e_link_check(i40e); 1980 1981 /* 1982 * Try and determine our PHY. Note that we may have to retry to and 1983 * delay to detect fiber correctly. 1984 */ 1985 rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy, 1986 NULL); 1987 if (rc == I40E_ERR_UNKNOWN_PHY) { 1988 i40e_msec_delay(200); 1989 rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, 1990 &i40e->i40e_phy, NULL); 1991 } 1992 1993 if (rc != I40E_SUCCESS) { 1994 if (rc == I40E_ERR_UNKNOWN_PHY) { 1995 i40e_error(i40e, "encountered unknown PHY type, " 1996 "not attaching."); 1997 } else { 1998 i40e_error(i40e, "error getting physical capabilities: " 1999 "%d, %d", rc, hw->aq.asq_last_status); 2000 } 2001 } 2002 2003 rc = i40e_update_link_info(hw); 2004 if (rc != I40E_SUCCESS) { 2005 i40e_error(i40e, "failed to update link information: %d", rc); 2006 } 2007 2008 /* 2009 * In general, we don't want to mask off (as in stop from being a cause) 2010 * any of the interrupts that the phy might be able to generate. 2011 */ 2012 rc = i40e_aq_set_phy_int_mask(hw, 0, NULL); 2013 if (rc != I40E_SUCCESS) { 2014 i40e_error(i40e, "failed to update phy link mask: %d", rc); 2015 } 2016 } 2017 2018 /* 2019 * Go through and re-initialize any existing filters that we may have set up for 2020 * this device. Note that we would only expect them to exist if hardware had 2021 * already been initialized and we had just reset it. While we're not 2022 * implementing this yet, we're keeping this around for when we add reset 2023 * capabilities, so this isn't forgotten. 2024 */ 2025 /* ARGSUSED */ 2026 static void 2027 i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw) 2028 { 2029 } 2030 2031 /* 2032 * Set the properties which have common values across all the VSIs. 2033 * Consult the "Add VSI" command section (7.4.9.5.5.1) for a 2034 * complete description of these properties. 2035 */ 2036 static void 2037 i40e_set_shared_vsi_props(i40e_t *i40e, 2038 struct i40e_aqc_vsi_properties_data *info, uint_t vsi_idx) 2039 { 2040 uint_t tc_queues; 2041 uint16_t vsi_qp_base; 2042 2043 /* 2044 * It's important that we use bitwise-OR here; callers to this 2045 * function might enable other sections before calling this 2046 * function. 2047 */ 2048 info->valid_sections |= LE_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID | 2049 I40E_AQ_VSI_PROP_VLAN_VALID); 2050 2051 /* 2052 * Calculate the starting QP index for this VSI. This base is 2053 * relative to the PF queue space; so a value of 0 for PF#1 2054 * represents the absolute index PFLAN_QALLOC_FIRSTQ for PF#1. 2055 */ 2056 vsi_qp_base = vsi_idx * i40e->i40e_num_trqpairs_per_vsi; 2057 info->mapping_flags = LE_16(I40E_AQ_VSI_QUE_MAP_CONTIG); 2058 info->queue_mapping[0] = 2059 LE_16((vsi_qp_base << I40E_AQ_VSI_QUEUE_SHIFT) & 2060 I40E_AQ_VSI_QUEUE_MASK); 2061 2062 /* 2063 * tc_queues determines the size of the traffic class, where 2064 * the size is 2^^tc_queues to a maximum of 64 for the X710 2065 * and 128 for the X722. 2066 * 2067 * Some examples: 2068 * i40e_num_trqpairs_per_vsi == 1 => tc_queues = 0, 2^^0 = 1. 2069 * i40e_num_trqpairs_per_vsi == 7 => tc_queues = 3, 2^^3 = 8. 2070 * i40e_num_trqpairs_per_vsi == 8 => tc_queues = 3, 2^^3 = 8. 2071 * i40e_num_trqpairs_per_vsi == 9 => tc_queues = 4, 2^^4 = 16. 2072 * i40e_num_trqpairs_per_vsi == 17 => tc_queues = 5, 2^^5 = 32. 2073 * i40e_num_trqpairs_per_vsi == 64 => tc_queues = 6, 2^^6 = 64. 2074 */ 2075 tc_queues = ddi_fls(i40e->i40e_num_trqpairs_per_vsi - 1); 2076 2077 /* 2078 * The TC queue mapping is in relation to the VSI queue space. 2079 * Since we are only using one traffic class (TC0) we always 2080 * start at queue offset 0. 2081 */ 2082 info->tc_mapping[0] = 2083 LE_16(((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) & 2084 I40E_AQ_VSI_TC_QUE_OFFSET_MASK) | 2085 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) & 2086 I40E_AQ_VSI_TC_QUE_NUMBER_MASK)); 2087 2088 /* 2089 * I40E_AQ_VSI_PVLAN_MODE_ALL ("VLAN driver insertion mode") 2090 * 2091 * Allow tagged and untagged packets to be sent to this 2092 * VSI from the host. 2093 * 2094 * I40E_AQ_VSI_PVLAN_EMOD_NOTHING ("VLAN and UP expose mode") 2095 * 2096 * Leave the tag on the frame and place no VLAN 2097 * information in the descriptor. We want this mode 2098 * because our MAC layer will take care of the VLAN tag, 2099 * if there is one. 2100 */ 2101 info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2102 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2103 } 2104 2105 /* 2106 * Delete the VSI at this index, if one exists. We assume there is no 2107 * action we can take if this command fails but to log the failure. 2108 */ 2109 static void 2110 i40e_delete_vsi(i40e_t *i40e, uint_t idx) 2111 { 2112 i40e_hw_t *hw = &i40e->i40e_hw_space; 2113 uint16_t seid = i40e->i40e_vsis[idx].iv_seid; 2114 2115 if (seid != 0) { 2116 int rc; 2117 2118 rc = i40e_aq_delete_element(hw, seid, NULL); 2119 2120 if (rc != I40E_SUCCESS) { 2121 i40e_error(i40e, "Failed to delete VSI %d: %d", 2122 rc, hw->aq.asq_last_status); 2123 } 2124 2125 i40e->i40e_vsis[idx].iv_seid = 0; 2126 } 2127 } 2128 2129 /* 2130 * Add a new VSI. 2131 */ 2132 static boolean_t 2133 i40e_add_vsi(i40e_t *i40e, i40e_hw_t *hw, uint_t idx) 2134 { 2135 struct i40e_vsi_context ctx; 2136 i40e_rx_group_t *rxg; 2137 int rc; 2138 2139 /* 2140 * The default VSI is created by the controller. This function 2141 * creates new, non-defualt VSIs only. 2142 */ 2143 ASSERT3U(idx, !=, 0); 2144 2145 bzero(&ctx, sizeof (struct i40e_vsi_context)); 2146 ctx.uplink_seid = i40e->i40e_veb_seid; 2147 ctx.pf_num = hw->pf_id; 2148 ctx.flags = I40E_AQ_VSI_TYPE_PF; 2149 ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 2150 i40e_set_shared_vsi_props(i40e, &ctx.info, idx); 2151 2152 rc = i40e_aq_add_vsi(hw, &ctx, NULL); 2153 if (rc != I40E_SUCCESS) { 2154 i40e_error(i40e, "i40e_aq_add_vsi() failed %d: %d", rc, 2155 hw->aq.asq_last_status); 2156 return (B_FALSE); 2157 } 2158 2159 rxg = &i40e->i40e_rx_groups[idx]; 2160 rxg->irg_vsi_seid = ctx.seid; 2161 i40e->i40e_vsis[idx].iv_number = ctx.vsi_number; 2162 i40e->i40e_vsis[idx].iv_seid = ctx.seid; 2163 i40e->i40e_vsis[idx].iv_stats_id = LE_16(ctx.info.stat_counter_idx); 2164 2165 if (i40e_stat_vsi_init(i40e, idx) == B_FALSE) 2166 return (B_FALSE); 2167 2168 return (B_TRUE); 2169 } 2170 2171 /* 2172 * Configure the hardware for the Default Virtual Station Interface (VSI). 2173 */ 2174 static boolean_t 2175 i40e_config_def_vsi(i40e_t *i40e, i40e_hw_t *hw) 2176 { 2177 struct i40e_vsi_context ctx; 2178 i40e_rx_group_t *def_rxg; 2179 int err; 2180 struct i40e_aqc_remove_macvlan_element_data filt; 2181 2182 bzero(&ctx, sizeof (struct i40e_vsi_context)); 2183 ctx.seid = I40E_DEF_VSI_SEID(i40e); 2184 ctx.pf_num = hw->pf_id; 2185 err = i40e_aq_get_vsi_params(hw, &ctx, NULL); 2186 if (err != I40E_SUCCESS) { 2187 i40e_error(i40e, "get VSI params failed with %d", err); 2188 return (B_FALSE); 2189 } 2190 2191 ctx.info.valid_sections = 0; 2192 i40e->i40e_vsis[0].iv_number = ctx.vsi_number; 2193 i40e->i40e_vsis[0].iv_stats_id = LE_16(ctx.info.stat_counter_idx); 2194 if (i40e_stat_vsi_init(i40e, 0) == B_FALSE) 2195 return (B_FALSE); 2196 2197 i40e_set_shared_vsi_props(i40e, &ctx.info, I40E_DEF_VSI_IDX); 2198 2199 err = i40e_aq_update_vsi_params(hw, &ctx, NULL); 2200 if (err != I40E_SUCCESS) { 2201 i40e_error(i40e, "Update VSI params failed with %d", err); 2202 return (B_FALSE); 2203 } 2204 2205 def_rxg = &i40e->i40e_rx_groups[0]; 2206 def_rxg->irg_vsi_seid = I40E_DEF_VSI_SEID(i40e); 2207 2208 /* 2209 * We have seen three different behaviors in regards to the 2210 * Default VSI and its implicit L2 MAC+VLAN filter. 2211 * 2212 * 1. It has an implicit filter for the factory MAC address 2213 * and this filter counts against 'ifr_nmacfilt_used'. 2214 * 2215 * 2. It has an implicit filter for the factory MAC address 2216 * and this filter DOES NOT count against 'ifr_nmacfilt_used'. 2217 * 2218 * 3. It DOES NOT have an implicit filter. 2219 * 2220 * All three of these cases are accounted for below. If we 2221 * fail to remove the L2 filter (ENOENT) then we assume there 2222 * wasn't one. Otherwise, if we successfully remove the 2223 * filter, we make sure to update the 'ifr_nmacfilt_used' 2224 * count accordingly. 2225 * 2226 * We remove this filter to prevent duplicate delivery of 2227 * packets destined for the primary MAC address as DLS will 2228 * create the same filter on a non-default VSI for the primary 2229 * MAC client. 2230 * 2231 * If you change the following code please test it across as 2232 * many X700 series controllers and firmware revisions as you 2233 * can. 2234 */ 2235 bzero(&filt, sizeof (filt)); 2236 bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL); 2237 filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 2238 filt.vlan_tag = 0; 2239 2240 ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1); 2241 i40e_log(i40e, "Num L2 filters: %u", 2242 i40e->i40e_resources.ifr_nmacfilt_used); 2243 2244 err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1, 2245 NULL); 2246 if (err == I40E_SUCCESS) { 2247 i40e_log(i40e, 2248 "Removed L2 filter from Default VSI with SEID %u", 2249 I40E_DEF_VSI_SEID(i40e)); 2250 } else if (hw->aq.asq_last_status == ENOENT) { 2251 i40e_log(i40e, 2252 "No L2 filter for Default VSI with SEID %u", 2253 I40E_DEF_VSI_SEID(i40e)); 2254 } else { 2255 i40e_error(i40e, "Failed to remove L2 filter from" 2256 " Default VSI with SEID %u: %d (%d)", 2257 I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status); 2258 2259 return (B_FALSE); 2260 } 2261 2262 /* 2263 * As mentioned above, the controller created an implicit L2 2264 * filter for the primary MAC. We want to remove both the 2265 * filter and decrement the filter count. However, not all 2266 * controllers count this implicit filter against the total 2267 * MAC filter count. So here we are making sure it is either 2268 * one or zero. If it is one, then we know it is for the 2269 * implicit filter and we should decrement since we just 2270 * removed the filter above. If it is zero then we know the 2271 * controller that does not count the implicit filter, and it 2272 * was enough to just remove it; we leave the count alone. 2273 * But if it is neither, then we have never seen a controller 2274 * like this before and we should fail to attach. 2275 * 2276 * It is unfortunate that this code must exist but the 2277 * behavior of this implicit L2 filter and its corresponding 2278 * count were dicovered through empirical testing. The 2279 * programming manuals hint at this filter but do not 2280 * explicitly call out the exact behavior. 2281 */ 2282 if (i40e->i40e_resources.ifr_nmacfilt_used == 1) { 2283 i40e->i40e_resources.ifr_nmacfilt_used--; 2284 } else { 2285 if (i40e->i40e_resources.ifr_nmacfilt_used != 0) { 2286 i40e_error(i40e, "Unexpected L2 filter count: %u" 2287 " (expected 0)", 2288 i40e->i40e_resources.ifr_nmacfilt_used); 2289 return (B_FALSE); 2290 } 2291 } 2292 2293 return (B_TRUE); 2294 } 2295 2296 static boolean_t 2297 i40e_config_rss_key_x722(i40e_t *i40e, i40e_hw_t *hw) 2298 { 2299 for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) { 2300 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1]; 2301 struct i40e_aqc_get_set_rss_key_data key; 2302 const char *u8seed; 2303 enum i40e_status_code status; 2304 uint16_t vsi_number = i40e->i40e_vsis[i].iv_number; 2305 2306 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed)); 2307 u8seed = (char *)seed; 2308 2309 CTASSERT(sizeof (key) >= (sizeof (key.standard_rss_key) + 2310 sizeof (key.extended_hash_key))); 2311 2312 bcopy(u8seed, key.standard_rss_key, 2313 sizeof (key.standard_rss_key)); 2314 bcopy(&u8seed[sizeof (key.standard_rss_key)], 2315 key.extended_hash_key, sizeof (key.extended_hash_key)); 2316 2317 ASSERT3U(vsi_number, !=, 0); 2318 status = i40e_aq_set_rss_key(hw, vsi_number, &key); 2319 2320 if (status != I40E_SUCCESS) { 2321 i40e_error(i40e, "failed to set RSS key for VSI %u: %d", 2322 vsi_number, status); 2323 return (B_FALSE); 2324 } 2325 } 2326 2327 return (B_TRUE); 2328 } 2329 2330 /* 2331 * Configure the RSS key. For the X710 controller family, this is set on a 2332 * per-PF basis via registers. For the X722, this is done on a per-VSI basis 2333 * through the admin queue. 2334 */ 2335 static boolean_t 2336 i40e_config_rss_key(i40e_t *i40e, i40e_hw_t *hw) 2337 { 2338 if (i40e_is_x722(i40e)) { 2339 if (!i40e_config_rss_key_x722(i40e, hw)) 2340 return (B_FALSE); 2341 } else { 2342 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1]; 2343 2344 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed)); 2345 for (uint_t i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 2346 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed[i]); 2347 } 2348 2349 return (B_TRUE); 2350 } 2351 2352 /* 2353 * Populate the LUT. The size of each entry in the LUT depends on the controller 2354 * family, with the X722 using a known 7-bit width. On the X710 controller, this 2355 * is programmed through its control registers where as on the X722 this is 2356 * configured through the admin queue. Also of note, the X722 allows the LUT to 2357 * be set on a per-PF or VSI basis. At this time we use the PF setting. If we 2358 * decide to use the per-VSI LUT in the future, then we will need to modify the 2359 * i40e_add_vsi() function to set the RSS LUT bits in the queueing section. 2360 * 2361 * We populate the LUT in a round robin fashion with the rx queue indices from 0 2362 * to i40e_num_trqpairs_per_vsi - 1. 2363 */ 2364 static boolean_t 2365 i40e_config_rss_hlut(i40e_t *i40e, i40e_hw_t *hw) 2366 { 2367 uint32_t *hlut; 2368 uint8_t lut_mask; 2369 uint_t i; 2370 boolean_t ret = B_FALSE; 2371 2372 /* 2373 * We always configure the PF with a table size of 512 bytes in 2374 * i40e_chip_start(). 2375 */ 2376 hlut = kmem_alloc(I40E_HLUT_TABLE_SIZE, KM_NOSLEEP); 2377 if (hlut == NULL) { 2378 i40e_error(i40e, "i40e_config_rss() buffer allocation failed"); 2379 return (B_FALSE); 2380 } 2381 2382 /* 2383 * The width of the X722 is apparently defined to be 7 bits, regardless 2384 * of the capability. 2385 */ 2386 if (i40e_is_x722(i40e)) { 2387 lut_mask = (1 << 7) - 1; 2388 } else { 2389 lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1; 2390 } 2391 2392 for (i = 0; i < I40E_HLUT_TABLE_SIZE; i++) { 2393 ((uint8_t *)hlut)[i] = 2394 (i % i40e->i40e_num_trqpairs_per_vsi) & lut_mask; 2395 } 2396 2397 if (i40e_is_x722(i40e)) { 2398 enum i40e_status_code status; 2399 2400 status = i40e_aq_set_rss_lut(hw, 0, B_TRUE, (uint8_t *)hlut, 2401 I40E_HLUT_TABLE_SIZE); 2402 2403 if (status != I40E_SUCCESS) { 2404 i40e_error(i40e, "failed to set RSS LUT %d: %d", 2405 status, hw->aq.asq_last_status); 2406 goto out; 2407 } 2408 } else { 2409 for (i = 0; i < I40E_HLUT_TABLE_SIZE >> 2; i++) { 2410 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]); 2411 } 2412 } 2413 ret = B_TRUE; 2414 out: 2415 kmem_free(hlut, I40E_HLUT_TABLE_SIZE); 2416 return (ret); 2417 } 2418 2419 /* 2420 * Set up RSS. 2421 * 1. Seed the hash key. 2422 * 2. Enable PCTYPEs for the hash filter. 2423 * 3. Populate the LUT. 2424 */ 2425 static boolean_t 2426 i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw) 2427 { 2428 uint64_t hena; 2429 2430 /* 2431 * 1. Seed the hash key 2432 */ 2433 if (!i40e_config_rss_key(i40e, hw)) 2434 return (B_FALSE); 2435 2436 /* 2437 * 2. Configure PCTYPES 2438 */ 2439 hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | 2440 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | 2441 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | 2442 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | 2443 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | 2444 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | 2445 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | 2446 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | 2447 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | 2448 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | 2449 (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD); 2450 2451 /* 2452 * Add additional types supported by the X722 controller. 2453 */ 2454 if (i40e_is_x722(i40e)) { 2455 hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | 2456 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | 2457 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | 2458 (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | 2459 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | 2460 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); 2461 } 2462 2463 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); 2464 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); 2465 2466 /* 2467 * 3. Populate LUT 2468 */ 2469 return (i40e_config_rss_hlut(i40e, hw)); 2470 } 2471 2472 /* 2473 * Wrapper to kick the chipset on. 2474 */ 2475 static boolean_t 2476 i40e_chip_start(i40e_t *i40e) 2477 { 2478 i40e_hw_t *hw = &i40e->i40e_hw_space; 2479 struct i40e_filter_control_settings filter; 2480 int rc; 2481 uint8_t err; 2482 2483 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 2484 (hw->aq.fw_maj_ver < 4)) { 2485 i40e_msec_delay(75); 2486 if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) != 2487 I40E_SUCCESS) { 2488 i40e_error(i40e, "failed to restart link: admin queue " 2489 "error: %d", hw->aq.asq_last_status); 2490 return (B_FALSE); 2491 } 2492 } 2493 2494 /* Determine hardware state */ 2495 i40e_get_hw_state(i40e, hw); 2496 2497 /* For now, we always disable Ethernet Flow Control. */ 2498 hw->fc.requested_mode = I40E_FC_NONE; 2499 rc = i40e_set_fc(hw, &err, B_TRUE); 2500 if (rc != I40E_SUCCESS) { 2501 i40e_error(i40e, "Setting flow control failed, returned %d" 2502 " with error: 0x%x", rc, err); 2503 return (B_FALSE); 2504 } 2505 2506 /* Initialize mac addresses. */ 2507 i40e_init_macaddrs(i40e, hw); 2508 2509 /* 2510 * Set up the filter control. If the hash lut size is changed from 2511 * I40E_HASH_LUT_SIZE_512 then I40E_HLUT_TABLE_SIZE and 2512 * i40e_config_rss_hlut() will need to be updated. 2513 */ 2514 bzero(&filter, sizeof (filter)); 2515 filter.enable_ethtype = TRUE; 2516 filter.enable_macvlan = TRUE; 2517 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; 2518 2519 rc = i40e_set_filter_control(hw, &filter); 2520 if (rc != I40E_SUCCESS) { 2521 i40e_error(i40e, "i40e_set_filter_control() returned %d", rc); 2522 return (B_FALSE); 2523 } 2524 2525 i40e_intr_chip_init(i40e); 2526 2527 rc = i40e_get_mac_seid(i40e); 2528 if (rc == -1) { 2529 i40e_error(i40e, "failed to obtain MAC Uplink SEID"); 2530 return (B_FALSE); 2531 } 2532 i40e->i40e_mac_seid = (uint16_t)rc; 2533 2534 /* 2535 * Create a VEB in order to support multiple VSIs. Each VSI 2536 * functions as a MAC group. This call sets the PF's MAC as 2537 * the uplink port and the PF's default VSI as the default 2538 * downlink port. 2539 */ 2540 rc = i40e_aq_add_veb(hw, i40e->i40e_mac_seid, I40E_DEF_VSI_SEID(i40e), 2541 0x1, B_TRUE, &i40e->i40e_veb_seid, B_FALSE, NULL); 2542 if (rc != I40E_SUCCESS) { 2543 i40e_error(i40e, "i40e_aq_add_veb() failed %d: %d", rc, 2544 hw->aq.asq_last_status); 2545 return (B_FALSE); 2546 } 2547 2548 if (!i40e_config_def_vsi(i40e, hw)) 2549 return (B_FALSE); 2550 2551 for (uint_t i = 1; i < i40e->i40e_num_rx_groups; i++) { 2552 if (!i40e_add_vsi(i40e, hw, i)) 2553 return (B_FALSE); 2554 } 2555 2556 if (!i40e_config_rss(i40e, hw)) 2557 return (B_FALSE); 2558 2559 i40e_flush(hw); 2560 2561 return (B_TRUE); 2562 } 2563 2564 /* 2565 * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information. 2566 */ 2567 static void 2568 i40e_shutdown_rx_rings(i40e_t *i40e) 2569 { 2570 int i; 2571 uint32_t reg; 2572 2573 i40e_hw_t *hw = &i40e->i40e_hw_space; 2574 2575 /* 2576 * Step 1. The interrupt linked list (see i40e_intr.c for more 2577 * information) should have already been cleared before calling this 2578 * function. 2579 */ 2580 #ifdef DEBUG 2581 if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) { 2582 for (i = 1; i < i40e->i40e_intr_count; i++) { 2583 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1)); 2584 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2585 } 2586 } else { 2587 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0); 2588 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2589 } 2590 2591 #endif /* DEBUG */ 2592 2593 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2594 /* 2595 * Step 1. Request the queue by clearing QENA_REQ. It may not be 2596 * set due to unwinding from failures and a partially enabled 2597 * ring set. 2598 */ 2599 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2600 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK)) 2601 continue; 2602 VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) == 2603 I40E_QRX_ENA_QENA_REQ_MASK); 2604 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; 2605 I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg); 2606 } 2607 2608 /* 2609 * Step 2. Wait for the disable to take, by having QENA_STAT in the FPM 2610 * be cleared. Note that we could still receive data in the queue during 2611 * this time. We don't actually wait for this now and instead defer this 2612 * to i40e_shutdown_rings_wait(), after we've interleaved disabling the 2613 * TX queues as well. 2614 */ 2615 } 2616 2617 static void 2618 i40e_shutdown_tx_rings(i40e_t *i40e) 2619 { 2620 int i; 2621 uint32_t reg; 2622 2623 i40e_hw_t *hw = &i40e->i40e_hw_space; 2624 2625 /* 2626 * Step 1. The interrupt linked list should already have been cleared. 2627 */ 2628 #ifdef DEBUG 2629 if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) { 2630 for (i = 1; i < i40e->i40e_intr_count; i++) { 2631 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1)); 2632 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2633 } 2634 } else { 2635 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0); 2636 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2637 2638 } 2639 #endif /* DEBUG */ 2640 2641 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2642 /* 2643 * Step 2. Set the SET_QDIS flag for every queue. 2644 */ 2645 i40e_pre_tx_queue_cfg(hw, i, B_FALSE); 2646 } 2647 2648 /* 2649 * Step 3. Wait at least 400 usec (can be done once for all queues). 2650 */ 2651 drv_usecwait(500); 2652 2653 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2654 /* 2655 * Step 4. Clear the QENA_REQ flag which tells hardware to 2656 * quiesce. If QENA_REQ is not already set then that means that 2657 * we likely already tried to disable this queue. 2658 */ 2659 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2660 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK)) 2661 continue; 2662 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 2663 I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg); 2664 } 2665 2666 /* 2667 * Step 5. Wait for all drains to finish. This will be done by the 2668 * hardware removing the QENA_STAT flag from the queue. Rather than 2669 * waiting here, we interleave it with all the others in 2670 * i40e_shutdown_rings_wait(). 2671 */ 2672 } 2673 2674 /* 2675 * Wait for all the rings to be shut down. e.g. Steps 2 and 5 from the above 2676 * functions. 2677 */ 2678 static boolean_t 2679 i40e_shutdown_rings_wait(i40e_t *i40e) 2680 { 2681 int i, try; 2682 i40e_hw_t *hw = &i40e->i40e_hw_space; 2683 2684 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2685 uint32_t reg; 2686 2687 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) { 2688 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2689 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) 2690 break; 2691 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 2692 } 2693 2694 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) { 2695 i40e_error(i40e, "timed out disabling rx queue %d", 2696 i); 2697 return (B_FALSE); 2698 } 2699 2700 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) { 2701 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2702 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2703 break; 2704 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 2705 } 2706 2707 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) { 2708 i40e_error(i40e, "timed out disabling tx queue %d", 2709 i); 2710 return (B_FALSE); 2711 } 2712 } 2713 2714 return (B_TRUE); 2715 } 2716 2717 static boolean_t 2718 i40e_shutdown_rings(i40e_t *i40e) 2719 { 2720 i40e_shutdown_rx_rings(i40e); 2721 i40e_shutdown_tx_rings(i40e); 2722 return (i40e_shutdown_rings_wait(i40e)); 2723 } 2724 2725 static void 2726 i40e_setup_rx_descs(i40e_trqpair_t *itrq) 2727 { 2728 int i; 2729 i40e_rx_data_t *rxd = itrq->itrq_rxdata; 2730 2731 for (i = 0; i < rxd->rxd_ring_size; i++) { 2732 i40e_rx_control_block_t *rcb; 2733 i40e_rx_desc_t *rdesc; 2734 2735 rcb = rxd->rxd_work_list[i]; 2736 rdesc = &rxd->rxd_desc_ring[i]; 2737 2738 rdesc->read.pkt_addr = 2739 CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address); 2740 rdesc->read.hdr_addr = 0; 2741 } 2742 } 2743 2744 static boolean_t 2745 i40e_setup_rx_hmc(i40e_trqpair_t *itrq) 2746 { 2747 i40e_rx_data_t *rxd = itrq->itrq_rxdata; 2748 i40e_t *i40e = itrq->itrq_i40e; 2749 i40e_hw_t *hw = &i40e->i40e_hw_space; 2750 2751 struct i40e_hmc_obj_rxq rctx; 2752 int err; 2753 2754 bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq)); 2755 rctx.base = rxd->rxd_desc_area.dmab_dma_address / 2756 I40E_HMC_RX_CTX_UNIT; 2757 rctx.qlen = rxd->rxd_ring_size; 2758 VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN); 2759 VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX); 2760 rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 2761 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; 2762 rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT; 2763 rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE; 2764 rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE; 2765 rctx.fc_ena = I40E_HMC_RX_FC_DISABLE; 2766 rctx.l2tsel = I40E_HMC_RX_L2TAGORDER; 2767 rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE; 2768 rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE; 2769 rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP; 2770 rctx.rxmax = i40e->i40e_frame_max; 2771 rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE; 2772 rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE; 2773 rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE; 2774 rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE; 2775 rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR; 2776 2777 /* 2778 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2. 2779 */ 2780 rctx.prefena = I40E_HMC_RX_PREFENA; 2781 2782 err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index); 2783 if (err != I40E_SUCCESS) { 2784 i40e_error(i40e, "failed to clear rx queue %d context: %d", 2785 itrq->itrq_index, err); 2786 return (B_FALSE); 2787 } 2788 2789 err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx); 2790 if (err != I40E_SUCCESS) { 2791 i40e_error(i40e, "failed to set rx queue %d context: %d", 2792 itrq->itrq_index, err); 2793 return (B_FALSE); 2794 } 2795 2796 return (B_TRUE); 2797 } 2798 2799 /* 2800 * Take care of setting up the descriptor rings and actually programming the 2801 * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the 2802 * rx rings. 2803 */ 2804 static boolean_t 2805 i40e_setup_rx_rings(i40e_t *i40e) 2806 { 2807 int i; 2808 i40e_hw_t *hw = &i40e->i40e_hw_space; 2809 2810 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2811 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i]; 2812 i40e_rx_data_t *rxd = itrq->itrq_rxdata; 2813 uint32_t reg; 2814 2815 /* 2816 * Step 1. Program all receive ring descriptors. 2817 */ 2818 i40e_setup_rx_descs(itrq); 2819 2820 /* 2821 * Step 2. Program the queue's FPM/HMC context. 2822 */ 2823 if (i40e_setup_rx_hmc(itrq) == B_FALSE) 2824 return (B_FALSE); 2825 2826 /* 2827 * Step 3. Clear the queue's tail pointer and set it to the end 2828 * of the space. 2829 */ 2830 I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), 0); 2831 I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), rxd->rxd_ring_size - 1); 2832 2833 /* 2834 * Step 4. Enable the queue via the QENA_REQ. 2835 */ 2836 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2837 VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK | 2838 I40E_QRX_ENA_QENA_STAT_MASK)); 2839 reg |= I40E_QRX_ENA_QENA_REQ_MASK; 2840 I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg); 2841 } 2842 2843 /* 2844 * Note, we wait for every queue to be enabled before we start checking. 2845 * This will hopefully cause most queues to be enabled at this point. 2846 */ 2847 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2848 uint32_t j, reg; 2849 2850 /* 2851 * Step 5. Verify that QENA_STAT has been set. It's promised 2852 * that this should occur within about 10 us, but like other 2853 * systems, we give the card a bit more time. 2854 */ 2855 for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) { 2856 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2857 2858 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) 2859 break; 2860 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 2861 } 2862 2863 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { 2864 i40e_error(i40e, "failed to enable rx queue %d, timed " 2865 "out.", i); 2866 return (B_FALSE); 2867 } 2868 } 2869 2870 return (B_TRUE); 2871 } 2872 2873 static boolean_t 2874 i40e_setup_tx_hmc(i40e_trqpair_t *itrq) 2875 { 2876 i40e_t *i40e = itrq->itrq_i40e; 2877 i40e_hw_t *hw = &i40e->i40e_hw_space; 2878 2879 struct i40e_hmc_obj_txq tctx; 2880 struct i40e_vsi_context context; 2881 int err; 2882 2883 bzero(&tctx, sizeof (struct i40e_hmc_obj_txq)); 2884 tctx.new_context = I40E_HMC_TX_NEW_CONTEXT; 2885 tctx.base = itrq->itrq_desc_area.dmab_dma_address / 2886 I40E_HMC_TX_CTX_UNIT; 2887 tctx.fc_ena = I40E_HMC_TX_FC_DISABLE; 2888 tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE; 2889 tctx.fd_ena = I40E_HMC_TX_FD_DISABLE; 2890 tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE; 2891 tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE; 2892 tctx.qlen = itrq->itrq_tx_ring_size; 2893 tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE; 2894 tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE; 2895 tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE; 2896 tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address + 2897 sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size; 2898 2899 /* 2900 * This field isn't actually documented, like crc, but it suggests that 2901 * it should be zeroed. We leave both of these here because of that for 2902 * now. We should check with Intel on why these are here even. 2903 */ 2904 tctx.crc = 0; 2905 tctx.rdylist_act = 0; 2906 2907 /* 2908 * We're supposed to assign the rdylist field with the value of the 2909 * traffic class index for the first device. We query the VSI parameters 2910 * again to get what the handle is. Note that every queue is always 2911 * assigned to traffic class zero, because we don't actually use them. 2912 */ 2913 bzero(&context, sizeof (struct i40e_vsi_context)); 2914 context.seid = I40E_DEF_VSI_SEID(i40e); 2915 context.pf_num = hw->pf_id; 2916 err = i40e_aq_get_vsi_params(hw, &context, NULL); 2917 if (err != I40E_SUCCESS) { 2918 i40e_error(i40e, "get VSI params failed with %d", err); 2919 return (B_FALSE); 2920 } 2921 tctx.rdylist = LE_16(context.info.qs_handle[0]); 2922 2923 err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index); 2924 if (err != I40E_SUCCESS) { 2925 i40e_error(i40e, "failed to clear tx queue %d context: %d", 2926 itrq->itrq_index, err); 2927 return (B_FALSE); 2928 } 2929 2930 err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx); 2931 if (err != I40E_SUCCESS) { 2932 i40e_error(i40e, "failed to set tx queue %d context: %d", 2933 itrq->itrq_index, err); 2934 return (B_FALSE); 2935 } 2936 2937 return (B_TRUE); 2938 } 2939 2940 /* 2941 * Take care of setting up the descriptor rings and actually programming the 2942 * device. See 8.4.3.1.1 for what we need to do here. 2943 */ 2944 static boolean_t 2945 i40e_setup_tx_rings(i40e_t *i40e) 2946 { 2947 int i; 2948 i40e_hw_t *hw = &i40e->i40e_hw_space; 2949 2950 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2951 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i]; 2952 uint32_t reg; 2953 2954 /* 2955 * Step 1. Clear the queue disable flag and verify that the 2956 * index is set correctly. 2957 */ 2958 i40e_pre_tx_queue_cfg(hw, i, B_TRUE); 2959 2960 /* 2961 * Step 2. Prepare the queue's FPM/HMC context. 2962 */ 2963 if (i40e_setup_tx_hmc(itrq) == B_FALSE) 2964 return (B_FALSE); 2965 2966 /* 2967 * Step 3. Verify that it's clear that this PF owns this queue. 2968 */ 2969 reg = I40E_QTX_CTL_PF_QUEUE; 2970 reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & 2971 I40E_QTX_CTL_PF_INDX_MASK; 2972 I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg); 2973 i40e_flush(hw); 2974 2975 /* 2976 * Step 4. Set the QENA_REQ flag. 2977 */ 2978 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2979 VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK | 2980 I40E_QTX_ENA_QENA_STAT_MASK)); 2981 reg |= I40E_QTX_ENA_QENA_REQ_MASK; 2982 I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg); 2983 } 2984 2985 /* 2986 * Note, we wait for every queue to be enabled before we start checking. 2987 * This will hopefully cause most queues to be enabled at this point. 2988 */ 2989 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2990 uint32_t j, reg; 2991 2992 /* 2993 * Step 5. Verify that QENA_STAT has been set. It's promised 2994 * that this should occur within about 10 us, but like BSD, 2995 * we'll try for up to 100 ms for this queue. 2996 */ 2997 for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) { 2998 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2999 3000 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) 3001 break; 3002 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 3003 } 3004 3005 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { 3006 i40e_error(i40e, "failed to enable tx queue %d, timed " 3007 "out", i); 3008 return (B_FALSE); 3009 } 3010 } 3011 3012 return (B_TRUE); 3013 } 3014 3015 void 3016 i40e_stop(i40e_t *i40e, boolean_t free_allocations) 3017 { 3018 uint_t i; 3019 i40e_hw_t *hw = &i40e->i40e_hw_space; 3020 3021 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 3022 3023 /* 3024 * Shutdown and drain the tx and rx pipeline. We do this using the 3025 * following steps. 3026 * 3027 * 1) Shutdown interrupts to all the queues (trying to keep the admin 3028 * queue alive). 3029 * 3030 * 2) Remove all of the interrupt tx and rx causes by setting the 3031 * interrupt linked lists to zero. 3032 * 3033 * 2) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should 3034 * wait for all the queues to be disabled, once we reach that point 3035 * it should be safe to free associated data. 3036 * 3037 * 4) Wait 50ms after all that is done. This ensures that the rings are 3038 * ready for programming again and we don't have to think about this 3039 * in other parts of the driver. 3040 * 3041 * 5) Disable remaining chip interrupts, (admin queue, etc.) 3042 * 3043 * 6) Verify that FM is happy with all the register accesses we 3044 * performed. 3045 */ 3046 i40e_intr_io_disable_all(i40e); 3047 i40e_intr_io_clear_cause(i40e); 3048 3049 if (i40e_shutdown_rings(i40e) == B_FALSE) { 3050 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3051 } 3052 3053 delay(50 * drv_usectohz(1000)); 3054 3055 /* 3056 * We don't delete the default VSI because it replaces the VEB 3057 * after VEB deletion (see the "Delete Element" section). 3058 * Furthermore, since the default VSI is provided by the 3059 * firmware, we never attempt to delete it. 3060 */ 3061 for (i = 1; i < i40e->i40e_num_rx_groups; i++) { 3062 i40e_delete_vsi(i40e, i); 3063 } 3064 3065 if (i40e->i40e_veb_seid != 0) { 3066 int rc = i40e_aq_delete_element(hw, i40e->i40e_veb_seid, NULL); 3067 3068 if (rc != I40E_SUCCESS) { 3069 i40e_error(i40e, "Failed to delete VEB %d: %d", rc, 3070 hw->aq.asq_last_status); 3071 } 3072 3073 i40e->i40e_veb_seid = 0; 3074 } 3075 3076 i40e_intr_chip_fini(i40e); 3077 3078 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3079 mutex_enter(&i40e->i40e_trqpairs[i].itrq_rx_lock); 3080 mutex_enter(&i40e->i40e_trqpairs[i].itrq_tx_lock); 3081 } 3082 3083 /* 3084 * We should consider refactoring this to be part of the ring start / 3085 * stop routines at some point. 3086 */ 3087 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3088 i40e_stats_trqpair_fini(&i40e->i40e_trqpairs[i]); 3089 } 3090 3091 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) != 3092 DDI_FM_OK) { 3093 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3094 } 3095 3096 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3097 i40e_tx_cleanup_ring(&i40e->i40e_trqpairs[i]); 3098 } 3099 3100 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3101 mutex_exit(&i40e->i40e_trqpairs[i].itrq_rx_lock); 3102 mutex_exit(&i40e->i40e_trqpairs[i].itrq_tx_lock); 3103 } 3104 3105 for (i = 0; i < i40e->i40e_num_rx_groups; i++) { 3106 i40e_stat_vsi_fini(i40e, i); 3107 } 3108 3109 i40e->i40e_link_speed = 0; 3110 i40e->i40e_link_duplex = 0; 3111 i40e_link_state_set(i40e, LINK_STATE_UNKNOWN); 3112 3113 if (free_allocations) { 3114 i40e_free_ring_mem(i40e, B_FALSE); 3115 } 3116 } 3117 3118 boolean_t 3119 i40e_start(i40e_t *i40e, boolean_t alloc) 3120 { 3121 i40e_hw_t *hw = &i40e->i40e_hw_space; 3122 boolean_t rc = B_TRUE; 3123 int i, err; 3124 3125 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 3126 3127 if (alloc) { 3128 if (i40e_alloc_ring_mem(i40e) == B_FALSE) { 3129 i40e_error(i40e, 3130 "Failed to allocate ring memory"); 3131 return (B_FALSE); 3132 } 3133 } 3134 3135 /* 3136 * This should get refactored to be part of ring start and stop at 3137 * some point, along with most of the logic here. 3138 */ 3139 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3140 if (i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i]) == 3141 B_FALSE) { 3142 int j; 3143 3144 for (j = 0; j < i; j++) { 3145 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[j]; 3146 i40e_stats_trqpair_fini(itrq); 3147 } 3148 return (B_FALSE); 3149 } 3150 } 3151 3152 if (!i40e_chip_start(i40e)) { 3153 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE); 3154 rc = B_FALSE; 3155 goto done; 3156 } 3157 3158 if (i40e_setup_rx_rings(i40e) == B_FALSE) { 3159 rc = B_FALSE; 3160 goto done; 3161 } 3162 3163 if (i40e_setup_tx_rings(i40e) == B_FALSE) { 3164 rc = B_FALSE; 3165 goto done; 3166 } 3167 3168 /* 3169 * Enable broadcast traffic; however, do not enable multicast traffic. 3170 * That's handle exclusively through MAC's mc_multicst routines. 3171 */ 3172 err = i40e_aq_set_vsi_broadcast(hw, I40E_DEF_VSI_SEID(i40e), B_TRUE, 3173 NULL); 3174 if (err != I40E_SUCCESS) { 3175 i40e_error(i40e, "failed to set default VSI: %d", err); 3176 rc = B_FALSE; 3177 goto done; 3178 } 3179 3180 err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0, NULL); 3181 if (err != I40E_SUCCESS) { 3182 i40e_error(i40e, "failed to set MAC config: %d", err); 3183 rc = B_FALSE; 3184 goto done; 3185 } 3186 3187 /* 3188 * Finally, make sure that we're happy from an FM perspective. 3189 */ 3190 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) != 3191 DDI_FM_OK) { 3192 rc = B_FALSE; 3193 goto done; 3194 } 3195 3196 /* Clear state bits prior to final interrupt enabling. */ 3197 atomic_and_32(&i40e->i40e_state, 3198 ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP)); 3199 3200 i40e_intr_io_enable_all(i40e); 3201 3202 done: 3203 if (rc == B_FALSE) { 3204 i40e_stop(i40e, B_FALSE); 3205 if (alloc == B_TRUE) { 3206 i40e_free_ring_mem(i40e, B_TRUE); 3207 } 3208 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3209 } 3210 3211 return (rc); 3212 } 3213 3214 /* 3215 * We may have loaned up descriptors to the stack. As such, if we still have 3216 * them outstanding, then we will not continue with detach. 3217 */ 3218 static boolean_t 3219 i40e_drain_rx(i40e_t *i40e) 3220 { 3221 mutex_enter(&i40e->i40e_rx_pending_lock); 3222 while (i40e->i40e_rx_pending > 0) { 3223 if (cv_reltimedwait(&i40e->i40e_rx_pending_cv, 3224 &i40e->i40e_rx_pending_lock, 3225 drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) { 3226 mutex_exit(&i40e->i40e_rx_pending_lock); 3227 return (B_FALSE); 3228 } 3229 } 3230 mutex_exit(&i40e->i40e_rx_pending_lock); 3231 3232 return (B_TRUE); 3233 } 3234 3235 /* 3236 * DDI UFM Callbacks 3237 */ 3238 static int 3239 i40e_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 3240 ddi_ufm_image_t *img) 3241 { 3242 if (imgno != 0) 3243 return (EINVAL); 3244 3245 ddi_ufm_image_set_desc(img, "Firmware"); 3246 ddi_ufm_image_set_nslots(img, 1); 3247 3248 return (0); 3249 } 3250 3251 static int 3252 i40e_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 3253 uint_t slotno, ddi_ufm_slot_t *slot) 3254 { 3255 i40e_t *i40e = (i40e_t *)arg; 3256 char *fw_ver = NULL, *fw_bld = NULL, *api_ver = NULL; 3257 nvlist_t *misc = NULL; 3258 uint_t flags = DDI_PROP_DONTPASS; 3259 int err; 3260 3261 if (imgno != 0 || slotno != 0 || 3262 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags, 3263 "firmware-version", &fw_ver) != DDI_PROP_SUCCESS || 3264 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags, 3265 "firmware-build", &fw_bld) != DDI_PROP_SUCCESS || 3266 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags, 3267 "api-version", &api_ver) != DDI_PROP_SUCCESS) { 3268 err = EINVAL; 3269 goto err; 3270 } 3271 3272 ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE); 3273 ddi_ufm_slot_set_version(slot, fw_ver); 3274 3275 (void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP); 3276 if ((err = nvlist_add_string(misc, "firmware-build", fw_bld)) != 0 || 3277 (err = nvlist_add_string(misc, "api-version", api_ver)) != 0) { 3278 goto err; 3279 } 3280 ddi_ufm_slot_set_misc(slot, misc); 3281 3282 ddi_prop_free(fw_ver); 3283 ddi_prop_free(fw_bld); 3284 ddi_prop_free(api_ver); 3285 3286 return (0); 3287 err: 3288 nvlist_free(misc); 3289 if (fw_ver != NULL) 3290 ddi_prop_free(fw_ver); 3291 if (fw_bld != NULL) 3292 ddi_prop_free(fw_bld); 3293 if (api_ver != NULL) 3294 ddi_prop_free(api_ver); 3295 3296 return (err); 3297 } 3298 3299 static int 3300 i40e_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps) 3301 { 3302 *caps = DDI_UFM_CAP_REPORT; 3303 3304 return (0); 3305 } 3306 3307 static ddi_ufm_ops_t i40e_ufm_ops = { 3308 NULL, 3309 i40e_ufm_fill_image, 3310 i40e_ufm_fill_slot, 3311 i40e_ufm_getcaps 3312 }; 3313 3314 static int 3315 i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3316 { 3317 i40e_t *i40e; 3318 struct i40e_osdep *osdep; 3319 i40e_hw_t *hw; 3320 int instance; 3321 3322 if (cmd != DDI_ATTACH) 3323 return (DDI_FAILURE); 3324 3325 instance = ddi_get_instance(devinfo); 3326 i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP); 3327 3328 i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP); 3329 i40e->i40e_instance = instance; 3330 i40e->i40e_dip = devinfo; 3331 3332 hw = &i40e->i40e_hw_space; 3333 osdep = &i40e->i40e_osdep_space; 3334 hw->back = osdep; 3335 osdep->ios_i40e = i40e; 3336 3337 ddi_set_driver_private(devinfo, i40e); 3338 3339 i40e_fm_init(i40e); 3340 i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT; 3341 3342 if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) { 3343 i40e_error(i40e, "Failed to map PCI configurations."); 3344 goto attach_fail; 3345 } 3346 i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG; 3347 3348 i40e_identify_hardware(i40e); 3349 3350 if (!i40e_regs_map(i40e)) { 3351 i40e_error(i40e, "Failed to map device registers."); 3352 goto attach_fail; 3353 } 3354 i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP; 3355 3356 i40e_init_properties(i40e); 3357 i40e->i40e_attach_progress |= I40E_ATTACH_PROPS; 3358 3359 if (!i40e_common_code_init(i40e, hw)) 3360 goto attach_fail; 3361 i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE; 3362 3363 /* 3364 * When we participate in IRM, we should make sure that we register 3365 * ourselves with it before callbacks. 3366 */ 3367 if (!i40e_alloc_intrs(i40e, devinfo)) { 3368 i40e_error(i40e, "Failed to allocate interrupts."); 3369 goto attach_fail; 3370 } 3371 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR; 3372 3373 if (!i40e_alloc_trqpairs(i40e)) { 3374 i40e_error(i40e, 3375 "Failed to allocate receive & transmit rings."); 3376 goto attach_fail; 3377 } 3378 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS; 3379 3380 if (!i40e_map_intrs_to_vectors(i40e)) { 3381 i40e_error(i40e, "Failed to map interrupts to vectors."); 3382 goto attach_fail; 3383 } 3384 3385 if (!i40e_add_intr_handlers(i40e)) { 3386 i40e_error(i40e, "Failed to add the interrupt handlers."); 3387 goto attach_fail; 3388 } 3389 i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR; 3390 3391 if (!i40e_final_init(i40e)) { 3392 i40e_error(i40e, "Final initialization failed."); 3393 goto attach_fail; 3394 } 3395 i40e->i40e_attach_progress |= I40E_ATTACH_INIT; 3396 3397 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) != 3398 DDI_FM_OK) { 3399 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3400 goto attach_fail; 3401 } 3402 3403 if (!i40e_stats_init(i40e)) { 3404 i40e_error(i40e, "Stats initialization failed."); 3405 goto attach_fail; 3406 } 3407 i40e->i40e_attach_progress |= I40E_ATTACH_STATS; 3408 3409 if (!i40e_register_mac(i40e)) { 3410 i40e_error(i40e, "Failed to register to MAC/GLDv3"); 3411 goto attach_fail; 3412 } 3413 i40e->i40e_attach_progress |= I40E_ATTACH_MAC; 3414 3415 i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e, 3416 I40E_CYCLIC_PERIOD, DDI_IPL_0); 3417 if (i40e->i40e_periodic_id == 0) { 3418 i40e_error(i40e, "Failed to add the link-check timer"); 3419 goto attach_fail; 3420 } 3421 i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER; 3422 3423 if (!i40e_enable_interrupts(i40e)) { 3424 i40e_error(i40e, "Failed to enable DDI interrupts"); 3425 goto attach_fail; 3426 } 3427 i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR; 3428 3429 if (i40e->i40e_hw_space.bus.func == 0) { 3430 if (ddi_ufm_init(i40e->i40e_dip, DDI_UFM_CURRENT_VERSION, 3431 &i40e_ufm_ops, &i40e->i40e_ufmh, i40e) != 0) { 3432 i40e_error(i40e, "failed to initialize UFM subsystem"); 3433 goto attach_fail; 3434 } 3435 ddi_ufm_update(i40e->i40e_ufmh); 3436 i40e->i40e_attach_progress |= I40E_ATTACH_UFM_INIT; 3437 } 3438 3439 atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED); 3440 3441 mutex_enter(&i40e_glock); 3442 list_insert_tail(&i40e_glist, i40e); 3443 mutex_exit(&i40e_glock); 3444 3445 return (DDI_SUCCESS); 3446 3447 attach_fail: 3448 i40e_unconfigure(devinfo, i40e); 3449 return (DDI_FAILURE); 3450 } 3451 3452 static int 3453 i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3454 { 3455 i40e_t *i40e; 3456 3457 if (cmd != DDI_DETACH) 3458 return (DDI_FAILURE); 3459 3460 i40e = (i40e_t *)ddi_get_driver_private(devinfo); 3461 if (i40e == NULL) { 3462 i40e_log(NULL, "i40e_detach() called with no i40e pointer!"); 3463 return (DDI_FAILURE); 3464 } 3465 3466 if (i40e_drain_rx(i40e) == B_FALSE) { 3467 i40e_log(i40e, "timed out draining DMA resources, %d buffers " 3468 "remain", i40e->i40e_rx_pending); 3469 return (DDI_FAILURE); 3470 } 3471 3472 mutex_enter(&i40e_glock); 3473 list_remove(&i40e_glist, i40e); 3474 mutex_exit(&i40e_glock); 3475 3476 i40e_unconfigure(devinfo, i40e); 3477 3478 return (DDI_SUCCESS); 3479 } 3480 3481 static struct cb_ops i40e_cb_ops = { 3482 nulldev, /* cb_open */ 3483 nulldev, /* cb_close */ 3484 nodev, /* cb_strategy */ 3485 nodev, /* cb_print */ 3486 nodev, /* cb_dump */ 3487 nodev, /* cb_read */ 3488 nodev, /* cb_write */ 3489 nodev, /* cb_ioctl */ 3490 nodev, /* cb_devmap */ 3491 nodev, /* cb_mmap */ 3492 nodev, /* cb_segmap */ 3493 nochpoll, /* cb_chpoll */ 3494 ddi_prop_op, /* cb_prop_op */ 3495 NULL, /* cb_stream */ 3496 D_MP | D_HOTPLUG, /* cb_flag */ 3497 CB_REV, /* cb_rev */ 3498 nodev, /* cb_aread */ 3499 nodev /* cb_awrite */ 3500 }; 3501 3502 static struct dev_ops i40e_dev_ops = { 3503 DEVO_REV, /* devo_rev */ 3504 0, /* devo_refcnt */ 3505 NULL, /* devo_getinfo */ 3506 nulldev, /* devo_identify */ 3507 nulldev, /* devo_probe */ 3508 i40e_attach, /* devo_attach */ 3509 i40e_detach, /* devo_detach */ 3510 nodev, /* devo_reset */ 3511 &i40e_cb_ops, /* devo_cb_ops */ 3512 NULL, /* devo_bus_ops */ 3513 ddi_power, /* devo_power */ 3514 ddi_quiesce_not_supported /* devo_quiesce */ 3515 }; 3516 3517 static struct modldrv i40e_modldrv = { 3518 &mod_driverops, 3519 i40e_ident, 3520 &i40e_dev_ops 3521 }; 3522 3523 static struct modlinkage i40e_modlinkage = { 3524 MODREV_1, 3525 &i40e_modldrv, 3526 NULL 3527 }; 3528 3529 /* 3530 * Module Initialization Functions. 3531 */ 3532 int 3533 _init(void) 3534 { 3535 int status; 3536 3537 list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink)); 3538 list_create(&i40e_dlist, sizeof (i40e_device_t), 3539 offsetof(i40e_device_t, id_link)); 3540 mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL); 3541 mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME); 3542 3543 status = mod_install(&i40e_modlinkage); 3544 if (status != DDI_SUCCESS) { 3545 mac_fini_ops(&i40e_dev_ops); 3546 mutex_destroy(&i40e_glock); 3547 list_destroy(&i40e_dlist); 3548 list_destroy(&i40e_glist); 3549 } 3550 3551 return (status); 3552 } 3553 3554 int 3555 _info(struct modinfo *modinfop) 3556 { 3557 return (mod_info(&i40e_modlinkage, modinfop)); 3558 } 3559 3560 int 3561 _fini(void) 3562 { 3563 int status; 3564 3565 status = mod_remove(&i40e_modlinkage); 3566 if (status == DDI_SUCCESS) { 3567 mac_fini_ops(&i40e_dev_ops); 3568 mutex_destroy(&i40e_glock); 3569 list_destroy(&i40e_dlist); 3570 list_destroy(&i40e_glist); 3571 } 3572 3573 return (status); 3574 } 3575