1678453a8Sspeer /* 2678453a8Sspeer * CDDL HEADER START 3678453a8Sspeer * 4678453a8Sspeer * The contents of this file are subject to the terms of the 5678453a8Sspeer * Common Development and Distribution License (the "License"). 6678453a8Sspeer * You may not use this file except in compliance with the License. 7678453a8Sspeer * 8678453a8Sspeer * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9678453a8Sspeer * or http://www.opensolaris.org/os/licensing. 10678453a8Sspeer * See the License for the specific language governing permissions 11678453a8Sspeer * and limitations under the License. 12678453a8Sspeer * 13678453a8Sspeer * When distributing Covered Code, include this CDDL HEADER in each 14678453a8Sspeer * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15678453a8Sspeer * If applicable, add the following below this CDDL HEADER, with the 16678453a8Sspeer * fields enclosed by brackets "[]" replaced with your own identifying 17678453a8Sspeer * information: Portions Copyright [yyyy] [name of copyright owner] 18678453a8Sspeer * 19678453a8Sspeer * CDDL HEADER END 20678453a8Sspeer */ 21678453a8Sspeer 22678453a8Sspeer /* 23*0dc2366fSVenugopal Iyer * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24678453a8Sspeer * Use is subject to license terms. 25678453a8Sspeer */ 26678453a8Sspeer 27678453a8Sspeer /* 28678453a8Sspeer * nxge_hio_guest.c 29678453a8Sspeer * 30678453a8Sspeer * This file manages the virtualization resources for a guest domain. 31678453a8Sspeer * 32678453a8Sspeer */ 33678453a8Sspeer 34678453a8Sspeer #include <sys/nxge/nxge_impl.h> 35678453a8Sspeer #include <sys/nxge/nxge_fzc.h> 36678453a8Sspeer #include <sys/nxge/nxge_rxdma.h> 37678453a8Sspeer #include <sys/nxge/nxge_txdma.h> 38678453a8Sspeer #include <sys/nxge/nxge_hio.h> 39678453a8Sspeer 40678453a8Sspeer /* 41678453a8Sspeer * nxge_guest_regs_map 42678453a8Sspeer * 43678453a8Sspeer * Map in a guest domain's register set(s). 
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	Note that we set <is_vraddr> to TRUE: in a guest domain every
 *	register access goes through the guest's virtual region, not a
 *	direct PIO mapping.
 *
 * Context:
 *	Guest domain
 */
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

int
nxge_guest_regs_map(nxge_t *nxge)
{
	dev_regs_t	*regs;
	off_t		regsize;
	int		rv;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));

	/* So we can allocate properly-aligned memory. */
	nxge->niu_type = N2_NIU;	/* Version 1.0 only */
	nxge->function_num = nxge->instance;	/* HIOXXX Looking for ideas. */

	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	regs = nxge->dev_regs;

	/*
	 * NOTE(review): on the two failure paths below <nxge->dev_regs>
	 * stays allocated; presumably the caller unwinds through
	 * nxge_guest_regs_map_free() -- confirm against the attach path.
	 */
	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		return (NXGE_ERROR);
	}

	/*
	 * Map register set 0.  Per ddi_regs_map_setup(9F), offset 0 with
	 * len 0 maps the entire register space.
	 */
	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
	    &nxge_guest_register_access_attributes, &regs->nxge_regh);

	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		return (NXGE_ERROR);
	}

	/* The primary NPI handle. */
	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;

	/* NPI_REG_ADD_HANDLE_SET() */
	nxge->npi_reg_handle.regh = regs->nxge_regh;
	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_reg_handle.is_vraddr = B_TRUE;
	nxge->npi_reg_handle.function.instance = nxge->instance;
	nxge->npi_reg_handle.function.function = nxge->function_num;
	nxge->npi_reg_handle.nxgep = (void *)nxge;

	/* NPI_VREG_ADD_HANDLE_SET() */
	nxge->npi_vreg_handle.regh = regs->nxge_regh;
	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
	nxge->npi_vreg_handle.function.instance = nxge->instance;
	nxge->npi_vreg_handle.function.function = nxge->function_num;
	nxge->npi_vreg_handle.nxgep = (void *)nxge;

	/* In a guest, the virtual registers ARE the registers. */
	regs->nxge_vir_regp = regs->nxge_regp;
	regs->nxge_vir_regh = regs->nxge_regh;

	/*
	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
	 * or FCODE reg variables.
	 */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));

	return (NXGE_OK);
}

/*
 * nxge_guest_regs_map_free
 *
 * Undo nxge_guest_regs_map(): unmap the register set (if it was
 * mapped) and free the dev_regs_t.  Safe to call when the mapping
 * was never made (both checks below tolerate that).
 *
 * Arguments:
 * 	nxge
 *
 * Context:
 *	Guest domain
 */
void
nxge_guest_regs_map_free(
	nxge_t *nxge)
{
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));

	if (nxge->dev_regs) {
		if (nxge->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
			nxge->dev_regs->nxge_regh = NULL;
		}
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = 0;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
}

#if defined(sun4v)

/*
 * -------------------------------------------------------------
 * Local prototypes
 * -------------------------------------------------------------
 */
static nxge_hio_dc_t *nxge_guest_dc_alloc(
	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);

static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
static void
nxge_check_guest_state(nxge_hio_vr_t *);

/*
 * nxge_hio_vr_add
 *
 * If we have been given a virtualization region (VR),
 * then initialize it.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	Returns DDI_SUCCESS or DDI_FAILURE (not NXGE_OK/NXGE_ERROR).
 *
 *	NOTE(review): the failure paths taken after the VR slot is
 *	claimed below do not clear nhd->vr[vr_index].nxge; presumably
 *	nxge_hio_vr_release() handles that on teardown -- confirm.
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern nxge_status_t nxge_mac_register(p_nxge_t);

	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t	*vr;
	nxge_hio_dc_t	*dc;
	int		*reg_val;
	uint_t		reg_len;
	uint8_t		vr_index;
	nxhv_vr_fp_t	*fp;
	uint64_t	vr_address, vr_size;
	uint32_t	cookie;
	nxhv_dc_fp_t	*tx, *rx;
	uint64_t	tx_map, rx_map;
	uint64_t	hv_rv;
	int		i;
	nxge_status_t	status;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));

	if (nhd->type == NXGE_HIO_TYPE_SERVICE) {
		/*
		 * Can't add VR to the service domain from which we came.
		 */
		ASSERT(nhd->type == NXGE_HIO_TYPE_GUEST);
		return (DDI_FAILURE);
	}

	/*
	 * Get our HV cookie: the first integer of our "reg" property.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (DDI_FAILURE);
	}

	cookie = (uint32_t)(reg_val[0]);
	ddi_prop_free(reg_val);

	/* Ask the hypervisor for the VR's address and size. */
	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (DDI_FAILURE);
	}

	/*
	 * In the guest domain, we can use any VR data structure
	 * we want, because we're not supposed to know which VR
	 * the service domain has allocated to us.
	 *
	 * In the current version, the least significant nybble of
	 * the cookie is the VR region, but that could change
	 * very easily.
	 *
	 * In the future, a guest may have more than one VR allocated
	 * to it, which is why we go through this exercise.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			/* Claim the first free slot under the lock. */
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (DDI_FAILURE);
	}

	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = (uint32_t)cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;

	/*
	 * This is redundant data, but useful nonetheless. It helps
	 * us to keep track of which RDCs & TDCs belong to us.
	 */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);

	/*
	 * See nxge_intr.c.
	 */
	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		return (DDI_FAILURE);
	}

	/*
	 * Now we find out which RDCs & TDCs have been allocated to us.
	 */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		/*
		 * The map we get back is a bitmap of the
		 * virtual Tx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & tx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_TRANSMIT_GROUP);
				if (dc == 0) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	rx = &nhd->hio.rx;
	if (rx->get_map) {
		/*
		 * I repeat, the map we get back is a bitmap of
		 * the virtual Rx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "rx->get_map() failed"));
			return (DDI_FAILURE);
		}
		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & rx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_RECEIVE_GROUP);
				if (dc == 0) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (DDI_FAILURE);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	/* Register this guest interface with the MAC layer. */
	status = nxge_mac_register(nxge);
	if (status != NXGE_OK) {
		cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed\n",
		    nxge->instance);
		return (DDI_FAILURE);
	}

	nxge->hio_vr = vr;	/* For faster lookups. */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));

	return (DDI_SUCCESS);
}

/*
 * nxge_guest_dc_alloc
 *
 * Find a free nxge_hio_dc_t data structure.
 *
 * Arguments:
 * 	nxge
 * 	type	TRANSMIT or RECEIVE.
369678453a8Sspeer * 370678453a8Sspeer * Notes: 371678453a8Sspeer * 372678453a8Sspeer * Context: 373678453a8Sspeer * Guest domain 374678453a8Sspeer */ 375678453a8Sspeer nxge_hio_dc_t * 376678453a8Sspeer nxge_guest_dc_alloc( 377678453a8Sspeer nxge_t *nxge, 378678453a8Sspeer nxge_hio_vr_t *vr, 379678453a8Sspeer nxge_grp_type_t type) 380678453a8Sspeer { 381678453a8Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 382678453a8Sspeer nxge_hio_dc_t *dc; 383678453a8Sspeer int limit, i; 384678453a8Sspeer 385678453a8Sspeer /* 386678453a8Sspeer * In the guest domain, there may be more than one VR. 387678453a8Sspeer * each one of which will be using the same slots, or 388678453a8Sspeer * virtual channel numbers. So the <nhd>'s rdc & tdc 389678453a8Sspeer * tables must be shared. 390678453a8Sspeer */ 391678453a8Sspeer if (type == NXGE_TRANSMIT_GROUP) { 392678453a8Sspeer dc = &nhd->tdc[0]; 393678453a8Sspeer limit = NXGE_MAX_TDCS; 394678453a8Sspeer } else { 395678453a8Sspeer dc = &nhd->rdc[0]; 396678453a8Sspeer limit = NXGE_MAX_RDCS; 397678453a8Sspeer } 398678453a8Sspeer 399678453a8Sspeer MUTEX_ENTER(&nhd->lock); 400678453a8Sspeer for (i = 0; i < limit; i++, dc++) { 401678453a8Sspeer if (dc->vr == 0) { 402678453a8Sspeer dc->vr = vr; 403678453a8Sspeer dc->cookie = vr->cookie; 404678453a8Sspeer MUTEX_EXIT(&nhd->lock); 405678453a8Sspeer return (dc); 406678453a8Sspeer } 407678453a8Sspeer } 408678453a8Sspeer MUTEX_EXIT(&nhd->lock); 409678453a8Sspeer 410678453a8Sspeer return (0); 411678453a8Sspeer } 412678453a8Sspeer 413*0dc2366fSVenugopal Iyer int 414*0dc2366fSVenugopal Iyer nxge_hio_get_dc_htable_idx(nxge_t *nxge, vpc_type_t type, uint32_t channel) 415*0dc2366fSVenugopal Iyer { 416*0dc2366fSVenugopal Iyer nxge_hio_dc_t *dc; 417*0dc2366fSVenugopal Iyer 418*0dc2366fSVenugopal Iyer ASSERT(isLDOMguest(nxge)); 419*0dc2366fSVenugopal Iyer 420*0dc2366fSVenugopal Iyer dc = nxge_grp_dc_find(nxge, type, channel); 421*0dc2366fSVenugopal Iyer if (dc == NULL) 
422*0dc2366fSVenugopal Iyer return (-1); 423*0dc2366fSVenugopal Iyer 424*0dc2366fSVenugopal Iyer return (dc->ldg.vector); 425*0dc2366fSVenugopal Iyer } 426*0dc2366fSVenugopal Iyer 427678453a8Sspeer /* 428678453a8Sspeer * res_map_parse 429678453a8Sspeer * 430678453a8Sspeer * Parse a resource map. The resources are DMA channels, receive 431678453a8Sspeer * or transmit, depending on <type>. 432678453a8Sspeer * 433678453a8Sspeer * Arguments: 434678453a8Sspeer * nxge 435678453a8Sspeer * type Transmit or receive. 436678453a8Sspeer * res_map The resource map to parse. 437678453a8Sspeer * 438678453a8Sspeer * Notes: 439678453a8Sspeer * 440678453a8Sspeer * Context: 441678453a8Sspeer * Guest domain 442678453a8Sspeer */ 443678453a8Sspeer void 444678453a8Sspeer res_map_parse( 445678453a8Sspeer nxge_t *nxge, 446678453a8Sspeer nxge_grp_type_t type, 447678453a8Sspeer uint64_t res_map) 448678453a8Sspeer { 449678453a8Sspeer uint8_t slots, mask, slot; 450678453a8Sspeer int first, count; 451678453a8Sspeer 452678453a8Sspeer nxge_hw_pt_cfg_t *hardware; 453678453a8Sspeer nxge_grp_t *group; 454678453a8Sspeer 455678453a8Sspeer /* Slots are numbered 0 - 7. */ 456678453a8Sspeer slots = (uint8_t)(res_map & 0xff); 457678453a8Sspeer 458678453a8Sspeer /* Count the number of bits in the bitmap. */ 459678453a8Sspeer for (slot = 0, count = 0, mask = 1; slot < 8; slot++) { 460678453a8Sspeer if (slots & mask) 461678453a8Sspeer count++; 462678453a8Sspeer if (count == 1) 463678453a8Sspeer first = slot; 464678453a8Sspeer mask <<= 1; 465678453a8Sspeer } 466678453a8Sspeer 467678453a8Sspeer hardware = &nxge->pt_config.hw_config; 468678453a8Sspeer group = (type == NXGE_TRANSMIT_GROUP) ? 469678453a8Sspeer nxge->tx_set.group[0] : nxge->rx_set.group[0]; 470678453a8Sspeer 471678453a8Sspeer /* 472678453a8Sspeer * A guest domain has one Tx & one Rx group, so far. 473678453a8Sspeer * In the future, there may be more than one. 
474678453a8Sspeer */ 475678453a8Sspeer if (type == NXGE_TRANSMIT_GROUP) { 476678453a8Sspeer nxge_dma_pt_cfg_t *port = &nxge->pt_config; 477da14cebeSEric Cheng nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0]; 478678453a8Sspeer 479678453a8Sspeer hardware->tdc.start = first; 480678453a8Sspeer hardware->tdc.count = count; 481678453a8Sspeer hardware->tdc.owned = count; 482678453a8Sspeer 483da14cebeSEric Cheng tdc_grp->start_tdc = first; 484da14cebeSEric Cheng tdc_grp->max_tdcs = (uint8_t)count; 485da14cebeSEric Cheng tdc_grp->grp_index = group->index; 486da14cebeSEric Cheng tdc_grp->map = slots; 487da14cebeSEric Cheng 488678453a8Sspeer group->map = slots; 489678453a8Sspeer 490678453a8Sspeer /* 491678453a8Sspeer * Pointless in a guest domain. This bitmap is used 492678453a8Sspeer * in only one place: nxge_txc_init(), 493678453a8Sspeer * a service-domain-only function. 494678453a8Sspeer */ 495678453a8Sspeer port->tx_dma_map = slots; 496678453a8Sspeer 497678453a8Sspeer nxge->tx_set.owned.map |= slots; 498678453a8Sspeer } else { 499678453a8Sspeer nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0]; 500678453a8Sspeer 501678453a8Sspeer hardware->start_rdc = first; 502678453a8Sspeer hardware->max_rdcs = count; 503678453a8Sspeer 504678453a8Sspeer rdc_grp->start_rdc = (uint8_t)first; 505678453a8Sspeer rdc_grp->max_rdcs = (uint8_t)count; 506678453a8Sspeer rdc_grp->def_rdc = (uint8_t)first; 507678453a8Sspeer 508678453a8Sspeer rdc_grp->map = slots; 509678453a8Sspeer group->map = slots; 510678453a8Sspeer 511678453a8Sspeer nxge->rx_set.owned.map |= slots; 512678453a8Sspeer } 513678453a8Sspeer } 514678453a8Sspeer 515678453a8Sspeer /* 516678453a8Sspeer * nxge_hio_vr_release 517678453a8Sspeer * 518678453a8Sspeer * Release a virtualization region (VR). 519678453a8Sspeer * 520678453a8Sspeer * Arguments: 521678453a8Sspeer * nxge 522678453a8Sspeer * 523678453a8Sspeer * Notes: 524678453a8Sspeer * We must uninitialize all DMA channels associated with the VR, too. 
525678453a8Sspeer * 526678453a8Sspeer * The service domain will re-initialize these DMA channels later. 527678453a8Sspeer * See nxge_hio.c:nxge_hio_share_free() for details. 528678453a8Sspeer * 529678453a8Sspeer * Context: 530678453a8Sspeer * Guest domain 531678453a8Sspeer */ 532678453a8Sspeer int 533330cd344SMichael Speer nxge_hio_vr_release(nxge_t *nxge) 534678453a8Sspeer { 535330cd344SMichael Speer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 536330cd344SMichael Speer int vr_index; 537330cd344SMichael Speer 538678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release")); 539678453a8Sspeer 540330cd344SMichael Speer if (nxge->hio_vr == NULL) { 541330cd344SMichael Speer return (NXGE_OK); 542330cd344SMichael Speer } 543330cd344SMichael Speer 544678453a8Sspeer /* 545678453a8Sspeer * Uninitialize interrupts. 546678453a8Sspeer */ 547678453a8Sspeer nxge_hio_intr_uninit(nxge); 548678453a8Sspeer 549678453a8Sspeer /* 550678453a8Sspeer * Uninitialize the receive DMA channels. 551678453a8Sspeer */ 552678453a8Sspeer nxge_uninit_rxdma_channels(nxge); 553678453a8Sspeer 554678453a8Sspeer /* 555678453a8Sspeer * Uninitialize the transmit DMA channels. 556678453a8Sspeer */ 557678453a8Sspeer nxge_uninit_txdma_channels(nxge); 558678453a8Sspeer 5599d5b8bc5SMichael Speer /* 5609d5b8bc5SMichael Speer * Remove both groups. Assumption: only two groups! 5619d5b8bc5SMichael Speer */ 5629d5b8bc5SMichael Speer if (nxge->rx_set.group[0] != NULL) 5636920a987SMisaki Miyashita nxge_grp_remove(nxge, nxge->rx_set.group[0]); 5649d5b8bc5SMichael Speer if (nxge->tx_set.group[0] != NULL) 5656920a987SMisaki Miyashita nxge_grp_remove(nxge, nxge->tx_set.group[0]); 566678453a8Sspeer 567678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release")); 568678453a8Sspeer 569330cd344SMichael Speer /* 570330cd344SMichael Speer * Clean up. 
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			/* Release the VR slot claimed in nxge_hio_vr_add(). */
			nhd->vr[vr_index].nxge = NULL;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NXGE_OK);
}

#if defined(NIU_LP_WORKAROUND)
/*
 * nxge_tdc_lp_conf
 *
 * Configure the logical pages for a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The TDC to configure.
 *
 * Notes:
 *	Page 0 maps the data buffers, page 1 the control buffers.
 *	Returns NXGE_OK, NXGE_ERROR, or (NXGE_ERROR | hv_rv) when a
 *	hypervisor call fails.
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_tdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));

	ring = nxge->tx_rings->rings[channel];

	if (ring->hv_set) {
		/* This shouldn't happen. */
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv: %d "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the mapping back; the result is only used for debugging. */
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_cntl_base_ioaddr_pp,
		    ring->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the mapping back; the result is only used for debugging. */
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size, ra, size));

	/* Mark this ring configured so we never reprogram it. */
	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));

	return (NXGE_OK);
}

/*
 * nxge_rdc_lp_conf
 *
 * Configure an RDC's logical pages.
 *
 * Arguments:
 * 	nxge
 * 	channel	The RDC to configure.
 *
 * Notes:
 *	Mirror image of nxge_tdc_lp_conf(): page 0 maps the data
 *	buffers, page 1 the control buffers.  Returns NXGE_OK,
 *	NXGE_ERROR, or (NXGE_ERROR | hv_rv) on hypervisor failure.
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_rdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	rx_rbr_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));

	ring = nxge->rx_rbr_rings->rbr_rings[channel];

	if (ring->hv_set) {
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_buf_base_ioaddr_pp,
		    ring->hv_rx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the mapping back; the result is only used for debugging. */
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_cntl_base_ioaddr_pp,
		    ring->hv_rx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the mapping back; the result is only used for debugging. */
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size, ra, size));

	/* Mark this ring configured so we never reprogram it. */
	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));

	return (NXGE_OK);
}
#endif	/* defined(NIU_LP_WORKAROUND) */

/*
 * This value is in milliseconds.
825678453a8Sspeer */ 826678453a8Sspeer #define NXGE_GUEST_TIMER 500 /* 1/2 second, for now */ 827678453a8Sspeer 828678453a8Sspeer /* 829678453a8Sspeer * nxge_hio_start_timer 830678453a8Sspeer * 831678453a8Sspeer * Start the timer which checks for Tx hangs. 832678453a8Sspeer * 833678453a8Sspeer * Arguments: 834678453a8Sspeer * nxge 835678453a8Sspeer * 836678453a8Sspeer * Notes: 837678453a8Sspeer * This function is called from nxge_attach(). 838678453a8Sspeer * 839678453a8Sspeer * This function kicks off the guest domain equivalent of 840678453a8Sspeer * nxge_check_hw_state(). It is called only once, from attach. 841678453a8Sspeer * 842678453a8Sspeer * Context: 843678453a8Sspeer * Guest domain 844678453a8Sspeer */ 845678453a8Sspeer void 846678453a8Sspeer nxge_hio_start_timer( 847678453a8Sspeer nxge_t *nxge) 848678453a8Sspeer { 849678453a8Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 850678453a8Sspeer nxge_hio_vr_t *vr; 851678453a8Sspeer int region; 852678453a8Sspeer 853678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_timer_start")); 854678453a8Sspeer 855678453a8Sspeer MUTEX_ENTER(&nhd->lock); 856678453a8Sspeer 857678453a8Sspeer /* 858678453a8Sspeer * Find our VR data structure. (We are currently assuming 859678453a8Sspeer * one VR per guest domain. That may change in the future.) 
860678453a8Sspeer */ 861678453a8Sspeer for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) { 862678453a8Sspeer if (nhd->vr[region].nxge == (uintptr_t)nxge) 863678453a8Sspeer break; 864678453a8Sspeer } 865678453a8Sspeer 866678453a8Sspeer MUTEX_EXIT(&nhd->lock); 867678453a8Sspeer 868678453a8Sspeer if (region == NXGE_VR_SR_MAX) { 869678453a8Sspeer return; 870678453a8Sspeer } 871678453a8Sspeer 872678453a8Sspeer vr = (nxge_hio_vr_t *)&nhd->vr[region]; 873678453a8Sspeer 874678453a8Sspeer nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state, 875678453a8Sspeer (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER)); 876678453a8Sspeer 877678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_timer_start")); 878678453a8Sspeer } 879678453a8Sspeer 880678453a8Sspeer /* 881678453a8Sspeer * nxge_check_guest_state 882678453a8Sspeer * 883678453a8Sspeer * Essentially, check for Tx hangs. In the future, if we are 884678453a8Sspeer * polling the hardware, we may do so here. 885678453a8Sspeer * 886678453a8Sspeer * Arguments: 887678453a8Sspeer * vr The virtualization region (VR) data structure. 888678453a8Sspeer * 889678453a8Sspeer * Notes: 890678453a8Sspeer * This function is the guest domain equivalent of 891678453a8Sspeer * nxge_check_hw_state(). Since we have no hardware to 892678453a8Sspeer * check, we simply call nxge_check_tx_hang(). 
893678453a8Sspeer * 894678453a8Sspeer * Context: 895678453a8Sspeer * Guest domain 896678453a8Sspeer */ 897678453a8Sspeer void 898678453a8Sspeer nxge_check_guest_state( 899678453a8Sspeer nxge_hio_vr_t *vr) 900678453a8Sspeer { 901678453a8Sspeer nxge_t *nxge = (nxge_t *)vr->nxge; 902678453a8Sspeer 903678453a8Sspeer NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state")); 904678453a8Sspeer 905678453a8Sspeer MUTEX_ENTER(nxge->genlock); 906678453a8Sspeer nxge->nxge_timerid = 0; 907678453a8Sspeer 908d7cf53fcSmisaki Miyashita if (nxge->nxge_mac_state == NXGE_MAC_STARTED) { 909d7cf53fcSmisaki Miyashita nxge_check_tx_hang(nxge); 910678453a8Sspeer 911d7cf53fcSmisaki Miyashita nxge->nxge_timerid = timeout((void(*)(void *)) 912d7cf53fcSmisaki Miyashita nxge_check_guest_state, (caddr_t)vr, 913d7cf53fcSmisaki Miyashita drv_usectohz(1000 * NXGE_GUEST_TIMER)); 914d7cf53fcSmisaki Miyashita } 915678453a8Sspeer 916678453a8Sspeer nxge_check_guest_state_exit: 917678453a8Sspeer MUTEX_EXIT(nxge->genlock); 918678453a8Sspeer NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state")); 919678453a8Sspeer } 920678453a8Sspeer 921e759c33aSMichael Speer nxge_status_t 922e759c33aSMichael Speer nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm) 923e759c33aSMichael Speer { 924e759c33aSMichael Speer nxge_grp_t *group; 925e759c33aSMichael Speer uint32_t channel; 926e759c33aSMichael Speer nxge_hio_dc_t *dc; 927e759c33aSMichael Speer nxge_ldg_t *ldgp; 928e759c33aSMichael Speer 929e759c33aSMichael Speer /* 930e759c33aSMichael Speer * Validate state of guest interface before 931e759c33aSMichael Speer * proceeeding. 
932e759c33aSMichael Speer */ 933e759c33aSMichael Speer if (!isLDOMguest(nxge)) 934e759c33aSMichael Speer return (NXGE_ERROR); 935e759c33aSMichael Speer if (nxge->nxge_mac_state != NXGE_MAC_STARTED) 936e759c33aSMichael Speer return (NXGE_ERROR); 937e759c33aSMichael Speer 938e759c33aSMichael Speer /* 939e759c33aSMichael Speer * In guest domain, always and only dealing with 940e759c33aSMichael Speer * group 0 for an instance of nxge. 941e759c33aSMichael Speer */ 942e759c33aSMichael Speer group = nxge->rx_set.group[0]; 943e759c33aSMichael Speer 944e759c33aSMichael Speer /* 945e759c33aSMichael Speer * Look to arm the the RDCs for the group. 946e759c33aSMichael Speer */ 947e759c33aSMichael Speer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 948e759c33aSMichael Speer if ((1 << channel) & group->map) { 949e759c33aSMichael Speer /* 950e759c33aSMichael Speer * Get the RDC. 951e759c33aSMichael Speer */ 952e759c33aSMichael Speer dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel); 953e759c33aSMichael Speer if (dc == NULL) 954e759c33aSMichael Speer return (NXGE_ERROR); 955e759c33aSMichael Speer 956e759c33aSMichael Speer /* 957e759c33aSMichael Speer * Get the RDC's ldg group. 958e759c33aSMichael Speer */ 959e759c33aSMichael Speer ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector]; 960e759c33aSMichael Speer if (ldgp == NULL) 961e759c33aSMichael Speer return (NXGE_ERROR); 962e759c33aSMichael Speer 963e759c33aSMichael Speer /* 964e759c33aSMichael Speer * Set the state of the group. 
965e759c33aSMichael Speer */ 966e759c33aSMichael Speer ldgp->arm = arm; 967e759c33aSMichael Speer 968e759c33aSMichael Speer nxge_hio_ldgimgn(nxge, ldgp); 969e759c33aSMichael Speer } 970e759c33aSMichael Speer } 971e759c33aSMichael Speer 972e759c33aSMichael Speer return (NXGE_OK); 973e759c33aSMichael Speer } 974e759c33aSMichael Speer 975e759c33aSMichael Speer nxge_status_t 976e759c33aSMichael Speer nxge_hio_rdc_enable(p_nxge_t nxge) 977e759c33aSMichael Speer { 978e759c33aSMichael Speer nxge_grp_t *group; 979e759c33aSMichael Speer npi_handle_t handle; 980e759c33aSMichael Speer uint32_t channel; 981e759c33aSMichael Speer npi_status_t rval; 982e759c33aSMichael Speer 983e759c33aSMichael Speer /* 984e759c33aSMichael Speer * Validate state of guest interface before 985e759c33aSMichael Speer * proceeeding. 986e759c33aSMichael Speer */ 987e759c33aSMichael Speer if (!isLDOMguest(nxge)) 988e759c33aSMichael Speer return (NXGE_ERROR); 989e759c33aSMichael Speer if (nxge->nxge_mac_state != NXGE_MAC_STARTED) 990e759c33aSMichael Speer return (NXGE_ERROR); 991e759c33aSMichael Speer 992e759c33aSMichael Speer /* 993e759c33aSMichael Speer * In guest domain, always and only dealing with 994e759c33aSMichael Speer * group 0 for an instance of nxge. 995e759c33aSMichael Speer */ 996e759c33aSMichael Speer group = nxge->rx_set.group[0]; 997e759c33aSMichael Speer 998e759c33aSMichael Speer /* 999e759c33aSMichael Speer * Get the PIO handle. 1000e759c33aSMichael Speer */ 1001e759c33aSMichael Speer handle = NXGE_DEV_NPI_HANDLE(nxge); 1002e759c33aSMichael Speer 1003e759c33aSMichael Speer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 1004e759c33aSMichael Speer /* 1005e759c33aSMichael Speer * If this channel is in the map, then enable 1006e759c33aSMichael Speer * it. 1007e759c33aSMichael Speer */ 1008e759c33aSMichael Speer if ((1 << channel) & group->map) { 1009e759c33aSMichael Speer /* 1010e759c33aSMichael Speer * Enable the RDC and clear the empty bit. 
1011e759c33aSMichael Speer */ 1012e759c33aSMichael Speer rval = npi_rxdma_cfg_rdc_enable(handle, channel); 1013e759c33aSMichael Speer if (rval != NPI_SUCCESS) 1014e759c33aSMichael Speer return (NXGE_ERROR); 1015e759c33aSMichael Speer 1016e759c33aSMichael Speer (void) npi_rxdma_channel_rbr_empty_clear(handle, 1017e759c33aSMichael Speer channel); 1018e759c33aSMichael Speer } 1019e759c33aSMichael Speer } 1020e759c33aSMichael Speer 1021e759c33aSMichael Speer return (NXGE_OK); 1022e759c33aSMichael Speer } 1023678453a8Sspeer #endif /* defined(sun4v) */ 1024