/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio_guest.c
 *
 * This file manages the virtualization resources for a guest domain.
 *
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>

#include <sys/nxge/nxge_hio.h>

/*
 * nxge_hio_unregister
 *
 *	Unregister with the VNET module.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	We're assuming that the channels will be disabled & unassigned
 *	in the service domain, after we're done here.
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_unregister(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	if (nhd == NULL) {
		return;
	}

#if defined(sun4v)
	/* Unregister with vNet. */
	if (nhd->hio.vio.unregister) {
		if (nxge->hio_vr)
			(*nhd->hio.vio.unregister)(nxge->hio_vr->vhp);
	}
#endif
}

/*
 * nxge_guest_regs_map
 *
 *	Map in a guest domain's register set(s).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	We set <is_vraddr> to B_TRUE, so that the NPI layer computes
 *	register offsets relative to the VR's base address.
 *
 * Context:
 *	Guest domain
 */
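
/*
 * These access attributes describe the guest's register mapping: the
 * device data is little-endian (DDI_STRUCTURE_LE_ACC), and accesses
 * are issued in program order (DDI_STRICTORDER_ACC).
 */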
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

int
nxge_guest_regs_map(
	nxge_t *nxge)
{
	dev_regs_t	*regs;
	off_t		regsize;
	int		rv;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));

	/* So we can allocate properly-aligned memory. */
	nxge->niu_type = N2_NIU; /* Version 1.0 only */
	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */

	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	regs = nxge->dev_regs;

	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
	    &nxge_guest_register_access_attributes, &regs->nxge_regh);

	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;

	/* NPI_REG_ADD_HANDLE_SET() */
	nxge->npi_reg_handle.regh = regs->nxge_regh;
	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_reg_handle.is_vraddr = B_TRUE;
	nxge->npi_reg_handle.function.instance = nxge->instance;
	nxge->npi_reg_handle.function.function = nxge->function_num;
	nxge->npi_reg_handle.nxgep = (void *)nxge;

	/* NPI_VREG_ADD_HANDLE_SET() */
	nxge->npi_vreg_handle.regh = regs->nxge_regh;
	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
	nxge->npi_vreg_handle.function.instance = nxge->instance;
	nxge->npi_vreg_handle.function.function = nxge->function_num;
	nxge->npi_vreg_handle.nxgep = (void *)nxge;

	regs->nxge_vir_regp = regs->nxge_regp;
	regs->nxge_vir_regh = regs->nxge_regh;

	/*
	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
	 * or FCODE reg variables.
	 */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));

	return (NXGE_OK);
}

void
nxge_guest_regs_map_free(
	nxge_t *nxge)
{
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));

	if (nxge->dev_regs) {
		if (nxge->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_guest_regs_map_free: device registers"));
			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
			nxge->dev_regs->nxge_regh = NULL;
		}
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
}

#if defined(sun4v)

/*
 * -------------------------------------------------------------
 * Local prototypes
 * -------------------------------------------------------------
 */
static nxge_hio_dc_t *nxge_guest_dc_alloc(
	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);

static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
static void nxge_check_guest_state(nxge_hio_vr_t *);

/*
 * nxge_hio_vr_add
 *
 *	If we have been given a virtualization region (VR),
 *	then initialize it.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern mac_callbacks_t nxge_m_callbacks;

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	nxge_hio_dc_t *dc;

	int *reg_val;
	uint_t reg_len;
	uint8_t vr_index;

	nxhv_vr_fp_t *fp;
	uint64_t vr_address, vr_size;
	uint32_t cookie;

	nxhv_dc_fp_t *tx, *rx;
	uint64_t tx_map, rx_map;

	uint64_t hv_rv;

	/* Variables needed to register with vnet. */
	mac_register_t *mac_info;
	ether_addr_t mac_addr;
	nx_vio_fp_t *vio;

	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));

	/*
	 * Get our HV cookie.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (NXGE_ERROR);
	}

	cookie = (uint32_t)reg_val[0];
	ddi_prop_free(reg_val);

	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (NXGE_ERROR);
	}

	/*
	 * In the guest domain, we can use any VR data structure
	 * we want, because we're not supposed to know which VR
	 * the service domain has allocated to us.
	 *
	 * In the current version, the least significant nybble of
	 * the cookie is the VR region, but that could change
	 * very easily.
	 *
	 * In the future, a guest may have more than one VR allocated
	 * to it, which is why we go through this exercise.
	 */
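	/*
	 * For example, under the current encoding, a cookie of 0x12
	 * would correspond to VR region 2.
	 */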
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add: "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (NXGE_ERROR);
	}

	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;

	/*
	 * This is redundant data, but useful nonetheless.  It helps
	 * us to keep track of which RDCs & TDCs belong to us.
	 */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);

	/*
	 * See nxge_intr.c.
	 */
	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		return (NXGE_ERROR);
	}

	/*
	 * Now we find out which RDCs & TDCs have been allocated to us.
	 */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		/*
		 * The map we get back is a bitmap of the
		 * virtual Tx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
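		/*
		 * For example, a tx_map of 0x5 means we own virtual
		 * channels 0 and 2.
		 */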
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			return (NXGE_ERROR);
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & tx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_TRANSMIT_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (NXGE_ERROR);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	rx = &nhd->hio.rx;
	if (rx->get_map) {
		/*
		 * I repeat, the map we get back is a bitmap of
		 * the virtual Rx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "rx->get_map() failed"));
			return (NXGE_ERROR);
		}
		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & rx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_RECEIVE_GROUP);
				if (dc == NULL) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					return (NXGE_ERROR);
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	/*
	 * Register with vnet.
	 */
	if ((mac_info = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	mac_info->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac_info->m_driver = nxge;
	mac_info->m_dip = nxge->dip;
	mac_info->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
	mac_info->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
	(void) memset(mac_info->m_src_addr, 0xff, MAXMACADDRLEN);
	mac_info->m_callbacks = &nxge_m_callbacks;
	mac_info->m_min_sdu = 0;
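	/*
	 * The maximum SDU excludes the Ethernet header and FCS; the
	 * final 4 bytes presumably leave room for a VLAN tag.
	 */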
	mac_info->m_max_sdu = NXGE_MTU_DEFAULT_MAX -
	    sizeof (struct ether_header) - ETHERFCSL - 4;

	(void) memset(&mac_addr, 0xff, sizeof (mac_addr));

	/* Register with vio_net. */
	vio = &nhd->hio.vio;
	if ((*vio->__register)(mac_info, VIO_NET_RES_HYBRID,
	    nxge->hio_mac_addr, mac_addr, &vr->vhp, &vio->cb)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL, "HIO registration failed"));
		return (NXGE_ERROR);
	}

	nxge->hio_vr = vr;	/* For faster lookups. */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));

	return (NXGE_OK);
}

/*
 * nxge_guest_dc_alloc
 *
 *	Find a free nxge_hio_dc_t data structure.
 *
 * Arguments:
 * 	nxge
 * 	vr	The virtualization region (VR) data structure.
 * 	type	TRANSMIT or RECEIVE.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
static nxge_hio_dc_t *
nxge_guest_dc_alloc(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	nxge_grp_type_t type)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	int limit, i;

	/*
	 * In the guest domain, there may be more than one VR,
	 * each of which will be using the same slots, or
	 * virtual channel numbers.  So the <nhd>'s rdc & tdc
	 * tables must be shared.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		dc = &nhd->tdc[0];
		limit = NXGE_MAX_TDCS;
	} else {
		dc = &nhd->rdc[0];
		limit = NXGE_MAX_RDCS;
	}

	MUTEX_ENTER(&nhd->lock);
	for (i = 0; i < limit; i++, dc++) {
		if (dc->vr == NULL) {
			dc->vr = vr;
			dc->cookie = vr->cookie;
			MUTEX_EXIT(&nhd->lock);
			return (dc);
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NULL);
}

/*
 * res_map_parse
 *
 *	Parse a resource map.  The resources are DMA channels, receive
 *	or transmit, depending on <type>.
 *
 * Arguments:
 * 	nxge
 * 	type	Transmit or receive.
 *	res_map	The resource map to parse.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
static void
res_map_parse(
	nxge_t *nxge,
	nxge_grp_type_t type,
	uint64_t res_map)
{
	uint8_t slots, mask, slot;
	int first = 0, count;

	nxge_hw_pt_cfg_t *hardware;
	nxge_grp_t *group;

	/* Slots are numbered 0 - 7. */
	slots = (uint8_t)(res_map & 0xff);

	/* Count the bits in the bitmap, and note the first bit set. */
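	/*
	 * For example, a res_map of 0x18 (slots 3 & 4) yields
	 * first = 3 and count = 2.
	 */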
	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
		if (slots & mask) {
			if (count == 0)
				first = slot;
			count++;
		}
		mask <<= 1;
	}

	hardware = &nxge->pt_config.hw_config;
	group = (type == NXGE_TRANSMIT_GROUP) ?
	    nxge->tx_set.group[0] : nxge->rx_set.group[0];

	/*
	 * A guest domain has one Tx & one Rx group, so far.
	 * In the future, there may be more than one.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		nxge_dma_pt_cfg_t *port = &nxge->pt_config;

		hardware->tdc.start = first;
		hardware->tdc.count = count;
		hardware->tdc.owned = count;

		group->map = slots;

		/*
		 * Pointless in a guest domain.  This bitmap is used
		 * in only one place: nxge_txc_init(),
		 * a service-domain-only function.
		 */
		port->tx_dma_map = slots;

		nxge->tx_set.owned.map |= slots;
	} else {
		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];

		hardware->start_rdc = first;
		hardware->max_rdcs = count;

		rdc_grp->start_rdc = (uint8_t)first;
		rdc_grp->max_rdcs = (uint8_t)count;
		rdc_grp->def_rdc = (uint8_t)first;

		rdc_grp->map = slots;
		group->map = slots;

		nxge->rx_set.owned.map |= slots;
	}
}

/*
 * nxge_hio_vr_release
 *
 *	Release a virtualization region (VR).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	The service domain will re-initialize these DMA channels later.
 *	See nxge_hio.c:nxge_hio_share_free() for details.
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_release(nxge_t *nxge)
{
	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	int		vr_index;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));

	if (nxge->hio_vr == NULL) {
		return (NXGE_OK);
	}

	/*
	 * Uninitialize interrupts.
	 */
	nxge_hio_intr_uninit(nxge);

	/*
	 * Uninitialize the receive DMA channels.
	 */
	nxge_uninit_rxdma_channels(nxge);

	/*
	 * Uninitialize the transmit DMA channels.
	 */
	nxge_uninit_txdma_channels(nxge);

	/*
	 * Remove both groups. Assumption: only two groups!
	 */
	if (nxge->rx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
	if (nxge->tx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->tx_set.group[0]);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));

	/*
	 * Clean up.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			nhd->vr[vr_index].nxge = NULL;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	return (NXGE_OK);
}

#if defined(NIU_LP_WORKAROUND)
/*
 * nxge_tdc_lp_conf
 *
 *	Configure the logical pages for a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The TDC to configure.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_tdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));

	ring = nxge->tx_rings->rings[channel];

	if (ring->hv_set) {
		/* This shouldn't happen. */
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
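	/*
	 * Each logical page describes a range of I/O addresses that
	 * this DMA channel may address: page 0 covers the data
	 * buffers, page 1 (below) the control buffers.
	 */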
	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_cntl_base_ioaddr_pp,
		    ring->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));

	return (NXGE_OK);
}

/*
 * nxge_rdc_lp_conf
 *
 *	Configure an RDC's logical pages.
 *
 * Arguments:
 * 	nxge
 * 	channel	The RDC to configure.
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_rdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t		*dc;
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	rx_rbr_ring_t		*ring;

	uint64_t		hv_rv;
	uint64_t		ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));

	ring = nxge->rx_rbr_rings->rbr_rings[channel];

	if (ring->hv_set) {
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
		return (NXGE_ERROR);

	/*
	 * Initialize logical page 0 for data buffers.
	 *
	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
	 * nxge_main.c:nxge_dma_mem_alloc().
	 */
	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_buf_base_ioaddr_pp,
		    ring->hv_rx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size, ra, size));

	/*
	 * Initialize logical page 1 for control buffers.
	 */
	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_cntl_base_ioaddr_pp,
		    ring->hv_rx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size, ra, size));

	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));

	return (NXGE_OK);
}
#endif	/* defined(NIU_LP_WORKAROUND) */

/*
 * This value is in milliseconds.
 */
#define	NXGE_GUEST_TIMER	500 /* 1/2 second, for now */
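/*
 * timeout(9F) takes clock ticks, so the callers below arm the timer
 * with drv_usectohz(1000 * NXGE_GUEST_TIMER).
 */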

/*
 * nxge_hio_start_timer
 *
 *	Start the timer which checks for Tx hangs.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This function is called from nxge_attach().
 *
 *	This function kicks off the guest domain equivalent of
 *	nxge_check_hw_state().  It is called only once, from attach.
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_start_timer(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	int region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_start_timer"));

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Find our VR data structure.  (We are currently assuming
	 * one VR per guest domain.  That may change in the future.)
	 */
	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
		if (nhd->vr[region].nxge == (uintptr_t)nxge)
			break;
	}

	MUTEX_EXIT(&nhd->lock);

	if (region == NXGE_VR_SR_MAX) {
		return;
	}

	vr = (nxge_hio_vr_t *)&nhd->vr[region];

	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_start_timer"));
}

/*
 * nxge_check_guest_state
 *
 *	Essentially, check for Tx hangs.  In the future, if we are
 *	polling the hardware, we may do so here.
 *
 * Arguments:
 * 	vr	The virtualization region (VR) data structure.
 *
 * Notes:
 *	This function is the guest domain equivalent of
 *	nxge_check_hw_state().  Since we have no hardware to
 * 	check, we simply call nxge_check_tx_hang().
 *
 * Context:
 *	Guest domain
 */
static void
nxge_check_guest_state(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;

	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));

	MUTEX_ENTER(nxge->genlock);

	nxge->nxge_timerid = 0;

	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
		nxge_check_tx_hang(nxge);

		nxge->nxge_timerid = timeout((void(*)(void *))
		    nxge_check_guest_state, (caddr_t)vr,
		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
	}

	MUTEX_EXIT(nxge->genlock);
	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
}

#endif	/* defined(sun4v) */