1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * nxge_hio_guest.c
31  *
32  * This file manages the virtualization resources for a guest domain.
33  *
34  */
35 
36 #include <sys/nxge/nxge_impl.h>
37 #include <sys/nxge/nxge_fzc.h>
38 #include <sys/nxge/nxge_rxdma.h>
39 #include <sys/nxge/nxge_txdma.h>
40 
41 #include <sys/nxge/nxge_hio.h>
42 
43 /*
44  * nxge_hio_unregister
45  *
46  *	Unregister with the VNET module.
47  *
48  * Arguments:
49  * 	nxge
50  *
51  * Notes:
52  *	We must uninitialize all DMA channels associated with the VR, too.
53  *
54  *	We're assuming that the channels will be disabled & unassigned
55  *	in the service domain, after we're done here.
56  *
57  * Context:
58  *	Guest domain
59  */
60 void
61 nxge_hio_unregister(
62 	nxge_t *nxge)
63 {
64 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
65 
	if (nhd == NULL) {
67 		return;
68 	}
69 
70 #if defined(sun4v)
71 	/* Unregister with vNet. */
72 	if (nhd->hio.vio.unregister) {
73 		if (nxge->hio_vr)
74 			(*nhd->hio.vio.unregister)(nxge->hio_vr->vhp);
75 	}
76 #endif
77 }
78 
79 /*
80  * nxge_guest_regs_map
81  *
82  *	Map in a guest domain's register set(s).
83  *
84  * Arguments:
85  * 	nxge
86  *
87  * Notes:
88  *	Note that we set <is_vraddr> to TRUE.
89  *
90  * Context:
91  *	Guest domain
92  */
93 static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
94 	DDI_DEVICE_ATTR_V0,
95 	DDI_STRUCTURE_LE_ACC,
96 	DDI_STRICTORDER_ACC,
97 };
98 
99 int
100 nxge_guest_regs_map(
101 	nxge_t *nxge)
102 {
103 	dev_regs_t 	*regs;
104 	off_t		regsize;
105 	int rv;
106 
107 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));
108 
109 	/* So we can allocate properly-aligned memory. */
110 	nxge->niu_type = N2_NIU; /* Version 1.0 only */
111 	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */
112 
113 	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
114 	regs = nxge->dev_regs;
115 
	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}
120 
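	/*
	 * The regsize lookup above serves only as a sanity check that
	 * register set 0 exists; the offset and length of zero below
	 * tell ddi_regs_map_setup() to map the complete register set.
	 */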
121 	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
122 	    &nxge_guest_register_access_attributes, &regs->nxge_regh);
123 
	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}
128 
129 	nxge->npi_handle.regh = regs->nxge_regh;
130 	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
131 	nxge->npi_handle.is_vraddr = B_TRUE;
132 	nxge->npi_handle.function.instance = nxge->instance;
133 	nxge->npi_handle.function.function = nxge->function_num;
134 	nxge->npi_handle.nxgep = (void *)nxge;
135 
136 	/* NPI_REG_ADD_HANDLE_SET() */
137 	nxge->npi_reg_handle.regh = regs->nxge_regh;
138 	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
139 	nxge->npi_reg_handle.is_vraddr = B_TRUE;
140 	nxge->npi_reg_handle.function.instance = nxge->instance;
141 	nxge->npi_reg_handle.function.function = nxge->function_num;
142 	nxge->npi_reg_handle.nxgep = (void *)nxge;
143 
144 	/* NPI_VREG_ADD_HANDLE_SET() */
145 	nxge->npi_vreg_handle.regh = regs->nxge_regh;
146 	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
147 	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
148 	nxge->npi_vreg_handle.function.instance = nxge->instance;
149 	nxge->npi_vreg_handle.function.function = nxge->function_num;
150 	nxge->npi_vreg_handle.nxgep = (void *)nxge;
151 
152 	regs->nxge_vir_regp = regs->nxge_regp;
153 	regs->nxge_vir_regh = regs->nxge_regh;
154 
155 	/*
156 	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
157 	 * or FCODE reg variables.
158 	 */
159 
160 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));
161 
162 	return (NXGE_OK);
163 }
164 
165 void
166 nxge_guest_regs_map_free(
167 	nxge_t *nxge)
168 {
169 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));
170 
171 	if (nxge->dev_regs) {
172 		if (nxge->dev_regs->nxge_regh) {
173 			NXGE_DEBUG_MSG((nxge, DDI_CTL,
174 			    "==> nxge_unmap_regs: device registers"));
175 			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
176 			nxge->dev_regs->nxge_regh = NULL;
177 		}
178 		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
180 	}
181 
182 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
183 }
184 
185 #if defined(sun4v)
186 
187 /*
188  * -------------------------------------------------------------
189  * Local prototypes
190  * -------------------------------------------------------------
191  */
192 static nxge_hio_dc_t *nxge_guest_dc_alloc(
193 	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);
194 
195 static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
196 static void nxge_check_guest_state(nxge_hio_vr_t *);
197 
198 /*
199  * nxge_hio_vr_add
200  *
201  *	If we have been given a virtualization region (VR),
202  *	then initialize it.
203  *
204  * Arguments:
205  * 	nxge
206  *
207  * Notes:
208  *
209  * Context:
210  *	Guest domain
211  */
212 /* ARGSUSED */
213 int
214 nxge_hio_vr_add(
215 	nxge_t *nxge)
216 {
217 	extern mac_callbacks_t nxge_m_callbacks;
218 
219 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
220 	nxge_hio_vr_t *vr;
221 	nxge_hio_dc_t *dc;
222 
223 	int *reg_val;
224 	uint_t reg_len;
225 	uint8_t vr_index;
226 
227 	nxhv_vr_fp_t *fp;
228 	uint64_t cookie, vr_address, vr_size;
229 
230 	nxhv_dc_fp_t *tx, *rx;
231 	uint64_t tx_map, rx_map;
232 
233 	uint64_t hv_rv;
234 
235 	/* Variables needed to register with vnet. */
236 	mac_register_t *mac_info;
237 	ether_addr_t mac_addr;
238 	nx_vio_fp_t *vio;
239 
240 	int i;
241 
242 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));
243 
244 	/*
245 	 * Get our HV cookie.
246 	 */
247 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
248 	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
249 		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
250 		return (NXGE_ERROR);
251 	}
252 
253 	cookie = (uint64_t)(reg_val[0]);
254 	ddi_prop_free(reg_val);
255 
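	/*
	 * Ask the hypervisor for the base address and size of the
	 * virtualization region identified by this cookie.
	 */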
256 	fp = &nhd->hio.vr;
257 	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
258 	if (hv_rv != 0) {
259 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
260 		    "vr->getinfo() failed"));
261 		return (NXGE_ERROR);
262 	}
263 
264 	/*
265 	 * In the guest domain, we can use any VR data structure
266 	 * we want, because we're not supposed to know which VR
267 	 * the service domain has allocated to us.
268 	 *
269 	 * In the current version, the least significant nybble of
270 	 * the cookie is the VR region, but that could change
271 	 * very easily.
272 	 *
273 	 * In the future, a guest may have more than one VR allocated
274 	 * to it, which is why we go through this exercise.
275 	 */
276 	MUTEX_ENTER(&nhd->lock);
277 	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
278 		if (nhd->vr[vr_index].nxge == 0) {
279 			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
280 			break;
281 		}
282 	}
283 	MUTEX_EXIT(&nhd->lock);
284 
285 	if (vr_index == FUNC_VIR_MAX) {
286 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
287 		    "no VRs available"));
288 		return (NXGE_ERROR);
289 	}
290 
291 	vr = &nhd->vr[vr_index];
292 
293 	vr->nxge = (uintptr_t)nxge;
294 	vr->cookie = (uint32_t)cookie;
295 	vr->address = vr_address;
296 	vr->size = vr_size;
297 	vr->region = vr_index;
298 
299 	/*
300 	 * This is redundant data, but useful nonetheless.  It helps
301 	 * us to keep track of which RDCs & TDCs belong to us.
302 	 */
303 	if (nxge->tx_set.lg.count == 0)
304 		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
305 	if (nxge->rx_set.lg.count == 0)
306 		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);
307 
308 	/*
309 	 * See nxge_intr.c.
310 	 */
311 	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
312 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
313 		    "nxge_hio_intr_init() failed"));
314 		return (NXGE_ERROR);
315 	}
316 
317 	/*
318 	 * Now we find out which RDCs & TDCs have been allocated to us.
319 	 */
320 	tx = &nhd->hio.tx;
321 	if (tx->get_map) {
322 		/*
323 		 * The map we get back is a bitmap of the
324 		 * virtual Tx DMA channels we own -
325 		 * they are NOT real channel numbers.
326 		 */
327 		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
328 		if (hv_rv != 0) {
329 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
330 			    "tx->get_map() failed"));
331 			return (NXGE_ERROR);
332 		}
333 		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);
334 
335 		/*
336 		 * For each channel, mark these two fields
337 		 * while we have the VR data structure.
338 		 */
339 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
340 			if ((1 << i) & tx_map) {
341 				dc = nxge_guest_dc_alloc(nxge, vr,
342 				    NXGE_TRANSMIT_GROUP);
				if (dc == NULL) {
344 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
345 					    "DC add failed"));
346 					return (NXGE_ERROR);
347 				}
348 				dc->channel = (nxge_channel_t)i;
349 			}
350 		}
351 	}
352 
353 	rx = &nhd->hio.rx;
354 	if (rx->get_map) {
355 		/*
356 		 * I repeat, the map we get back is a bitmap of
357 		 * the virtual Rx DMA channels we own -
358 		 * they are NOT real channel numbers.
359 		 */
360 		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
361 		if (hv_rv != 0) {
362 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
363 			    "rx->get_map() failed"));
364 			return (NXGE_ERROR);
365 		}
366 		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);
367 
368 		/*
369 		 * For each channel, mark these two fields
370 		 * while we have the VR data structure.
371 		 */
372 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
373 			if ((1 << i) & rx_map) {
374 				dc = nxge_guest_dc_alloc(nxge, vr,
375 				    NXGE_RECEIVE_GROUP);
				if (dc == NULL) {
377 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
378 					    "DC add failed"));
379 					return (NXGE_ERROR);
380 				}
381 				dc->channel = (nxge_channel_t)i;
382 			}
383 		}
384 	}
385 
386 	/*
387 	 * Register with vnet.
388 	 */
389 	if ((mac_info = mac_alloc(MAC_VERSION)) == NULL)
390 		return (NXGE_ERROR);
391 
392 	mac_info->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
393 	mac_info->m_driver = nxge;
394 	mac_info->m_dip = nxge->dip;
395 	mac_info->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
396 	mac_info->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP);
	(void) memset(mac_info->m_src_addr, 0xff, MAXMACADDRLEN);
398 	mac_info->m_callbacks = &nxge_m_callbacks;
399 	mac_info->m_min_sdu = 0;
400 	mac_info->m_max_sdu = NXGE_MTU_DEFAULT_MAX -
401 	    sizeof (struct ether_header) - ETHERFCSL - 4;
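	/* The extra 4 bytes presumably account for a VLAN tag. */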
402 
403 	(void) memset(&mac_addr, 0xff, sizeof (mac_addr));
404 
405 	/* Register with vio_net. */
406 	vio = &nhd->hio.vio;
	if ((*vio->__register)(mac_info, VIO_NET_RES_HYBRID,
	    nxge->hio_mac_addr, mac_addr, &vr->vhp, &vio->cb)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL, "HIO registration failed"));
		kmem_free(mac_info->m_src_addr, MAXMACADDRLEN);
		kmem_free(mac_info->m_dst_addr, MAXMACADDRLEN);
		mac_free(mac_info);
		return (NXGE_ERROR);
	}
412 
413 	nxge->hio_vr = vr;	/* For faster lookups. */
414 
415 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));
416 
417 	return (NXGE_OK);
418 }
419 
420 /*
421  * nxge_guest_dc_alloc
422  *
423  *	Find a free nxge_hio_dc_t data structure.
424  *
 * Arguments:
 * 	nxge
 * 	vr	The virtualization region (VR) that will own the channel.
 * 	type	TRANSMIT or RECEIVE.
428  *
429  * Notes:
430  *
431  * Context:
432  *	Guest domain
433  */
static nxge_hio_dc_t *
435 nxge_guest_dc_alloc(
436 	nxge_t *nxge,
437 	nxge_hio_vr_t *vr,
438 	nxge_grp_type_t type)
439 {
440 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
441 	nxge_hio_dc_t *dc;
442 	int limit, i;
443 
444 	/*
445 	 * In the guest domain, there may be more than one VR.
446 	 * each one of which will be using the same slots, or
447 	 * virtual channel numbers.  So the <nhd>'s rdc & tdc
448 	 * tables must be shared.
449 	 */
450 	if (type == NXGE_TRANSMIT_GROUP) {
451 		dc = &nhd->tdc[0];
452 		limit = NXGE_MAX_TDCS;
453 	} else {
454 		dc = &nhd->rdc[0];
455 		limit = NXGE_MAX_RDCS;
456 	}
457 
458 	MUTEX_ENTER(&nhd->lock);
459 	for (i = 0; i < limit; i++, dc++) {
		if (dc->vr == NULL) {
461 			dc->vr = vr;
462 			dc->cookie = vr->cookie;
463 			MUTEX_EXIT(&nhd->lock);
464 			return (dc);
465 		}
466 	}
467 	MUTEX_EXIT(&nhd->lock);
468 
	return (NULL);
470 }
471 
472 /*
473  * res_map_parse
474  *
475  *	Parse a resource map.  The resources are DMA channels, receive
476  *	or transmit, depending on <type>.
477  *
478  * Arguments:
479  * 	nxge
480  * 	type	Transmit or receive.
481  *	res_map	The resource map to parse.
482  *
483  * Notes:
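 *	Only the low eight bits of <res_map> are meaningful: a VR has
 *	at most eight virtual channels, numbered 0 through 7.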
484  *
485  * Context:
486  *	Guest domain
487  */
static void
489 res_map_parse(
490 	nxge_t *nxge,
491 	nxge_grp_type_t type,
492 	uint64_t res_map)
493 {
494 	uint8_t slots, mask, slot;
495 	int first, count;
496 
497 	nxge_hw_pt_cfg_t *hardware;
498 	nxge_grp_t *group;
499 
500 	/* Slots are numbered 0 - 7. */
501 	slots = (uint8_t)(res_map & 0xff);
502 
	/* Find the first slot in use and count the bits set in the bitmap. */
	first = 0;
	count = 0;
	for (slot = 0, mask = 1; slot < 8; slot++, mask <<= 1) {
		if (slots & mask) {
			if (count == 0)
				first = slot;
			count++;
		}
	}
511 
512 	hardware = &nxge->pt_config.hw_config;
513 	group = (type == NXGE_TRANSMIT_GROUP) ?
514 	    nxge->tx_set.group[0] : nxge->rx_set.group[0];
515 
516 	/*
517 	 * A guest domain has one Tx & one Rx group, so far.
518 	 * In the future, there may be more than one.
519 	 */
520 	if (type == NXGE_TRANSMIT_GROUP) {
521 		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
522 
523 		hardware->tdc.start = first;
524 		hardware->tdc.count = count;
525 		hardware->tdc.owned = count;
526 
527 		group->map = slots;
528 
529 		/*
530 		 * Pointless in a guest domain.  This bitmap is used
531 		 * in only one place: nxge_txc_init(),
532 		 * a service-domain-only function.
533 		 */
534 		port->tx_dma_map = slots;
535 
536 		nxge->tx_set.owned.map |= slots;
537 	} else {
538 		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];
539 
540 		hardware->start_rdc = first;
541 		hardware->max_rdcs = count;
542 
543 		rdc_grp->start_rdc = (uint8_t)first;
544 		rdc_grp->max_rdcs = (uint8_t)count;
545 		rdc_grp->def_rdc = (uint8_t)first;
546 
547 		rdc_grp->map = slots;
548 		group->map = slots;
549 
550 		nxge->rx_set.owned.map |= slots;
551 	}
552 }
553 
554 /*
555  * nxge_hio_vr_release
556  *
557  *	Release a virtualization region (VR).
558  *
559  * Arguments:
560  * 	nxge
561  *
562  * Notes:
563  *	We must uninitialize all DMA channels associated with the VR, too.
564  *
565  *	The service domain will re-initialize these DMA channels later.
566  *	See nxge_hio.c:nxge_hio_share_free() for details.
567  *
568  * Context:
569  *	Guest domain
570  */
571 int
572 nxge_hio_vr_release(
573 	nxge_t *nxge)
574 {
575 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));
576 
577 	/*
578 	 * Uninitialize interrupts.
579 	 */
580 	nxge_hio_intr_uninit(nxge);
581 
582 	/*
583 	 * Uninitialize the receive DMA channels.
584 	 */
585 	nxge_uninit_rxdma_channels(nxge);
586 
587 	/*
588 	 * Uninitialize the transmit DMA channels.
589 	 */
590 	nxge_uninit_txdma_channels(nxge);
591 
	/* Remove both groups. Assumption: only two groups! */
593 	nxge_grp_remove(nxge, (vr_handle_t)nxge->rx_set.group[0]);
594 	nxge_grp_remove(nxge, (vr_handle_t)nxge->tx_set.group[0]);
595 
596 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));
597 
598 	return (NXGE_OK);
599 }
600 
601 #if defined(NIU_LP_WORKAROUND)
602 /*
603  * nxge_tdc_lp_conf
604  *
605  *	Configure the logical pages for a TDC.
606  *
607  * Arguments:
608  * 	nxge
609  * 	channel	The TDC to configure.
610  *
611  * Notes:
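 *	A virtual DMA channel is given two logical pages: page 0 covers
 *	the data buffers and page 1 the control (descriptor and mailbox)
 *	buffers.  The hypervisor uses these ranges to validate and
 *	translate the channel's DMA addresses within the VR.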
612  *
613  * Context:
614  *	Guest domain
615  */
616 nxge_status_t
617 nxge_tdc_lp_conf(
618 	p_nxge_t nxge,
619 	int channel)
620 {
621 	nxge_hio_dc_t		*dc;
622 	nxge_dma_common_t	*data;
623 	nxge_dma_common_t	*control;
624 	tx_ring_t 		*ring;
625 
626 	uint64_t		hv_rv;
627 	uint64_t		ra, size;
628 
629 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));
630 
631 	ring = nxge->tx_rings->rings[channel];
632 
633 	if (ring->hv_set) {
634 		/* This shouldn't happen. */
635 		return (NXGE_OK);
636 	}
637 
638 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
639 		return (NXGE_ERROR);
640 
641 	/*
642 	 * Initialize logical page 0 for data buffers.
643 	 *
644 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
645 	 * nxge_main.c:nxge_dma_mem_alloc().
646 	 */
647 	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
648 	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
649 	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;
650 
651 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
652 	    (uint64_t)channel, 0,
653 	    ring->hv_tx_buf_base_ioaddr_pp,
654 	    ring->hv_tx_buf_ioaddr_size);
655 
656 	if (hv_rv != 0) {
657 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
658 		    "<== nxge_tdc_lp_conf: channel %d "
659 		    "(page 0 data buf) hv: %d "
660 		    "ioaddr_pp $%p size 0x%llx ",
661 		    channel, hv_rv,
662 		    ring->hv_tx_buf_base_ioaddr_pp,
663 		    ring->hv_tx_buf_ioaddr_size));
664 		return (NXGE_ERROR | hv_rv);
665 	}
666 
667 	ra = size = 0;
668 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
669 	    (uint64_t)channel, 0, &ra, &size);
670 
671 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
672 	    "==> nxge_tdc_lp_conf: channel %d "
673 	    "(page 0 data buf) hv_rv 0x%llx "
674 	    "set ioaddr_pp $%p set size 0x%llx "
675 	    "get ra ioaddr_pp $%p get size 0x%llx ",
676 	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
677 	    ring->hv_tx_buf_ioaddr_size, ra, size));
678 
679 	/*
680 	 * Initialize logical page 1 for control buffers.
681 	 */
682 	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
683 	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
684 	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
685 
686 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
687 	    (uint64_t)channel, (uint64_t)1,
688 	    ring->hv_tx_cntl_base_ioaddr_pp,
689 	    ring->hv_tx_cntl_ioaddr_size);
690 
691 	if (hv_rv != 0) {
692 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
693 		    "<== nxge_tdc_lp_conf: channel %d "
694 		    "(page 1 cntl buf) hv_rv 0x%llx "
695 		    "ioaddr_pp $%p size 0x%llx ",
696 		    channel, hv_rv,
697 		    ring->hv_tx_cntl_base_ioaddr_pp,
698 		    ring->hv_tx_cntl_ioaddr_size));
699 		return (NXGE_ERROR | hv_rv);
700 	}
701 
702 	ra = size = 0;
703 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
704 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
705 
706 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
707 	    "==> nxge_tdc_lp_conf: channel %d "
708 	    "(page 1 cntl buf) hv_rv 0x%llx "
709 	    "set ioaddr_pp $%p set size 0x%llx "
710 	    "get ra ioaddr_pp $%p get size 0x%llx ",
711 	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
712 	    ring->hv_tx_cntl_ioaddr_size, ra, size));
713 
714 	ring->hv_set = B_TRUE;
715 
716 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));
717 
718 	return (NXGE_OK);
719 }
720 
721 /*
722  * nxge_rdc_lp_conf
723  *
724  *	Configure an RDC's logical pages.
725  *
726  * Arguments:
727  * 	nxge
728  * 	channel	The RDC to configure.
729  *
730  * Notes:
731  *
732  * Context:
733  *	Guest domain
734  */
735 nxge_status_t
736 nxge_rdc_lp_conf(
737 	p_nxge_t nxge,
738 	int channel)
739 {
740 	nxge_hio_dc_t		*dc;
741 	nxge_dma_common_t	*data;
742 	nxge_dma_common_t	*control;
743 	rx_rbr_ring_t		*ring;
744 
745 	uint64_t		hv_rv;
746 	uint64_t		ra, size;
747 
748 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));
749 
750 	ring = nxge->rx_rbr_rings->rbr_rings[channel];
751 
752 	if (ring->hv_set) {
753 		return (NXGE_OK);
754 	}
755 
756 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
757 		return (NXGE_ERROR);
758 
759 	/*
760 	 * Initialize logical page 0 for data buffers.
761 	 *
762 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
763 	 * nxge_main.c:nxge_dma_mem_alloc().
764 	 */
765 	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
766 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
767 	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;
768 
769 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
770 	    (uint64_t)channel, 0,
771 	    ring->hv_rx_buf_base_ioaddr_pp,
772 	    ring->hv_rx_buf_ioaddr_size);
773 
774 	if (hv_rv != 0) {
775 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
776 		    "<== nxge_rdc_lp_conf: channel %d "
777 		    "(page 0 data buf) hv_rv 0x%llx "
778 		    "ioaddr_pp $%p size 0x%llx ",
779 		    channel, hv_rv,
780 		    ring->hv_rx_buf_base_ioaddr_pp,
781 		    ring->hv_rx_buf_ioaddr_size));
782 		return (NXGE_ERROR | hv_rv);
783 	}
784 
785 	ra = size = 0;
786 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
787 	    (uint64_t)channel, 0, &ra, &size);
788 
789 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
790 	    "==> nxge_rdc_lp_conf: channel %d "
791 	    "(page 0 data buf) hv_rv 0x%llx "
792 	    "set ioaddr_pp $%p set size 0x%llx "
793 	    "get ra ioaddr_pp $%p get size 0x%llx ",
794 	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
795 	    ring->hv_rx_buf_ioaddr_size, ra, size));
796 
797 	/*
798 	 * Initialize logical page 1 for control buffers.
799 	 */
800 	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
801 	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
802 	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
803 
804 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
805 	    (uint64_t)channel, (uint64_t)1,
806 	    ring->hv_rx_cntl_base_ioaddr_pp,
807 	    ring->hv_rx_cntl_ioaddr_size);
808 
809 	if (hv_rv != 0) {
810 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
811 		    "<== nxge_rdc_lp_conf: channel %d "
812 		    "(page 1 cntl buf) hv_rv 0x%llx "
813 		    "ioaddr_pp $%p size 0x%llx ",
814 		    channel, hv_rv,
815 		    ring->hv_rx_cntl_base_ioaddr_pp,
816 		    ring->hv_rx_cntl_ioaddr_size));
817 		return (NXGE_ERROR | hv_rv);
818 	}
819 
820 	ra = size = 0;
821 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
822 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
823 
824 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
825 	    "==> nxge_rdc_lp_conf: channel %d "
826 	    "(page 1 cntl buf) hv_rv 0x%llx "
827 	    "set ioaddr_pp $%p set size 0x%llx "
828 	    "get ra ioaddr_pp $%p get size 0x%llx ",
829 	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
830 	    ring->hv_rx_cntl_ioaddr_size, ra, size));
831 
832 	ring->hv_set = B_TRUE;
833 
834 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));
835 
836 	return (NXGE_OK);
837 }
838 #endif	/* defined(NIU_LP_WORKAROUND) */
839 
840 /*
841  * This value is in milliseconds.
842  */
843 #define	NXGE_GUEST_TIMER	500 /* 1/2 second, for now */
844 
845 /*
846  * nxge_hio_start_timer
847  *
848  *	Start the timer which checks for Tx hangs.
849  *
850  * Arguments:
851  * 	nxge
852  *
853  * Notes:
854  *	This function is called from nxge_attach().
855  *
856  *	This function kicks off the guest domain equivalent of
857  *	nxge_check_hw_state().  It is called only once, from attach.
858  *
859  * Context:
860  *	Guest domain
861  */
862 void
863 nxge_hio_start_timer(
864 	nxge_t *nxge)
865 {
866 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
867 	nxge_hio_vr_t *vr;
868 	int region;
869 
870 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_timer_start"));
871 
872 	MUTEX_ENTER(&nhd->lock);
873 
874 	/*
875 	 * Find our VR data structure.  (We are currently assuming
876 	 * one VR per guest domain.  That may change in the future.)
877 	 */
878 	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
879 		if (nhd->vr[region].nxge == (uintptr_t)nxge)
880 			break;
881 	}
882 
883 	MUTEX_EXIT(&nhd->lock);
884 
885 	if (region == NXGE_VR_SR_MAX) {
886 		return;
887 	}
888 
889 	vr = (nxge_hio_vr_t *)&nhd->vr[region];
890 
891 	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
892 	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));
893 
894 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_timer_start"));
895 }
896 
897 /*
898  * nxge_check_guest_state
899  *
900  *	Essentially, check for Tx hangs.  In the future, if we are
901  *	polling the hardware, we may do so here.
902  *
903  * Arguments:
904  * 	vr	The virtualization region (VR) data structure.
905  *
906  * Notes:
907  *	This function is the guest domain equivalent of
908  *	nxge_check_hw_state().  Since we have no hardware to
909  * 	check, we simply call nxge_check_tx_hang().
910  *
911  * Context:
912  *	Guest domain
913  */
static void
915 nxge_check_guest_state(
916 	nxge_hio_vr_t *vr)
917 {
918 	nxge_t *nxge = (nxge_t *)vr->nxge;
919 
920 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));
921 
922 	MUTEX_ENTER(nxge->genlock);
923 
924 	nxge->nxge_timerid = 0;
925 
926 	nxge_check_tx_hang(nxge);
927 
928 	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
929 	    (caddr_t)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));
930 
932 	MUTEX_EXIT(nxge->genlock);
933 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
934 }
935 
936 #endif	/* defined(sun4v) */
937