1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * nxge_hio_guest.c
29  *
30  * This file manages the virtualization resources for a guest domain.
31  *
32  */
33 
34 #include <sys/nxge/nxge_impl.h>
35 #include <sys/nxge/nxge_fzc.h>
36 #include <sys/nxge/nxge_rxdma.h>
37 #include <sys/nxge/nxge_txdma.h>
38 #include <sys/nxge/nxge_hio.h>
39 
40 /*
41  * nxge_guest_regs_map
42  *
43  *	Map in a guest domain's register set(s).
44  *
45  * Arguments:
46  * 	nxge
47  *
 * Notes:
 *	We set <is_vraddr> to B_TRUE in each NPI handle: a guest
 *	addresses its registers relative to its virtualization
 *	region (VR), not to a physical function's PIO space.
50  *
51  * Context:
52  *	Guest domain
53  */
54 static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
55 	DDI_DEVICE_ATTR_V0,
56 	DDI_STRUCTURE_LE_ACC,
57 	DDI_STRICTORDER_ACC,
58 };
59 
60 int
61 nxge_guest_regs_map(nxge_t *nxge)
62 {
63 	dev_regs_t 	*regs;
64 	off_t		regsize;
65 	int rv;
66 
67 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));
68 
69 	/* So we can allocate properly-aligned memory. */
70 	nxge->niu_type = N2_NIU; /* Version 1.0 only */
71 	nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */
72 
73 	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
74 	regs = nxge->dev_regs;
75 
	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}
80 
81 	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
82 	    &nxge_guest_register_access_attributes, &regs->nxge_regh);
83 
	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}
88 
	/*
	 * All three NPI handles (PIO, reg, and vreg) refer to the
	 * same mapped register set; a guest has only this one
	 * mapping.
	 */
	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;
95 
96 	/* NPI_REG_ADD_HANDLE_SET() */
97 	nxge->npi_reg_handle.regh = regs->nxge_regh;
98 	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
99 	nxge->npi_reg_handle.is_vraddr = B_TRUE;
100 	nxge->npi_reg_handle.function.instance = nxge->instance;
101 	nxge->npi_reg_handle.function.function = nxge->function_num;
102 	nxge->npi_reg_handle.nxgep = (void *)nxge;
103 
104 	/* NPI_VREG_ADD_HANDLE_SET() */
105 	nxge->npi_vreg_handle.regh = regs->nxge_regh;
106 	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
107 	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
108 	nxge->npi_vreg_handle.function.instance = nxge->instance;
109 	nxge->npi_vreg_handle.function.function = nxge->function_num;
110 	nxge->npi_vreg_handle.nxgep = (void *)nxge;
111 
112 	regs->nxge_vir_regp = regs->nxge_regp;
113 	regs->nxge_vir_regh = regs->nxge_regh;
114 
115 	/*
116 	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
117 	 * or FCODE reg variables.
118 	 */
119 
120 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));
121 
122 	return (NXGE_OK);
123 }
124 
125 void
126 nxge_guest_regs_map_free(
127 	nxge_t *nxge)
128 {
129 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));
130 
131 	if (nxge->dev_regs) {
132 		if (nxge->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_guest_regs_map_free: device registers"));
135 			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
136 			nxge->dev_regs->nxge_regh = NULL;
137 		}
138 		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
140 	}
141 
142 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
143 }
144 
145 #if defined(sun4v)
146 
147 /*
148  * -------------------------------------------------------------
149  * Local prototypes
150  * -------------------------------------------------------------
151  */
152 static nxge_hio_dc_t *nxge_guest_dc_alloc(
153 	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);
154 
155 static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
156 static void nxge_check_guest_state(nxge_hio_vr_t *);
157 
158 /*
159  * nxge_hio_vr_add
160  *
161  *	If we have been given a virtualization region (VR),
162  *	then initialize it.
163  *
164  * Arguments:
165  * 	nxge
166  *
167  * Notes:
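 *	The guest discovers its HV cookie via its "reg" property,
 *	asks the hypervisor for the VR's address and size, and then
 *	reads the Tx and Rx maps to learn which virtual DMA
 *	channels it owns.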
168  *
169  * Context:
170  *	Guest domain
171  */
172 /* ARGSUSED */
173 int
174 nxge_hio_vr_add(nxge_t *nxge)
175 {
176 	extern nxge_status_t	nxge_mac_register(p_nxge_t);
177 
178 	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
179 	nxge_hio_vr_t		*vr;
180 	nxge_hio_dc_t		*dc;
181 	int			*reg_val;
182 	uint_t			reg_len;
183 	uint8_t			vr_index;
184 	nxhv_vr_fp_t		*fp;
185 	uint64_t		vr_address, vr_size;
186 	uint32_t		cookie;
187 	nxhv_dc_fp_t		*tx, *rx;
188 	uint64_t		tx_map, rx_map;
189 	uint64_t		hv_rv;
190 	int			i;
191 	nxge_status_t		status;
192 
193 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));
194 
195 	if (nhd->type == NXGE_HIO_TYPE_SERVICE) {
196 		/*
197 		 * Can't add VR to the service domain from which we came.
198 		 */
199 		ASSERT(nhd->type == NXGE_HIO_TYPE_GUEST);
200 		return (DDI_FAILURE);
201 	}
202 
203 	/*
204 	 * Get our HV cookie.
205 	 */
206 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
207 	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "`reg' property not found"));
209 		return (DDI_FAILURE);
210 	}
211 
212 	cookie = (uint32_t)(reg_val[0]);
213 	ddi_prop_free(reg_val);
214 
215 	fp = &nhd->hio.vr;
216 	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
217 	if (hv_rv != 0) {
218 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
219 		    "vr->getinfo() failed"));
220 		return (DDI_FAILURE);
221 	}
222 
223 	/*
224 	 * In the guest domain, we can use any VR data structure
225 	 * we want, because we're not supposed to know which VR
226 	 * the service domain has allocated to us.
227 	 *
228 	 * In the current version, the least significant nybble of
229 	 * the cookie is the VR region, but that could change
230 	 * very easily.
231 	 *
232 	 * In the future, a guest may have more than one VR allocated
233 	 * to it, which is why we go through this exercise.
234 	 */
235 	MUTEX_ENTER(&nhd->lock);
236 	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
237 		if (nhd->vr[vr_index].nxge == 0) {
238 			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
239 			break;
240 		}
241 	}
242 	MUTEX_EXIT(&nhd->lock);
243 
244 	if (vr_index == FUNC_VIR_MAX) {
245 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
246 		    "no VRs available"));
247 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
248 		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
249 		    nxge->instance, cookie));
250 		return (DDI_FAILURE);
251 	}
252 
253 	vr = &nhd->vr[vr_index];
254 
255 	vr->nxge = (uintptr_t)nxge;
256 	vr->cookie = (uint32_t)cookie;
257 	vr->address = vr_address;
258 	vr->size = vr_size;
259 	vr->region = vr_index;
260 
261 	/*
262 	 * This is redundant data, but useful nonetheless.  It helps
263 	 * us to keep track of which RDCs & TDCs belong to us.
264 	 */
265 	if (nxge->tx_set.lg.count == 0)
266 		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
267 	if (nxge->rx_set.lg.count == 0)
268 		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);
269 
270 	/*
271 	 * See nxge_intr.c.
272 	 */
273 	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
274 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
275 		    "nxge_hio_intr_init() failed"));
276 		return (DDI_FAILURE);
277 	}
278 
279 	/*
280 	 * Now we find out which RDCs & TDCs have been allocated to us.
281 	 */
282 	tx = &nhd->hio.tx;
	if (tx->get_map != NULL) {
284 		/*
285 		 * The map we get back is a bitmap of the
286 		 * virtual Tx DMA channels we own -
287 		 * they are NOT real channel numbers.
288 		 */
289 		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
290 		if (hv_rv != 0) {
291 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
292 			    "tx->get_map() failed"));
293 			return (DDI_FAILURE);
294 		}
295 		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);
296 
297 		/*
298 		 * For each channel, mark these two fields
299 		 * while we have the VR data structure.
300 		 */
301 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
302 			if ((1 << i) & tx_map) {
303 				dc = nxge_guest_dc_alloc(nxge, vr,
304 				    NXGE_TRANSMIT_GROUP);
				if (dc == NULL) {
306 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
307 					    "DC add failed"));
308 					return (DDI_FAILURE);
309 				}
310 				dc->channel = (nxge_channel_t)i;
311 			}
312 		}
313 	}
314 
315 	rx = &nhd->hio.rx;
	if (rx->get_map != NULL) {
		/*
		 * Again, the map we get back is a bitmap of
		 * the virtual Rx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
322 		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
323 		if (hv_rv != 0) {
324 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
325 			    "rx->get_map() failed"));
326 			return (DDI_FAILURE);
327 		}
328 		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);
329 
330 		/*
331 		 * For each channel, mark these two fields
332 		 * while we have the VR data structure.
333 		 */
334 		for (i = 0; i < VP_CHANNEL_MAX; i++) {
335 			if ((1 << i) & rx_map) {
336 				dc = nxge_guest_dc_alloc(nxge, vr,
337 				    NXGE_RECEIVE_GROUP);
				if (dc == NULL) {
339 					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
340 					    "DC add failed"));
341 					return (DDI_FAILURE);
342 				}
343 				dc->channel = (nxge_channel_t)i;
344 			}
345 		}
346 	}
347 
348 	status = nxge_mac_register(nxge);
349 	if (status != NXGE_OK) {
		cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed",
		    nxge->instance);
352 		return (DDI_FAILURE);
353 	}
354 
355 	nxge->hio_vr = vr;	/* For faster lookups. */
356 
357 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));
358 
359 	return (DDI_SUCCESS);
360 }
361 
362 /*
363  * nxge_guest_dc_alloc
364  *
365  *	Find a free nxge_hio_dc_t data structure.
366  *
367  * Arguments:
368  * 	nxge
369  * 	type	TRANSMIT or RECEIVE.
370  *
371  * Notes:
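 *	Returns NULL if every data channel structure of the
 *	requested type is already in use.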
372  *
373  * Context:
374  *	Guest domain
375  */
static nxge_hio_dc_t *
nxge_guest_dc_alloc(
378 	nxge_t *nxge,
379 	nxge_hio_vr_t *vr,
380 	nxge_grp_type_t type)
381 {
382 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
383 	nxge_hio_dc_t *dc;
384 	int limit, i;
385 
386 	/*
387 	 * In the guest domain, there may be more than one VR.
388 	 * each one of which will be using the same slots, or
389 	 * virtual channel numbers.  So the <nhd>'s rdc & tdc
390 	 * tables must be shared.
391 	 */
392 	if (type == NXGE_TRANSMIT_GROUP) {
393 		dc = &nhd->tdc[0];
394 		limit = NXGE_MAX_TDCS;
395 	} else {
396 		dc = &nhd->rdc[0];
397 		limit = NXGE_MAX_RDCS;
398 	}
399 
400 	MUTEX_ENTER(&nhd->lock);
401 	for (i = 0; i < limit; i++, dc++) {
		if (dc->vr == NULL) {
403 			dc->vr = vr;
404 			dc->cookie = vr->cookie;
405 			MUTEX_EXIT(&nhd->lock);
406 			return (dc);
407 		}
408 	}
409 	MUTEX_EXIT(&nhd->lock);
410 
	return (NULL);
412 }
413 
414 /*
415  * res_map_parse
416  *
417  *	Parse a resource map.  The resources are DMA channels, receive
418  *	or transmit, depending on <type>.
419  *
420  * Arguments:
421  * 	nxge
422  * 	type	Transmit or receive.
423  *	res_map	The resource map to parse.
424  *
425  * Notes:
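 *	Only the low-order byte of <res_map> is significant; slots
 *	are numbered 0-7.  For example, a map of 0x0c (binary
 *	00001100) parses to first = 2, count = 2: slots 2 and 3
 *	belong to this guest.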
426  *
427  * Context:
428  *	Guest domain
429  */
static void
res_map_parse(
432 	nxge_t *nxge,
433 	nxge_grp_type_t type,
434 	uint64_t res_map)
435 {
436 	uint8_t slots, mask, slot;
	int first = 0, count;
438 
439 	nxge_hw_pt_cfg_t *hardware;
440 	nxge_grp_t *group;
441 
442 	/* Slots are numbered 0 - 7. */
443 	slots = (uint8_t)(res_map & 0xff);
444 
	/*
	 * Count the number of bits in the bitmap, and remember the
	 * first (lowest-numbered) slot that is set.
	 */
	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
		if (slots & mask) {
			if (count++ == 0)
				first = slot;
		}
		mask <<= 1;
	}
453 
454 	hardware = &nxge->pt_config.hw_config;
455 	group = (type == NXGE_TRANSMIT_GROUP) ?
456 	    nxge->tx_set.group[0] : nxge->rx_set.group[0];
457 
458 	/*
459 	 * A guest domain has one Tx & one Rx group, so far.
460 	 * In the future, there may be more than one.
461 	 */
462 	if (type == NXGE_TRANSMIT_GROUP) {
463 		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
464 		nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];
465 
466 		hardware->tdc.start = first;
467 		hardware->tdc.count = count;
468 		hardware->tdc.owned = count;
469 
470 		tdc_grp->start_tdc = first;
471 		tdc_grp->max_tdcs = (uint8_t)count;
472 		tdc_grp->grp_index = group->index;
473 		tdc_grp->map = slots;
474 
475 		group->map = slots;
476 
477 		/*
478 		 * Pointless in a guest domain.  This bitmap is used
479 		 * in only one place: nxge_txc_init(),
480 		 * a service-domain-only function.
481 		 */
482 		port->tx_dma_map = slots;
483 
484 		nxge->tx_set.owned.map |= slots;
485 	} else {
486 		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];
487 
488 		hardware->start_rdc = first;
489 		hardware->max_rdcs = count;
490 
491 		rdc_grp->start_rdc = (uint8_t)first;
492 		rdc_grp->max_rdcs = (uint8_t)count;
493 		rdc_grp->def_rdc = (uint8_t)first;
494 
495 		rdc_grp->map = slots;
496 		group->map = slots;
497 
498 		nxge->rx_set.owned.map |= slots;
499 	}
500 }
501 
502 /*
503  * nxge_hio_vr_release
504  *
505  *	Release a virtualization region (VR).
506  *
507  * Arguments:
508  * 	nxge
509  *
510  * Notes:
511  *	We must uninitialize all DMA channels associated with the VR, too.
512  *
513  *	The service domain will re-initialize these DMA channels later.
514  *	See nxge_hio.c:nxge_hio_share_free() for details.
515  *
516  * Context:
517  *	Guest domain
518  */
519 int
520 nxge_hio_vr_release(nxge_t *nxge)
521 {
522 	nxge_hio_data_t	*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
523 	int		vr_index;
524 
525 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));
526 
527 	if (nxge->hio_vr == NULL) {
528 		return (NXGE_OK);
529 	}
530 
531 	/*
532 	 * Uninitialize interrupts.
533 	 */
534 	nxge_hio_intr_uninit(nxge);
535 
536 	/*
537 	 * Uninitialize the receive DMA channels.
538 	 */
539 	nxge_uninit_rxdma_channels(nxge);
540 
541 	/*
542 	 * Uninitialize the transmit DMA channels.
543 	 */
544 	nxge_uninit_txdma_channels(nxge);
545 
546 	/*
547 	 * Remove both groups. Assumption: only two groups!
548 	 */
549 	if (nxge->rx_set.group[0] != NULL)
550 		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
551 	if (nxge->tx_set.group[0] != NULL)
552 		nxge_grp_remove(nxge, nxge->tx_set.group[0]);
553 
554 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));
555 
556 	/*
557 	 * Clean up.
558 	 */
559 	MUTEX_ENTER(&nhd->lock);
560 	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
561 		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			nhd->vr[vr_index].nxge = 0;
563 			break;
564 		}
565 	}
566 	MUTEX_EXIT(&nhd->lock);
567 
568 	return (NXGE_OK);
569 }
570 
571 #if defined(NIU_LP_WORKAROUND)
572 /*
573  * nxge_tdc_lp_conf
574  *
575  *	Configure the logical pages for a TDC.
576  *
577  * Arguments:
578  * 	nxge
579  * 	channel	The TDC to configure.
580  *
581  * Notes:
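 *	Each TDC gets two logical pages: page 0 maps its data
 *	buffers, page 1 maps its control buffers.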
582  *
583  * Context:
584  *	Guest domain
585  */
586 nxge_status_t
587 nxge_tdc_lp_conf(
588 	p_nxge_t nxge,
589 	int channel)
590 {
591 	nxge_hio_dc_t		*dc;
592 	nxge_dma_common_t	*data;
593 	nxge_dma_common_t	*control;
594 	tx_ring_t 		*ring;
595 
596 	uint64_t		hv_rv;
597 	uint64_t		ra, size;
598 
599 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));
600 
601 	ring = nxge->tx_rings->rings[channel];
602 
603 	if (ring->hv_set) {
604 		/* This shouldn't happen. */
605 		return (NXGE_OK);
606 	}
607 
	if ((dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)) == NULL)
		return (NXGE_ERROR);
610 
611 	/*
612 	 * Initialize logical page 0 for data buffers.
613 	 *
614 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
615 	 * nxge_main.c:nxge_dma_mem_alloc().
616 	 */
617 	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
618 	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
619 	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;
620 
621 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
622 	    (uint64_t)channel, 0,
623 	    ring->hv_tx_buf_base_ioaddr_pp,
624 	    ring->hv_tx_buf_ioaddr_size);
625 
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}
636 
637 	ra = size = 0;
638 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
639 	    (uint64_t)channel, 0, &ra, &size);
640 
641 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
642 	    "==> nxge_tdc_lp_conf: channel %d "
643 	    "(page 0 data buf) hv_rv 0x%llx "
644 	    "set ioaddr_pp $%p set size 0x%llx "
645 	    "get ra ioaddr_pp $%p get size 0x%llx ",
646 	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
647 	    ring->hv_tx_buf_ioaddr_size, ra, size));
648 
649 	/*
650 	 * Initialize logical page 1 for control buffers.
651 	 */
652 	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
653 	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
654 	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
655 
656 	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
657 	    (uint64_t)channel, (uint64_t)1,
658 	    ring->hv_tx_cntl_base_ioaddr_pp,
659 	    ring->hv_tx_cntl_ioaddr_size);
660 
661 	if (hv_rv != 0) {
662 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
663 		    "<== nxge_tdc_lp_conf: channel %d "
664 		    "(page 1 cntl buf) hv_rv 0x%llx "
665 		    "ioaddr_pp $%p size 0x%llx ",
666 		    channel, hv_rv,
667 		    ring->hv_tx_cntl_base_ioaddr_pp,
668 		    ring->hv_tx_cntl_ioaddr_size));
669 		return (NXGE_ERROR | hv_rv);
670 	}
671 
672 	ra = size = 0;
673 	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
674 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
675 
676 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
677 	    "==> nxge_tdc_lp_conf: channel %d "
678 	    "(page 1 cntl buf) hv_rv 0x%llx "
679 	    "set ioaddr_pp $%p set size 0x%llx "
680 	    "get ra ioaddr_pp $%p get size 0x%llx ",
681 	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
682 	    ring->hv_tx_cntl_ioaddr_size, ra, size));
683 
684 	ring->hv_set = B_TRUE;
685 
686 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));
687 
688 	return (NXGE_OK);
689 }
690 
691 /*
692  * nxge_rdc_lp_conf
693  *
694  *	Configure an RDC's logical pages.
695  *
696  * Arguments:
697  * 	nxge
698  * 	channel	The RDC to configure.
699  *
700  * Notes:
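 *	As with the TDCs, each RDC gets two logical pages: page 0
 *	maps its data buffers, page 1 maps its control buffers.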
701  *
702  * Context:
703  *	Guest domain
704  */
705 nxge_status_t
706 nxge_rdc_lp_conf(
707 	p_nxge_t nxge,
708 	int channel)
709 {
710 	nxge_hio_dc_t		*dc;
711 	nxge_dma_common_t	*data;
712 	nxge_dma_common_t	*control;
713 	rx_rbr_ring_t		*ring;
714 
715 	uint64_t		hv_rv;
716 	uint64_t		ra, size;
717 
718 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));
719 
720 	ring = nxge->rx_rbr_rings->rbr_rings[channel];
721 
722 	if (ring->hv_set) {
723 		return (NXGE_OK);
724 	}
725 
	if ((dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)) == NULL)
		return (NXGE_ERROR);
728 
729 	/*
730 	 * Initialize logical page 0 for data buffers.
731 	 *
732 	 * <orig_ioaddr_pp> & <orig_alength> are initialized in
733 	 * nxge_main.c:nxge_dma_mem_alloc().
734 	 */
735 	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
736 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
737 	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;
738 
739 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
740 	    (uint64_t)channel, 0,
741 	    ring->hv_rx_buf_base_ioaddr_pp,
742 	    ring->hv_rx_buf_ioaddr_size);
743 
744 	if (hv_rv != 0) {
745 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
746 		    "<== nxge_rdc_lp_conf: channel %d "
747 		    "(page 0 data buf) hv_rv 0x%llx "
748 		    "ioaddr_pp $%p size 0x%llx ",
749 		    channel, hv_rv,
750 		    ring->hv_rx_buf_base_ioaddr_pp,
751 		    ring->hv_rx_buf_ioaddr_size));
752 		return (NXGE_ERROR | hv_rv);
753 	}
754 
755 	ra = size = 0;
756 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
757 	    (uint64_t)channel, 0, &ra, &size);
758 
759 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
760 	    "==> nxge_rdc_lp_conf: channel %d "
761 	    "(page 0 data buf) hv_rv 0x%llx "
762 	    "set ioaddr_pp $%p set size 0x%llx "
763 	    "get ra ioaddr_pp $%p get size 0x%llx ",
764 	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
765 	    ring->hv_rx_buf_ioaddr_size, ra, size));
766 
767 	/*
768 	 * Initialize logical page 1 for control buffers.
769 	 */
770 	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
771 	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
772 	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;
773 
774 	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
775 	    (uint64_t)channel, (uint64_t)1,
776 	    ring->hv_rx_cntl_base_ioaddr_pp,
777 	    ring->hv_rx_cntl_ioaddr_size);
778 
779 	if (hv_rv != 0) {
780 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
781 		    "<== nxge_rdc_lp_conf: channel %d "
782 		    "(page 1 cntl buf) hv_rv 0x%llx "
783 		    "ioaddr_pp $%p size 0x%llx ",
784 		    channel, hv_rv,
785 		    ring->hv_rx_cntl_base_ioaddr_pp,
786 		    ring->hv_rx_cntl_ioaddr_size));
787 		return (NXGE_ERROR | hv_rv);
788 	}
789 
790 	ra = size = 0;
791 	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
792 	    (uint64_t)channel, (uint64_t)1, &ra, &size);
793 
794 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
795 	    "==> nxge_rdc_lp_conf: channel %d "
796 	    "(page 1 cntl buf) hv_rv 0x%llx "
797 	    "set ioaddr_pp $%p set size 0x%llx "
798 	    "get ra ioaddr_pp $%p get size 0x%llx ",
799 	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
800 	    ring->hv_rx_cntl_ioaddr_size, ra, size));
801 
802 	ring->hv_set = B_TRUE;
803 
804 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));
805 
806 	return (NXGE_OK);
807 }
808 #endif	/* defined(NIU_LP_WORKAROUND) */
809 
810 /*
811  * This value is in milliseconds.
812  */
813 #define	NXGE_GUEST_TIMER	500 /* 1/2 second, for now */
814 
815 /*
816  * nxge_hio_start_timer
817  *
818  *	Start the timer which checks for Tx hangs.
819  *
820  * Arguments:
821  * 	nxge
822  *
823  * Notes:
824  *	This function is called from nxge_attach().
825  *
826  *	This function kicks off the guest domain equivalent of
827  *	nxge_check_hw_state().  It is called only once, from attach.
828  *
829  * Context:
830  *	Guest domain
831  */
832 void
833 nxge_hio_start_timer(
834 	nxge_t *nxge)
835 {
836 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
837 	nxge_hio_vr_t *vr;
838 	int region;
839 
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_start_timer"));
841 
842 	MUTEX_ENTER(&nhd->lock);
843 
844 	/*
845 	 * Find our VR data structure.  (We are currently assuming
846 	 * one VR per guest domain.  That may change in the future.)
847 	 */
848 	for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) {
849 		if (nhd->vr[region].nxge == (uintptr_t)nxge)
850 			break;
851 	}
852 
853 	MUTEX_EXIT(&nhd->lock);
854 
855 	if (region == NXGE_VR_SR_MAX) {
856 		return;
857 	}
858 
	vr = &nhd->vr[region];
860 
	/*
	 * NXGE_GUEST_TIMER is in milliseconds; drv_usectohz()
	 * expects microseconds, hence the factor of 1000.
	 */
	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));
863 
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_start_timer"));
865 }
866 
867 /*
868  * nxge_check_guest_state
869  *
870  *	Essentially, check for Tx hangs.  In the future, if we are
871  *	polling the hardware, we may do so here.
872  *
873  * Arguments:
874  * 	vr	The virtualization region (VR) data structure.
875  *
876  * Notes:
877  *	This function is the guest domain equivalent of
878  *	nxge_check_hw_state().  Since we have no hardware to
879  * 	check, we simply call nxge_check_tx_hang().
880  *
881  * Context:
882  *	Guest domain
883  */
static void
885 nxge_check_guest_state(
886 	nxge_hio_vr_t *vr)
887 {
888 	nxge_t *nxge = (nxge_t *)vr->nxge;
889 
890 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));
891 
892 	MUTEX_ENTER(nxge->genlock);
893 	nxge->nxge_timerid = 0;
894 
895 	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
896 		nxge_check_tx_hang(nxge);
897 
		/*
		 * timeout(9F) timers are one-shot, so re-arm the
		 * timer as long as the MAC is running.
		 */
		nxge->nxge_timerid = timeout((void(*)(void *))
		    nxge_check_guest_state, (void *)vr,
		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
	}

	MUTEX_EXIT(nxge->genlock);
905 	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
906 }
907 
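/*
 * nxge_hio_rdc_intr_arm
 *
 *	Arm or disarm the logical device group (LDG) of each RDC
 *	in a guest's (only) receive group.
 *
 * Arguments:
 * 	nxge
 * 	arm	B_TRUE to arm the interrupts, B_FALSE to disarm them.
 *
 * Context:
 *	Guest domain
 */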
908 nxge_status_t
909 nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm)
910 {
911 	nxge_grp_t	*group;
912 	uint32_t	channel;
913 	nxge_hio_dc_t	*dc;
914 	nxge_ldg_t	*ldgp;
915 
916 	/*
917 	 * Validate state of guest interface before
918 	 * proceeeding.
919 	 */
920 	if (!isLDOMguest(nxge))
921 		return (NXGE_ERROR);
922 	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
923 		return (NXGE_ERROR);
924 
925 	/*
926 	 * In guest domain, always and only dealing with
927 	 * group 0 for an instance of nxge.
928 	 */
929 	group = nxge->rx_set.group[0];
930 
931 	/*
932 	 * Look to arm the the RDCs for the group.
933 	 */
934 	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
935 		if ((1 << channel) & group->map) {
936 			/*
937 			 * Get the RDC.
938 			 */
939 			dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel);
940 			if (dc == NULL)
941 				return (NXGE_ERROR);
942 
943 			/*
944 			 * Get the RDC's ldg group.
945 			 */
946 			ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector];
947 			if (ldgp == NULL)
948 				return (NXGE_ERROR);
949 
950 			/*
951 			 * Set the state of the group.
952 			 */
953 			ldgp->arm = arm;
954 
955 			nxge_hio_ldgimgn(nxge, ldgp);
956 		}
957 	}
958 
959 	return (NXGE_OK);
960 }
961 
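/*
 * nxge_hio_rdc_enable
 *
 *	Enable each RDC in a guest's (only) receive group and clear
 *	its RBR empty condition.
 *
 * Arguments:
 * 	nxge
 *
 * Context:
 *	Guest domain
 */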
962 nxge_status_t
963 nxge_hio_rdc_enable(p_nxge_t nxge)
964 {
965 	nxge_grp_t	*group;
966 	npi_handle_t	handle;
967 	uint32_t	channel;
968 	npi_status_t	rval;
969 
970 	/*
971 	 * Validate state of guest interface before
972 	 * proceeeding.
973 	 */
974 	if (!isLDOMguest(nxge))
975 		return (NXGE_ERROR);
976 	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
977 		return (NXGE_ERROR);
978 
979 	/*
980 	 * In guest domain, always and only dealing with
981 	 * group 0 for an instance of nxge.
982 	 */
983 	group = nxge->rx_set.group[0];
984 
985 	/*
986 	 * Get the PIO handle.
987 	 */
988 	handle = NXGE_DEV_NPI_HANDLE(nxge);
989 
990 	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
991 		/*
992 		 * If this channel is in the map, then enable
993 		 * it.
994 		 */
995 		if ((1 << channel) & group->map) {
996 			/*
997 			 * Enable the RDC and clear the empty bit.
998 			 */
999 			rval = npi_rxdma_cfg_rdc_enable(handle, channel);
1000 			if (rval != NPI_SUCCESS)
1001 				return (NXGE_ERROR);
1002 
1003 			(void) npi_rxdma_channel_rbr_empty_clear(handle,
1004 			    channel);
1005 		}
1006 	}
1007 
1008 	return (NXGE_OK);
1009 }
1010 #endif	/* defined(sun4v) */
1011