xref: /illumos-gate/usr/src/uts/sun4u/io/px/px_lib4u.c (revision 1a887b2e15e4d9b63b5add57f3334b5b31960018)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/kmem.h>
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/sunddi.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/util.h>
36 #include <sys/modctl.h>
37 #include <sys/disp.h>
38 #include <sys/stat.h>
39 #include <sys/ddi_impldefs.h>
40 #include <sys/vmem.h>
41 #include <sys/iommutsb.h>
42 #include <sys/cpuvar.h>
43 #include <sys/ivintr.h>
44 #include <px_obj.h>
45 #include <pcie_pwr.h>
46 #include "px_tools_var.h"
47 #include <px_regs.h>
48 #include <px_csr.h>
49 #include <sys/machsystm.h>
50 #include "px_lib4u.h"
51 #include "px_err.h"
52 
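/*
 * jbus_stst_order is provided only by certain CPU modules; declare it
 * weak so this driver still loads when it is absent.  px_lib_dma_sync()
 * checks its address before calling it.
 */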
53 #pragma weak jbus_stst_order
54 
55 extern void jbus_stst_order();
56 
57 ulong_t px_mmu_dvma_end = 0xfffffffful;
58 uint_t px_ranges_phi_mask = 0xfffffffful;
59 
60 static int px_goto_l23ready(px_t *px_p);
61 static int px_goto_l0(px_t *px_p);
62 static int px_pre_pwron_check(px_t *px_p);
63 static uint32_t px_identity_chip(px_t *px_p);
64 static void px_lib_clr_errs(px_t *px_p, px_pec_t *pec_p);
65 
66 /*
67  * px_lib_map_regs
68  *
69  * This function is called from the attach routine to map the registers
70  * accessed by this driver.
71  *
72  * used by: px_attach()
73  *
74  * return value: DDI_FAILURE on failure
75  */
76 int
77 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
78 {
79 	ddi_device_acc_attr_t	attr;
80 	px_reg_bank_t		reg_bank = PX_REG_CSR;
81 
82 	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
83 		pxu_p, dip);
84 
85 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
86 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
87 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
88 
89 	/*
90 	 * PCI CSR Base
91 	 */
92 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
93 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
94 		goto fail;
95 	}
96 
97 	reg_bank++;
98 
99 	/*
100 	 * XBUS CSR Base
101 	 */
102 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
103 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
104 		goto fail;
105 	}
106 
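	/*
	 * The mapped XBC bank begins at FIRE_CONTROL_STATUS; bias the base
	 * back by that offset so absolute XBC register offsets (e.g.
	 * JBUS_SCRATCH_1 in px_lib_get_cb()) can be added to it directly.
	 */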
107 	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;
108 
109 done:
110 	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
111 		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
112 		    reg_bank, pxu_p->px_address[reg_bank]);
113 	}
114 
115 	return (DDI_SUCCESS);
116 
117 fail:
118 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
119 	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);
120 
121 	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
122 		pxu_p->px_address[reg_bank] = NULL;
123 		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
124 	}
125 
126 	return (DDI_FAILURE);
127 }
128 
129 /*
130  * px_lib_unmap_regs:
131  *
132  * This routine unmaps the registers mapped by px_lib_map_regs.
133  *
134  * used by: px_detach(), and error conditions in px_attach()
135  *
136  * return value: none
137  */
138 void
139 px_lib_unmap_regs(pxu_t *pxu_p)
140 {
141 	int i;
142 
143 	for (i = 0; i < PX_REG_MAX; i++) {
144 		if (pxu_p->px_ac[i])
145 			ddi_regs_map_free(&pxu_p->px_ac[i]);
146 	}
147 }
148 
149 int
150 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
151 {
152 	px_t		*px_p = DIP_TO_STATE(dip);
153 	caddr_t		xbc_csr_base, csr_base;
154 	px_dvma_range_prop_t	px_dvma_range;
155 	uint32_t	chip_id;
156 	pxu_t		*pxu_p;
157 
158 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);
159 
160 	if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
161 		return (DDI_FAILURE);
162 
163 	switch (chip_id) {
164 	case FIRE_VER_10:
165 		DBG(DBG_ATTACH, dip, "FIRE Hardware Version 1.0\n");
166 		break;
167 	case FIRE_VER_20:
168 		DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
169 		break;
170 	default:
171 		cmn_err(CE_WARN, "%s%d: FIRE Hardware Version Unknown\n",
172 		    ddi_driver_name(dip), ddi_get_instance(dip));
173 		return (DDI_FAILURE);
174 	}
175 
176 	/*
177 	 * Allocate platform specific structure and link it to
178 	 * the px state structure.
179 	 */
180 	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
181 	pxu_p->chip_id = chip_id;
182 	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
183 	    "portid", -1);
184 
185 	/* Map in the registers */
186 	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
187 		kmem_free(pxu_p, sizeof (pxu_t));
188 
189 		return (DDI_FAILURE);
190 	}
191 
192 	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
193 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
194 
195 	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
196 	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
197 	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
198 
199 	/*
200 	 * Create "virtual-dma" property to support child devices
201 	 * needing to know DVMA range.
202 	 */
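	/*
	 * Each 8-byte TSB entry maps one MMU page, so the TSB covers
	 * (tsb_size / 8) pages; the DVMA window is the region of that
	 * size ending at px_mmu_dvma_end.
	 */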
203 	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
204 	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
205 	px_dvma_range.dvma_len = (uint32_t)
206 	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
207 
208 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
209 		"virtual-dma", (caddr_t)&px_dvma_range,
210 		sizeof (px_dvma_range_prop_t));
211 	/*
212 	 * Initialize all Fire hardware-specific blocks.
213 	 */
214 	hvio_cb_init(xbc_csr_base, pxu_p);
215 	hvio_ib_init(csr_base, pxu_p);
216 	hvio_pec_init(csr_base, pxu_p);
217 	hvio_mmu_init(csr_base, pxu_p);
218 
219 	px_p->px_plat_p = (void *)pxu_p;
220 
221 	/*
222 	 * Initialize all the interrupt handlers
223 	 */
224 	px_err_reg_enable(px_p, PX_ERR_JBC);
225 	px_err_reg_enable(px_p, PX_ERR_MMU);
226 	px_err_reg_enable(px_p, PX_ERR_IMU);
227 	px_err_reg_enable(px_p, PX_ERR_TLU_UE);
228 	px_err_reg_enable(px_p, PX_ERR_TLU_CE);
229 	px_err_reg_enable(px_p, PX_ERR_TLU_OE);
230 	px_err_reg_enable(px_p, PX_ERR_ILU);
231 	px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
232 	px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
233 	px_err_reg_enable(px_p, PX_ERR_LPU_RX);
234 	px_err_reg_enable(px_p, PX_ERR_LPU_TX);
235 	px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
236 	px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);
237 
238 	/* Initialize the device handle */
239 	*dev_hdl = (devhandle_t)csr_base;
240 
241 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
242 
243 	return (DDI_SUCCESS);
244 }
245 
246 int
247 px_lib_dev_fini(dev_info_t *dip)
248 {
249 	px_t	*px_p = DIP_TO_STATE(dip);
250 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
251 
252 	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
253 
254 	/*
255 	 * Deinitialize all the interrupt handlers
256 	 */
257 	px_err_reg_disable(px_p, PX_ERR_JBC);
258 	px_err_reg_disable(px_p, PX_ERR_MMU);
259 	px_err_reg_disable(px_p, PX_ERR_IMU);
260 	px_err_reg_disable(px_p, PX_ERR_TLU_UE);
261 	px_err_reg_disable(px_p, PX_ERR_TLU_CE);
262 	px_err_reg_disable(px_p, PX_ERR_TLU_OE);
263 	px_err_reg_disable(px_p, PX_ERR_ILU);
264 	px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
265 	px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
266 	px_err_reg_disable(px_p, PX_ERR_LPU_RX);
267 	px_err_reg_disable(px_p, PX_ERR_LPU_TX);
268 	px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
269 	px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);
270 
271 	iommu_tsb_free(pxu_p->tsb_cookie);
272 
273 	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
274 	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
275 	px_p->px_plat_p = NULL;
276 
277 	return (DDI_SUCCESS);
278 }
279 
280 /*ARGSUSED*/
281 int
282 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
283     sysino_t *sysino)
284 {
285 	px_t	*px_p = DIP_TO_STATE(dip);
286 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
287 	uint64_t	ret;
288 
289 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
290 	    "devino 0x%x\n", dip, devino);
291 
292 	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
293 	    pxu_p, devino, sysino)) != H_EOK) {
294 		DBG(DBG_LIB_INT, dip,
295 		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
296 		return (DDI_FAILURE);
297 	}
298 
299 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
300 	    *sysino);
301 
302 	return (DDI_SUCCESS);
303 }
304 
305 /*ARGSUSED*/
306 int
307 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
308     intr_valid_state_t *intr_valid_state)
309 {
310 	uint64_t	ret;
311 
312 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
313 	    dip, sysino);
314 
315 	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
316 	    sysino, intr_valid_state)) != H_EOK) {
317 		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
318 		    ret);
319 		return (DDI_FAILURE);
320 	}
321 
322 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
323 	    *intr_valid_state);
324 
325 	return (DDI_SUCCESS);
326 }
327 
328 /*ARGSUSED*/
329 int
330 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
331     intr_valid_state_t intr_valid_state)
332 {
333 	uint64_t	ret;
334 
335 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
336 	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
337 
338 	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
339 	    sysino, intr_valid_state)) != H_EOK) {
340 		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
341 		    ret);
342 		return (DDI_FAILURE);
343 	}
344 
345 	return (DDI_SUCCESS);
346 }
347 
348 /*ARGSUSED*/
349 int
350 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
351     intr_state_t *intr_state)
352 {
353 	uint64_t	ret;
354 
355 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
356 	    dip, sysino);
357 
358 	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
359 	    sysino, intr_state)) != H_EOK) {
360 		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
361 		    ret);
362 		return (DDI_FAILURE);
363 	}
364 
365 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
366 	    *intr_state);
367 
368 	return (DDI_SUCCESS);
369 }
370 
371 /*ARGSUSED*/
372 int
373 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
374     intr_state_t intr_state)
375 {
376 	uint64_t	ret;
377 
378 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
379 	    "intr_state 0x%x\n", dip, sysino, intr_state);
380 
381 	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
382 	    sysino, intr_state)) != H_EOK) {
383 		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
384 		    ret);
385 		return (DDI_FAILURE);
386 	}
387 
388 	return (DDI_SUCCESS);
389 }
390 
391 /*ARGSUSED*/
392 int
393 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
394 {
395 	uint64_t	ret;
396 
397 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
398 	    dip, sysino);
399 
400 	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip),
401 	    sysino, cpuid)) != H_EOK) {
402 		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
403 		    ret);
404 		return (DDI_FAILURE);
405 	}
406 
407 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);
408 
409 	return (DDI_SUCCESS);
410 }
411 
412 /*ARGSUSED*/
413 int
414 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
415 {
416 	uint64_t	ret;
417 
418 	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
419 	    "cpuid 0x%x\n", dip, sysino, cpuid);
420 
421 	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip),
422 	    sysino, cpuid)) != H_EOK) {
423 		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
424 		    ret);
425 		return (DDI_FAILURE);
426 	}
427 
428 	return (DDI_SUCCESS);
429 }
430 
431 /*ARGSUSED*/
432 int
433 px_lib_intr_reset(dev_info_t *dip)
434 {
435 	devino_t	ino;
436 	sysino_t	sysino;
437 
438 	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
439 
440 	/* Reset all Interrupts */
441 	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
442 		if (px_lib_intr_devino_to_sysino(dip, ino,
443 		    &sysino) != DDI_SUCCESS)
444 			return (BF_FATAL);
445 
446 		if (px_lib_intr_setstate(dip, sysino,
447 		    INTR_IDLE_STATE) != DDI_SUCCESS)
448 			return (BF_FATAL);
449 	}
450 
451 	return (BF_NONE);
452 }
453 
454 /*ARGSUSED*/
455 int
456 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
457     io_attributes_t io_attributes, void *addr, size_t pfn_index,
458     int flag)
459 {
460 	px_t		*px_p = DIP_TO_STATE(dip);
461 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
462 	uint64_t	ret;
463 
464 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
465 	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n",
466 	    dip, tsbid, pages, io_attributes, addr, pfn_index, flag);
467 
468 	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
469 	    io_attributes, addr, pfn_index, flag)) != H_EOK) {
470 		DBG(DBG_LIB_DMA, dip,
471 		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
472 		return (DDI_FAILURE);
473 	}
474 
475 	return (DDI_SUCCESS);
476 }
477 
478 /*ARGSUSED*/
479 int
480 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
481 {
482 	px_t		*px_p = DIP_TO_STATE(dip);
483 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
484 	uint64_t	ret;
485 
486 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
487 	    "pages 0x%x\n", dip, tsbid, pages);
488 
489 	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
490 	    != H_EOK) {
491 		DBG(DBG_LIB_DMA, dip,
492 		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
493 
494 		return (DDI_FAILURE);
495 	}
496 
497 	return (DDI_SUCCESS);
498 }
499 
500 /*ARGSUSED*/
501 int
502 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
503     io_attributes_t *attributes_p, r_addr_t *r_addr_p)
504 {
505 	px_t	*px_p = DIP_TO_STATE(dip);
506 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
507 	uint64_t	ret;
508 
509 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
510 	    dip, tsbid);
511 
512 	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
513 	    attributes_p, r_addr_p)) != H_EOK) {
514 		DBG(DBG_LIB_DMA, dip,
515 		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
516 
517 		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE);
518 	}
519 
520 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
521 	    *attributes_p, *r_addr_p);
522 
523 	return (DDI_SUCCESS);
524 }
525 
526 
527 /*
528  * Checks dma attributes against system bypass ranges
529  * The bypass range is determined by the hardware. Return them so the
530  * common code can do generic checking against them.
531  */
532 /*ARGSUSED*/
533 int
534 px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p)
535 {
536 	*lo_p = MMU_BYPASS_BASE;
537 	*hi_p = MMU_BYPASS_END;
538 
539 	return (DDI_SUCCESS);
540 }
541 
542 
543 /*ARGSUSED*/
544 int
545 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
546     io_attributes_t io_attributes, io_addr_t *io_addr_p)
547 {
548 	uint64_t	ret;
549 
550 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
551 	    "attr 0x%x\n", dip, ra, io_attributes);
552 
553 	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
554 	    io_attributes, io_addr_p)) != H_EOK) {
555 		DBG(DBG_LIB_DMA, dip,
556 		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
557 		return (DDI_FAILURE);
558 	}
559 
560 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
561 	    *io_addr_p);
562 
563 	return (DDI_SUCCESS);
564 }
565 
566 /*
567  * bus dma sync entry point.
568  */
569 /*ARGSUSED*/
570 int
571 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
572 	off_t off, size_t len, uint_t cache_flags)
573 {
574 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
575 
576 	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
577 	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
578 	    dip, rdip, handle, off, len, cache_flags);
579 
580 	/*
581 	 * jbus_stst_order is found only in certain cpu modules.
582 	 * Just return success if not present.
583 	 */
584 	if (&jbus_stst_order == NULL)
585 		return (DDI_SUCCESS);
586 
587 	if (!(mp->dmai_flags & DMAI_FLAGS_INUSE)) {
588 		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
589 		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
590 
591 		return (DDI_FAILURE);
592 	}
593 
594 	if (mp->dmai_flags & DMAI_FLAGS_NOSYNC)
595 		return (DDI_SUCCESS);
596 
597 	/*
598 	 * No flush needed when sending data from memory to device.
599 	 * Nothing to do to "sync" memory to what device would already see.
600 	 */
601 	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
602 	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
603 		return (DDI_SUCCESS);
604 
605 	/*
606 	 * Perform necessary cpu workaround to ensure jbus ordering.
607 	 * CPU's internal "invalidate FIFOs" are flushed.
608 	 */
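	/*
	 * Preemption is disabled so the flush is issued and completes
	 * on a single CPU without migrating in between.
	 */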
609 
610 #if !defined(lint)
611 	kpreempt_disable();
612 #endif
613 	jbus_stst_order();
614 #if !defined(lint)
615 	kpreempt_enable();
616 #endif
617 	return (DDI_SUCCESS);
618 }
619 
620 /*
621  * MSIQ Functions:
622  */
623 /*ARGSUSED*/
624 int
625 px_lib_msiq_init(dev_info_t *dip)
626 {
627 	px_t		*px_p = DIP_TO_STATE(dip);
628 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
629 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
630 	caddr_t		msiq_addr;
631 	px_dvma_addr_t	pg_index;
632 	size_t		size;
633 	int		ret;
634 
635 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
636 
637 	/*
638 	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
639 	 * and then initialize the base address register.
640 	 *
641 	 * Allocate entries from Fire IOMMU so that the resulting address
642 	 * is properly aligned.  Calculate the index of the first allocated
643 	 * entry.  Note: The size of the mapping is assumed to be a multiple
644 	 * of the page size.
645 	 */
646 	msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
647 	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
648 
649 	size = msiq_state_p->msiq_cnt *
650 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
651 
652 	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
653 	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
654 
655 	if (pxu_p->msiq_mapped_p == NULL)
656 		return (DDI_FAILURE);
657 
658 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
659 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
660 
661 	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
662 	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
663 	    MMU_MAP_BUF)) != DDI_SUCCESS) {
664 		DBG(DBG_LIB_MSIQ, dip,
665 		    "px_lib_iommu_map failed, ret 0x%x\n", ret);
666 
667 		(void) px_lib_msiq_fini(dip);
668 		return (DDI_FAILURE);
669 	}
670 
671 	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);
672 
673 	return (DDI_SUCCESS);
674 }
675 
676 /*ARGSUSED*/
677 int
678 px_lib_msiq_fini(dev_info_t *dip)
679 {
680 	px_t		*px_p = DIP_TO_STATE(dip);
681 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
682 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
683 	px_dvma_addr_t	pg_index;
684 	size_t		size;
685 
686 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
687 
688 	/*
689 	 * Unmap and free the EQ memory that had been mapped
690 	 * into the Fire IOMMU.
691 	 */
692 	size = msiq_state_p->msiq_cnt *
693 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
694 
695 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
696 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
697 
698 	(void) px_lib_iommu_demap(px_p->px_dip,
699 	    PCI_TSBID(0, pg_index), MMU_BTOP(size));
700 
701 	/* Free the entries from the Fire MMU */
702 	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
703 	    (void *)pxu_p->msiq_mapped_p, size);
704 
705 	return (DDI_SUCCESS);
706 }
707 
708 /*ARGSUSED*/
709 int
710 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
711     uint_t *msiq_rec_cnt_p)
712 {
713 	px_t		*px_p = DIP_TO_STATE(dip);
714 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
715 	uint64_t	*msiq_addr;
716 	size_t		msiq_size;
717 
718 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
719 	    dip, msiq_id);
720 
721 	msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
722 	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
723 	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
724 	*ra_p = (r_addr_t)((caddr_t)msiq_addr + (msiq_id * msiq_size));
725 
726 	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
727 
728 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
729 	    ra_p, *msiq_rec_cnt_p);
730 
731 	return (DDI_SUCCESS);
732 }
733 
734 /*ARGSUSED*/
735 int
736 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
737     pci_msiq_valid_state_t *msiq_valid_state)
738 {
739 	uint64_t	ret;
740 
741 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
742 	    dip, msiq_id);
743 
744 	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
745 	    msiq_id, msiq_valid_state)) != H_EOK) {
746 		DBG(DBG_LIB_MSIQ, dip,
747 		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
748 		return (DDI_FAILURE);
749 	}
750 
751 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
752 	    *msiq_valid_state);
753 
754 	return (DDI_SUCCESS);
755 }
756 
757 /*ARGSUSED*/
758 int
759 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
760     pci_msiq_valid_state_t msiq_valid_state)
761 {
762 	uint64_t	ret;
763 
764 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
765 	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
766 
767 	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
768 	    msiq_id, msiq_valid_state)) != H_EOK) {
769 		DBG(DBG_LIB_MSIQ, dip,
770 		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
771 		return (DDI_FAILURE);
772 	}
773 
774 	return (DDI_SUCCESS);
775 }
776 
777 /*ARGSUSED*/
778 int
779 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
780     pci_msiq_state_t *msiq_state)
781 {
782 	uint64_t	ret;
783 
784 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
785 	    dip, msiq_id);
786 
787 	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
788 	    msiq_id, msiq_state)) != H_EOK) {
789 		DBG(DBG_LIB_MSIQ, dip,
790 		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
791 		return (DDI_FAILURE);
792 	}
793 
794 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
795 	    *msiq_state);
796 
797 	return (DDI_SUCCESS);
798 }
799 
800 /*ARGSUSED*/
801 int
802 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
803     pci_msiq_state_t msiq_state)
804 {
805 	uint64_t	ret;
806 
807 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
808 	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
809 
810 	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
811 	    msiq_id, msiq_state)) != H_EOK) {
812 		DBG(DBG_LIB_MSIQ, dip,
813 		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
814 		return (DDI_FAILURE);
815 	}
816 
817 	return (DDI_SUCCESS);
818 }
819 
820 /*ARGSUSED*/
821 int
822 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
823     msiqhead_t *msiq_head)
824 {
825 	uint64_t	ret;
826 
827 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
828 	    dip, msiq_id);
829 
830 	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
831 	    msiq_id, msiq_head)) != H_EOK) {
832 		DBG(DBG_LIB_MSIQ, dip,
833 		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
834 		return (DDI_FAILURE);
835 	}
836 
837 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
838 	    *msiq_head);
839 
840 	return (DDI_SUCCESS);
841 }
842 
843 /*ARGSUSED*/
844 int
845 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
846     msiqhead_t msiq_head)
847 {
848 	uint64_t	ret;
849 
850 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
851 	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
852 
853 	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
854 	    msiq_id, msiq_head)) != H_EOK) {
855 		DBG(DBG_LIB_MSIQ, dip,
856 		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
857 		return (DDI_FAILURE);
858 	}
859 
860 	return (DDI_SUCCESS);
861 }
862 
863 /*ARGSUSED*/
864 int
865 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
866     msiqtail_t *msiq_tail)
867 {
868 	uint64_t	ret;
869 
870 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
871 	    dip, msiq_id);
872 
873 	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
874 	    msiq_id, msiq_tail)) != H_EOK) {
875 		DBG(DBG_LIB_MSIQ, dip,
876 		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
877 		return (DDI_FAILURE);
878 	}
879 
880 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
881 	    *msiq_tail);
882 
883 	return (DDI_SUCCESS);
884 }
885 
886 /*ARGSUSED*/
887 void
888 px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
889 {
890 	px_t		*px_p = DIP_TO_STATE(dip);
891 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
892 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;
893 
894 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
895 	    dip, eq_rec_p);
896 
897 	if (!eq_rec_p->eq_rec_rid) {
898 		/* Set msiq_rec_rid to zero */
899 		msiq_rec_p->msiq_rec_rid = 0;
900 
901 		return;
902 	}
903 
904 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
905 	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
906 	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
907 	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
908 	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
909 	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
910 	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
911 	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
912 
913 	/*
914 	 * Only the upper 4 bits of eq_rec_fmt_type are used
915 	 * to identify the EQ record type.
916 	 */
917 	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
918 	case EQ_REC_MSI32:
919 		msiq_rec_p->msiq_rec_type = MSI32_REC;
920 
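		/*
		 * Fire 1.0 delivers the 16-bit MSI data with its two
		 * bytes swapped; restore the expected byte order here.
		 */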
921 		if (pxu_p->chip_id == FIRE_VER_10) {
922 			msiq_rec_p->msiq_rec_data.msi.msi_data =
923 			    (eq_rec_p->eq_rec_data0 & 0xFF) << 8 |
924 			    (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8;
925 		} else {
926 			/* Default case is Fire 2.0 */
927 			msiq_rec_p->msiq_rec_data.msi.msi_data =
928 			    eq_rec_p->eq_rec_data0;
929 		}
930 
931 		break;
932 	case EQ_REC_MSI64:
933 		msiq_rec_p->msiq_rec_type = MSI64_REC;
934 
935 		if (pxu_p->chip_id == FIRE_VER_10) {
936 			msiq_rec_p->msiq_rec_data.msi.msi_data =
937 			    (eq_rec_p->eq_rec_data0 & 0xFF) << 8 |
938 			    (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8;
939 		} else {
940 			/* Default case is Fire 2.0 */
941 			msiq_rec_p->msiq_rec_data.msi.msi_data =
942 			    eq_rec_p->eq_rec_data0;
943 		}
944 
945 		break;
946 	case EQ_REC_MSG:
947 		msiq_rec_p->msiq_rec_type = MSG_REC;
948 
949 		msiq_rec_p->msiq_rec_data.msg.msg_route =
950 		    eq_rec_p->eq_rec_fmt_type & 7;
951 		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
952 		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
953 		break;
954 	default:
955 		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
956 		    "0x%lx is an unknown EQ record type",
957 		    ddi_driver_name(dip), ddi_get_instance(dip),
958 		    eq_rec_p->eq_rec_fmt_type);
959 		break;
960 	}
961 
962 	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
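	/*
	 * Reassemble the MSI address from the two EQ record fields:
	 * eq_rec_addr1 supplies the upper bits and eq_rec_addr0 the
	 * word-aligned low bits.
	 */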
963 	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
964 	    (eq_rec_p->eq_rec_addr0 << 2));
965 
966 	/* Zero out eq_rec_rid field */
967 	eq_rec_p->eq_rec_rid = 0;
968 }
969 
970 /*
971  * MSI Functions:
972  */
973 /*ARGSUSED*/
974 int
975 px_lib_msi_init(dev_info_t *dip)
976 {
977 	px_t		*px_p = DIP_TO_STATE(dip);
978 	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
979 	uint64_t	ret;
980 
981 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
982 
983 	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
984 	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
985 		DBG(DBG_LIB_MSI, dip, "hvio_msi_init failed, ret 0x%lx\n",
986 		    ret);
987 		return (DDI_FAILURE);
988 	}
989 
990 	return (DDI_SUCCESS);
991 }
992 
993 /*ARGSUSED*/
994 int
995 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
996     msiqid_t *msiq_id)
997 {
998 	uint64_t	ret;
999 
1000 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
1001 	    dip, msi_num);
1002 
1003 	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
1004 	    msi_num, msiq_id)) != H_EOK) {
1005 		DBG(DBG_LIB_MSI, dip,
1006 		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
1007 		return (DDI_FAILURE);
1008 	}
1009 
1010 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
1011 	    *msiq_id);
1012 
1013 	return (DDI_SUCCESS);
1014 }
1015 
1016 /*ARGSUSED*/
1017 int
1018 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
1019     msiqid_t msiq_id, msi_type_t msitype)
1020 {
1021 	uint64_t	ret;
1022 
1023 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
1024 	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);
1025 
1026 	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
1027 	    msi_num, msiq_id)) != H_EOK) {
1028 		DBG(DBG_LIB_MSI, dip,
1029 		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
1030 		return (DDI_FAILURE);
1031 	}
1032 
1033 	return (DDI_SUCCESS);
1034 }
1035 
1036 /*ARGSUSED*/
1037 int
1038 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
1039     pci_msi_valid_state_t *msi_valid_state)
1040 {
1041 	uint64_t	ret;
1042 
1043 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
1044 	    dip, msi_num);
1045 
1046 	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
1047 	    msi_num, msi_valid_state)) != H_EOK) {
1048 		DBG(DBG_LIB_MSI, dip,
1049 		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
1050 		return (DDI_FAILURE);
1051 	}
1052 
1053 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
1054 	    *msi_valid_state);
1055 
1056 	return (DDI_SUCCESS);
1057 }
1058 
1059 /*ARGSUSED*/
1060 int
1061 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
1062     pci_msi_valid_state_t msi_valid_state)
1063 {
1064 	uint64_t	ret;
1065 
1066 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
1067 	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
1068 
1069 	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
1070 	    msi_num, msi_valid_state)) != H_EOK) {
1071 		DBG(DBG_LIB_MSI, dip,
1072 		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
1073 		return (DDI_FAILURE);
1074 	}
1075 
1076 	return (DDI_SUCCESS);
1077 }
1078 
1079 /*ARGSUSED*/
1080 int
1081 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
1082     pci_msi_state_t *msi_state)
1083 {
1084 	uint64_t	ret;
1085 
1086 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
1087 	    dip, msi_num);
1088 
1089 	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
1090 	    msi_num, msi_state)) != H_EOK) {
1091 		DBG(DBG_LIB_MSI, dip,
1092 		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
1093 		return (DDI_FAILURE);
1094 	}
1095 
1096 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
1097 	    *msi_state);
1098 
1099 	return (DDI_SUCCESS);
1100 }
1101 
1102 /*ARGSUSED*/
1103 int
1104 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
1105     pci_msi_state_t msi_state)
1106 {
1107 	uint64_t	ret;
1108 
1109 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
1110 	    "msi_state 0x%x\n", dip, msi_num, msi_state);
1111 
1112 	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
1113 	    msi_num, msi_state)) != H_EOK) {
1114 		DBG(DBG_LIB_MSI, dip,
1115 		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
1116 		return (DDI_FAILURE);
1117 	}
1118 
1119 	return (DDI_SUCCESS);
1120 }
1121 
1122 /*
1123  * MSG Functions:
1124  */
1125 /*ARGSUSED*/
1126 int
1127 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1128     msiqid_t *msiq_id)
1129 {
1130 	uint64_t	ret;
1131 
1132 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
1133 	    dip, msg_type);
1134 
1135 	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
1136 	    msg_type, msiq_id)) != H_EOK) {
1137 		DBG(DBG_LIB_MSG, dip,
1138 		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
1139 		return (DDI_FAILURE);
1140 	}
1141 
1142 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
1143 	    *msiq_id);
1144 
1145 	return (DDI_SUCCESS);
1146 }
1147 
1148 /*ARGSUSED*/
1149 int
1150 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1151     msiqid_t msiq_id)
1152 {
1153 	uint64_t	ret;
1154 
1155 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
1156 	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);
1157 
1158 	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
1159 	    msg_type, msiq_id)) != H_EOK) {
1160 		DBG(DBG_LIB_MSG, dip,
1161 		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
1162 		return (DDI_FAILURE);
1163 	}
1164 
1165 	return (DDI_SUCCESS);
1166 }
1167 
1168 /*ARGSUSED*/
1169 int
1170 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1171     pcie_msg_valid_state_t *msg_valid_state)
1172 {
1173 	uint64_t	ret;
1174 
1175 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
1176 	    dip, msg_type);
1177 
1178 	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
1179 	    msg_valid_state)) != H_EOK) {
1180 		DBG(DBG_LIB_MSG, dip,
1181 		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
1182 		return (DDI_FAILURE);
1183 	}
1184 
1185 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
1186 	    *msg_valid_state);
1187 
1188 	return (DDI_SUCCESS);
1189 }
1190 
1191 /*ARGSUSED*/
1192 int
1193 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1194     pcie_msg_valid_state_t msg_valid_state)
1195 {
1196 	uint64_t	ret;
1197 
1198 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
1199 	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
1200 
1201 	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
1202 	    msg_valid_state)) != H_EOK) {
1203 		DBG(DBG_LIB_MSG, dip,
1204 		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
1205 		return (DDI_FAILURE);
1206 	}
1207 
1208 	return (DDI_SUCCESS);
1209 }
1210 
1211 /*
1212  * Suspend/Resume Functions:
1213  * Currently unsupported by hypervisor
1214  */
1215 int
1216 px_lib_suspend(dev_info_t *dip)
1217 {
1218 	px_t		*px_p = DIP_TO_STATE(dip);
1219 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1220 	devhandle_t	dev_hdl, xbus_dev_hdl;
1221 	uint64_t	ret;
1222 
1223 	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
1224 
1225 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1226 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1227 
1228 	if ((ret = hvio_suspend(dev_hdl, pxu_p)) == H_EOK) {
1229 		px_p->px_cb_p->xbc_attachcnt--;
1230 		if (px_p->px_cb_p->xbc_attachcnt == 0)
1231 			if ((ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p))
1232 			    != H_EOK)
1233 				px_p->px_cb_p->xbc_attachcnt++;
1234 	}
1235 
1236 	return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
1237 }
1238 
1239 void
1240 px_lib_resume(dev_info_t *dip)
1241 {
1242 	px_t		*px_p = DIP_TO_STATE(dip);
1243 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1244 	devhandle_t	dev_hdl, xbus_dev_hdl;
1245 	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
1246 	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];
1247 
1248 	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
1249 
1250 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1251 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1252 
1253 	px_p->px_cb_p->xbc_attachcnt++;
1254 	if (px_p->px_cb_p->xbc_attachcnt == 1)
1255 		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
1256 	hvio_resume(dev_hdl, pec_ino, pxu_p);
1257 }
1258 
1259 /*
1260  * PCI tool Functions:
1261  * Currently unsupported by hypervisor
1262  */
1263 /*ARGSUSED*/
1264 int
1265 px_lib_tools_dev_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
1266 {
1267 	px_t *px_p = DIP_TO_STATE(dip);
1268 
1269 	DBG(DBG_TOOLS, dip, "px_lib_tools_dev_reg_ops: dip 0x%p arg 0x%p "
1270 	    "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);
1271 
1272 	return (px_dev_reg_ops(dip, arg, cmd, mode, px_p));
1273 }
1274 
1275 /*ARGSUSED*/
1276 int
1277 px_lib_tools_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
1278 {
1279 	DBG(DBG_TOOLS, dip, "px_lib_tools_bus_reg_ops: dip 0x%p arg 0x%p "
1280 	    "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);
1281 
1282 	return (px_bus_reg_ops(dip, arg, cmd, mode));
1283 }
1284 
1285 /*ARGSUSED*/
1286 int
1287 px_lib_tools_intr_admn(dev_info_t *dip, void *arg, int cmd, int mode)
1288 {
1289 	px_t *px_p = DIP_TO_STATE(dip);
1290 
1291 	DBG(DBG_TOOLS, dip, "px_lib_tools_intr_admn: dip 0x%p arg 0x%p "
1292 	    "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);
1293 
1294 	return (px_intr_admn(dip, arg, cmd, mode, px_p));
1295 }
1296 
1297 /*
1298  * Misc Functions:
1299  * Currently unsupported by hypervisor
1300  */
1301 uint64_t
1302 px_lib_get_cb(dev_info_t *dip)
1303 {
1304 	px_t	*px_p = DIP_TO_STATE(dip);
1305 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1306 
1307 	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
1308 }
1309 
1310 void
1311 px_lib_set_cb(dev_info_t *dip, uint64_t val)
1312 {
1313 	px_t	*px_p = DIP_TO_STATE(dip);
1314 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1315 
1316 	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
1317 }
1318 
1319 /*ARGSUSED*/
1320 int
1321 px_lib_map_vconfig(dev_info_t *dip,
1322 	ddi_map_req_t *mp, pci_config_offset_t off,
1323 		pci_regspec_t *rp, caddr_t *addrp)
1324 {
1325 	/*
1326 	 * No special config space access services in this layer.
1327 	 */
1328 	return (DDI_FAILURE);
1329 }
1330 
1331 static void
1332 px_lib_clr_errs(px_t *px_p, px_pec_t *pec_p)
1333 {
1334 	dev_info_t	*rpdip = px_p->px_dip;
1335 	px_cb_t		*cb_p = px_p->px_cb_p;
1336 	int		err = PX_OK, ret;
1337 	int		acctype = pec_p->pec_safeacc_type;
1338 	ddi_fm_error_t	derr;
1339 
1340 	/* Create the derr */
1341 	bzero(&derr, sizeof (ddi_fm_error_t));
1342 	derr.fme_version = DDI_FME_VERSION;
1343 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1344 	derr.fme_flag = acctype;
1345 
1346 	if (acctype == DDI_FM_ERR_EXPECTED) {
1347 		derr.fme_status = DDI_FM_NONFATAL;
1348 		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
1349 	}
1350 
1351 	mutex_enter(&cb_p->xbc_fm_mutex);
1352 
1353 	/* send ereport/handle/clear fire registers */
1354 	err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE);
1355 
1356 	/* Check all child devices for errors */
1357 	ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr);
1358 
1359 	mutex_exit(&cb_p->xbc_fm_mutex);
1360 
1361 	/*
1362 	 * PX_FATAL_HW indicates a condition already recovered from a fatal
1363 	 * reset, so it does not cause a panic here.
1364 	 */
1365 	if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL))
1366 		fm_panic("Fatal System Port Error has occurred\n");
1367 }
1368 
1369 #ifdef  DEBUG
1370 int	px_peekfault_cnt = 0;
1371 int	px_pokefault_cnt = 0;
1372 #endif  /* DEBUG */
1373 
1374 /*ARGSUSED*/
1375 static int
1376 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
1377     peekpoke_ctlops_t *in_args)
1378 {
1379 	px_t *px_p = DIP_TO_STATE(dip);
1380 	px_pec_t *pec_p = px_p->px_pec_p;
1381 	int err = DDI_SUCCESS;
1382 	on_trap_data_t otd;
1383 
1384 	mutex_enter(&pec_p->pec_pokefault_mutex);
1385 	pec_p->pec_ontrap_data = &otd;
1386 	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
1387 
1388 	/* Set up protected environment. */
1389 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1390 		uintptr_t tramp = otd.ot_trampoline;
1391 
1392 		otd.ot_trampoline = (uintptr_t)&poke_fault;
1393 		err = do_poke(in_args->size, (void *)in_args->dev_addr,
1394 		    (void *)in_args->host_addr);
1395 		otd.ot_trampoline = tramp;
1396 	} else
1397 		err = DDI_FAILURE;
1398 
1399 	px_lib_clr_errs(px_p, pec_p);
1400 
1401 	if (otd.ot_trap & OT_DATA_ACCESS)
1402 		err = DDI_FAILURE;
1403 
1404 	/* Take down protected environment. */
1405 	no_trap();
1406 
1407 	pec_p->pec_ontrap_data = NULL;
1408 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1409 	mutex_exit(&pec_p->pec_pokefault_mutex);
1410 
1411 #ifdef  DEBUG
1412 	if (err == DDI_FAILURE)
1413 		px_pokefault_cnt++;
1414 #endif
1415 	return (err);
1416 }
1417 
1418 /*ARGSUSED*/
1419 static int
1420 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
1421     peekpoke_ctlops_t *cautacc_ctlops_arg)
1422 {
1423 	size_t size = cautacc_ctlops_arg->size;
1424 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1425 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1426 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1427 	size_t repcount = cautacc_ctlops_arg->repcount;
1428 	uint_t flags = cautacc_ctlops_arg->flags;
1429 
1430 	px_t *px_p = DIP_TO_STATE(dip);
1431 	px_pec_t *pec_p = px_p->px_pec_p;
1432 	int err = DDI_SUCCESS;
1433 
1434 	/*
1435 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1436 	 * mutex.
1437 	 */
1438 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1439 
1440 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1441 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1442 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1443 
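	/*
	 * i_ddi_ontrap() arms on_trap() protection for this handle; a data
	 * access fault inside the loop sets OT_DATA_ACCESS in
	 * pec_ontrap_data rather than panicking, and is checked after
	 * each access.
	 */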
1444 	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1445 		for (; repcount; repcount--) {
1446 			switch (size) {
1447 
1448 			case sizeof (uint8_t):
1449 				i_ddi_put8(hp, (uint8_t *)dev_addr,
1450 				    *(uint8_t *)host_addr);
1451 				break;
1452 
1453 			case sizeof (uint16_t):
1454 				i_ddi_put16(hp, (uint16_t *)dev_addr,
1455 				    *(uint16_t *)host_addr);
1456 				break;
1457 
1458 			case sizeof (uint32_t):
1459 				i_ddi_put32(hp, (uint32_t *)dev_addr,
1460 				    *(uint32_t *)host_addr);
1461 				break;
1462 
1463 			case sizeof (uint64_t):
1464 				i_ddi_put64(hp, (uint64_t *)dev_addr,
1465 				    *(uint64_t *)host_addr);
1466 				break;
1467 			}
1468 
1469 			host_addr += size;
1470 
1471 			if (flags == DDI_DEV_AUTOINCR)
1472 				dev_addr += size;
1473 
1474 			px_lib_clr_errs(px_p, pec_p);
1475 
1476 			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
1477 				err = DDI_FAILURE;
1478 #ifdef  DEBUG
1479 				px_pokefault_cnt++;
1480 #endif
1481 				break;
1482 			}
1483 		}
1484 	}
1485 
1486 	i_ddi_notrap((ddi_acc_handle_t)hp);
1487 	pec_p->pec_ontrap_data = NULL;
1488 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1489 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1490 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1491 
1492 	return (err);
1493 }
1494 
1495 
1496 int
1497 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
1498     peekpoke_ctlops_t *in_args)
1499 {
1500 	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
1501 	    px_lib_do_poke(dip, rdip, in_args));
1502 }
1503 
1504 
1505 /*ARGSUSED*/
1506 static int
1507 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
1508 {
1509 	px_t *px_p = DIP_TO_STATE(dip);
1510 	px_pec_t *pec_p = px_p->px_pec_p;
1511 	int err = DDI_SUCCESS;
1512 	on_trap_data_t otd;
1513 
1514 	mutex_enter(&pec_p->pec_pokefault_mutex);
1515 	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
1516 
1517 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1518 		uintptr_t tramp = otd.ot_trampoline;
1519 
1520 		otd.ot_trampoline = (uintptr_t)&peek_fault;
1521 		err = do_peek(in_args->size, (void *)in_args->dev_addr,
1522 		    (void *)in_args->host_addr);
1523 		otd.ot_trampoline = tramp;
1524 	} else
1525 		err = DDI_FAILURE;
1526 
1527 	no_trap();
1528 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1529 	mutex_exit(&pec_p->pec_pokefault_mutex);
1530 
1531 #ifdef  DEBUG
1532 	if (err == DDI_FAILURE)
1533 		px_peekfault_cnt++;
1534 #endif
1535 	return (err);
1536 }
1537 
1538 
1539 static int
1540 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
1541 {
1542 	size_t size = cautacc_ctlops_arg->size;
1543 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1544 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1545 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1546 	size_t repcount = cautacc_ctlops_arg->repcount;
1547 	uint_t flags = cautacc_ctlops_arg->flags;
1548 
1549 	px_t *px_p = DIP_TO_STATE(dip);
1550 	px_pec_t *pec_p = px_p->px_pec_p;
1551 	int err = DDI_SUCCESS;
1552 
1553 	/*
1554 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1555 	 * mutex.
1556 	 */
1557 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1558 
1559 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1560 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1561 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1562 
1563 	if (repcount == 1) {
1564 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1565 			i_ddi_caut_get(size, (void *)dev_addr,
1566 			    (void *)host_addr);
1567 		} else {
1568 			int i;
1569 			uint8_t *ff_addr = (uint8_t *)host_addr;
1570 			for (i = 0; i < size; i++)
1571 				*ff_addr++ = 0xff;
1572 
1573 			err = DDI_FAILURE;
1574 #ifdef  DEBUG
1575 			px_peekfault_cnt++;
1576 #endif
1577 		}
1578 	} else {
1579 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1580 			for (; repcount; repcount--) {
1581 				i_ddi_caut_get(size, (void *)dev_addr,
1582 				    (void *)host_addr);
1583 
1584 				host_addr += size;
1585 
1586 				if (flags == DDI_DEV_AUTOINCR)
1587 					dev_addr += size;
1588 			}
1589 		} else {
1590 			err = DDI_FAILURE;
1591 #ifdef  DEBUG
1592 			px_peekfault_cnt++;
1593 #endif
1594 		}
1595 	}
1596 
1597 	i_ddi_notrap((ddi_acc_handle_t)hp);
1598 	pec_p->pec_ontrap_data = NULL;
1599 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1600 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1601 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1602 
1603 	return (err);
1604 }
1605 
1606 /*ARGSUSED*/
1607 int
1608 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
1609     peekpoke_ctlops_t *in_args, void *result)
1610 {
1611 	result = (void *)in_args->host_addr;
1612 	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
1613 	    px_lib_do_peek(dip, in_args));
1614 }
1615 
1616 /*
1617  * implements PPM interface
1618  */
1619 int
1620 px_lib_pmctl(int cmd, px_t *px_p)
1621 {
1622 	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
1623 	switch (cmd) {
1624 	case PPMREQ_PRE_PWR_OFF:
1625 		/*
1626 		 * Currently there is no device power management for
1627 		 * the root complex (fire). When there is we need to make
1628 		 * sure that it is at full power before trying to send the
1629 		 * PME_Turn_Off message.
1630 		 */
1631 		DBG(DBG_PWR, px_p->px_dip,
1632 		    "ioctl: request to send PME_Turn_Off\n");
1633 		return (px_goto_l23ready(px_p));
1634 
1635 	case PPMREQ_PRE_PWR_ON:
1636 		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1637 		return (px_pre_pwron_check(px_p));
1638 
1639 	case PPMREQ_POST_PWR_ON:
1640 		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1641 		return (px_goto_l0(px_p));
1642 
1643 	default:
1644 		return (DDI_FAILURE);
1645 	}
1646 }
1647 
1648 /*
1649  * Sends a PME_Turn_Off message to put the link in the L2/L3 ready state.
1650  * Called by px_ioctl.
1651  * Returns DDI_SUCCESS or DDI_FAILURE.
1652  * 1. Wait for the link to be in the L1 state (link status reg).
1653  * 2. Write to the PME_Turn_Off register to broadcast the message.
1654  * 3. Set a timeout.
1655  * 4. If the timeout expires, return failure.
1656  * 5. If PME_TO_Ack is received, wait until the link is L2/L3 ready.
1657  */
1658 static int
1659 px_goto_l23ready(px_t *px_p)
1660 {
1661 	pcie_pwr_t	*pwr_p;
1662 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1663 	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1664 	int		ret = DDI_SUCCESS;
1665 	clock_t		end, timeleft;
1666 	int		mutex_held = 1;
1667 
1668 	/* If no PM info, return failure */
1669 	if (!PCIE_PMINFO(px_p->px_dip) ||
1670 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1671 		return (DDI_FAILURE);
1672 
1673 	mutex_enter(&pwr_p->pwr_lock);
1674 	mutex_enter(&px_p->px_l23ready_lock);
1675 	/* Clear the PME_To_ACK received flag */
1676 	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1677 	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
1678 		ret = DDI_FAILURE;
1679 		goto l23ready_done;
1680 	}
1681 	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
1682 
1683 	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1684 	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1685 		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1686 		    &px_p->px_l23ready_lock, end);
1687 		/*
1688 		 * cv_timedwait returns -1 when either
1689 		 * 1) it timed out,
1690 		 * 2) there was a premature wakeup but by the time
1691 		 * cv_timedwait is called again end < lbolt, i.e.
1692 		 * end is in the past, or
1693 		 * 3) end < lbolt was already true by the time we made
1694 		 * the first cv_timedwait call.
1695 		 */
1696 		if (timeleft == -1)
1697 			break;
1698 	}
1699 	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1700 		/*
1701 		 * Either we timed out or the interrupt didn't get a
1702 		 * chance to grab the mutex and set the flag.
1703 		 * Release the mutex and delay for some time.
1704 		 * This will 1) give the interrupt a chance to
1705 		 * set the flag and 2) create a delay between two
1706 		 * consecutive requests.
1707 		 */
1708 		mutex_exit(&px_p->px_l23ready_lock);
1709 		delay(5);
1710 		mutex_held = 0;
1711 		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1712 			ret = DDI_FAILURE;
1713 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
1714 			    " for PME_TO_ACK\n");
1715 		}
1716 	}
1717 	px_p->px_pm_flags &= ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD);
1718 
1719 l23ready_done:
1720 	if (mutex_held)
1721 		mutex_exit(&px_p->px_l23ready_lock);
1722 	/*
1723 	 * Wait until the link is in L1 idle, if sending PME_Turn_Off
1724 	 * was successful.
1725 	 */
1726 	if (ret == DDI_SUCCESS) {
1727 		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1728 			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1729 			    " even though we received PME_To_ACK.\n");
1730 			ret = DDI_FAILURE;
1731 		} else
1732 			pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1733 
1734 	}
1735 	mutex_exit(&pwr_p->pwr_lock);
1736 	return (ret);
1737 }
1738 
1739 /*
1740  * Message interrupt handler intended to be shared for both
1741  * PME and PME_TO_ACK msg handling, currently only handles
1742  * PME_To_ACK message.
1743  */
1744 uint_t
1745 px_pmeq_intr(caddr_t arg)
1746 {
1747 	px_t	*px_p = (px_t *)arg;
1748 
1749 	mutex_enter(&px_p->px_l23ready_lock);
1750 	cv_broadcast(&px_p->px_l23ready_cv);
1751 	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1752 		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1753 	} else {
1754 		/*
1755 		 * This may be the second ack received. If so, we
1756 		 * should be receiving it during the wait-for-L1 stage.
1757 		 */
1758 		px_p->px_pmetoack_ignored++;
1759 	}
1760 	mutex_exit(&px_p->px_l23ready_lock);
1761 	return (DDI_INTR_CLAIMED);
1762 }
1763 
1764 static int
1765 px_pre_pwron_check(px_t *px_p)
1766 {
1767 	pcie_pwr_t	*pwr_p;
1768 
1769 	/* If no PM info, return failure */
1770 	if (!PCIE_PMINFO(px_p->px_dip) ||
1771 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1772 		return (DDI_FAILURE);
1773 
1774 	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
1775 }
1776 
1777 static int
1778 px_goto_l0(px_t *px_p)
1779 {
1780 	pcie_pwr_t	*pwr_p;
1781 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1782 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1783 	int		ret = DDI_SUCCESS;
1784 	clock_t		end, timeleft;
1785 	int		mutex_held = 1;
1786 
1787 	/* If no PM info, return failure */
1788 	if (!PCIE_PMINFO(px_p->px_dip) ||
1789 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1790 		return (DDI_FAILURE);
1791 
1792 	mutex_enter(&pwr_p->pwr_lock);
1793 	mutex_enter(&px_p->px_lupsoft_lock);
1794 	/* Clear the PX_LINKUP_RECVD flag */
1795 	px_p->px_pm_flags &= ~PX_LINKUP_RECVD;
1796 	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
1797 		ret = DDI_FAILURE;
1798 		goto l0_done;
1799 	}
1800 	px_p->px_pm_flags |= PX_LINKUP_PENDING;
1801 
1802 	end = ddi_get_lbolt() + drv_usectohz(px_linkup_timeout);
1803 	while (!(px_p->px_pm_flags & PX_LINKUP_RECVD)) {
1804 		timeleft = cv_timedwait(&px_p->px_lup_cv,
1805 		    &px_p->px_lupsoft_lock, end);
1806 		/*
1807 		 * cv_timedwait returns -1 when either
1808 		 * 1) it timed out,
1809 		 * 2) there was a premature wakeup but by the time
1810 		 * cv_timedwait is called again end < lbolt, i.e.
1811 		 * end is in the past, or
1812 		 * 3) end < lbolt was already true by the time we made
1813 		 * the first cv_timedwait call.
1814 		 */
1815 		if (timeleft == -1)
1816 			break;
1817 	}
1818 	if (!(px_p->px_pm_flags & PX_LINKUP_RECVD)) {
1819 		/*
1820 		 * Either we timed out or the interrupt didn't get a
1821 		 * chance to grab the mutex and set the flag.
1822 		 * Release the mutex and delay for some time.
1823 		 * This will 1) give the interrupt a chance to
1824 		 * set the flag and 2) create a delay between two
1825 		 * consecutive requests.
1826 		 */
1827 		mutex_exit(&px_p->px_lupsoft_lock);
1828 		mutex_held = 0;
1829 		delay(5);
1830 		if (!(px_p->px_pm_flags & PX_LINKUP_RECVD)) {
1831 			ret = DDI_FAILURE;
1832 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
1833 			    " for link up\n");
1834 		}
1835 	}
1836 	px_p->px_pm_flags &= ~(PX_LINKUP_PENDING | PX_LINKUP_RECVD);
1837 
1838 l0_done:
1839 	if (mutex_held)
1840 		mutex_exit(&px_p->px_lupsoft_lock);
1841 	if (ret == DDI_SUCCESS)
1842 		px_enable_detect_quiet(csr_base);
1843 	mutex_exit(&pwr_p->pwr_lock);
1844 	return (ret);
1845 }
1846 
1847 uint_t
1848 px_lup_softintr(caddr_t arg)
1849 {
1850 	px_t *px_p = (px_t *)arg;
1851 
1852 	mutex_enter(&px_p->px_lup_lock);
1853 	if (!(px_p->px_lupsoft_pending > 0)) {
1854 		/* Spurious */
1855 		mutex_exit(&px_p->px_lup_lock);
1856 		return (DDI_INTR_UNCLAIMED);
1857 	}
1858 	px_p->px_lupsoft_pending--;
1859 	if (px_p->px_lupsoft_pending > 0) {
1860 		/* More than one lup soft intr posted - unlikely */
1861 		mutex_exit(&px_p->px_lup_lock);
1862 		return (DDI_INTR_UNCLAIMED);
1863 	}
1864 	mutex_exit(&px_p->px_lup_lock);
1865 
1866 	mutex_enter(&px_p->px_lupsoft_lock);
1867 	cv_broadcast(&px_p->px_lup_cv);
1868 	if (px_p->px_pm_flags & PX_LINKUP_PENDING) {
1869 		px_p->px_pm_flags |= PX_LINKUP_RECVD;
1870 	} else {
1871 		/* Nobody waiting for this! */
1872 		px_p->px_lup_ignored++;
1873 	}
1874 	mutex_exit(&px_p->px_lupsoft_lock);
1875 	return (DDI_INTR_CLAIMED);
1876 }
1877 
1878 /*
1879  * Extract the driver's binding name to identify which chip we're binding to.
1880  * Whenever a new bus bridge is created, the driver alias entry should be
1881  * added here to identify the device if needed.  If a device isn't added,
1882  * the identity defaults to PX_CHIP_UNIDENTIFIED.
1883  */
1884 static uint32_t
1885 px_identity_chip(px_t *px_p)
1886 {
1887 	dev_info_t	*dip = px_p->px_dip;
1888 	char		*name = ddi_binding_name(dip);
1889 	uint32_t	revision = 0;
1890 
1891 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1892 	    "module-revision#", 0);
1893 
1894 	/* Check for Fire driver binding name */
1895 	if (strcmp(name, "pci108e,80f0") == 0) {
1896 		DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
1897 		    "name %s module-revision %d\n", ddi_driver_name(dip),
1898 		    ddi_get_instance(dip), name, revision);
1899 
1900 		return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
1901 	}
1902 
1903 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
1904 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
1905 
1906 	return (PX_CHIP_UNIDENTIFIED);
1907 }
1908 
1909 int
1910 px_err_add_intr(px_fault_t *px_fault_p)
1911 {
1912 	dev_info_t	*dip = px_fault_p->px_fh_dip;
1913 	px_t		*px_p = DIP_TO_STATE(dip);
1914 
1915 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
1916 		px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);
1917 
1918 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
1919 
1920 	return (DDI_SUCCESS);
1921 }
1922 
1923 void
1924 px_err_rem_intr(px_fault_t *px_fault_p)
1925 {
1926 	dev_info_t	*dip = px_fault_p->px_fh_dip;
1927 	px_t		*px_p = DIP_TO_STATE(dip);
1928 
1929 	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
1930 
1931 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
1932 		IB_INTR_WAIT);
1933 }
1934 
1935 #ifdef FMA
1936 void
1937 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
1938 {
1939 	/* populate the rc_status by reading the registers - TBD */
1940 }
1941 #endif /* FMA */
1942