xref: /illumos-gate/usr/src/uts/common/io/pciex/pcie.c (revision 79bed773cb9f85f14d6c40e097abafdf4cc1e687)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2017, Joyent, Inc.
25  */
26 
27 #include <sys/sysmacros.h>
28 #include <sys/types.h>
29 #include <sys/kmem.h>
30 #include <sys/modctl.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/sunndi.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/util.h>
36 #include <sys/promif.h>
37 #include <sys/disp.h>
38 #include <sys/stat.h>
39 #include <sys/file.h>
40 #include <sys/pci_cap.h>
41 #include <sys/pci_impl.h>
42 #include <sys/pcie_impl.h>
43 #include <sys/hotplug/pci/pcie_hp.h>
44 #include <sys/hotplug/pci/pciehpc.h>
45 #include <sys/hotplug/pci/pcishpc.h>
46 #include <sys/hotplug/pci/pcicfg.h>
47 #include <sys/pci_cfgacc.h>
48 
/* Local functions prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;	/* non-zero enables PCIE_DBG() tracing */
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

/* Bridge Control bits owned by firmware; preserved in pcie_initchild() */
ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

/* Same three bits as PCIE_ROOT_SYS_ERR, spelled out as a tunable */
ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.  For
 * x86 the closest thing to a PCIe root complex driver is NPE.	For SPARC the
 * closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg) x86
 * systems may want to disable SERR in general.  For root ports, enabling SERR
 * causes NMIs which are not handled and results in a watchdog timeout error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

/* Largest Max Payload Size to program (DEVCTL encoding) and ARI switch */
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

/* MPS scan/negotiation and register-mapping helpers (defined below) */
static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
	int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep,	pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc	= {
	&mod_miscops,	/* Type	of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void	*)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char		*pcie_nv_buf;
nv_alloc_t	*pcie_nvap;
nvlist_t	*pcie_nvl;
176 
177 int
178 _init(void)
179 {
180 	int rval;
181 
182 	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
183 	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
184 	pcie_nvl = fm_nvlist_create(pcie_nvap);
185 
186 	if ((rval = mod_install(&modlinkage)) != 0) {
187 		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
188 		fm_nva_xdestroy(pcie_nvap);
189 		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
190 	}
191 	return (rval);
192 }
193 
194 int
195 _fini()
196 {
197 	int		rval;
198 
199 	if ((rval = mod_remove(&modlinkage)) == 0) {
200 		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
201 		fm_nva_xdestroy(pcie_nvap);
202 		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
203 	}
204 	return (rval);
205 }
206 
207 int
208 _info(struct modinfo *modinfop)
209 {
210 	return (mod_info(&modlinkage, modinfop));
211 }
212 
/*
 * Framework init for a PCIe/PCI nexus: create the "devctl" minor node
 * and initialize the hotplug framework.  Hotplug init failure is fatal
 * on SPARC but only logged on x86 (see comment below).
 * Returns DDI_SUCCESS or the failing DDI error code.
 */
/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int	ret = DDI_SUCCESS;

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On some x86 platforms, we observed unexpected hotplug
		 * initialization failures in recent years. The known cause
		 * is a hardware issue: while the problem PCI bridges have
		 * the Hotplug Capable registers set, the machine actually
		 * does not implement the expected ACPI object.
		 *
		 * We don't want to stop PCI driver attach and system boot
		 * just because of this hotplug initialization failure.
		 * Continue with a debug message printed.
		 */
		PCIE_DBG("%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		/* SPARC treats the failure as fatal: undo the minor node. */
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	return (DDI_SUCCESS);
}
256 
257 /* ARGSUSED */
258 int
259 pcie_uninit(dev_info_t *dip)
260 {
261 	int	ret = DDI_SUCCESS;
262 
263 	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
264 		(void) pcie_ari_disable(dip);
265 
266 	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
267 		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
268 		    ddi_driver_name(dip), ddi_get_instance(dip));
269 
270 		return (ret);
271 	}
272 
273 	ddi_remove_minor_node(dip, "devctl");
274 
275 	return (ret);
276 }
277 
278 /*
279  * PCIe module interface for enabling hotplug interrupt.
280  *
281  * It should be called after pcie_init() is done and bus driver's
282  * interrupt handlers have being attached.
283  */
284 int
285 pcie_hpintr_enable(dev_info_t *dip)
286 {
287 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
288 	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);
289 
290 	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
291 		(void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
292 	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
293 		(void) pcishpc_enable_irqs(ctrl_p);
294 	}
295 	return (DDI_SUCCESS);
296 }
297 
298 /*
299  * PCIe module interface for disabling hotplug interrupt.
300  *
301  * It should be called before pcie_uninit() is called and bus driver's
302  * interrupt handlers is dettached.
303  */
304 int
305 pcie_hpintr_disable(dev_info_t *dip)
306 {
307 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
308 	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);
309 
310 	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
311 		(void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
312 	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
313 		(void) pcishpc_disable_irqs(ctrl_p);
314 	}
315 	return (DDI_SUCCESS);
316 }
317 
/*
 * Nexus interrupt entry point; all work is delegated to the hotplug
 * interrupt handler.
 */
/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	return (pcie_hp_intr(dip));
}
324 
325 /* ARGSUSED */
326 int
327 pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
328 {
329 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
330 
331 	/*
332 	 * Make sure the open is for the right file type.
333 	 */
334 	if (otyp != OTYP_CHR)
335 		return (EINVAL);
336 
337 	/*
338 	 * Handle the open by tracking the device state.
339 	 */
340 	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
341 	    ((flags & FEXCL) &&
342 	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
343 		return (EBUSY);
344 	}
345 
346 	if (flags & FEXCL)
347 		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
348 	else
349 		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;
350 
351 	return (0);
352 }
353 
354 /* ARGSUSED */
355 int
356 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
357 {
358 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
359 
360 	if (otyp != OTYP_CHR)
361 		return (EINVAL);
362 
363 	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
364 
365 	return (0);
366 }
367 
/*
 * ioctl(9E) for the "devctl" minor node.  Generic device/bus state
 * commands are forwarded to ndi_devctl_ioctl(); bus quiesce/unquiesce
 * are handled here; reset commands are not supported.
 */
/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata	*dcp;
	uint_t			bus_state;
	int			rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for devctl ioctl
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		/*
		 * Note: the inner "break" exits the switch when the bus is
		 * already quiesced, skipping the state change below.
		 */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		/* Same pattern: no-op if the bus is already active. */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}
421 
422 /* ARGSUSED */
423 int
424 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
425     int flags, char *name, caddr_t valuep, int *lengthp)
426 {
427 	if (dev == DDI_DEV_T_ANY)
428 		goto skip;
429 
430 	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
431 	    strcmp(name, "pci-occupant") == 0) {
432 		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));
433 
434 		pcie_hp_create_occupant_props(dip, dev, pci_dev);
435 	}
436 
437 skip:
438 	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
439 }
440 
441 int
442 pcie_init_cfghdl(dev_info_t *cdip)
443 {
444 	pcie_bus_t		*bus_p;
445 	ddi_acc_handle_t	eh = NULL;
446 
447 	bus_p = PCIE_DIP2BUS(cdip);
448 	if (bus_p == NULL)
449 		return (DDI_FAILURE);
450 
451 	/* Create an config access special to error handling */
452 	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
453 		cmn_err(CE_WARN, "Cannot setup config access"
454 		    " for BDF 0x%x\n", bus_p->bus_bdf);
455 		return (DDI_FAILURE);
456 	}
457 
458 	bus_p->bus_cfg_hdl = eh;
459 	return (DDI_SUCCESS);
460 }
461 
462 void
463 pcie_fini_cfghdl(dev_info_t *cdip)
464 {
465 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(cdip);
466 
467 	pci_config_teardown(&bus_p->bus_cfg_hdl);
468 }
469 
470 void
471 pcie_determine_serial(dev_info_t *dip)
472 {
473 	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
474 	ddi_acc_handle_t	h;
475 	uint16_t		cap;
476 	uchar_t			serial[8];
477 	uint32_t		low, high;
478 
479 	if (!PCIE_IS_PCIE(bus_p))
480 		return;
481 
482 	h = bus_p->bus_cfg_hdl;
483 
484 	if ((PCI_CAP_LOCATE(h, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap)) ==
485 	    DDI_FAILURE)
486 		return;
487 
488 	high = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_UPPER_DW);
489 	low = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_LOWER_DW);
490 
491 	/*
492 	 * Here, we're trying to figure out if we had an invalid PCIe read. From
493 	 * looking at the contents of the value, it can be hard to tell the
494 	 * difference between a value that has all 1s correctly versus if we had
495 	 * an error. In this case, we only assume it's invalid if both register
496 	 * reads are invalid. We also only use 32-bit reads as we're not sure if
497 	 * all devices will support these as 64-bit reads, while we know that
498 	 * they'll support these as 32-bit reads.
499 	 */
500 	if (high == PCI_EINVAL32 && low == PCI_EINVAL32)
501 		return;
502 
503 	serial[0] = low & 0xff;
504 	serial[1] = (low >> 8) & 0xff;
505 	serial[2] = (low >> 16) & 0xff;
506 	serial[3] = (low >> 24) & 0xff;
507 	serial[4] = high & 0xff;
508 	serial[5] = (high >> 8) & 0xff;
509 	serial[6] = (high >> 16) & 0xff;
510 	serial[7] = (high >> 24) & 0xff;
511 
512 	(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, "pcie-serial",
513 	    serial, sizeof (serial));
514 }
515 
/*
 * PCI-Express child device initialization.
 * This function enables generic pci-express interrupts and error
 * handling.
 *
 * @param cdip		child's dip (device's dip)
 * @return		DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t		tmp16, reg16;
	pcie_bus_t		*bus_p;
	uint32_t		devid, venid;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Update pcie_bus_t with real Vendor Id Device Id.
	 *
	 * For assigned devices in IOV environment, the OBP will return
	 * faked device id/vendor id on configration read and for both
	 * properties in root domain. translate_devid() function will
	 * update the properties with real device-id/vendor-id on such
	 * platforms, so that we can utilize the properties here to get
	 * real device-id/vendor-id and overwrite the faked ids.
	 *
	 * For unassigned devices or devices in non-IOV environment, the
	 * operation below won't make a difference.
	 *
	 * The IOV implementation only supports assignment of PCIE
	 * endpoint devices. Devices under pci-pci bridges don't need
	 * operation like this.
	 */
	devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);

	/*
	 * Clear the device's status register.  PCI status bits are RW1C,
	 * so writing back the value just read clears any set bits.
	 */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/* Setup the device's command register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	/* Preserve firmware-owned bits, apply our defaults to the rest. */
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem
	 * access as it can cause a hang if enabled.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register (RW1C) */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge. Don't enable the SERR
		 * enable bit in the bridge control register as it could lead to
		 * bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes a
		 * Master Aborts/UR to be forwarded as a UR/TA or SERR.  If this
		 * bit is masked, posted requests are dropped and non-posted
		 * requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/* note: MPS/MRRS are initialized in pcie_initchild_mps() */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);

		pcie_determine_serial(cdip);
	}

	/* Record whether ARI applies: parent forwards it and child speaks it */
	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
676 
/*
 * Allocate and initialize the pf_data_t (per-device fault data) for
 * dip, hanging it off the devinfo node.  Which sub-structures get
 * allocated depends on the node type (root, bridge, PCIe, PCI-X, ECC);
 * the allocation tree here must mirror exactly what pcie_fini_pfd()
 * frees.
 */
static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_severity_mask = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	}

	/* Every node gets the base PCI error registers and affected-dev */
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* A PCIe-to-PCI-X bridge also carries PCI-X error state */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}
761 
/*
 * Tear down the pf_data_t allocated by pcie_init_pfd(), freeing the
 * same set of sub-structures in the same type-dependent pattern, then
 * the pf_data_t itself.  Must stay in lock-step with pcie_init_pfd().
 */
static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p)) {
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
		kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
	}

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	/* Clear the dangling pointer on the devinfo node */
	PCIE_DIP2PFD(dip) = NULL;
}
833 
834 
/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex not Root Port
 *
 * Unlike pcie_init_pfd(), the caller supplies pfd_p, and the full set
 * of RP/AER sub-structures is always allocated.  Must stay in
 * lock-step with pcie_rc_fini_pfd().
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_severity_mask = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;

	/* Default UE severity for eversholt diagnosis */
	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}
865 
/*
 * Free every sub-structure allocated by pcie_rc_init_pfd().  The
 * pf_data_t itself is caller-owned and is not freed here.
 */
void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
	kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
}
879 
/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t are valid for a root complex.
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	<bus_dom>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	atomic_or_uint(&bus_p->bus_fm_flags, PF_FM_READY);

	/* Attach as the dip's downstream ("child side") bus private data */
	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);

	PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
}
932 
933 void
934 pcie_rc_fini_bus(dev_info_t *dip)
935 {
936 	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
937 	ndi_set_bus_private(dip, B_FALSE, 0, NULL);
938 	kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
939 	kmem_free(bus_p, sizeof (pcie_bus_t));
940 }
941 
942 /*
943  * We need to capture the supported, maximum, and current device speed and
944  * width. The way that this has been done has changed over time.
945  *
946  * Prior to PCIe Gen 3, there were only current and supported speed fields.
947  * These were found in the link status and link capabilities registers of the
948  * PCI express capability. With the change to PCIe Gen 3, the information in the
949  * link capabilities changed to the maximum value. The supported speeds vector
950  * was moved to the link capabilities 2 register.
951  *
952  * Now, a device may not implement some of these registers. To determine whether
953  * or not it's here, we have to do the following. First, we need to check the
954  * revision of the PCI express capability. The link capabilities 2 register did
955  * not exist prior to version 2 of this register.
956  */
957 static void
958 pcie_capture_speeds(pcie_bus_t *bus_p, pcie_req_id_t bdf, dev_info_t *rcdip)
959 {
960 	uint16_t	vers, status;
961 	uint32_t	val, cap, cap2;
962 
963 	if (!PCIE_IS_PCIE(bus_p))
964 		return;
965 
966 	vers = pci_cfgacc_get16(rcdip, bdf, bus_p->bus_pcie_off + PCIE_PCIECAP);
967 	if (vers == PCI_EINVAL16)
968 		return;
969 	vers &= PCIE_PCIECAP_VER_MASK;
970 
971 	/*
972 	 * Verify the capability's version.
973 	 */
974 	switch (vers) {
975 	case PCIE_PCIECAP_VER_1_0:
976 		cap2 = 0;
977 		break;
978 	case PCIE_PCIECAP_VER_2_0:
979 		cap2 = pci_cfgacc_get32(rcdip, bdf, bus_p->bus_pcie_off +
980 		    PCIE_LINKCAP2);
981 		if (cap2 == PCI_EINVAL32)
982 			cap2 = 0;
983 		break;
984 	default:
985 		/* Don't try and handle an unknown version */
986 		return;
987 	}
988 
989 	status = pci_cfgacc_get16(rcdip, bdf, bus_p->bus_pcie_off +
990 	    PCIE_LINKSTS);
991 	cap = pci_cfgacc_get32(rcdip, bdf, bus_p->bus_pcie_off + PCIE_LINKCAP);
992 	if (status == PCI_EINVAL16 || cap == PCI_EINVAL32)
993 		return;
994 
995 	switch (status & PCIE_LINKSTS_SPEED_MASK) {
996 	case PCIE_LINKSTS_SPEED_2_5:
997 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_2_5;
998 		break;
999 	case PCIE_LINKSTS_SPEED_5:
1000 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_5;
1001 		break;
1002 	case PCIE_LINKSTS_SPEED_8:
1003 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_8;
1004 		break;
1005 	default:
1006 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_UNKNOWN;
1007 		break;
1008 	}
1009 
1010 	switch (status & PCIE_LINKSTS_NEG_WIDTH_MASK) {
1011 	case PCIE_LINKSTS_NEG_WIDTH_X1:
1012 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X1;
1013 		break;
1014 	case PCIE_LINKSTS_NEG_WIDTH_X2:
1015 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X2;
1016 		break;
1017 	case PCIE_LINKSTS_NEG_WIDTH_X4:
1018 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X4;
1019 		break;
1020 	case PCIE_LINKSTS_NEG_WIDTH_X8:
1021 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X8;
1022 		break;
1023 	case PCIE_LINKSTS_NEG_WIDTH_X12:
1024 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X12;
1025 		break;
1026 	case PCIE_LINKSTS_NEG_WIDTH_X16:
1027 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X16;
1028 		break;
1029 	case PCIE_LINKSTS_NEG_WIDTH_X32:
1030 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X32;
1031 		break;
1032 	default:
1033 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_UNKNOWN;
1034 		break;
1035 	}
1036 
1037 	switch (cap & PCIE_LINKCAP_MAX_WIDTH_MASK) {
1038 	case PCIE_LINKCAP_MAX_WIDTH_X1:
1039 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X1;
1040 		break;
1041 	case PCIE_LINKCAP_MAX_WIDTH_X2:
1042 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X2;
1043 		break;
1044 	case PCIE_LINKCAP_MAX_WIDTH_X4:
1045 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X4;
1046 		break;
1047 	case PCIE_LINKCAP_MAX_WIDTH_X8:
1048 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X8;
1049 		break;
1050 	case PCIE_LINKCAP_MAX_WIDTH_X12:
1051 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X12;
1052 		break;
1053 	case PCIE_LINKCAP_MAX_WIDTH_X16:
1054 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X16;
1055 		break;
1056 	case PCIE_LINKCAP_MAX_WIDTH_X32:
1057 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X32;
1058 		break;
1059 	default:
1060 		bus_p->bus_max_width = PCIE_LINK_WIDTH_UNKNOWN;
1061 		break;
1062 	}
1063 
1064 	/*
1065 	 * If we have the Link Capabilities 2, then we can get the supported
1066 	 * speeds from it and treat the bits in Link Capabilities 1 as the
1067 	 * maximum. If we don't, then we need to follow the Implementation Note
1068 	 * in the standard under Link Capabilities 2. Effectively, this means
1069 	 * that if the value of 10b is set in Link Capabilities register, that
1070 	 * it supports both 2.5 and 5 GT/s speeds.
1071 	 */
1072 	if (cap2 != 0) {
1073 		if (cap2 & PCIE_LINKCAP2_SPEED_2_5)
1074 			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_2_5;
1075 		if (cap2 & PCIE_LINKCAP2_SPEED_5)
1076 			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_5;
1077 		if (cap2 & PCIE_LINKCAP2_SPEED_8)
1078 			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_8;
1079 
1080 		switch (cap & PCIE_LINKCAP_MAX_SPEED_MASK) {
1081 		case PCIE_LINKCAP_MAX_SPEED_2_5:
1082 			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
1083 			break;
1084 		case PCIE_LINKCAP_MAX_SPEED_5:
1085 			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
1086 			break;
1087 		case PCIE_LINKCAP_MAX_SPEED_8:
1088 			bus_p->bus_max_speed = PCIE_LINK_SPEED_8;
1089 			break;
1090 		default:
1091 			bus_p->bus_max_speed = PCIE_LINK_SPEED_UNKNOWN;
1092 			break;
1093 		}
1094 	} else {
1095 		if (cap & PCIE_LINKCAP_MAX_SPEED_5) {
1096 			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
1097 			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5 |
1098 			    PCIE_LINK_SPEED_5;
1099 		}
1100 
1101 		if (cap & PCIE_LINKCAP_MAX_SPEED_2_5) {
1102 			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
1103 			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5;
1104 		}
1105 	}
1106 }
1107 
1108 /*
1109  * partially init pcie_bus_t for device (dip,bdf) for accessing pci
1110  * config space
1111  *
1112  * This routine is invoked during boot, either after creating a devinfo node
1113  * (x86 case) or during px driver attach (sparc case); it is also invoked
1114  * in hotplug context after a devinfo node is created.
1115  *
1116  * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
1117  * is set:
1118  *
1119  * dev_info_t *		<bus_dip>
1120  * dev_info_t *		<bus_rp_dip>
1121  * ddi_acc_handle_t	bus_cfg_hdl
1122  * uint_t		bus_fm_flags
1123  * pcie_req_id_t	<bus_bdf>
1124  * pcie_req_id_t	<bus_rp_bdf>
1125  * uint32_t		<bus_dev_ven_id>
1126  * uint8_t		<bus_rev_id>
1127  * uint8_t		<bus_hdr_type>
1128  * uint16_t		<bus_dev_type>
 * uint8_t		<bus_bdg_secbus>
1130  * uint16_t		<bus_pcie_off>
1131  * uint16_t		<bus_aer_off>
1132  * uint16_t		<bus_pcix_off>
1133  * uint16_t		<bus_ecc_ver>
1134  * pci_bus_range_t	bus_bus_range
1135  * ppb_ranges_t	*	bus_addr_ranges
1136  * int			bus_addr_entries
1137  * pci_regspec_t *	bus_assigned_addr
1138  * int			bus_assigned_entries
1139  * pf_data_t *		bus_pfd
1140  * pcie_domain_t *	bus_dom
1141  * int			bus_mps
1142  * uint64_t		bus_cfgacc_base
1143  * void	*		bus_plat_private
1144  *
1145  * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
1146  * is set:
1147  *
1148  * dev_info_t *		bus_dip
1149  * dev_info_t *		bus_rp_dip
1150  * ddi_acc_handle_t	bus_cfg_hdl
1151  * uint_t		bus_fm_flags
1152  * pcie_req_id_t	bus_bdf
1153  * pcie_req_id_t	bus_rp_bdf
1154  * uint32_t		bus_dev_ven_id
1155  * uint8_t		bus_rev_id
1156  * uint8_t		bus_hdr_type
1157  * uint16_t		bus_dev_type
1158  * uint8_t		<bus_bdg_secbus>
1159  * uint16_t		bus_pcie_off
1160  * uint16_t		bus_aer_off
1161  * uint16_t		bus_pcix_off
1162  * uint16_t		bus_ecc_ver
1163  * pci_bus_range_t	<bus_bus_range>
1164  * ppb_ranges_t	*	<bus_addr_ranges>
1165  * int			<bus_addr_entries>
1166  * pci_regspec_t *	<bus_assigned_addr>
1167  * int			<bus_assigned_entries>
1168  * pf_data_t *		<bus_pfd>
1169  * pcie_domain_t *	bus_dom
1170  * int			bus_mps
1171  * uint64_t		bus_cfgacc_base
1172  * void	*		<bus_plat_private>
1173  */
1174 
pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t	status, base, baseptr, num_cap;
	uint32_t	capid;
	int		range_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*rcdip;
	dev_info_t	*pdip;
	const char	*errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;

	/* PCIE_BUS_INITIAL: allocate the bus_t and probe config space. */
	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	/* All config accesses below are routed through the root complex. */
	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type */
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	/* The capability pointer's location depends on the header type. */
	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

	/*
	 * Walk the conventional capability list, stopping early once both
	 * capabilities of interest (num_cap) have been found.
	 */
	base = baseptr;
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			/* ECC version lives in a different register on bridges */
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

	/*
	 * Speed information is captured later, in the PCIE_BUS_FINAL phase,
	 * via pcie_capture_speeds().
	 */

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, One at Bus
			 * 0x0 and one at Bus 0x80.  The requester is from Bus
			 * 0x80
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	(void) atomic_swap_uint(&bus_p->bus_fm_flags, 0);
	bus_p->bus_mps = 0;

	/* Hang the new bus_t off the devinfo node. */
	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/*
	 * PCIE_BUS_FINAL: the bus_t already exists (set up either above or
	 * in a previous PCIE_BUS_INITIAL call) -- re-fetch it from the dip.
	 */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		/* convert byte count returned by DDI into an entry count */
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

	pcie_capture_speeds(bus_p, bdf, rcdip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
}
1414 
1415 /*
1416  * Invoked before destroying devinfo node, mostly during hotplug
1417  * operation to free pcie_bus_t data structure
1418  */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	/* Undo what pcie_init_bus() did for the PCIE_BUS_INITIAL phase. */
	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	/* Undo the PCIE_BUS_FINAL phase and release the bus_t itself. */
	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		/* Detach the bus_t from the devinfo before freeing it. */
		ndi_set_bus_private(dip, B_TRUE, 0, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}
1451 
1452 int
1453 pcie_postattach_child(dev_info_t *cdip)
1454 {
1455 	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);
1456 
1457 	if (!bus_p)
1458 		return (DDI_FAILURE);
1459 
1460 	return (pcie_enable_ce(cdip));
1461 }
1462 
1463 /*
1464  * PCI-Express child device de-initialization.
1465  * This function disables generic pci-express interrupts and error
1466  * handling.
1467  */
void
pcie_uninitchild(dev_info_t *cdip)
{
	/* Disable baseline/AER error reporting first, ... */
	pcie_disable_errors(cdip);
	/* ... then release the config-access handle, ... */
	pcie_fini_cfghdl(cdip);
	/* ... and finally tear down the fault-management domain. */
	pcie_fini_dom(cdip);
}
1475 
1476 /*
1477  * find the root complex dip
1478  */
1479 dev_info_t *
1480 pcie_get_rc_dip(dev_info_t *dip)
1481 {
1482 	dev_info_t *rcdip;
1483 	pcie_bus_t *rc_bus_p;
1484 
1485 	for (rcdip = ddi_get_parent(dip); rcdip;
1486 	    rcdip = ddi_get_parent(rcdip)) {
1487 		rc_bus_p = PCIE_DIP2BUS(rcdip);
1488 		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
1489 			break;
1490 	}
1491 
1492 	return (rcdip);
1493 }
1494 
1495 boolean_t
1496 pcie_is_pci_device(dev_info_t *dip)
1497 {
1498 	dev_info_t	*pdip;
1499 	char		*device_type;
1500 
1501 	pdip = ddi_get_parent(dip);
1502 	if (pdip == NULL)
1503 		return (B_FALSE);
1504 
1505 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
1506 	    "device_type", &device_type) != DDI_PROP_SUCCESS)
1507 		return (B_FALSE);
1508 
1509 	if (strcmp(device_type, "pciex") != 0 &&
1510 	    strcmp(device_type, "pci") != 0) {
1511 		ddi_prop_free(device_type);
1512 		return (B_FALSE);
1513 	}
1514 
1515 	ddi_prop_free(device_type);
1516 	return (B_TRUE);
1517 }
1518 
/*
 * Walk-callback argument for pcie_fab_do_init_fini(), used by
 * pcie_fab_init_bus()/pcie_fab_fini_bus() when walking a fabric.
 */
typedef struct {
	boolean_t	init;	/* B_TRUE: init each bus_t; B_FALSE: fini */
	uint8_t		flags;	/* PCIE_BUS_* flags passed to init/fini */
} pcie_bus_arg_t;
1523 
1524 /*ARGSUSED*/
1525 static int
1526 pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
1527 {
1528 	pcie_req_id_t	bdf;
1529 	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;
1530 
1531 	if (!pcie_is_pci_device(dip))
1532 		goto out;
1533 
1534 	if (bus_arg->init) {
1535 		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
1536 			goto out;
1537 
1538 		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
1539 	} else {
1540 		(void) pcie_fini_bus(dip, bus_arg->flags);
1541 	}
1542 
1543 	return (DDI_WALK_CONTINUE);
1544 
1545 out:
1546 	return (DDI_WALK_PRUNECHILD);
1547 }
1548 
1549 void
1550 pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
1551 {
1552 	int		circular_count;
1553 	dev_info_t	*dip = ddi_get_child(rcdip);
1554 	pcie_bus_arg_t	arg;
1555 
1556 	arg.init = B_TRUE;
1557 	arg.flags = flags;
1558 
1559 	ndi_devi_enter(rcdip, &circular_count);
1560 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1561 	ndi_devi_exit(rcdip, circular_count);
1562 }
1563 
1564 void
1565 pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
1566 {
1567 	int		circular_count;
1568 	dev_info_t	*dip = ddi_get_child(rcdip);
1569 	pcie_bus_arg_t	arg;
1570 
1571 	arg.init = B_FALSE;
1572 	arg.flags = flags;
1573 
1574 	ndi_devi_enter(rcdip, &circular_count);
1575 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1576 	ndi_devi_exit(rcdip, circular_count);
1577 }
1578 
/*
 * Enable PCI Express error reporting for a device: clear any pending
 * errors, program baseline error handling in the device control register
 * (leaving correctable-error reporting off -- see pcie_enable_ce()),
 * enable root-port error receiving, and program the AER severity, mask,
 * ECRC and root-error-command registers where AER is present.
 */
void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	/* Everything below requires the PCIe capability. */
	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).  MPS/MRRS bits are preserved from the current value.
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		/* pcie_serr_disable_flag suppresses system-error generation */
		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}
1695 
1696 /*
1697  * This function is used for enabling CE reporting and setting the AER CE mask.
1698  * When called from outside the pcie module it should always be preceded by
1699  * a call to pcie_enable_errors.
1700  */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	/* CE reporting only applies to PCIe devices. */
	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE reporting
	 * enable field in the device control register and the AER CE mask. We
	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
	 */

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors (RW1C status register) */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}
1751 
1752 /* ARGSUSED */
1753 void
1754 pcie_disable_errors(dev_info_t *dip)
1755 {
1756 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1757 	uint16_t	device_ctl;
1758 	uint32_t	aer_reg;
1759 
1760 	if (!PCIE_IS_PCIE(bus_p))
1761 		return;
1762 
1763 	/*
1764 	 * Disable PCI-Express Baseline Error Handling
1765 	 */
1766 	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
1767 	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
1768 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);
1769 
1770 	/*
1771 	 * Disable PCI-Express Advanced Error Handling if Exists
1772 	 */
1773 	if (!PCIE_HAS_AER(bus_p))
1774 		goto root;
1775 
1776 	/* Disable Uncorrectable errors */
1777 	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);
1778 
1779 	/* Disable Correctable errors */
1780 	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);
1781 
1782 	/* Disable ECRC generation and checking */
1783 	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
1784 	    PCI_CAP_EINVAL32) {
1785 		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
1786 		    PCIE_AER_CTL_ECRC_CHECK_ENA);
1787 
1788 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
1789 	}
1790 	/*
1791 	 * Disable Secondary Uncorrectable errors if this is a bridge
1792 	 */
1793 	if (!PCIE_IS_PCIE_BDG(bus_p))
1794 		goto root;
1795 
1796 	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);
1797 
1798 root:
1799 	/*
1800 	 * disable Root Control this is a Root device
1801 	 */
1802 	if (!PCIE_IS_ROOT(bus_p))
1803 		return;
1804 
1805 	if (!pcie_serr_disable_flag) {
1806 		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
1807 		device_ctl &= ~PCIE_ROOT_SYS_ERR;
1808 		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
1809 	}
1810 
1811 	if (!PCIE_HAS_AER(bus_p))
1812 		return;
1813 
1814 	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
1815 	    PCI_CAP_EINVAL16) {
1816 		device_ctl &= ~pcie_root_error_cmd_default;
1817 		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
1818 	}
1819 }
1820 
1821 /*
1822  * Extract bdf from "reg" property.
1823  */
1824 int
1825 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
1826 {
1827 	pci_regspec_t	*regspec;
1828 	int		reglen;
1829 
1830 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1831 	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
1832 		return (DDI_FAILURE);
1833 
1834 	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
1835 		ddi_prop_free(regspec);
1836 		return (DDI_FAILURE);
1837 	}
1838 
1839 	/* Get phys_hi from first element.  All have same bdf. */
1840 	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;
1841 
1842 	ddi_prop_free(regspec);
1843 	return (DDI_SUCCESS);
1844 }
1845 
1846 dev_info_t *
1847 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
1848 {
1849 	dev_info_t *cdip = rdip;
1850 
1851 	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
1852 		;
1853 
1854 	return (cdip);
1855 }
1856 
1857 uint32_t
1858 pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
1859 {
1860 	dev_info_t *cdip;
1861 
1862 	/*
1863 	 * As part of the probing, the PCI fcode interpreter may setup a DMA
1864 	 * request if a given card has a fcode on it using dip and rdip of the
1865 	 * hotplug connector i.e, dip and rdip of px/pcieb driver. In this
1866 	 * case, return a invalid value for the bdf since we cannot get to the
1867 	 * bdf value of the actual device which will be initiating this DMA.
1868 	 */
1869 	if (rdip == dip)
1870 		return (PCIE_INVALID_BDF);
1871 
1872 	cdip = pcie_get_my_childs_dip(dip, rdip);
1873 
1874 	/*
1875 	 * For a given rdip, return the bdf value of dip's (px or pcieb)
1876 	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
1877 	 *
1878 	 * XXX - For now, return a invalid bdf value for all PCI and PCI-X
1879 	 * devices since this needs more work.
1880 	 */
1881 	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
1882 	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
1883 }
1884 
1885 uint32_t
1886 pcie_get_aer_uce_mask()
1887 {
1888 	return (pcie_aer_uce_mask);
1889 }
1890 uint32_t
1891 pcie_get_aer_ce_mask()
1892 {
1893 	return (pcie_aer_ce_mask);
1894 }
1895 uint32_t
1896 pcie_get_aer_suce_mask()
1897 {
1898 	return (pcie_aer_suce_mask);
1899 }
1900 uint32_t
1901 pcie_get_serr_mask()
1902 {
1903 	return (pcie_serr_disable_flag);
1904 }
1905 
/*
 * Set the default AER uncorrectable-error mask.  Masking UR additionally
 * turns off UR reporting in the baseline device-control defaults, and
 * masking ECRC disables ECRC generation/checking.
 */
void
pcie_set_aer_uce_mask(uint32_t mask)
{
	pcie_aer_uce_mask = mask;
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}

/* Set the default AER correctable-error mask. */
void
pcie_set_aer_ce_mask(uint32_t mask)
{
	pcie_aer_ce_mask = mask;
}

/* Set the default AER secondary uncorrectable-error mask. */
void
pcie_set_aer_suce_mask(uint32_t mask)
{
	pcie_aer_suce_mask = mask;
}

/* Set/clear the flag that suppresses SERR enabling on root ports. */
void
pcie_set_serr_mask(uint32_t mask)
{
	pcie_serr_disable_flag = mask;
}
1934 
1935 /*
1936  * Is the rdip a child of dip.	Used for checking certain CTLOPS from bubbling
1937  * up erronously.  Ex.	ISA ctlops to a PCI-PCI Bridge.
1938  */
1939 boolean_t
1940 pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
1941 {
1942 	dev_info_t	*cdip = ddi_get_child(dip);
1943 	for (; cdip; cdip = ddi_get_next_sibling(cdip))
1944 		if (cdip == rdip)
1945 			break;
1946 	return (cdip != NULL);
1947 }
1948 
1949 boolean_t
1950 pcie_is_link_disabled(dev_info_t *dip)
1951 {
1952 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
1953 
1954 	if (PCIE_IS_PCIE(bus_p)) {
1955 		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
1956 		    PCIE_LINKCTL_LINK_DISABLE)
1957 			return (B_TRUE);
1958 	}
1959 	return (B_FALSE);
1960 }
1961 
1962 /*
1963  * Initialize the MPS for a root port.
1964  *
1965  * dip - dip of root port device.
1966  */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	/* Find the highest MPS every device below this root port supports. */
	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	/* Root port's own MPS capability from its device capabilities. */
	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, 0,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	/* The effective MPS is the minimum of the fabric and the RP. */
	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}
1986 
1987 /*
1988  * Initialize the Maximum Payload Size of a device.
1989  *
1990  * cdip - dip of device.
1991  *
1992  * returns - DDI_SUCCESS or DDI_FAILURE
1993  */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		/* Non-zero ARI function numbers are skipped. */
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		int suggested_mrrs, fabric_mps;
		uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;

		dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/*
		 * A negative fabric MPS means no common value was
		 * established; fall back to the system-wide defaults for
		 * the MPS/MRRS fields and leave everything else as-is.
		 * Root ports use their own bus_mps; others use the parent's.
		 */
		if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
		    PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
			dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
			    (pcie_devctl_default &
			    (PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK));

			PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
			return (DDI_SUCCESS);
		}

		/* Device's own MPS capability and currently-set MRRS. */
		device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
		    PCIE_DEVCTL_MAX_READ_REQ_SHIFT;

		/* Clamp MPS to the smaller of device capability and fabric. */
		if (device_mps_cap < fabric_mps)
			device_mrrs = device_mps = device_mps_cap;
		else
			device_mps = (uint16_t)fabric_mps;

		/* Allow a per-device "suggested-mrrs" property to lower MRRS. */
		suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);

		if ((device_mps == fabric_mps) ||
		    (suggested_mrrs < device_mrrs))
			device_mrrs = (uint16_t)suggested_mrrs;

		/*
		 * Replace MPS and MRRS settings.
		 */
		dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK);

		dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
		    device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = device_mps;
	}

	return (DDI_SUCCESS);
}
2074 
2075 /*
 * Scans a device tree/branch for the maximum payload size capability.
2077  *
2078  * rc_dip - dip of Root Complex.
2079  * dip - dip of device where scan will begin.
2080  * max_supported (IN) - maximum allowable MPS.
2081  * max_supported (OUT) - maximum payload size capability of fabric.
2082  */
2083 void
2084 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2085 {
2086 	if (dip == NULL)
2087 		return;
2088 
2089 	/*
2090 	 * Perform a fabric scan to obtain Maximum Payload Capabilities
2091 	 */
2092 	(void) pcie_scan_mps(rc_dip, dip, max_supported);
2093 
2094 	PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported);
2095 }
2096 
2097 /*
2098  * Scans fabric and determines Maximum Payload Size based on
 * highest common denominator algorithm
2100  */
2101 static void
2102 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2103 {
2104 	int circular_count;
2105 	pcie_max_supported_t max_pay_load_supported;
2106 
2107 	max_pay_load_supported.dip = rc_dip;
2108 	max_pay_load_supported.highest_common_mps = *max_supported;
2109 
2110 	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
2111 	ddi_walk_devs(dip, pcie_get_max_supported,
2112 	    (void *)&max_pay_load_supported);
2113 	ndi_devi_exit(ddi_get_parent(dip), circular_count);
2114 
2115 	*max_supported = max_pay_load_supported.highest_common_mps;
2116 }
2117 
2118 /*
2119  * Called as part of the Maximum Payload Size scan.
2120  */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

	/*
	 * NOTE(review): the RC's first child appears to be used below as
	 * the anchor dip for pcie_map_phys(); if the RC has no children
	 * there is nothing to map through — skip this node.
	 */
	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	/* Only PCIe devices participate in the MPS calculation. */
	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	/*
	 * If the suggested-mrrs property exists, then don't include this
	 * device in the MPS capabilities scan.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "suggested-mrrs") != 0)
		goto fail1;

	/* "reg" supplies the config-space address to map below. */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
		    "Can not read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	/* Map the device's config space for direct capability access. */
	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s:  pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	/* MPS capability is the low field of the Device Capabilities reg. */
	max_supported = PCI_CAP_GET16(config_handle, 0, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	/* Track the lowest capability seen across the fabric. */
	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

	/* Unwind in reverse order of acquisition. */
fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}
2185 
2186 /*
2187  * Determines if there are any root ports attached to a root complex.
2188  *
2189  * dip - dip of root complex
2190  *
2191  * Returns - DDI_SUCCESS if there is at least one root port otherwise
2192  *	     DDI_FAILURE.
2193  */
2194 int
2195 pcie_root_port(dev_info_t *dip)
2196 {
2197 	int port_type;
2198 	uint16_t cap_ptr;
2199 	ddi_acc_handle_t config_handle;
2200 	dev_info_t *cdip = ddi_get_child(dip);
2201 
2202 	/*
2203 	 * Determine if any of the children of the passed in dip
2204 	 * are root ports.
2205 	 */
2206 	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
2207 
2208 		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
2209 			continue;
2210 
2211 		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
2212 		    &cap_ptr)) == DDI_FAILURE) {
2213 			pci_config_teardown(&config_handle);
2214 			continue;
2215 		}
2216 
2217 		port_type = PCI_CAP_GET16(config_handle, 0, cap_ptr,
2218 		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
2219 
2220 		pci_config_teardown(&config_handle);
2221 
2222 		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
2223 			return (DDI_SUCCESS);
2224 	}
2225 
2226 	/* No root ports were found */
2227 
2228 	return (DDI_FAILURE);
2229 }
2230 
2231 /*
 * Function that determines if a device is a PCIe device.
2233  *
2234  * dip - dip of device.
2235  *
2236  * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
2237  */
2238 int
2239 pcie_dev(dev_info_t *dip)
2240 {
2241 	/* get parent device's device_type property */
2242 	char *device_type;
2243 	int rc = DDI_FAILURE;
2244 	dev_info_t *pdip = ddi_get_parent(dip);
2245 
2246 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
2247 	    DDI_PROP_DONTPASS, "device_type", &device_type)
2248 	    != DDI_PROP_SUCCESS) {
2249 		return (DDI_FAILURE);
2250 	}
2251 
2252 	if (strcmp(device_type, "pciex") == 0)
2253 		rc = DDI_SUCCESS;
2254 	else
2255 		rc = DDI_FAILURE;
2256 
2257 	ddi_prop_free(device_type);
2258 	return (rc);
2259 }
2260 
2261 /*
2262  * Function to map in a device's memory space.
2263  */
2264 static int
2265 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
2266     caddr_t *addrp, ddi_acc_handle_t *handlep)
2267 {
2268 	ddi_map_req_t mr;
2269 	ddi_acc_hdl_t *hp;
2270 	int result;
2271 	ddi_device_acc_attr_t attr;
2272 
2273 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2274 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2275 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2276 	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
2277 
2278 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
2279 	hp = impl_acc_hdl_get(*handlep);
2280 	hp->ah_vers = VERS_ACCHDL;
2281 	hp->ah_dip = dip;
2282 	hp->ah_rnumber = 0;
2283 	hp->ah_offset = 0;
2284 	hp->ah_len = 0;
2285 	hp->ah_acc = attr;
2286 
2287 	mr.map_op = DDI_MO_MAP_LOCKED;
2288 	mr.map_type = DDI_MT_REGSPEC;
2289 	mr.map_obj.rp = (struct regspec *)phys_spec;
2290 	mr.map_prot = PROT_READ | PROT_WRITE;
2291 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2292 	mr.map_handlep = hp;
2293 	mr.map_vers = DDI_MAP_VERSION;
2294 
2295 	result = ddi_map(dip, &mr, 0, 0, addrp);
2296 
2297 	if (result != DDI_SUCCESS) {
2298 		impl_acc_hdl_free(*handlep);
2299 		*handlep = (ddi_acc_handle_t)NULL;
2300 	} else {
2301 		hp->ah_addr = *addrp;
2302 	}
2303 
2304 	return (result);
2305 }
2306 
2307 /*
 * Map out memory that was mapped in with pcie_map_phys().
2309  */
2310 static void
2311 pcie_unmap_phys(ddi_acc_handle_t *handlep,  pci_regspec_t *ph)
2312 {
2313 	ddi_map_req_t mr;
2314 	ddi_acc_hdl_t *hp;
2315 
2316 	hp = impl_acc_hdl_get(*handlep);
2317 	ASSERT(hp);
2318 
2319 	mr.map_op = DDI_MO_UNMAP;
2320 	mr.map_type = DDI_MT_REGSPEC;
2321 	mr.map_obj.rp = (struct regspec *)ph;
2322 	mr.map_prot = PROT_READ | PROT_WRITE;
2323 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2324 	mr.map_handlep = hp;
2325 	mr.map_vers = DDI_MAP_VERSION;
2326 
2327 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
2328 	    hp->ah_len, &hp->ah_addr);
2329 
2330 	impl_acc_hdl_free(*handlep);
2331 	*handlep = (ddi_acc_handle_t)NULL;
2332 }
2333 
2334 void
2335 pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
2336 {
2337 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2338 	bus_p->bus_pfd->pe_rber_fatal = val;
2339 }
2340 
2341 /*
2342  * Return parent Root Port's pe_rber_fatal value.
2343  */
2344 boolean_t
2345 pcie_get_rber_fatal(dev_info_t *dip)
2346 {
2347 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2348 	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
2349 	return (rp_bus_p->bus_pfd->pe_rber_fatal);
2350 }
2351 
2352 int
2353 pcie_ari_supported(dev_info_t *dip)
2354 {
2355 	uint32_t devcap2;
2356 	uint16_t pciecap;
2357 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2358 	uint8_t dev_type;
2359 
2360 	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);
2361 
2362 	if (bus_p == NULL)
2363 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2364 
2365 	dev_type = bus_p->bus_dev_type;
2366 
2367 	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
2368 	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
2369 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2370 
2371 	if (pcie_disable_ari) {
2372 		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
2373 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2374 	}
2375 
2376 	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);
2377 
2378 	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
2379 		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
2380 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2381 	}
2382 
2383 	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);
2384 
2385 	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
2386 	    dip, devcap2);
2387 
2388 	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
2389 		PCIE_DBG("pcie_ari_supported: "
2390 		    "dip=%p: ARI Forwarding is supported\n", dip);
2391 		return (PCIE_ARI_FORW_SUPPORTED);
2392 	}
2393 	return (PCIE_ARI_FORW_NOT_SUPPORTED);
2394 }
2395 
2396 int
2397 pcie_ari_enable(dev_info_t *dip)
2398 {
2399 	uint16_t devctl2;
2400 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2401 
2402 	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);
2403 
2404 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2405 		return (DDI_FAILURE);
2406 
2407 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2408 	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
2409 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2410 
2411 	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
2412 	    dip, devctl2);
2413 
2414 	return (DDI_SUCCESS);
2415 }
2416 
2417 int
2418 pcie_ari_disable(dev_info_t *dip)
2419 {
2420 	uint16_t devctl2;
2421 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2422 
2423 	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);
2424 
2425 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2426 		return (DDI_FAILURE);
2427 
2428 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2429 	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
2430 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2431 
2432 	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
2433 	    dip, devctl2);
2434 
2435 	return (DDI_SUCCESS);
2436 }
2437 
2438 int
2439 pcie_ari_is_enabled(dev_info_t *dip)
2440 {
2441 	uint16_t devctl2;
2442 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2443 
2444 	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);
2445 
2446 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2447 		return (PCIE_ARI_FORW_DISABLED);
2448 
2449 	devctl2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCTL2);
2450 
2451 	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
2452 	    dip, devctl2);
2453 
2454 	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
2455 		PCIE_DBG("pcie_ari_is_enabled: "
2456 		    "dip=%p: ARI Forwarding is enabled\n", dip);
2457 		return (PCIE_ARI_FORW_ENABLED);
2458 	}
2459 
2460 	return (PCIE_ARI_FORW_DISABLED);
2461 }
2462 
2463 int
2464 pcie_ari_device(dev_info_t *dip)
2465 {
2466 	ddi_acc_handle_t handle;
2467 	uint16_t cap_ptr;
2468 
2469 	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);
2470 
2471 	/*
2472 	 * XXX - This function may be called before the bus_p structure
2473 	 * has been populated.  This code can be changed to remove
2474 	 * pci_config_setup()/pci_config_teardown() when the RFE
2475 	 * to populate the bus_p structures early in boot is putback.
2476 	 */
2477 
2478 	/* First make sure it is a PCIe device */
2479 
2480 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2481 		return (PCIE_NOT_ARI_DEVICE);
2482 
2483 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
2484 	    != DDI_SUCCESS) {
2485 		pci_config_teardown(&handle);
2486 		return (PCIE_NOT_ARI_DEVICE);
2487 	}
2488 
2489 	/* Locate the ARI Capability */
2490 
2491 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
2492 	    &cap_ptr)) == DDI_FAILURE) {
2493 		pci_config_teardown(&handle);
2494 		return (PCIE_NOT_ARI_DEVICE);
2495 	}
2496 
2497 	/* ARI Capability was found so it must be a ARI device */
2498 	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);
2499 
2500 	pci_config_teardown(&handle);
2501 	return (PCIE_ARI_DEVICE);
2502 }
2503 
2504 int
2505 pcie_ari_get_next_function(dev_info_t *dip, int *func)
2506 {
2507 	uint32_t val;
2508 	uint16_t cap_ptr, next_function;
2509 	ddi_acc_handle_t handle;
2510 
2511 	/*
2512 	 * XXX - This function may be called before the bus_p structure
2513 	 * has been populated.  This code can be changed to remove
2514 	 * pci_config_setup()/pci_config_teardown() when the RFE
2515 	 * to populate the bus_p structures early in boot is putback.
2516 	 */
2517 
2518 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2519 		return (DDI_FAILURE);
2520 
2521 	if ((PCI_CAP_LOCATE(handle,
2522 	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
2523 		pci_config_teardown(&handle);
2524 		return (DDI_FAILURE);
2525 	}
2526 
2527 	val = PCI_CAP_GET32(handle, 0, cap_ptr, PCIE_ARI_CAP);
2528 
2529 	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
2530 	    PCIE_ARI_CAP_NEXT_FUNC_MASK;
2531 
2532 	pci_config_teardown(&handle);
2533 
2534 	*func = next_function;
2535 
2536 	return (DDI_SUCCESS);
2537 }
2538 
2539 dev_info_t *
2540 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
2541 {
2542 	pcie_req_id_t child_bdf;
2543 	dev_info_t *cdip;
2544 
2545 	for (cdip = ddi_get_child(dip); cdip;
2546 	    cdip = ddi_get_next_sibling(cdip)) {
2547 
2548 		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2549 			return (NULL);
2550 
2551 		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
2552 			return (cdip);
2553 	}
2554 	return (NULL);
2555 }
2556 
2557 #ifdef	DEBUG
2558 
/*
 * Debug helper: dump the identifying and capability-offset fields of a
 * pcie_bus_t via pcie_dbg().
 */
static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}
2576 
2577 /*
2578  * For debugging purposes set pcie_dbg_print != 0 to see printf messages
2579  * during interrupt.
2580  *
2581  * When a proper solution is in place this code will disappear.
2582  * Potential solutions are:
2583  * o circular buffers
2584  * o taskq to print at lower pil
2585  */
2586 int pcie_dbg_print = 0;
2587 void
2588 pcie_dbg(char *fmt, ...)
2589 {
2590 	va_list ap;
2591 
2592 	if (!pcie_debug_flags) {
2593 		return;
2594 	}
2595 	va_start(ap, fmt);
2596 	if (servicing_interrupt()) {
2597 		if (pcie_dbg_print) {
2598 			prom_vprintf(fmt, ap);
2599 		}
2600 	} else {
2601 		prom_vprintf(fmt, ap);
2602 	}
2603 	va_end(ap);
2604 }
2605 #endif	/* DEBUG */
2606 
2607 #if defined(__i386) || defined(__amd64)
2608 static void
2609 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
2610     boolean_t *empty_mem_range)
2611 {
2612 	uint8_t	class, subclass;
2613 	uint_t	val;
2614 
2615 	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
2616 	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);
2617 
2618 	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
2619 		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
2620 		    PCI_BCNF_IO_MASK) << 8);
2621 		/*
2622 		 * Assuming that a zero based io_range[0] implies an
2623 		 * invalid I/O range.  Likewise for mem_range[0].
2624 		 */
2625 		if (val == 0)
2626 			*empty_io_range = B_TRUE;
2627 		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
2628 		    PCI_BCNF_MEM_MASK) << 16);
2629 		if (val == 0)
2630 			*empty_mem_range = B_TRUE;
2631 	}
2632 }
2633 
2634 #endif /* defined(__i386) || defined(__amd64) */
2635