1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
26  * Copyright 2019 Peter Tribble.
27  */
28 
29 /*
30  * Psycho+ specifics implementation:
31  *	interrupt mapping register
32  *	PBM configuration
33  *	ECC and PBM error handling
34  *	Iommu mapping handling
35  *	Streaming Cache flushing
36  */
37 
38 #include <sys/types.h>
39 #include <sys/kmem.h>
40 #include <sys/sysmacros.h>
41 #include <sys/async.h>
42 #include <sys/systm.h>
43 #include <sys/intreg.h>		/* UPAID_TO_IGN() */
44 #include <sys/ivintr.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/machsystm.h>
48 #include <sys/fm/util.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/iommutsb.h>
51 #include <sys/spl.h>
52 #include <sys/fm/util.h>
53 #include <sys/fm/protocol.h>
54 #include <sys/fm/io/pci.h>
55 #include <sys/fm/io/sun4upci.h>
56 #include <sys/pci/pci_obj.h>
57 #include <sys/pci/pcipsy.h>
58 
59 static uint32_t pci_identity_init(pci_t *pci_p);
60 static int pci_intr_setup(pci_t *pci_p);
61 static void pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p);
62 
63 static pci_ksinfo_t	*pci_name_kstat;
64 
65 /*LINTLIBRARY*/
66 /* called by pci_attach() DDI_ATTACH to initialize pci objects */
67 int
68 pci_obj_setup(pci_t *pci_p)
69 {
70 	pci_common_t *cmn_p;
71 	int ret;
72 
73 	mutex_enter(&pci_global_mutex);
74 	cmn_p = get_pci_common_soft_state(pci_p->pci_id);
75 	if (cmn_p == NULL) {
76 		uint_t id = pci_p->pci_id;
77 		if (alloc_pci_common_soft_state(id) != DDI_SUCCESS) {
78 			mutex_exit(&pci_global_mutex);
79 			return (DDI_FAILURE);
80 		}
81 		cmn_p = get_pci_common_soft_state(id);
82 		cmn_p->pci_common_id = id;
83 	}
84 
85 	ASSERT((pci_p->pci_side == 0) || (pci_p->pci_side == 1));
86 	if (cmn_p->pci_p[pci_p->pci_side]) {
87 		/* second side attach */
88 		pci_p->pci_side = PCI_OTHER_SIDE(pci_p->pci_side);
89 		ASSERT(cmn_p->pci_p[pci_p->pci_side] == NULL);
90 	}
91 
92 	cmn_p->pci_p[pci_p->pci_side] = pci_p;
93 	pci_p->pci_common_p = cmn_p;
94 
95 	if (cmn_p->pci_common_refcnt == 0) {
96 		/* Perform allocation first to avoid delicate unwinding. */
97 		if (pci_alloc_tsb(pci_p) != DDI_SUCCESS) {
98 			cmn_p->pci_p[pci_p->pci_side] = NULL;
99 			pci_p->pci_common_p = NULL;
100 			free_pci_common_soft_state(cmn_p->pci_common_id);
101 			mutex_exit(&pci_global_mutex);
102 			return (DDI_FAILURE);
103 		}
104 		cmn_p->pci_common_tsb_cookie = pci_p->pci_tsb_cookie;
105 		cmn_p->pci_chip_id = pci_identity_init(pci_p);
106 
107 		ib_create(pci_p);
108 		cmn_p->pci_common_ib_p = pci_p->pci_ib_p;
109 
110 		cb_create(pci_p);
111 		cmn_p->pci_common_cb_p = pci_p->pci_cb_p;
112 
113 		iommu_create(pci_p);
114 		cmn_p->pci_common_iommu_p = pci_p->pci_iommu_p;
115 
116 		ecc_create(pci_p);
117 		cmn_p->pci_common_ecc_p = pci_p->pci_ecc_p;
118 	} else {
119 		ASSERT(cmn_p->pci_common_refcnt == 1);
120 
121 		pci_p->pci_tsb_cookie = cmn_p->pci_common_tsb_cookie;
122 		pci_p->pci_ib_p = cmn_p->pci_common_ib_p;
123 		pci_p->pci_cb_p = cmn_p->pci_common_cb_p;
124 		pci_p->pci_iommu_p = cmn_p->pci_common_iommu_p;
125 		pci_p->pci_ecc_p = cmn_p->pci_common_ecc_p;
126 	}
127 
128 	pbm_create(pci_p);
129 	sc_create(pci_p);
130 
131 	pci_fm_create(pci_p);
132 
133 	if ((ret = pci_intr_setup(pci_p)) != DDI_SUCCESS)
134 		goto done;
135 	if (CHIP_TYPE(pci_p) == PCI_CHIP_PSYCHO)
136 		pci_kstat_create(pci_p);
137 
138 	cmn_p->pci_common_attachcnt++;
139 	cmn_p->pci_common_refcnt++;
140 done:
141 	mutex_exit(&pci_global_mutex);
142 	if (ret != DDI_SUCCESS)
143 		cmn_err(CE_NOTE, "Interrupt register failure, returning 0x%x\n",
144 		    ret);
145 	return (ret);
146 }
147 
148 /* called by pci_detach() DDI_DETACH to destroy pci objects */
149 void
150 pci_obj_destroy(pci_t *pci_p)
151 {
152 	pci_common_t *cmn_p;
153 
154 	mutex_enter(&pci_global_mutex);
155 
156 	cmn_p = pci_p->pci_common_p;
157 	cmn_p->pci_common_refcnt--;
158 	cmn_p->pci_common_attachcnt--;
159 
160 	pci_kstat_destroy(pci_p);
161 
162 	sc_destroy(pci_p);
163 	pbm_destroy(pci_p);
164 	pci_fm_destroy(pci_p);
165 
166 	if (cmn_p->pci_common_refcnt != 0) {
167 		cmn_p->pci_p[pci_p->pci_side] = NULL;
168 		mutex_exit(&pci_global_mutex);
169 		return;
170 	}
171 
172 	ecc_destroy(pci_p);
173 	iommu_destroy(pci_p);
174 	cb_destroy(pci_p);
175 	ib_destroy(pci_p);
176 
177 	free_pci_common_soft_state(cmn_p->pci_common_id);
178 	pci_intr_teardown(pci_p);
179 	mutex_exit(&pci_global_mutex);
180 }
181 
182 /* called by pci_attach() DDI_RESUME to (re)initialize pci objects */
183 void
184 pci_obj_resume(pci_t *pci_p)
185 {
186 	pci_common_t *cmn_p = pci_p->pci_common_p;
187 
188 	mutex_enter(&pci_global_mutex);
189 
190 	if (cmn_p->pci_common_attachcnt == 0) {
191 		ib_configure(pci_p->pci_ib_p);
192 		iommu_configure(pci_p->pci_iommu_p);
193 		ecc_configure(pci_p);
194 		ib_resume(pci_p->pci_ib_p);
195 	}
196 
197 	pbm_configure(pci_p->pci_pbm_p);
198 	sc_configure(pci_p->pci_sc_p);
199 
200 	if (cmn_p->pci_common_attachcnt == 0)
201 		cb_resume(pci_p->pci_cb_p);
202 
203 	pbm_resume(pci_p->pci_pbm_p);
204 
205 	cmn_p->pci_common_attachcnt++;
206 	mutex_exit(&pci_global_mutex);
207 }
208 
209 /* called by pci_detach() DDI_SUSPEND to suspend pci objects */
210 void
211 pci_obj_suspend(pci_t *pci_p)
212 {
213 	mutex_enter(&pci_global_mutex);
214 
215 	pbm_suspend(pci_p->pci_pbm_p);
216 	if (!--pci_p->pci_common_p->pci_common_attachcnt) {
217 		ib_suspend(pci_p->pci_ib_p);
218 		cb_suspend(pci_p->pci_cb_p);
219 	}
220 
221 	mutex_exit(&pci_global_mutex);
222 }
223 
224 static uint32_t javelin_prom_fix[] = {0xfff800, 0, 0, 0x3f};
225 static int
226 pci_intr_setup(pci_t *pci_p)
227 {
228 	extern char *platform;
229 	dev_info_t *dip = pci_p->pci_dip;
230 	pbm_t *pbm_p = pci_p->pci_pbm_p;
231 	cb_t *cb_p = pci_p->pci_cb_p;
232 	int i, no_of_intrs;
233 
234 	/*
235 	 * This is a hack to fix a broken imap entry in the javelin PROM.
236 	 * see bugid 4226603
237 	 * See bugid 4226603.
238 	if (strcmp((const char *)&platform, "SUNW,Ultra-250") == 0)
239 		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
240 		    "interrupt-map-mask", (caddr_t)javelin_prom_fix,
241 		    sizeof (javelin_prom_fix));
242 
243 	/*
244 	 * Get the interrupts property.
245 	 */
246 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
247 	    "interrupts", (caddr_t)&pci_p->pci_inos,
248 	    &pci_p->pci_inos_len) != DDI_SUCCESS)
249 		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
250 		    ddi_driver_name(dip), ddi_get_instance(dip));
251 
252 	/*
253 	 * Figure out the number of interrupts in the "interrupts" property
254 	 * and convert them all into inos.
255 	 */
256 	i = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "#interrupt-cells", 1);
257 	i = CELLS_1275_TO_BYTES(i);
258 	no_of_intrs = pci_p->pci_inos_len / i;
259 	for (i = 0; i < no_of_intrs; i++)
260 		pci_p->pci_inos[i] = IB_MONDO_TO_INO(pci_p->pci_inos[i]);
261 
262 	if (pci_p->pci_common_p->pci_common_refcnt == 0) {
263 		cb_p->cb_no_of_inos = no_of_intrs;
264 		if (i = cb_register_intr(pci_p))
265 			goto teardown;
266 		if (i = ecc_register_intr(pci_p))
267 			goto teardown;
268 
269 		intr_dist_add(cb_intr_dist, cb_p);
270 		cb_enable_intr(pci_p);
271 		ecc_enable_intr(pci_p);
272 	}
273 
274 	if (i = pbm_register_intr(pbm_p)) {
275 		if (pci_p->pci_common_p->pci_common_refcnt == 0)
276 			intr_dist_rem(cb_intr_dist, cb_p);
277 		goto teardown;
278 	}
279 	intr_dist_add(pbm_intr_dist, pbm_p);
280 	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_PBM]);
281 
282 	if (pci_p->pci_common_p->pci_common_refcnt == 0)
283 		intr_dist_add_weighted(ib_intr_dist_all, pci_p->pci_ib_p);
284 	return (DDI_SUCCESS);
285 teardown:
286 	pci_intr_teardown(pci_p);
287 	return (i);
288 }
289 
290 /*
291  * pci_fix_ranges - fixes the config space entry of the "ranges"
292  *	property on psycho+ platforms
293  */
294 void
295 pci_fix_ranges(pci_ranges_t *rng_p, int rng_entries)
296 {
297 	int i;
298 	for (i = 0; i < rng_entries; i++, rng_p++)
299 		if ((rng_p->child_high & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG)
300 			rng_p->parent_low |= rng_p->child_high;
301 }
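/*
 * A minimal usage sketch (assumed caller, not part of this file): the
 * attach path would fetch the "ranges" property, patch the config space
 * entry in place, and free the property buffer when done.
 *
 *	pci_ranges_t *rng_p;
 *	int len;
 *
 *	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
 *	    "ranges", (caddr_t)&rng_p, &len) == DDI_SUCCESS) {
 *		pci_fix_ranges(rng_p, len / sizeof (pci_ranges_t));
 *		...
 *		kmem_free(rng_p, len);
 *	}
 */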
302 
303 /*
304  * map_pci_registers
305  *
306  * This function is called from the attach routine to map the registers
307  * accessed by this driver.
308  *
309  * used by: pci_attach()
310  *
311  * return value: DDI_FAILURE on failure
312  */
313 int
314 map_pci_registers(pci_t *pci_p, dev_info_t *dip)
315 {
316 	ddi_device_acc_attr_t attr;
317 
318 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
319 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
320 
321 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
322 	if (ddi_regs_map_setup(dip, 0, &pci_p->pci_address[0], 0, 0,
323 	    &attr, &pci_p->pci_ac[0]) != DDI_SUCCESS) {
324 		cmn_err(CE_WARN, "%s%d: unable to map reg entry 0\n",
325 		    ddi_driver_name(dip), ddi_get_instance(dip));
326 		return (DDI_FAILURE);
327 	}
328 	/*
329 	 * if we don't have streaming buffer, then we don't have
330 	 * pci_address[2].
331 	 */
332 	if (pci_stream_buf_exists &&
333 	    ddi_regs_map_setup(dip, 2, &pci_p->pci_address[2], 0, 0,
334 	    &attr, &pci_p->pci_ac[2]) != DDI_SUCCESS) {
335 		cmn_err(CE_WARN, "%s%d: unable to map reg entry 2\n",
336 		    ddi_driver_name(dip), ddi_get_instance(dip));
337 		ddi_regs_map_free(&pci_p->pci_ac[0]);
338 		return (DDI_FAILURE);
339 	}
340 
341 	/*
342 	 * The second register set contains the bridge's configuration
343 	 * header.  This header is at the very beginning of the bridge's
344 	 * configuration space.  This space has little-endian byte order.
345 	 */
346 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
347 	if (ddi_regs_map_setup(dip, 1, &pci_p->pci_address[1], 0,
348 	    PCI_CONF_HDR_SIZE, &attr, &pci_p->pci_ac[1]) != DDI_SUCCESS) {
349 
350 		cmn_err(CE_WARN, "%s%d: unable to map reg entry 1\n",
351 		    ddi_driver_name(dip), ddi_get_instance(dip));
352 		ddi_regs_map_free(&pci_p->pci_ac[0]);
353 		if (pci_stream_buf_exists)
354 			ddi_regs_map_free(&pci_p->pci_ac[2]);
355 		return (DDI_FAILURE);
356 	}
357 	DEBUG3(DBG_ATTACH, dip, "address (%p,%p,%p)\n",
358 	    pci_p->pci_address[0], pci_p->pci_address[1],
359 	    pci_p->pci_address[2]);
360 
361 	return (DDI_SUCCESS);
362 }
363 
364 /*
365  * unmap_pci_registers:
366  *
367  * This routine unmaps the registers mapped by map_pci_registers().
368  *
369  * used by: pci_detach()
370  *
371  * return value: none
372  */
373 void
374 unmap_pci_registers(pci_t *pci_p)
375 {
376 	ddi_regs_map_free(&pci_p->pci_ac[0]);
377 	ddi_regs_map_free(&pci_p->pci_ac[1]);
378 	if (pci_stream_buf_exists)
379 		ddi_regs_map_free(&pci_p->pci_ac[2]);
380 }
381 
382 /*
383  * These convenience wrappers rely on map_pci_registers() having set up
384  * pci_address[0-2] correctly first.
385  */
386 /* The psycho+ reg base is at 1fe.0000.0000 */
387 static uintptr_t
388 get_reg_base(pci_t *pci_p)
389 {
390 	return ((uintptr_t)pci_p->pci_address[pci_stream_buf_exists ? 2 : 0]);
391 }
392 
393 /* The psycho+ config reg base is always the 2nd reg entry */
394 static uintptr_t
395 get_config_reg_base(pci_t *pci_p)
396 {
397 	return ((uintptr_t)(pci_p->pci_address[1]));
398 }
399 
400 uint64_t
401 ib_get_map_reg(ib_mondo_t mondo, uint32_t cpu_id)
402 {
403 	return ((mondo) | (cpu_id << COMMON_INTR_MAP_REG_TID_SHIFT) |
404 	    COMMON_INTR_MAP_REG_VALID);
405 
406 }
407 
408 uint32_t
409 ib_map_reg_get_cpu(volatile uint64_t reg)
410 {
411 	return ((reg & COMMON_INTR_MAP_REG_TID) >>
412 	    COMMON_INTR_MAP_REG_TID_SHIFT);
413 }
414 
415 uint64_t *
416 ib_intr_map_reg_addr(ib_t *ib_p, ib_ino_t ino)
417 {
418 	uint64_t *addr;
419 
420 	if (ino & 0x20)
421 		addr = (uint64_t *)(ib_p->ib_obio_intr_map_regs +
422 		    (((uint_t)ino & 0x1f) << 3));
423 	else
424 		addr = (uint64_t *)(ib_p->ib_slot_intr_map_regs +
425 		    (((uint_t)ino & 0x3c) << 1));
426 	return (addr);
427 }
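/*
 * Two illustrative address computations (values chosen for the example,
 * not taken from the workbook): on-board ino 0x21 maps to
 * ib_obio_intr_map_regs + ((0x21 & 0x1f) << 3) == base + 0x08, while
 * slot inos 0x04..0x07 all resolve to ib_slot_intr_map_regs +
 * ((ino & 0x3c) << 1) == base + 0x08, i.e. the four interrupt pins of
 * one slot share a single mapping register.
 */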
428 
429 uint64_t *
430 ib_clear_intr_reg_addr(ib_t *ib_p, ib_ino_t ino)
431 {
432 	uint64_t *addr;
433 
434 	if (ino & 0x20)
435 		addr = (uint64_t *)(ib_p->ib_obio_clear_intr_regs +
436 		    (((uint_t)ino & 0x1f) << 3));
437 	else
438 		addr = (uint64_t *)(ib_p->ib_slot_clear_intr_regs +
439 		    (((uint_t)ino & 0x1f) << 3));
440 	return (addr);
441 }
442 
443 /*
444  * Psycho has one mapping register per slot.
445  */
446 void
447 ib_ino_map_reg_share(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
448 {
449 	if (!IB_IS_OBIO_INO(ino)) {
450 		ASSERT(ino_p->ino_slot_no < 8);
451 		ib_p->ib_map_reg_counters[ino_p->ino_slot_no]++;
452 	}
453 }
454 
455 /*
456  * Return true if the ino shares its mapping register with other interrupts
457  * of the same slot, or is still shared by other on-board devices.
458  */
459 int
460 ib_ino_map_reg_unshare(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
461 {
462 	ASSERT(IB_IS_OBIO_INO(ino) || ino_p->ino_slot_no < 8);
463 
464 	if (IB_IS_OBIO_INO(ino))
465 		return (ino_p->ino_ipil_size);
466 	else
467 		return (--ib_p->ib_map_reg_counters[ino_p->ino_slot_no]);
468 }
469 
470 /*ARGSUSED*/
471 void
472 pci_pbm_intr_dist(pbm_t *pbm_p)
473 {
474 }
475 
476 uintptr_t
477 pci_ib_setup(ib_t *ib_p)
478 {
479 	pci_t *pci_p = ib_p->ib_pci_p;
480 	uintptr_t a = get_reg_base(pci_p);
481 
482 	ib_p->ib_ign = PCI_ID_TO_IGN(pci_p->pci_id);
483 	ib_p->ib_max_ino = PSYCHO_MAX_INO;
484 	ib_p->ib_slot_intr_map_regs = a + PSYCHO_IB_SLOT_INTR_MAP_REG_OFFSET;
485 	ib_p->ib_obio_intr_map_regs = a + PSYCHO_IB_OBIO_INTR_MAP_REG_OFFSET;
486 	ib_p->ib_obio_clear_intr_regs =
487 	    a + PSYCHO_IB_OBIO_CLEAR_INTR_REG_OFFSET;
488 	return (a);
489 }
490 
491 uint32_t
492 pci_xlate_intr(dev_info_t *dip, dev_info_t *rdip, ib_t *ib_p, uint32_t intr)
493 {
494 	int32_t len;
495 	dev_info_t *cdip;
496 	pci_regspec_t *pci_rp;
497 	uint32_t bus, dev, phys_hi;
498 
499 	if ((intr > PCI_INTD) || (intr < PCI_INTA))
500 		goto done;
501 	if (ddi_prop_exists(DDI_DEV_T_ANY, rdip, 0, "interrupt-map"))
502 		goto done;
503 	/*
504 	 * Hack for pre-1275 imap machines, e.g. quark & tazmo.
505 	 * We need to turn any PCI interrupts into ino interrupts.  Machines
506 	 * supporting imap will have this done in the map.
507 	 */
508 	cdip = get_my_childs_dip(dip, rdip);
509 	if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS, "reg",
510 	    (caddr_t)&pci_rp, &len) != DDI_SUCCESS)
511 		return (0);
512 	phys_hi = pci_rp->pci_phys_hi;
513 	kmem_free(pci_rp, len);
514 
515 	bus = PCI_REG_BUS_G(phys_hi);
516 	dev = PCI_REG_DEV_G(phys_hi);
517 
518 	/*
519 	 * The ino for a given device id is derived as 0BSSNN where
520 	 *
521 	 *	B = 0 for bus A, 1 for bus B
522 	 *	SS = dev - 1 for bus A, dev - 2 for bus B
523 	 *	NN = 00 for INTA#, 01 for INTB#, 10 for INTC#, 11 for INTD#
524 	 *
525 	 * If the pci bus number has bit 0x80 set, the devices are on the A side (66 MHz).
526 	 */
527 	DEBUG3(DBG_IB, dip, "pci_xlate_intr: bus=%x, dev=%x, intr=%x\n",
528 	    bus, dev, intr);
529 	intr--;
530 	intr |= (bus & 0x80) ? ((dev - 1) << 2) : (0x10 | ((dev - 2) << 2));
531 
532 	DEBUG1(DBG_IB, dip, "pci_xlate_intr: done ino=%x\n", intr);
533 done:
534 	return (IB_INO_TO_MONDO(ib_p, intr));
535 }
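/*
 * Worked example of the derivation above (hypothetical device): dev 3,
 * INTA# (intr == 1) on an A-side bus (bus & 0x80 set) gives intr-- == 0,
 * then ((3 - 1) << 2) == 0x08, i.e. ino 0BSSNN == 0b001000
 * (B=0, SS=2, NN=00).  The same device on the B side gives
 * 0x10 | ((3 - 2) << 2) == 0x14 (B=1, SS=1, NN=00).
 */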
536 
537 /*
538  * Return the cpuid to be used for an ino. Psycho has special slot-cpu
539  * constraints on cpu assignment:
540  *
541  * On multi-function pci cards, functions have separate devinfo nodes and
542  * interrupts. Some pci support hardware, such as the psycho/pcipsy chip,
543  * controls interrupt-to-cpu binding on a per pci-slot basis instead of per
544  * function.  For hardware like this, if an interrupt for one function has
545  * already been directed to a particular cpu, we can't choose a different
546  * cpu for another function implemented in the same pci-slot - if we did
547  * we would be redirecting the first function too (which causes problems
548  * for consistent interrupt distribution).
549  *
550  * This function determines if there is already an established slot-oriented
551  * interrupt-to-cpu binding; if there is, then it returns that
552  * cpu.  Otherwise a new cpu is selected by intr_dist_cpuid().
553  *
554  * The devinfo node we are trying to associate a cpu with is
555  * ino_p->ino_ipil_p->ipil_ih_head->ih_dip.
556  */
557 uint32_t
558 pci_intr_dist_cpuid(ib_t *ib_p, ib_ino_info_t *ino_p)
559 {
560 	dev_info_t	*rdip = ino_p->ino_ipil_p->ipil_ih_head->ih_dip;
561 	dev_info_t	*prdip = ddi_get_parent(rdip);
562 	ib_ino_info_t	*sino_p;
563 	dev_info_t	*sdip;
564 	dev_info_t	*psdip;
565 	char		*buf1 = NULL, *buf2 = NULL;
566 	char		*s1, *s2, *s3;
567 	int		l2;
568 	int		cpu_id;
569 
570 	/* must be psycho driver parent (not ebus) */
571 	if (strcmp(ddi_driver_name(prdip), "pcipsy") != 0)
572 		goto newcpu;
573 
574 	/*
575 	 * From PCI 1275 binding: 2.2.1.3 Unit Address representation:
576  *   Since the "unit-number" is the address that appears in an Open
577 	 *   Firmware 'device path', it follows that only the DD and DD,FF
578 	 *   forms of the text representation can appear in a 'device path'.
579 	 *
580 	 * The rdip unit address is of the form "DD[,FF]".  Define two
581 	 * unit address strings that represent same-slot use: "DD" and "DD,".
582 	 * The first compare uses strcmp, the second uses strncmp.
583 	 */
584 	s1 = ddi_get_name_addr(rdip);
585 	if (s1 == NULL)
586 		goto newcpu;
587 
588 	buf1 = kmem_alloc(MAXNAMELEN, KM_SLEEP);	/* strcmp */
589 	buf2 = kmem_alloc(MAXNAMELEN, KM_SLEEP);	/* strncmp */
590 	s1 = strcpy(buf1, s1);
591 	s2 = strcpy(buf2, s1);
592 
593 	s1 = strrchr(s1, ',');
594 	if (s1) {
595 		*s1 = '\0';			/* have "DD,FF" */
596 		s1 = buf1;			/* search via strcmp "DD" */
597 
598 		s2 = strrchr(s2, ',');
599 		*(s2 + 1) = '\0';
600 		s2 = buf2;
601 		l2 = strlen(s2);		/* search via strncmp "DD," */
602 	} else {
603 		(void) strcat(s2, ",");		/* have "DD" */
604 		l2 = strlen(s2);		/* search via strncmp "DD," */
605 	}
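	/*
	 * Example of the setup above (unit addresses assumed): for rdip at
	 * "3,1" we get s1 = "3" (exact match) and s2 = "3," (prefix match,
	 * l2 == 2), so siblings at "3" or "3,2" are treated as slot-mates.
	 * For a single-function rdip at "4", s1 stays NULL and only the
	 * "4," prefix is searched.
	 */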
606 
607 	/*
608 	 * Search the established ino list for devinfo nodes bound
609 	 * to an ino that matches one of the slot use strings.
610 	 */
611 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
612 	for (sino_p = ib_p->ib_ino_lst; sino_p; sino_p = sino_p->ino_next_p) {
613 		/* skip self and non-established */
614 		if ((sino_p == ino_p) || (sino_p->ino_established == 0))
615 			continue;
616 
617 		/* skip non-siblings */
618 		sdip = sino_p->ino_ipil_p->ipil_ih_head->ih_dip;
619 		psdip = ddi_get_parent(sdip);
620 		if (psdip != prdip)
621 			continue;
622 
623 		/* must be psycho driver parent (not ebus) */
624 		if (strcmp(ddi_driver_name(psdip), "pcipsy") != 0)
625 			continue;
626 
627 		s3 = ddi_get_name_addr(sdip);
628 		if ((s1 && (strcmp(s1, s3) == 0)) ||
629 		    (strncmp(s2, s3, l2) == 0)) {
630 			extern int intr_dist_debug;
631 
632 			if (intr_dist_debug)
633 				cmn_err(CE_CONT, "intr_dist: "
634 				    "pcipsy`pci_intr_dist_cpuid "
635 				    "%s#%d %s: cpu %d established "
636 				    "by %s#%d %s\n", ddi_driver_name(rdip),
637 				    ddi_get_instance(rdip),
638 				    ddi_deviname(rdip, buf1), sino_p->ino_cpuid,
639 				    ddi_driver_name(sdip),
640 				    ddi_get_instance(sdip),
641 				    ddi_deviname(sdip, buf2));
642 			break;
643 		}
644 	}
645 
646 	/* If a slot use match is found then use established cpu */
647 	if (sino_p) {
648 		cpu_id = sino_p->ino_cpuid;	/* target established cpu */
649 		goto out;
650 	}
651 
652 newcpu:	cpu_id = intr_dist_cpuid();		/* target new cpu */
653 
654 out:	if (buf1)
655 		kmem_free(buf1, MAXNAMELEN);
656 	if (buf2)
657 		kmem_free(buf2, MAXNAMELEN);
658 	return (cpu_id);
659 }
660 
661 
662 /*ARGSUSED*/
663 static void
664 cb_thermal_timeout(void *arg)
665 {
666 	do_shutdown();
667 
668 	/*
669 	 * In case do_shutdown() fails to halt the system.
670 	 */
671 	(void) timeout((void(*)(void *))power_down, NULL,
672 	    thermal_powerdown_delay * hz);
673 }
674 
675 /*
676  * High-level handler for psycho's CBNINTR_THERMAL interrupt.
677  *
678  * Use timeout(9f) to implement the core functionality so that the
679  * timeout(9f) function can sleep, if needed.
680  */
681 /*ARGSUSED*/
682 uint_t
683 cb_thermal_intr(caddr_t a)
684 {
685 	cmn_err(CE_WARN, "pci: Thermal warning detected!\n");
686 	if (pci_thermal_intr_fatal) {
687 		(void) timeout(cb_thermal_timeout, NULL, 0);
688 	}
689 	return (DDI_INTR_CLAIMED);
690 }
691 
692 void
693 pci_cb_teardown(pci_t *pci_p)
694 {
695 	cb_t	*cb_p = pci_p->pci_cb_p;
696 	uint32_t mondo;
697 
698 	if (pci_p->pci_thermal_interrupt != -1) {
699 		mondo = ((pci_p->pci_cb_p->cb_ign  << PCI_INO_BITS) |
700 		    pci_p->pci_inos[CBNINTR_THERMAL]);
701 		mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
702 
703 		cb_disable_nintr(cb_p, CBNINTR_THERMAL, IB_INTR_WAIT);
704 		VERIFY(rem_ivintr(mondo, pci_pil[CBNINTR_THERMAL]) == 0);
705 	}
706 }
707 
708 int
709 cb_register_intr(pci_t *pci_p)
710 {
711 	uint32_t mondo;
712 
713 	if (pci_p->pci_thermal_interrupt == -1)
714 		return (DDI_SUCCESS);
715 
716 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
717 	    pci_p->pci_inos[CBNINTR_THERMAL]);
718 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
719 
720 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_THERMAL],
721 	    (intrfunc)cb_thermal_intr, (caddr_t)pci_p->pci_cb_p,
722 	    NULL, NULL) == 0);
723 
724 	return (PCI_ATTACH_RETCODE(PCI_CB_OBJ, PCI_OBJ_INTR_ADD, DDI_SUCCESS));
725 }
726 
727 void
728 cb_enable_intr(pci_t *pci_p)
729 {
730 	if (pci_p->pci_thermal_interrupt != -1)
731 		cb_enable_nintr(pci_p, CBNINTR_THERMAL);
732 }
733 
734 uint64_t
735 cb_ino_to_map_pa(cb_t *cb_p, ib_ino_t ino)
736 {
737 	return (cb_p->cb_map_pa + ((ino & 0x1f) << 3));
738 }
739 
740 uint64_t
741 cb_ino_to_clr_pa(cb_t *cb_p, ib_ino_t ino)
742 {
743 	return (cb_p->cb_clr_pa + ((ino & 0x1f) << 3));
744 }
745 
746 /*
747  * Allow removal of an exported/shared thermal interrupt.
748  */
749 int
750 cb_remove_xintr(pci_t *pci_p, dev_info_t *dip, dev_info_t *rdip,
751     ib_ino_t ino, ib_mondo_t mondo)
752 {
753 	if (ino != pci_p->pci_inos[CBNINTR_THERMAL])
754 		return (DDI_FAILURE);
755 
756 	cb_disable_nintr(pci_p->pci_cb_p, CBNINTR_THERMAL, IB_INTR_WAIT);
757 	VERIFY(rem_ivintr(mondo, pci_pil[CBNINTR_THERMAL]) == 0);
758 
759 	DEBUG1(DBG_R_INTX, dip, "remove xintr %x\n", ino);
760 	return (DDI_SUCCESS);
761 }
762 
763 int
764 pci_ecc_add_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
765 {
766 	uint32_t mondo;
767 
768 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
769 	    pci_p->pci_inos[inum]);
770 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
771 
772 	VERIFY(add_ivintr(mondo, pci_pil[inum], (intrfunc)ecc_intr,
773 	    (caddr_t)eii_p, NULL, NULL) == 0);
774 
775 	return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD, DDI_SUCCESS));
776 }
777 
778 void
779 pci_ecc_rem_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
780 {
781 	uint32_t mondo;
782 
783 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
784 	    pci_p->pci_inos[inum]);
785 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
786 
787 	VERIFY(rem_ivintr(mondo, pci_pil[inum]) == 0);
788 }
789 
790 static int pbm_has_pass_1_cheerio(pci_t *pci_p);
791 
792 void
793 pbm_configure(pbm_t *pbm_p)
794 {
795 	pci_t *pci_p = pbm_p->pbm_pci_p;
796 	cb_t *cb_p = pci_p->pci_cb_p;
797 	dev_info_t *dip = pci_p->pci_dip;
798 	int instance = ddi_get_instance(dip);
799 	uint32_t mask = 1 << instance;
800 	uint64_t l;
801 	uint16_t s = 0;
802 
803 	/*
804 	 * Workarounds for hardware bugs:
805 	 *
806 	 * bus parking
807 	 *
808 	 *	Pass 2 psycho parts have a bug that requires bus
809 	 *	parking to be disabled.
810 	 *
811 	 *	Pass 1 cheerio parts have a bug which prevents them
812 	 *	from working on a PBM with bus parking enabled.
813 	 *
814 	 * rerun disable
815 	 *
816 	 *	Pass 1 and 2 psychos require that reruns be
817 	 *	enabled.
818 	 *
819 	 * retry limit
820 	 *
821 	 *	For pass 1 and pass 2 psycho parts we disable the
822 	 *	retry limit.  This is because the limit of 16 seems
823 	 *	too restrictive for devices that are children of pci
824 	 *	to pci bridges.  For pass 3 this limit will be 64.
825 	 *
826 	 * DMA write/PIO read sync
827 	 *
828 	 *	For pass 2 psycho, we disable this feature.
829 	 */
830 	l = lddphysio(cb_p->cb_base_pa + PSYCHO_CB_CONTROL_STATUS_REG_OFFSET);
831 	l &= PSYCHO_CB_CONTROL_STATUS_VER;
832 	l >>= PSYCHO_CB_CONTROL_STATUS_VER_SHIFT;
833 
834 	DEBUG2(DBG_ATTACH, dip, "cb_create: ver=%d, mask=%x\n", l, mask);
835 	pci_rerun_disable = (uint32_t)-1;
836 
837 	switch (l) {
838 	case 0:
839 		DEBUG0(DBG_ATTACH, dip, "cb_create: psycho pass 1\n");
840 		if (!pci_disable_pass1_workarounds) {
841 			if (pbm_has_pass_1_cheerio(pci_p))
842 				pci_bus_parking_enable &= ~mask;
843 			pci_rerun_disable &= ~mask;
844 			pci_retry_disable |= mask;
845 		}
846 		break;
847 	case 1:
848 		if (!pci_disable_pass2_workarounds) {
849 			pci_bus_parking_enable &= ~mask;
850 			pci_rerun_disable &= ~mask;
851 			pci_retry_disable |= mask;
852 			pci_dwsync_disable |= mask;
853 		}
854 		break;
855 	case 2:
856 		if (!pci_disable_pass3_workarounds) {
857 			pci_dwsync_disable |= mask;
858 			if (pbm_has_pass_1_cheerio(pci_p))
859 				pci_bus_parking_enable &= ~mask;
860 		}
861 		break;
862 	case 3:
863 		if (!pci_disable_plus_workarounds) {
864 			pci_dwsync_disable |= mask;
865 			if (pbm_has_pass_1_cheerio(pci_p))
866 				pci_bus_parking_enable &= ~mask;
867 		}
868 		break;
869 	default:
870 		if (!pci_disable_default_workarounds) {
871 			pci_dwsync_disable |= mask;
872 			if (pbm_has_pass_1_cheerio(pci_p))
873 				pci_bus_parking_enable &= ~mask;
874 		}
875 		break;
876 	}
877 
878 	/*
879 	 * Clear any PBM errors.
880 	 */
881 	l = (PSYCHO_PCI_AFSR_E_MASK << PSYCHO_PCI_AFSR_PE_SHIFT) |
882 	    (PSYCHO_PCI_AFSR_E_MASK << PSYCHO_PCI_AFSR_SE_SHIFT);
883 	*pbm_p->pbm_async_flt_status_reg = l;
884 
885 	/*
886 	 * Clear error bits in configuration status register.
887 	 */
888 	s = PCI_STAT_PERROR | PCI_STAT_S_PERROR |
889 	    PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB |
890 	    PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR;
891 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf status reg=%x\n", s);
892 	pbm_p->pbm_config_header->ch_status_reg = s;
893 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf status reg==%x\n",
894 	    pbm_p->pbm_config_header->ch_status_reg);
895 
896 	l = *pbm_p->pbm_ctrl_reg;	/* save control register state */
897 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg==%llx\n", l);
898 
899 	/*
900 	 * See if any SERR# signals are asserted.  We'll clear them later.
901 	 */
902 	if (l & COMMON_PCI_CTRL_SERR)
903 		cmn_err(CE_WARN, "%s%d: SERR asserted on pci bus\n",
904 		    ddi_driver_name(dip), instance);
905 
906 	/*
907 	 * Determine if PCI bus is running at 33 or 66 mhz.
908 	 */
909 	if (l & COMMON_PCI_CTRL_SPEED)
910 		pbm_p->pbm_speed = PBM_SPEED_66MHZ;
911 	else
912 		pbm_p->pbm_speed = PBM_SPEED_33MHZ;
913 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: %d mhz\n",
914 	    pbm_p->pbm_speed  == PBM_SPEED_66MHZ ? 66 : 33);
915 
916 	/*
917 	 * Enable error interrupts.
918 	 */
919 	if (pci_error_intr_enable & mask)
920 		l |= PSYCHO_PCI_CTRL_ERR_INT_EN;
921 	else
922 		l &= ~PSYCHO_PCI_CTRL_ERR_INT_EN;
923 
924 	/*
925 	 * Disable pci streaming byte errors and error interrupts.
926 	 * Disable pci streaming byte hole errors and error interrupts.
927 	pci_sbh_error_intr_enable &= ~mask;
928 	l &= ~PSYCHO_PCI_CTRL_SBH_INT_EN;
929 
930 	/*
931 	 * Enable/disable bus parking.
932 	 */
933 	if ((pci_bus_parking_enable & mask) &&
934 	    !ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
935 	    "no-bus-parking"))
936 		l |= PSYCHO_PCI_CTRL_ARB_PARK;
937 	else
938 		l &= ~PSYCHO_PCI_CTRL_ARB_PARK;
939 
940 	/*
941 	 * Enable arbitration.
942 	 */
943 	if (pci_p->pci_side == B)
944 		l = (l & ~PSYCHO_PCI_CTRL_ARB_EN_MASK) | pci_b_arb_enable;
945 	else
946 		l = (l & ~PSYCHO_PCI_CTRL_ARB_EN_MASK) | pci_a_arb_enable;
947 
948 	/*
949 	 * Make sure SERR is clear
950 	 */
951 	l |= COMMON_PCI_CTRL_SERR;
952 
953 	/*
954 	 * Make sure power management interrupt is disabled.
955 	 */
956 	l &= ~PSYCHO_PCI_CTRL_WAKEUP_EN;
957 
958 	/*
959 	 * Now finally write the control register with the appropriate value.
960 	 */
961 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
962 	*pbm_p->pbm_ctrl_reg = l;
963 
964 	/*
965 	 * Allow the diag register to be set based upon variable that
966 	 * can be configured via /etc/system.
967 	 */
968 	l = *pbm_p->pbm_diag_reg;
969 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg==%llx\n", l);
970 	if (pci_retry_disable & mask)
971 		l |= COMMON_PCI_DIAG_DIS_RETRY;
972 	if (pci_retry_enable & mask)
973 		l &= ~COMMON_PCI_DIAG_DIS_RETRY;
974 	if (pci_intsync_disable & mask)
975 		l |= COMMON_PCI_DIAG_DIS_INTSYNC;
976 	else
977 		l &= ~COMMON_PCI_DIAG_DIS_INTSYNC;
978 	if (pci_dwsync_disable & mask)
979 		l |= PSYCHO_PCI_DIAG_DIS_DWSYNC;
980 	else
981 		l &= ~PSYCHO_PCI_DIAG_DIS_DWSYNC;
982 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
983 	*pbm_p->pbm_diag_reg = l;
984 
985 	/*
986 	 * Enable SERR# and parity reporting via command register.
987 	 */
988 	s = pci_perr_enable & mask ? PCI_COMM_PARITY_DETECT : 0;
989 	s |= pci_serr_enable & mask ? PCI_COMM_SERR_ENABLE : 0;
990 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf command reg=%x\n", s);
991 	pbm_p->pbm_config_header->ch_command_reg = s;
992 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf command reg==%x\n",
993 	    pbm_p->pbm_config_header->ch_command_reg);
994 
995 	/*
996 	 * The current versions of the obp are supposed to set the latency
997 	 * timer register but do not.  Bug 1234181 is open against this
998 	 * problem.  Until this bug is fixed we check to see if the obp
999 	 * has attempted to set the latency timer register by checking
1000 	 * for the existence of a "latency-timer" property.
1001 	 */
1002 	if (pci_set_latency_timer_register) {
1003 		DEBUG1(DBG_ATTACH, dip,
1004 		    "pbm_configure: set psycho latency timer to %x\n",
1005 		    pci_latency_timer);
1006 		pbm_p->pbm_config_header->ch_latency_timer_reg =
1007 		    pci_latency_timer;
1008 	}
1009 
1010 	(void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
1011 	    (int)pbm_p->pbm_config_header->ch_latency_timer_reg);
1012 }
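/*
 * The workaround masks consulted above are ordinary module globals, so
 * they can be tuned from /etc/system.  A hypothetical example (variable
 * names as used above; the module name is assumed to be this driver):
 *
 *	set pcipsy:pci_disable_pass2_workarounds = 1
 *	set pcipsy:pci_latency_timer = 0x40
 */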
1013 
1014 uint_t
1015 pbm_disable_pci_errors(pbm_t *pbm_p)
1016 {
1017 	pci_t *pci_p = pbm_p->pbm_pci_p;
1018 	ib_t *ib_p = pci_p->pci_ib_p;
1019 
1020 	/*
1021 	 * Disable error and streaming byte hole interrupts via the
1022 	 * PBM control register.
1023 	 */
1024 	*pbm_p->pbm_ctrl_reg &=
1025 	    ~(PSYCHO_PCI_CTRL_ERR_INT_EN | PSYCHO_PCI_CTRL_SBH_INT_EN);
1026 
1027 	/*
1028 	 * Disable error interrupts via the interrupt mapping register.
1029 	 */
1030 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_PBM], IB_INTR_NOWAIT);
1031 	return (BF_NONE);
1032 }
1033 
1034 /*ARGSUSED*/
1035 uint64_t
1036 pci_sc_configure(pci_t *pci_p)
1037 {
1038 	return (0);
1039 }
1040 
1041 /*ARGSUSED*/
1042 void
1043 pci_pbm_dma_sync(pbm_t *pbm_p, ib_ino_t ino)
1044 {
1045 	uint64_t pa = pbm_p->pbm_sync_reg_pa;
1046 	if (pa)
1047 		(void) lddphysio(pa);		/* Load from Sync Register */
1048 }
1049 
1050 /*ARGSUSED*/
1051 dvma_context_t
1052 pci_iommu_get_dvma_context(iommu_t *iommu_p, dvma_addr_t dvma_pg_index)
1053 {
1054 	ASSERT(0);
1055 	return (0);
1056 }
1057 
1058 /*ARGSUSED*/
1059 void
1060 pci_iommu_free_dvma_context(iommu_t *iommu_p, dvma_context_t ctx)
1061 {
1062 	ASSERT(0);
1063 }
1064 
1065 void
1066 pci_iommu_config(iommu_t *iommu_p, uint64_t iommu_ctl, uint64_t cfgpa)
1067 {
1068 	volatile uint64_t *pbm_csr_p = (volatile uint64_t *)
1069 	    get_pbm_reg_base(iommu_p->iommu_pci_p);
1070 	volatile uint64_t pbm_ctl = *pbm_csr_p;
1071 
1072 	volatile uint64_t *iommu_ctl_p = iommu_p->iommu_ctrl_reg;
1073 	volatile uint64_t tsb_bar_val = iommu_p->iommu_tsb_paddr;
1074 	volatile uint64_t *tsb_bar_p = iommu_p->iommu_tsb_base_addr_reg;
1075 
1076 	DEBUG2(DBG_ATTACH, iommu_p->iommu_pci_p->pci_dip,
1077 	    "\npci_iommu_config: pbm_csr_p=%016llx pbm_ctl=%016llx",
1078 	    pbm_csr_p, pbm_ctl);
1079 	DEBUG2(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
1080 	    "\n\tiommu_ctl_p=%016llx iommu_ctl=%016llx",
1081 	    iommu_ctl_p, iommu_ctl);
1082 	DEBUG2(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
1083 	    "\n\tcfgpa=%016llx tsb_bar_val=%016llx", cfgpa, tsb_bar_val);
1084 
1085 	if (!cfgpa)
1086 		goto reprog;
1087 
1088 	/* disable PBM arbiters - turn off bits 0-7 */
1089 	*pbm_csr_p = (pbm_ctl >> 8) << 8;
1090 
1091 	/* make sure we own the bus by reading any child device config space */
1092 	(void) ldphysio(cfgpa); /* also flushes the prev write */
1093 reprog:
1094 	*tsb_bar_p = tsb_bar_val;
1095 	*iommu_ctl_p = iommu_ctl;
1096 
1097 	*pbm_csr_p = pbm_ctl;	/* re-enable bus arbitration */
1098 	pbm_ctl = *pbm_csr_p;	/* flush all prev writes */
1099 }
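/*
 * Summary of the sequence above (informal): (pbm_ctl >> 8) << 8 clears
 * just the low eight bits of the PBM control register, which the inline
 * comment identifies as the arbiter enables; the ldphysio() of a child
 * device's config space then ensures Psycho owns the bus before the
 * TSB base and IOMMU control registers are rewritten, and the final
 * read of the control register flushes the reprogramming writes.
 */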
1100 
1101 int
1102 pci_sc_ctx_inv(dev_info_t *dip, sc_t *sc_p, ddi_dma_impl_t *mp)
1103 {
1104 	ASSERT(0);
1105 	return (DDI_FAILURE);
1106 }
1107 
1108 void
1109 pci_cb_setup(pci_t *pci_p)
1110 {
1111 	uint64_t csr, csr_pa, pa;
1112 	cb_t *cb_p = pci_p->pci_cb_p;
1113 
1114 	/* cb_p->cb_node_id = 0; */
1115 	cb_p->cb_ign = PCI_ID_TO_IGN(pci_p->pci_id);
1116 	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[0]);
1117 	cb_p->cb_base_pa  = pa = pa >> (32 - MMU_PAGESHIFT) << 32;
1118 	cb_p->cb_map_pa = pa + PSYCHO_IB_OBIO_INTR_MAP_REG_OFFSET;
1119 	cb_p->cb_clr_pa = pa + PSYCHO_IB_OBIO_CLEAR_INTR_REG_OFFSET;
1120 	cb_p->cb_obsta_pa = pa + COMMON_IB_OBIO_INTR_STATE_DIAG_REG;
1121 
1122 	csr_pa = pa + PSYCHO_CB_CONTROL_STATUS_REG_OFFSET;
1123 	csr = lddphysio(csr_pa);
1124 
1125 	/*
1126 	 * Clear any pending address parity errors.
1127 	 */
1128 	if (csr & COMMON_CB_CONTROL_STATUS_APERR) {
1129 		csr |= COMMON_CB_CONTROL_STATUS_APERR;
1130 		cmn_err(CE_WARN, "clearing UPA address parity error\n");
1131 	}
1132 	csr |= COMMON_CB_CONTROL_STATUS_APCKEN;
1133 	csr &= ~COMMON_CB_CONTROL_STATUS_IAP;
1134 	stdphysio(csr_pa, csr);
1135 
1136 }
1137 
1138 void
1139 pci_ecc_setup(ecc_t *ecc_p)
1140 {
1141 	ecc_p->ecc_ue.ecc_errpndg_mask = 0;
1142 	ecc_p->ecc_ue.ecc_offset_mask = PSYCHO_ECC_UE_AFSR_DW_OFFSET;
1143 	ecc_p->ecc_ue.ecc_offset_shift = PSYCHO_ECC_UE_AFSR_DW_OFFSET_SHIFT;
1144 	ecc_p->ecc_ue.ecc_size_log2 = 3;
1145 
1146 	ecc_p->ecc_ce.ecc_errpndg_mask = 0;
1147 	ecc_p->ecc_ce.ecc_offset_mask = PSYCHO_ECC_CE_AFSR_DW_OFFSET;
1148 	ecc_p->ecc_ce.ecc_offset_shift = PSYCHO_ECC_CE_AFSR_DW_OFFSET_SHIFT;
1149 	ecc_p->ecc_ce.ecc_size_log2 = 3;
1150 }
1151 
1152 /*
1153  * overwrite dvma end address (only on virtual-dma systems)
1154  * initialize tsb size
1155  * reset context bits
1156  * return: IOMMU CSR bank base address (VA)
1157  */
1158 uintptr_t
1159 pci_iommu_setup(iommu_t *iommu_p)
1160 {
1161 	pci_dvma_range_prop_t *dvma_prop;
1162 	int dvma_prop_len;
1163 
1164 	pci_t *pci_p = iommu_p->iommu_pci_p;
1165 	dev_info_t *dip = pci_p->pci_dip;
1166 	uint_t tsb_size = iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie);
1167 	uint_t tsb_size_prop;
1168 
1169 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1170 	    "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
1171 	    DDI_PROP_SUCCESS)
1172 		goto tsb_done;
1173 
1174 	if (dvma_prop_len != sizeof (pci_dvma_range_prop_t)) {
1175 		cmn_err(CE_WARN, "%s%d: invalid virtual-dma property",
1176 		    ddi_driver_name(dip), ddi_get_instance(dip));
1177 		goto tsb_end;
1178 	}
1179 	iommu_p->iommu_dvma_end = dvma_prop->dvma_base +
1180 	    (dvma_prop->dvma_len - 1);
1181 	tsb_size_prop = IOMMU_BTOP(dvma_prop->dvma_len) * sizeof (uint64_t);
1182 	tsb_size = MIN(tsb_size_prop, tsb_size);
1183 tsb_end:
1184 	kmem_free(dvma_prop, dvma_prop_len);
1185 tsb_done:
1186 	iommu_p->iommu_tsb_size = iommu_tsb_size_encode(tsb_size);
1187 
1188 	if (CHIP_TYPE(pci_p) != PCI_CHIP_HUMMINGBIRD)
1189 		pci_preserve_iommu_tsb = 0;
1190 
1191 	/*
1192 	 * Psycho has no context support.
1193 	 */
1194 	iommu_p->iommu_ctx_bitmap = NULL;
1195 	iommu_p->iommu_flush_ctx_reg = NULL;
1196 	pci_use_contexts = 0;
1197 	pci_sc_use_contexts = 0;
1198 
1199 	/*
1200 	 * Determine the virtual address of the register block
1201 	 * containing the iommu control registers.
1202 	 */
1203 	return (get_reg_base(pci_p));
1204 }
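/*
 * Example TSB sizing (illustrative, assuming the 8K IOMMU page size
 * implied by IOMMU_BTOP): a "virtual-dma" length of 64MB covers
 * 0x4000000 >> 13 == 8192 pages, so tsb_size_prop == 8192 *
 * sizeof (uint64_t) == 64KB, which then caps the TSB size reserved
 * for this cookie.
 */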
1205 
1206 /*ARGSUSED*/
1207 void
1208 pci_iommu_teardown(iommu_t *iommu_p)
1209 {
1210 }
1211 
1212 /* The psycho+ PBM reg base is at 1fe.0000.2000 */
1213 uintptr_t
1214 get_pbm_reg_base(pci_t *pci_p)
1215 {
1216 	return ((uintptr_t)(pci_p->pci_address[0] +
1217 	    (pci_stream_buf_exists ? 0 : PSYCHO_PCI_PBM_REG_BASE)));
1218 }
1219 
1220 void
1221 pci_post_uninit_child(pci_t *pci_p)
1222 {
1223 }
1224 
1225 void
1226 pci_pbm_setup(pbm_t *pbm_p)
1227 {
1228 	pci_t *pci_p = pbm_p->pbm_pci_p;
1229 
1230 	/*
1231 	 * Get the base virtual address for the PBM control block.
1232 	 */
1233 	uintptr_t a = get_pbm_reg_base(pci_p);
1234 
1235 	/*
1236 	 * Get the virtual address of the PCI configuration header.
1237 	 * This should be mapped little-endian.
1238 	 */
1239 	pbm_p->pbm_config_header =
1240 	    (config_header_t *)get_config_reg_base(pci_p);
1241 
1242 	/*
1243 	 * Get the virtual addresses for control, error and diag
1244 	 * registers.
1245 	 */
1246 	pbm_p->pbm_ctrl_reg = (uint64_t *)(a + PSYCHO_PCI_CTRL_REG_OFFSET);
1247 	pbm_p->pbm_diag_reg = (uint64_t *)(a + PSYCHO_PCI_DIAG_REG_OFFSET);
1248 	pbm_p->pbm_async_flt_status_reg =
1249 	    (uint64_t *)(a + PSYCHO_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
1250 	pbm_p->pbm_async_flt_addr_reg =
1251 	    (uint64_t *)(a + PSYCHO_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
1252 
1253 	if (CHIP_TYPE(pci_p) >= PCI_CHIP_SABRE)
1254 		pbm_p->pbm_sync_reg_pa =
1255 		    pci_p->pci_cb_p->cb_base_pa + DMA_WRITE_SYNC_REG;
1256 }
1257 
1258 /*ARGSUSED*/
1259 void
1260 pci_pbm_teardown(pbm_t *pbm_p)
1261 {
1262 }
1263 
1264 void
1265 pci_sc_setup(sc_t *sc_p)
1266 {
1267 	pci_t *pci_p = sc_p->sc_pci_p;
1268 
1269 	/*
1270 	 * Determine the virtual addresses of the streaming cache
1271 	 * control/status and flush registers.
1272 	 */
1273 	uintptr_t a = get_pbm_reg_base(pci_p);
1274 	sc_p->sc_ctrl_reg = (uint64_t *)(a + PSYCHO_SC_CTRL_REG_OFFSET);
1275 	sc_p->sc_invl_reg = (uint64_t *)(a + PSYCHO_SC_INVL_REG_OFFSET);
1276 	sc_p->sc_sync_reg = (uint64_t *)(a + PSYCHO_SC_SYNC_REG_OFFSET);
1277 
1278 	/*
1279 	 * Determine the virtual addresses of the streaming cache
1280 	 * diagnostic access registers.
1281 	 */
1282 	a = get_reg_base(pci_p);
1283 	if (pci_p->pci_bus_range.lo != 0) {
1284 		sc_p->sc_data_diag_acc = (uint64_t *)
1285 		    (a + PSYCHO_SC_A_DATA_DIAG_OFFSET);
1286 		sc_p->sc_tag_diag_acc = (uint64_t *)
1287 		    (a + PSYCHO_SC_A_TAG_DIAG_OFFSET);
1288 		sc_p->sc_ltag_diag_acc = (uint64_t *)
1289 		    (a + PSYCHO_SC_A_LTAG_DIAG_OFFSET);
1290 	} else {
1291 		sc_p->sc_data_diag_acc = (uint64_t *)
1292 		    (a + PSYCHO_SC_B_DATA_DIAG_OFFSET);
1293 		sc_p->sc_tag_diag_acc = (uint64_t *)
1294 		    (a + PSYCHO_SC_B_TAG_DIAG_OFFSET);
1295 		sc_p->sc_ltag_diag_acc = (uint64_t *)
1296 		    (a + PSYCHO_SC_B_LTAG_DIAG_OFFSET);
1297 	}
1298 }
1299 
1300 int
1301 pci_get_numproxy(dev_info_t *dip)
1302 {
1303 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1304 	    "#upa-interrupt-proxies", 1));
1305 }
1306 
1307 int
1308 pci_get_portid(dev_info_t *dip)
1309 {
1310 	return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1311 	    "upa-portid", -1));
1312 }
1313 
1314 /*
1315  * pbm_has_pass_1_cheerio
1316  *
1317  *
1318  * Given a PBM soft state pointer, this routine scans its child nodes
1319  * to see if one is a pass 1 cheerio.
1320  *
1321  * return value: 1 if pass 1 cheerio is found, 0 otherwise
1322  */
1323 static int
1324 pbm_has_pass_1_cheerio(pci_t *pci_p)
1325 {
1326 	dev_info_t *cdip;
1327 	int found = 0;
1328 	char *s;
1329 	int rev;
1330 
1331 	cdip = ddi_get_child(pci_p->pci_dip);
1332 	while (cdip != NULL && found == 0) {
1333 		s = ddi_get_name(cdip);
1334 		if (strcmp(s, "ebus") == 0 || strcmp(s, "pci108e,1000") == 0) {
1335 			rev =
1336 			    ddi_getprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
1337 			    "revision-id", 0);
1338 			if (rev == 0)
1339 				found = 1;
1340 		}
1341 		cdip = ddi_get_next_sibling(cdip);
1342 	}
1343 	return (found);
1344 }
1345 
1346 /*
1347  * Psycho Performance Events.
1348  */
1349 pci_kev_mask_t
1350 psycho_pci_events[] = {
1351 	{"dvma_stream_rd_a", 0x0},	{"dvma_stream_wr_a", 0x1},
1352 	{"dvma_const_rd_a", 0x2},	{"dvma_const_wr_a", 0x3},
1353 	{"dvma_stream_buf_mis_a", 0x4}, {"dvma_cycles_a", 0x5},
1354 	{"dvma_wd_xfr_a", 0x6},		{"pio_cycles_a", 0x7},
1355 	{"dvma_stream_rd_b", 0x8},	{"dvma_stream_wr_b", 0x9},
1356 	{"dvma_const_rd_b", 0xa},	{"dvma_const_wr_b", 0xb},
1357 	{"dvma_stream_buf_mis_b", 0xc}, {"dvma_cycles_b", 0xd},
1358 	{"dvma_wd_xfr_b", 0xe},		{"pio_cycles_b", 0xf},
1359 	{"dvma_tlb_misses", 0x10},	{"interrupts", 0x11},
1360 	{"upa_inter_nack", 0x12},	{"pio_reads", 0x13},
1361 	{"pio_writes", 0x14},		{"merge_buffer", 0x15},
1362 	{"dma_tbwalk_a", 0x16},		{"dma_stc_a", 0x17},
1363 	{"dma_tbwalk_b", 0x18},		{"dma_stc_b", 0x19},
1364 	{"clear_pic", 0x1f}
1365 };
1366 
1367 /*
1368  * Create the picN kstat's.
1369  */
1370 void
1371 pci_kstat_init()
1372 {
1373 	pci_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
1374 	    KM_NOSLEEP);
1375 
1376 	if (pci_name_kstat == NULL) {
1377 		cmn_err(CE_WARN, "pcipsy: no space for kstat\n");
1378 	} else {
1379 		pci_name_kstat->pic_no_evs =
1380 		    sizeof (psycho_pci_events) / sizeof (pci_kev_mask_t);
1381 		pci_name_kstat->pic_shift[0] = PSYCHO_SHIFT_PIC0;
1382 		pci_name_kstat->pic_shift[1] = PSYCHO_SHIFT_PIC1;
1383 		pci_create_name_kstat("pcip",
1384 		    pci_name_kstat, psycho_pci_events);
1385 	}
1386 }
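/*
 * These "pic" name kstats pair event names with PCR encodings; the
 * counter values themselves come from the upstream kstat created below.
 * A possible way to program the pics from userland (assuming
 * busstat(1M) exposes the instances as pcipN):
 *
 *	busstat -w pcip0,pic0=dvma_stream_rd_a,pic1=pio_cycles_a
 */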
1387 
1388 /*
1389  * Called from _fini()
1390  */
1391 void
1392 pci_kstat_fini()
1393 {
1394 	if (pci_name_kstat != NULL) {
1395 		pci_delete_name_kstat(pci_name_kstat);
1396 		kmem_free(pci_name_kstat, sizeof (pci_ksinfo_t));
1397 		pci_name_kstat = NULL;
1398 	}
1399 }
1400 
1401 /* ARGSUSED */
1402 void
1403 pci_add_pci_kstat(pci_t *pci_p)
1404 {
1405 }
1406 
1407 /* ARGSUSED */
1408 void
1409 pci_rem_pci_kstat(pci_t *pci_p)
1410 {
1411 }
1412 
1413 /*
1414  * Create the performance 'counters' kstat.
1415  */
1416 void
1417 pci_add_upstream_kstat(pci_t *pci_p)
1418 {
1419 	pci_common_t	*cmn_p = pci_p->pci_common_p;
1420 	pci_cntr_pa_t	*cntr_pa_p = &cmn_p->pci_cmn_uks_pa;
1421 	uint64_t regbase = va_to_pa((void *)get_reg_base(pci_p));
1422 
1423 	cntr_pa_p->pcr_pa = regbase + PSYCHO_PERF_PCR_OFFSET;
1424 	cntr_pa_p->pic_pa = regbase + PSYCHO_PERF_PIC_OFFSET;
1425 	cmn_p->pci_common_uksp = pci_create_cntr_kstat(pci_p, "pcip",
1426 	    NUM_OF_PICS, pci_cntr_kstat_pa_update, cntr_pa_p);
1427 }
1428 
1429 /*
1430  * Extract the drivers binding name to identify which chip
1431  * Extract the driver's binding name to identify which chip
1432  * entry should be added here to identify the device if needed.  If a device
1433  * isn't added, the identity defaults to PCI_CHIP_UNIDENTIFIED.
1434  */
1435 static uint32_t
1436 pci_identity_init(pci_t *pci_p)
1437 {
1438 	dev_info_t *dip = pci_p->pci_dip;
1439 	char *name = ddi_binding_name(dip);
1440 
1441 	if (strcmp(name, "pci108e,8000") == 0)
1442 		return (CHIP_ID(PCI_CHIP_PSYCHO, 0x00, 0x00));
1443 	if (strcmp(name, "pci108e,a000") == 0)
1444 		return (CHIP_ID(PCI_CHIP_SABRE, 0x00, 0x00));
1445 	if (strcmp(name, "pci108e,a001") == 0)
1446 		return (CHIP_ID(PCI_CHIP_HUMMINGBIRD, 0x00, 0x00));
1447 	cmn_err(CE_CONT, "?%s%d: using default chip identity\n",
1448 	    ddi_driver_name(dip), ddi_get_instance(dip));
1449 	return (CHIP_ID(PCI_CHIP_PSYCHO, 0x00, 0x00));
1450 }
1451 
1452 /*ARGSUSED*/
1453 void
1454 pci_post_init_child(pci_t *pci_p, dev_info_t *child)
1455 {
1456 }
1457 
1458 /*ARGSUSED*/
1459 int
1460 pci_pbm_add_intr(pci_t *pci_p)
1461 {
1462 	return (DDI_SUCCESS);
1463 }
1464 
1465 /*ARGSUSED*/
1466 void
1467 pci_pbm_rem_intr(pci_t *pci_p)
1468 {
1469 }
1470 
1471 /*ARGSUSED*/
1472 void
1473 pci_pbm_suspend(pci_t *pci_p)
1474 {
1475 }
1476 
1477 /*ARGSUSED*/
1478 void
1479 pci_pbm_resume(pci_t *pci_p)
1480 {
1481 }
1482 
1483 /*
1484  * pcipsy error handling 101:
1485  *
1486  * The various functions below are responsible for error handling. Given
1487  * a particular error, they must gather the appropriate state, report all
1488  * errors with correct payload, and attempt recovery where ever possible.
1489  *
1490  * Recovery in the context of this driver is being able to notify a leaf device
1491  * of the failed transaction. This leaf device may either be the master or
1492  * target for this transaction and may have already received an error
1493  * notification via a PCI interrupt. Notification is done via DMA and access
1494  * handles. If we capture an address for the transaction then we can map it
1495  * to a handle(if the leaf device is fma-compliant) and fault the handle as
1496  * to a handle (if the leaf device is FMA-compliant) and fault the handle as
1497  *
1498  * The hardware can either interrupt or trap upon detection of an error, in
1499  * some rare cases it also causes a fatal reset.
1500  *
1501  * pbm_error_intr() and ecc_intr() are responsible for PCI Block Module
1502  * errors(generic PCI + bridge specific) and ECC errors, respectively. They
1503  * errors (generic PCI + bridge-specific) and ECC errors, respectively. They
1504  * pci_ecc.c. To support error handling certain chip specific handlers
1505  * must exist and they are defined below.
1506  *
1507  * cpu_deferred_error() and cpu_async_error(), handle the traps that may
1508  * have originated from IO space. They call into the registered IO callbacks
1509  * to report and handle errors that may have caused the trap.
1510  *
1511  * pci_pbm_err_handler() is called by pbm_error_intr() or pci_err_callback()
1512  * (generic fma callback for pcipsy/pcisch, pci_fm.c). pci_err_callback() is
1513  * called when the CPU has trapped because of a possible IO error (TO/BERR/UE).
1514  * It will call pci_pbm_err_handler() to report and handle all PCI/PBM/IOMMU
1515  * related errors which are detected by the chip.
1516  *
1517  * pci_pbm_err_handler() calls a generic interface pbm_afsr_report()(pci_pbm.c)
1518  * to report the pbm specific errors and attempt to map the failed address
1519  * (if captured) to a device instance. pbm_afsr_report() calls a chip specific
1520  * interface to interpret the afsr bits pci_pbm_classify()(pcisch.c/pcipsy.c).
1521  *
1522  * ecc_err_handler()(pci_ecc.c) also calls a chip specific interface to
1523  * interpret the afsr, pci_ecc_classify(). ecc_err_handler() also calls
1524  * pci_pbm_err_handler() and ndi_fm_handler_dispatch() to log any related
1525  * errors.
1526  *
1527  * To make sure that the trap code and the interrupt code are not going
1528  * to step on each others toes we have a per chip pci_fm_mutex. This also
1529  * to step on each other's toes we have a per-chip pci_fm_mutex. This also
1530  * that we do not cause a subsequent trap that causes us to hang.
1531  *
1532  * The attempt to commonize code was meant to keep in line with the current
1533  * pci driver implementation and it was not meant to confuse. If you are
1534  * confused then don't worry, I was too.
1535  */
1536 
1537 /*
1538  * For Psycho, a UE is always fatal, except if it is a translation error on a
1539  * Darwin platform.  We ignore these because they do not cause data corruption.
1540  */
1541 int
1542 ecc_ue_is_fatal(struct async_flt *ecc)
1543 {
1544 	return (((uint_t)(ecc->flt_stat >> SABRE_UE_AFSR_PDTE_SHIFT) &
1545 	    SABRE_UE_AFSR_E_PDTE) == 0);
1546 }
1547 
1548 /*
1549  * pci_ecc_classify, called by ecc_handler to classify ecc errors
1550  * and determine if we should panic or not.
1551  *
1552  * Note that it is possible yet extremely rare for more than one
1553  * primary error bit to be set.  We classify the ecc error based
1554  * on the first set bit that is found.
1555  */
1556 void
1557 pci_ecc_classify(uint64_t err, ecc_errstate_t *ecc_err_p)
1558 {
1559 	struct async_flt *ecc = &ecc_err_p->ecc_aflt;
1560 	pci_common_t *cmn_p = ecc_err_p->ecc_ii_p.ecc_p->ecc_pci_cmn_p;
1561 
1562 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1563 
1564 	ecc_err_p->ecc_bridge_type = PCI_BRIDGE_TYPE(cmn_p);
1565 	/*
1566 	 * Get the parent bus id that caused the error.
1567 	 */
1568 	ecc_err_p->ecc_dev_id = (ecc_err_p->ecc_afsr & PSYCHO_ECC_UE_AFSR_ID)
1569 	    >> PSYCHO_ECC_UE_AFSR_ID_SHIFT;
1570 	/*
1571 	 * Determine the doubleword offset of the error.
1572 	 */
1573 	ecc_err_p->ecc_dw_offset = (ecc_err_p->ecc_afsr &
1574 	    PSYCHO_ECC_UE_AFSR_DW_OFFSET)
1575 	    >> PSYCHO_ECC_UE_AFSR_DW_OFFSET_SHIFT;
1576 	/*
1577 	 * Determine the primary error type.
1578 	 */
1579 	if (err & COMMON_ECC_AFSR_E_PIO) {
1580 		if (ecc_err_p->ecc_ii_p.ecc_type == CBNINTR_UE) {
1581 			if (ecc_err_p->ecc_pri) {
1582 				ecc->flt_erpt_class = PCI_ECC_PIO_UE;
1583 			} else {
1584 				ecc->flt_erpt_class = PCI_ECC_SEC_PIO_UE;
1585 			}
1586 			ecc->flt_panic = ecc_ue_is_fatal(&ecc_err_p->ecc_aflt);
1587 		} else {
1588 			ecc->flt_erpt_class = ecc_err_p->ecc_pri ?
1589 			    PCI_ECC_PIO_CE : PCI_ECC_SEC_PIO_CE;
1590 			return;
1591 		}
1592 	} else if (err & COMMON_ECC_AFSR_E_DRD) {
1593 		if (ecc_err_p->ecc_ii_p.ecc_type == CBNINTR_UE) {
1594 			if (ecc_err_p->ecc_pri) {
1595 				ecc->flt_erpt_class = PCI_ECC_DRD_UE;
1596 			} else {
1597 				ecc->flt_erpt_class = PCI_ECC_SEC_DRD_UE;
1598 			}
1599 			ecc->flt_panic = ecc_ue_is_fatal(&ecc_err_p->ecc_aflt);
1600 		} else {
1601 			ecc->flt_erpt_class = ecc_err_p->ecc_pri ?
1602 			    PCI_ECC_DRD_CE : PCI_ECC_SEC_DRD_CE;
1603 			return;
1604 		}
1605 	} else if (err & COMMON_ECC_AFSR_E_DWR) {
1606 		if (ecc_err_p->ecc_ii_p.ecc_type == CBNINTR_UE) {
1607 			if (ecc_err_p->ecc_pri) {
1608 				ecc->flt_erpt_class = PCI_ECC_DWR_UE;
1609 			} else {
1610 				ecc->flt_erpt_class = PCI_ECC_SEC_DWR_UE;
1611 			}
1612 			ecc->flt_panic = ecc_ue_is_fatal(&ecc_err_p->ecc_aflt);
1613 		} else {
1614 			ecc->flt_erpt_class = ecc_err_p->ecc_pri ?
1615 			    PCI_ECC_DWR_CE : PCI_ECC_SEC_DWR_CE;
1616 			return;
1617 		}
1618 	}
1619 }
1620 
1621 ushort_t
1622 pci_ecc_get_synd(uint64_t afsr)
1623 {
1624 	return ((ushort_t)((afsr & PSYCHO_ECC_CE_AFSR_SYND)
1625 	    >> PSYCHO_ECC_CE_AFSR_SYND_SHIFT));
1626 }
1627 
1628 /*
1629  * pci_pbm_classify, called by pbm_afsr_report to classify piow afsr.
1630  */
1631 int
1632 pci_pbm_classify(pbm_errstate_t *pbm_err_p)
1633 {
1634 	uint32_t e;
1635 	int nerr = 0;
1636 	char **tmp_class;
1637 
1638 	if (pbm_err_p->pbm_pri) {
1639 		tmp_class = &pbm_err_p->pbm_pci.pci_err_class;
1640 		e = PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr);
1641 		pbm_err_p->pbm_log = FM_LOG_PCI;
1642 	} else {
1643 		tmp_class = &pbm_err_p->pbm_err_class;
1644 		e = PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
1645 		pbm_err_p->pbm_log = FM_LOG_PBM;
1646 	}
1647 
1648 	if (e & PSYCHO_PCI_AFSR_E_MA) {
1649 		*tmp_class = pbm_err_p->pbm_pri ? PCI_MA : PCI_SEC_MA;
1650 		nerr++;
1651 	}
1652 	if (e & PSYCHO_PCI_AFSR_E_TA) {
1653 		*tmp_class = pbm_err_p->pbm_pri ? PCI_REC_TA : PCI_SEC_REC_TA;
1654 		nerr++;
1655 	}
1656 	if (e & PSYCHO_PCI_AFSR_E_RTRY) {
1657 		pbm_err_p->pbm_err_class = pbm_err_p->pbm_pri ?
1658 		    PCI_PBM_RETRY : PCI_SEC_PBM_RETRY;
1659 		pbm_err_p->pbm_log = FM_LOG_PBM;
1660 		nerr++;
1661 	}
1662 	if (e & PSYCHO_PCI_AFSR_E_PERR) {
1663 		*tmp_class = pbm_err_p->pbm_pri ? PCI_MDPE : PCI_SEC_MDPE;
1664 		nerr++;
1665 	}
1666 	return (nerr);
1667 }
1668 
1669 /*
1670  * Function used to clear PBM/PCI/IOMMU error state after error handling
1671  * is complete. Only error bits which have been logged are cleared. Called by
1672  * pci_pbm_err_handler and pci_bus_exit.
1673  */
1674 static void
1675 pci_clear_error(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
1676 {
1677 	pbm_t *pbm_p = pci_p->pci_pbm_p;
1678 
1679 	ASSERT(MUTEX_HELD(&pbm_p->pbm_pci_p->pci_common_p->pci_fm_mutex));
1680 
1681 	*pbm_p->pbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
1682 	*pbm_p->pbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
1683 	pbm_p->pbm_config_header->ch_status_reg =
1684 	    pbm_err_p->pbm_pci.pci_cfg_stat;
1685 }
1686 
1687 /*ARGSUSED*/
1688 int
1689 pci_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
1690     const void *impl_data, int caller)
1691 {
1692 	int fatal = 0;
1693 	int nonfatal = 0;
1694 	int unknown = 0;
1695 	uint32_t prierr, secerr;
1696 	pbm_errstate_t pbm_err;
1697 	char buf[FM_MAX_CLASS];
1698 	pci_t *pci_p = (pci_t *)impl_data;
1699 	pbm_t *pbm_p = pci_p->pci_pbm_p;
1700 	int ret = 0;
1701 	uint64_t pbm_ctl_stat;
1702 	uint16_t pci_cfg_stat;
1703 
1704 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
1705 	pci_pbm_errstate_get(pci_p, &pbm_err);
1706 
1707 	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
1708 	    fm_ena_generate(0, FM_ENA_FMT1);
1709 
1710 	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
1711 	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);
1712 
1713 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
1714 		if (caller == PCI_TRAP_CALL) {
1715 			/*
1716 			 * For ddi_caut_get treat all events as
1717 			 * nonfatal. The trampoline will set
1718 			 * err_ena = 0, err_status = NONFATAL. We only
1719 			 * really call this function so that pci_clear_error()
1720 			 * and ndi_fm_handler_dispatch() will get called.
1721 			 */
1722 			derr->fme_status = DDI_FM_NONFATAL;
1723 			nonfatal++;
1724 			goto done;
1725 		} else {
1726 			/*
1727 			 * For ddi_caut_put treat all events as nonfatal. Here
1728 			 * we have the handle and can call ndi_fm_acc_err_set().
1729 			 */
1730 			derr->fme_status = DDI_FM_NONFATAL;
1731 			ndi_fm_acc_err_set(pbm_p->pbm_excl_handle, derr);
1732 			nonfatal++;
1733 			goto done;
1734 		}
1735 	} else if (derr->fme_flag == DDI_FM_ERR_PEEK) {
1736 		/*
1737 		 * For ddi_peek treat all events as nonfatal. We only
1738 		 * really call this function so that pci_clear_error()
1739 		 * and ndi_fm_handler_dispatch() will get called.
1740 		 */
1741 		nonfatal++;
1742 		goto done;
1743 	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
1744 		/*
1745 		 * For ddi_poke we can treat as nonfatal if the
1746 		 * following conditions are met :
1747 		 * 1. Make sure only primary error is MA/TA
1748 		 * 2. Make sure no secondary error
1749 		 * 3. check pci config header stat reg to see MA/TA is
1750 		 *    logged. We cannot verify only MA/TA is recorded
1751 		 *    since it gets much more complicated when a
1752 		 *    PCI-to-PCI bridge is present.
1753 		 */
1754 		if ((prierr == PSYCHO_PCI_AFSR_E_MA) && !secerr &&
1755 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_MAST_AB)) {
1756 			nonfatal++;
1757 			goto done;
1758 		}
1759 		if ((prierr == PSYCHO_PCI_AFSR_E_TA) && !secerr &&
1760 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_TARG_AB)) {
1761 			nonfatal++;
1762 			goto done;
1763 		}
1764 	}
1765 
1766 	if (prierr || secerr) {
1767 		ret = pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
1768 		if (ret == DDI_FM_FATAL)
1769 			fatal++;
1770 		else
1771 			nonfatal++;
1772 	}
1773 
1774 	ret = pci_cfg_report(dip, derr, &pbm_err.pbm_pci, caller, prierr);
1775 	if (ret == DDI_FM_FATAL)
1776 		fatal++;
1777 	else if (ret == DDI_FM_NONFATAL)
1778 		nonfatal++;
1779 
1780 	pbm_ctl_stat = pbm_err.pbm_ctl_stat;
1781 	pci_cfg_stat = pbm_err.pbm_pci.pci_cfg_stat;
1782 
1783 	/*
1784 	 * PBM Received System Error - During any transaction, or
1785 	 * at any point on the bus, some device may detect a critical
1786 	 * error and signal a system error to the system.
1787 	 */
1788 	if (pbm_ctl_stat & COMMON_PCI_CTRL_SERR) {
1789 		/*
1790 		 * may be expected (master abort from pci-pci bridge during
1791 		 * poke will generate SERR)
1792 		 */
1793 		if (derr->fme_flag != DDI_FM_ERR_POKE) {
1794 			pbm_err.pbm_pci.pci_err_class = PCI_REC_SERR;
1795 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1796 			    PCI_ERROR_SUBCLASS, pbm_err.pbm_pci.pci_err_class);
1797 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1798 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1799 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pci_cfg_stat,
1800 			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
1801 			    pbm_err.pbm_pci.pci_cfg_comm, PCI_PA,
1802 			    DATA_TYPE_UINT64, (uint64_t)0, NULL);
1803 		}
1804 		unknown++;
1805 	}
1806 
1807 	/* Streaming Byte Hole Error */
1808 	if (pbm_ctl_stat & COMMON_PCI_CTRL_SBH_ERR) {
1809 		if (pci_panic_on_sbh_errors)
1810 			fatal++;
1811 		else
1812 			nonfatal++;
1813 		pbm_err.pbm_err_class = PCI_PSY_SBH;
1814 		pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
1815 	}
1816 done:
1817 	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
1818 	if (ret == DDI_FM_FATAL) {
1819 		fatal++;
1820 	} else if (ret == DDI_FM_NONFATAL) {
1821 		nonfatal++;
1822 	} else if (ret == DDI_FM_UNKNOWN) {
1823 		unknown++;
1824 	}
1825 
1826 	/*
1827 	 * rserr not claimed as nonfatal by a child is treated as fatal
1828 	 */
1829 	if (unknown && !nonfatal && !fatal)
1830 		fatal++;
1831 
1832 	/* Cleanup and reset error bits */
1833 	pci_clear_error(pci_p, &pbm_err);
1834 
1835 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1836 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1837 }
1838 
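/*
 * Severity folding above, by example: the counters combine with
 * fatal > nonfatal > unknown precedence, so fatal = 0, nonfatal = 2,
 * unknown = 1 yields DDI_FM_NONFATAL, while any nonzero fatal count
 * yields DDI_FM_FATAL regardless of the other counters. An SERR that
 * no child claims leaves unknown as the only nonzero counter, which
 * the check before the return promotes to fatal.
 */
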
/*
 * Check for latched PBM/PCI error state. Returns nonzero if the pci
 * config header status register, the PBM control/status register, or
 * the primary error bits of the PBM AFSR show an outstanding error.
 */
int
pci_check_error(pci_t *pci_p)
{
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	uint16_t pci_cfg_stat;
	uint64_t pbm_ctl_stat, pbm_afsr;

	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));

	pci_cfg_stat = pbm_p->pbm_config_header->ch_status_reg;
	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;

	if ((pci_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
	    PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
	    PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
	    (pbm_ctl_stat & (COMMON_PCI_CTRL_SBH_ERR |
	    COMMON_PCI_CTRL_SERR)) ||
	    (PBM_AFSR_TO_PRIERR(pbm_afsr)))
		return (1);

	return (0);
}

/*
 * Function used to gather PBM/PCI error state for the
 * pci_pbm_err_handler. This function must be called while pci_fm_mutex
 * is held.
 */
static void
pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
{
	pbm_t *pbm_p = pci_p->pci_pbm_p;

	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
	bzero(pbm_err_p, sizeof (pbm_errstate_t));

	/*
	 * Capture all pbm error state for later logging
	 */
	pbm_err_p->pbm_bridge_type = PCI_BRIDGE_TYPE(pci_p->pci_common_p);
	pbm_err_p->pbm_pci.pci_cfg_stat =
	    pbm_p->pbm_config_header->ch_status_reg;
	pbm_err_p->pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
	pbm_err_p->pbm_pci.pci_cfg_comm =
	    pbm_p->pbm_config_header->ch_command_reg;
	pbm_err_p->pbm_afsr = *pbm_p->pbm_async_flt_status_reg;

	/* Read the AFAR once so both consumers see the same capture. */
	pbm_err_p->pbm_afar = *pbm_p->pbm_async_flt_addr_reg;
	pbm_err_p->pbm_pci.pci_pa = pbm_err_p->pbm_afar;
}

void
pbm_clear_error(pbm_t *pbm_p)
{
	uint64_t pbm_afsr, pbm_ctl_stat;

	/*
	 * For poke() support - called from POKE_FLUSH. Spin, waiting
	 * for MA, TA or SERR to be cleared by a pbm_error_intr().
	 * We have to wait for SERR too, in case the device is beyond
	 * a pci-pci bridge.
	 */
	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
	while (((pbm_afsr >> PSYCHO_PCI_AFSR_PE_SHIFT) &
	    (PSYCHO_PCI_AFSR_E_MA | PSYCHO_PCI_AFSR_E_TA)) ||
	    (pbm_ctl_stat & COMMON_PCI_CTRL_SERR)) {
		pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
		pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
	}
}

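/*
 * Sketch of the protocol this supports (the exact poke path is an
 * assumption here): a poke issues its store, flushes it via POKE_FLUSH,
 * and then relies on the loop above to block until pbm_error_intr()
 * has fielded and cleared any MA/TA/SERR the store provoked, so the
 * latched error state is quiescent before the next access proceeds.
 */
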
/*ARGSUSED*/
void
pci_format_addr(dev_info_t *dip, uint64_t *afar, uint64_t afsr)
{
	/*
	 * For Psycho the full address is stored in hardware, so
	 * there is no need to format it.
	 */
}

/*
 * Bus quiesce/unquiesce is not supported on Psycho.
 */
/*ARGSUSED*/
int
pci_bus_quiesce(pci_t *pci_p, dev_info_t *dip, void *result)
{
	return (DDI_FAILURE);
}

/*ARGSUSED*/
int
pci_bus_unquiesce(pci_t *pci_p, dev_info_t *dip, void *result)
{
	return (DDI_FAILURE);
}

int
pci_reloc_getkey(void)
{
	return (0x100);
}

void
pci_vmem_free(iommu_t *iommu_p, ddi_dma_impl_t *mp, void *dvma_addr,
    size_t npages)
{
	pci_vmem_do_free(iommu_p, dvma_addr, npages,
	    (mp->dmai_flags & DMAI_FLAGS_VMEMCACHE));
}

/*
 * NOTE: This call is only used by legacy systems (e.g., the E250 and
 * E450) that must unregister the pci driver's thermal interrupt handler
 * before they can register their own.
 */
void
pci_thermal_rem_intr(dev_info_t *rdip, uint_t inum)
{
	pci_t		*pci_p;
	dev_info_t	*pdip;
	uint32_t	dev_mondo, pci_mondo;
	int		instance;

	for (pdip = ddi_get_parent(rdip); pdip; pdip = ddi_get_parent(pdip)) {
		if (strcmp(ddi_driver_name(pdip), "pcipsy") == 0)
			break;
	}

	if (!pdip) {
		cmn_err(CE_WARN, "pci_thermal_rem_intr() no pcipsy parent");
		return;
	}

	instance = ddi_get_instance(pdip);
	pci_p = get_pci_soft_state(instance);

	/* Calculate the requesting device's mondo */
	dev_mondo = pci_xlate_intr(pci_p->pci_dip, rdip, pci_p->pci_ib_p,
	    IB_MONDO_TO_INO(i_ddi_get_inum(rdip, inum)));

	/* Get pci's thermal mondo */
	pci_mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
	    pci_p->pci_inos[CBNINTR_THERMAL]);
	pci_mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, pci_mondo);

	if (pci_mondo == dev_mondo) {
		DEBUG2(DBG_ATTACH, rdip, "pci_thermal_rem_intr unregistered "
		    "for dip=%s%d:", ddi_driver_name(rdip),
		    ddi_get_instance(rdip));
		VERIFY(rem_ivintr(pci_mondo, pci_pil[CBNINTR_THERMAL]) == 0);
	}
}

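/*
 * Worked example of the mondo comparison above (field widths are
 * illustrative; PCI_INO_BITS defines the real INO width): with
 * cb_ign == 0x3, PCI_INO_BITS == 6 and a thermal INO of 0x20,
 *
 *	pci_mondo = (0x3 << 6) | 0x20 == 0xE0
 *
 * and the handler is removed only when the requesting device's
 * translated mondo matches this value.
 */
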
/*
 * pci_iommu_bypass_end_configure
 *
 * Report the end of the DVMA/iommu bypass address range, supporting the
 * 40-bit bus width to UPA in DVMA and iommu bypass transfers.
 */

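/*
 * Illustrative note (the actual value of UPA_IOMMU_BYPASS_END is
 * defined in a header and not shown here): a bypass window spanning
 * the full 40-bit UPA range would end at (1ULL << 40) - 1, i.e.
 * 0xFFFFFFFFFFull.
 */
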
dma_bypass_addr_t
pci_iommu_bypass_end_configure(void)
{
	return ((dma_bypass_addr_t)UPA_IOMMU_BYPASS_END);
}