1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Schizo specifics implementation:
27  *	interrupt mapping register
28  *	PBM configuration
29  *	ECC and PBM error handling
30  *	Iommu mapping handling
31  *	Streaming Cache flushing
32  */
33 
34 #include <sys/types.h>
35 #include <sys/kmem.h>
36 #include <sys/sysmacros.h>
37 #include <sys/async.h>
38 #include <sys/systm.h>
39 #include <sys/ivintr.h>
40 #include <sys/machsystm.h>	/* lddphys(), intr_dist_add() */
42 #include <sys/iommutsb.h>
43 #include <sys/promif.h>		/* prom_printf */
44 #include <sys/map.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/spl.h>
49 #include <sys/fm/util.h>
50 #include <sys/ddi_impldefs.h>
51 #include <sys/fm/protocol.h>
52 #include <sys/fm/io/sun4upci.h>
53 #include <sys/fm/io/ddi.h>
54 #include <sys/fm/io/pci.h>
55 #include <sys/pci/pci_obj.h>
56 #include <sys/pci/pcisch.h>
57 #include <sys/pci/pcisch_asm.h>
58 #include <sys/x_call.h>		/* XCALL_PIL */
59 
60 /*LINTLIBRARY*/
61 
62 extern uint8_t ldstub(uint8_t *);
63 
64 #define	IOMMU_CTX_BITMAP_SIZE	(1 << (12 - 3))
65 static void iommu_ctx_free(iommu_t *);
66 static int iommu_tlb_scrub(iommu_t *, int);
67 static uint32_t pci_identity_init(pci_t *);
68 
69 static void pci_cb_clear_error(cb_t *, cb_errstate_t *);
70 static void pci_clear_error(pci_t *, pbm_errstate_t *);
72 static int pci_intr_setup(pci_t *pci_p);
73 static void iommu_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
74 static void cb_ereport_post(dev_info_t *, uint64_t, cb_errstate_t *);
75 static void pcix_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
76 static void pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar,
77 		ecc_region_t region);
78 static void pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p);
79 static void tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p,
80 		dvma_addr_t dvma_pg, int npages);
81 
82 static int pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p);
83 
84 static pci_ksinfo_t	*pci_name_kstat;
85 static pci_ksinfo_t	*saf_name_kstat;
86 
87 extern void pcix_set_cmd_reg(dev_info_t *child, uint16_t value);
88 
89 /* called by pci_attach() DDI_ATTACH to initialize pci objects */
90 int
91 pci_obj_setup(pci_t *pci_p)
92 {
93 	pci_common_t *cmn_p;
94 	uint32_t chip_id = pci_identity_init(pci_p);
95 	uint32_t cmn_id = PCI_CMN_ID(ID_CHIP_TYPE(chip_id), pci_p->pci_id);
96 	int ret;
97 
98 	/* Perform allocations first to avoid delicate unwinding. */
99 	if (pci_alloc_tsb(pci_p) != DDI_SUCCESS)
100 		return (DDI_FAILURE);
101 
102 	mutex_enter(&pci_global_mutex);
103 	cmn_p = get_pci_common_soft_state(cmn_id);
104 	if (cmn_p == NULL) {
105 		if (alloc_pci_common_soft_state(cmn_id) != DDI_SUCCESS) {
106 			mutex_exit(&pci_global_mutex);
107 			pci_free_tsb(pci_p);
108 			return (DDI_FAILURE);
109 		}
110 		cmn_p = get_pci_common_soft_state(cmn_id);
111 		cmn_p->pci_common_id = cmn_id;
112 		cmn_p->pci_common_tsb_cookie = IOMMU_TSB_COOKIE_NONE;
113 	}
114 
115 	ASSERT((pci_p->pci_side == 0) || (pci_p->pci_side == 1));
116 	if (cmn_p->pci_p[pci_p->pci_side]) {
117 		/* second side attach */
118 		pci_p->pci_side = PCI_OTHER_SIDE(pci_p->pci_side);
119 		ASSERT(cmn_p->pci_p[pci_p->pci_side] == NULL);
120 	}
121 
122 	cmn_p->pci_p[pci_p->pci_side] = pci_p;
123 	pci_p->pci_common_p = cmn_p;
124 
125 	if (cmn_p->pci_common_refcnt == 0)
126 		cmn_p->pci_chip_id = chip_id;
127 
128 	ib_create(pci_p);
129 
130 	/*
131 	 * The initialization of cb internal interrupts depends on ib
132 	 */
133 	if (cmn_p->pci_common_refcnt == 0) {
134 		cb_create(pci_p);
135 		cmn_p->pci_common_cb_p = pci_p->pci_cb_p;
136 	} else
137 		pci_p->pci_cb_p = cmn_p->pci_common_cb_p;
138 
139 	iommu_create(pci_p);
140 
141 	if (cmn_p->pci_common_refcnt == 0) {
142 		ecc_create(pci_p);
143 		cmn_p->pci_common_ecc_p = pci_p->pci_ecc_p;
144 	} else
145 		pci_p->pci_ecc_p = cmn_p->pci_common_ecc_p;
146 
147 	pbm_create(pci_p);
148 	sc_create(pci_p);
149 
150 	pci_fm_create(pci_p);
151 
152 	if ((ret = pci_intr_setup(pci_p)) != DDI_SUCCESS)
153 		goto done;
154 
155 	pci_kstat_create(pci_p);
156 
157 	cmn_p->pci_common_attachcnt++;
158 	cmn_p->pci_common_refcnt++;
159 done:
160 	mutex_exit(&pci_global_mutex);
161 	if (ret != DDI_SUCCESS)
162 		cmn_err(CE_WARN, "pci_obj_setup failed %x", ret);
163 	return (ret);
164 }
165 
166 /* called by pci_detach() DDI_DETACH to destroy pci objects */
167 void
168 pci_obj_destroy(pci_t *pci_p)
169 {
170 	pci_common_t *cmn_p;
171 	mutex_enter(&pci_global_mutex);
172 
173 	cmn_p = pci_p->pci_common_p;
174 	cmn_p->pci_common_refcnt--;
175 	cmn_p->pci_common_attachcnt--;
176 
177 	pci_kstat_destroy(pci_p);
178 
179 	/* schizo non-shared objects */
180 	pci_fm_destroy(pci_p);
181 
182 	sc_destroy(pci_p);
183 	pbm_destroy(pci_p);
184 	iommu_destroy(pci_p);
185 	ib_destroy(pci_p);
186 
187 	if (cmn_p->pci_common_refcnt != 0) {
188 		pci_intr_teardown(pci_p);
189 		cmn_p->pci_p[pci_p->pci_side] = NULL;
190 		mutex_exit(&pci_global_mutex);
191 		return;
192 	}
193 
194 	/* schizo shared objects - uses cmn_p, must be destroyed before cmn */
195 	ecc_destroy(pci_p);
196 	cb_destroy(pci_p);
197 
198 	free_pci_common_soft_state(cmn_p->pci_common_id);
199 	pci_intr_teardown(pci_p);
200 	mutex_exit(&pci_global_mutex);
201 }
202 
203 /* called by pci_attach() DDI_RESUME to (re)initialize pci objects */
204 void
205 pci_obj_resume(pci_t *pci_p)
206 {
207 	pci_common_t *cmn_p = pci_p->pci_common_p;
208 
209 	mutex_enter(&pci_global_mutex);
210 
211 	ib_configure(pci_p->pci_ib_p);
212 	iommu_configure(pci_p->pci_iommu_p);
213 
214 	if (cmn_p->pci_common_attachcnt == 0)
215 		ecc_configure(pci_p);
216 
217 	ib_resume(pci_p->pci_ib_p);
218 
219 	pbm_configure(pci_p->pci_pbm_p);
220 	sc_configure(pci_p->pci_sc_p);
221 
222 	if (cmn_p->pci_common_attachcnt == 0)
223 		cb_resume(pci_p->pci_cb_p);
224 
225 	pbm_resume(pci_p->pci_pbm_p);
226 
227 	cmn_p->pci_common_attachcnt++;
228 	mutex_exit(&pci_global_mutex);
229 }
230 
231 /* called by pci_detach() DDI_SUSPEND to suspend pci objects */
232 void
233 pci_obj_suspend(pci_t *pci_p)
234 {
235 	mutex_enter(&pci_global_mutex);
236 
237 	pbm_suspend(pci_p->pci_pbm_p);
238 	ib_suspend(pci_p->pci_ib_p);
239 
240 	if (!--pci_p->pci_common_p->pci_common_attachcnt)
241 		cb_suspend(pci_p->pci_cb_p);
242 
243 	mutex_exit(&pci_global_mutex);
244 }
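
/*
 * Illustrative sketch only (not the driver's actual entry point, which
 * lives in the common pci module): how a nexus attach(9E) routine would
 * dispatch to the object routines above.  The function name and the
 * PCISCH_EXAMPLES guard are hypothetical.
 */
#ifdef PCISCH_EXAMPLES
static int
example_attach_dispatch(ddi_attach_cmd_t cmd, pci_t *pci_p)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (pci_obj_setup(pci_p));	/* create all objects */
	case DDI_RESUME:
		pci_obj_resume(pci_p);		/* reprogram h/w only */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}
#endif	/* PCISCH_EXAMPLES */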
245 
246 /*
247  * add an additional 0x35 or 0x36 ino interrupt on platforms that don't
248  * have them.  This routine assumes in multiple places that each interrupt
249  * takes one cell and that the cell size is the same as the integer size.
250  */
251 static int
252 pci_intr_setup(pci_t *pci_p)
253 {
254 	dev_info_t *dip = pci_p->pci_dip;
255 	pbm_t *pbm_p = pci_p->pci_pbm_p;
256 	cb_t *cb_p = pci_p->pci_cb_p;
257 	uint32_t *intr_buf, *new_intr_buf;
258 	int intr_len, intr_cnt, ret;
259 
260 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
261 	    "interrupts", (caddr_t)&intr_buf, &intr_len) != DDI_SUCCESS)
262 		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
263 		    ddi_driver_name(dip), ddi_get_instance(dip));
264 
265 	intr_cnt = BYTES_TO_1275_CELLS(intr_len);
266 	if (intr_cnt < CBNINTR_CDMA)	/* CBNINTR_CDMA is 0 based */
267 		cmn_err(CE_PANIC, "%s%d: <%d interrupts", ddi_driver_name(dip),
268 		    ddi_get_instance(dip), CBNINTR_CDMA);
269 
270 	if (intr_cnt == CBNINTR_CDMA)
271 		intr_cnt++;
272 
273 	new_intr_buf = kmem_alloc(CELLS_1275_TO_BYTES(intr_cnt), KM_SLEEP);
274 	bcopy(intr_buf, new_intr_buf, intr_len);
275 	kmem_free(intr_buf, intr_len);
276 
277 	new_intr_buf[CBNINTR_CDMA] = PBM_CDMA_INO_BASE + pci_p->pci_side;
278 	pci_p->pci_inos = new_intr_buf;
279 	pci_p->pci_inos_len = CELLS_1275_TO_BYTES(intr_cnt);
280 
281 	if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "interrupts",
282 	    (int *)new_intr_buf, intr_cnt))
283 		cmn_err(CE_PANIC, "%s%d: cannot update interrupts property\n",
284 		    ddi_driver_name(dip), ddi_get_instance(dip));
285 
286 	if (pci_p->pci_common_p->pci_common_refcnt == 0) {
287 		cb_p->cb_no_of_inos = intr_cnt;
288 		if (ret = cb_register_intr(pci_p))
289 			goto teardown;
290 		if (ret = ecc_register_intr(pci_p))
291 			goto teardown;
292 
293 		intr_dist_add(cb_intr_dist, cb_p);
294 		cb_enable_intr(pci_p);
295 		ecc_enable_intr(pci_p);
296 	}
297 
298 	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
299 		pbm_p->pbm_sync_ino = pci_p->pci_inos[CBNINTR_PBM];
300 	if (ret = pbm_register_intr(pbm_p)) {
301 		if (pci_p->pci_common_p->pci_common_refcnt == 0)
302 			intr_dist_rem(cb_intr_dist, cb_p);
303 		goto teardown;
304 	}
305 	intr_dist_add(pbm_intr_dist, pbm_p);
306 	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_PBM]);
307 	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_CDMA]);
308 
309 	intr_dist_add_weighted(ib_intr_dist_all, pci_p->pci_ib_p);
310 	return (DDI_SUCCESS);
311 teardown:
312 	pci_intr_teardown(pci_p);
313 	return (ret);
314 }
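
/*
 * Sketch of the one-cell assumption noted above: each "interrupts"
 * specifier occupies a single 32-bit 1275 cell, so cell and byte counts
 * convert by a factor of sizeof (uint32_t).  These macro names are
 * hypothetical; the driver uses BYTES_TO_1275_CELLS and
 * CELLS_1275_TO_BYTES from its headers.
 */
#ifdef PCISCH_EXAMPLES
#define	EXAMPLE_BYTES_TO_CELLS(b)	((b) / sizeof (uint32_t))
#define	EXAMPLE_CELLS_TO_BYTES(c)	((c) * sizeof (uint32_t))
#endif	/* PCISCH_EXAMPLES */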
315 
316 uint64_t
317 pci_sc_configure(pci_t *pci_p)
318 {
319 	int instance;
320 	dev_info_t *dip = pci_p->pci_dip;
321 
322 	instance = ddi_get_instance(dip);
323 	if ((pci_xmits_sc_max_prf & (1 << instance)) &&
324 	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS))
325 		return (XMITS_SC_MAX_PRF);
326 	else
327 		return (0);
328 }
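
/*
 * Example of driving the per-instance tunable above, assuming the
 * standard /etc/system syntax and that the variable is patchable in
 * this module: setting bit 1 enables maximum streaming cache prefetch
 * for instance 1 only.
 *
 *	set pcisch:pci_xmits_sc_max_prf = 0x2
 */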
329 
330 static void
331 pci_schizo_cdma_sync(pbm_t *pbm_p)
332 {
333 	pci_t *pci_p = pbm_p->pbm_pci_p;
334 	hrtime_t start_time;
335 	volatile uint64_t *clr_p = ib_clear_intr_reg_addr(pci_p->pci_ib_p,
336 	    pci_p->pci_inos[CBNINTR_CDMA]);
337 	uint32_t fail_cnt = pci_cdma_intr_count;
338 
339 	mutex_enter(&pbm_p->pbm_sync_mutex);
340 #ifdef PBM_CDMA_DEBUG
341 	pbm_p->pbm_cdma_req_cnt++;
342 #endif /* PBM_CDMA_DEBUG */
343 	pbm_p->pbm_cdma_flag = PBM_CDMA_PEND;
344 	IB_INO_INTR_TRIG(clr_p);
345 wait:
346 	start_time = gethrtime();
347 	while (pbm_p->pbm_cdma_flag != PBM_CDMA_DONE) {
348 		if (gethrtime() - start_time <= pci_cdma_intr_timeout)
349 			continue;
350 		if (--fail_cnt > 0)
351 			goto wait;
352 		if (pbm_p->pbm_cdma_flag == PBM_CDMA_DONE)
353 			break;
354 		cmn_err(CE_PANIC, "%s (%s): consistent dma sync timeout",
355 		    pbm_p->pbm_nameinst_str, pbm_p->pbm_nameaddr_str);
356 	}
357 #ifdef PBM_CDMA_DEBUG
358 	if (pbm_p->pbm_cdma_flag != PBM_CDMA_DONE)
359 		pbm_p->pbm_cdma_to_cnt++;
360 	else {
361 		start_time = gethrtime() - start_time;
362 		pbm_p->pbm_cdma_success_cnt++;
363 		pbm_p->pbm_cdma_latency_sum += start_time;
364 		if (start_time > pbm_p->pbm_cdma_latency_max)
365 			pbm_p->pbm_cdma_latency_max = start_time;
366 	}
367 #endif /* PBM_CDMA_DEBUG */
368 	mutex_exit(&pbm_p->pbm_sync_mutex);
369 }
370 
371 #if !defined(lint)
372 #include <sys/cpuvar.h>
373 #endif
374 
375 #define	SYNC_HW_BUSY(pa, mask)	(lddphysio(pa) & (mask))
376 
377 /*
378  * Consistent DMA Sync/Flush
379  *
380  * XMITS and Tomatillo use a multi-threaded sync/flush register.
381  * Called from interrupt wrapper: the associated ino is used to index
382  *	the distinctive register bit.
383  * Called from pci_dma_sync(): the bit belonging to the PBM is shared
384  *	for all calls from pci_dma_sync(). XMITS requires serialization
385  *	while Tomatillo does not.
386  */
387 void
388 pci_pbm_dma_sync(pbm_t *pbm_p, ib_ino_t ino)
389 {
390 	pci_t *pci_p = pbm_p->pbm_pci_p;
391 	hrtime_t start_time;
392 	uint64_t ino_mask, sync_reg_pa;
393 	volatile uint64_t flag_val;
394 	uint32_t locked, chip_type = CHIP_TYPE(pci_p);
395 	int	i;
396 
397 	if (chip_type == PCI_CHIP_SCHIZO) {
398 		pci_schizo_cdma_sync(pbm_p);
399 		return;
400 	}
401 
402 	sync_reg_pa = pbm_p->pbm_sync_reg_pa;
403 
404 	locked = 0;
405 	if (((chip_type == PCI_CHIP_XMITS) && (ino == pbm_p->pbm_sync_ino)) ||
406 	    pci_sync_lock) {
407 		locked = 1;
408 		mutex_enter(&pbm_p->pbm_sync_mutex);
409 	}
410 	ino_mask = 1ull << ino;
411 	stdphysio(sync_reg_pa, ino_mask);
412 
413 	for (i = 0; i < 5; i++) {
414 		if ((flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) == 0)
415 			goto done;
416 	}
417 
418 	start_time = gethrtime();
419 	for (; (flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) != 0; i++) {
420 		if (gethrtime() - start_time > pci_sync_buf_timeout)
421 			break;
422 	}
423 
424 	if (flag_val && SYNC_HW_BUSY(sync_reg_pa, ino_mask) && !panicstr)
425 		cmn_err(CE_PANIC, "%s: pbm dma sync %lx,%lx timeout!",
426 		    pbm_p->pbm_nameaddr_str, sync_reg_pa, flag_val);
427 done:
428 	/* optional: stdphysio(sync_reg_pa - 8, ino_mask); */
429 	if (locked)
430 		mutex_exit(&pbm_p->pbm_sync_mutex);
431 
432 	if (tomatillo_store_store_wrka) {
433 #if !defined(lint)
434 		kpreempt_disable();
435 #endif
436 		tomatillo_store_store_order();
437 #if !defined(lint)
438 		kpreempt_enable();
439 #endif
440 	}
441 
442 }
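
/*
 * Minimal sketch of the sync register handshake implemented above,
 * assuming the hardware model described in the block comment: writing
 * an INO's bit starts the flush and the bit reads back non-zero until
 * the flush completes.  The timeout, retry, and locking logic of
 * pci_pbm_dma_sync() is deliberately omitted; the function name and
 * guard are hypothetical.
 */
#ifdef PCISCH_EXAMPLES
static void
example_sync_handshake(uint64_t sync_reg_pa, ib_ino_t ino)
{
	uint64_t ino_mask = 1ull << ino;

	stdphysio(sync_reg_pa, ino_mask);	/* start the flush */
	while (SYNC_HW_BUSY(sync_reg_pa, ino_mask))
		;	/* spin until the hardware clears our bit */
}
#endif	/* PCISCH_EXAMPLES */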
443 
444 /*ARGSUSED*/
445 void
446 pci_fix_ranges(pci_ranges_t *rng_p, int rng_entries)
447 {
448 }
449 
450 /*
451  * map_pci_registers
452  *
453  * This function is called from the attach routine to map the registers
454  * accessed by this driver.
455  *
456  * used by: pci_attach()
457  *
458  * return value: DDI_FAILURE on failure
459  */
460 int
461 map_pci_registers(pci_t *pci_p, dev_info_t *dip)
462 {
463 	ddi_device_acc_attr_t attr;
464 	int len;
465 
466 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
467 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
468 
469 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
470 
471 	/*
472 	 * Register set 0 is PCI CSR Base
473 	 */
474 	if (ddi_regs_map_setup(dip, 0, &pci_p->pci_address[0], 0, 0,
475 	    &attr, &pci_p->pci_ac[0]) != DDI_SUCCESS) {
476 		len = 0;
477 		goto fail;
478 	}
479 	/*
480 	 * Register set 1 is Schizo CSR Base
481 	 */
482 	if (ddi_regs_map_setup(dip, 1, &pci_p->pci_address[1], 0, 0,
483 	    &attr, &pci_p->pci_ac[1]) != DDI_SUCCESS) {
484 		len = 1;
485 		goto fail;
486 	}
487 
488 	/*
489 	 * The third register set contains the bridge's configuration
490 	 * header.  This header is at the very beginning of the bridge's
491  * configuration space.  This space has little-endian byte order.
492 	 */
493 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
494 	if (ddi_regs_map_setup(dip, 2, &pci_p->pci_address[2], 0,
495 	    PCI_CONF_HDR_SIZE, &attr, &pci_p->pci_ac[2]) != DDI_SUCCESS) {
496 		len = 2;
497 		goto fail;
498 	}
499 
500 	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
501 	    "reg", &len) || (len / sizeof (pci_nexus_regspec_t) < 4))
502 		goto done;
503 
504 	/*
505 	 * The optional fourth register bank points to the
506 	 * interrupt concentrator registers.
507 	 */
508 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
509 	if (ddi_regs_map_setup(dip, 3, &pci_p->pci_address[3], 0,
510 	    0, &attr, &pci_p->pci_ac[3]) != DDI_SUCCESS) {
511 		len = 3;
512 		goto fail;
513 	}
514 
515 done:
516 	DEBUG4(DBG_ATTACH, dip, "address (%p,%p,%p,%p)\n",
517 	    pci_p->pci_address[0], pci_p->pci_address[1],
518 	    pci_p->pci_address[2], pci_p->pci_address[3]);
519 
520 	return (DDI_SUCCESS);
521 
522 
523 fail:
524 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
525 	    ddi_driver_name(dip), ddi_get_instance(dip), len);
526 	for (; len--; ddi_regs_map_free(&pci_p->pci_ac[len]))
527 		;
528 	return (DDI_FAILURE);
529 }
530 
531 /*
532  * unmap_pci_registers:
533  *
534  * This routine unmaps the registers mapped by map_pci_registers.
535  *
536  * used by: pci_detach()
537  *
538  * return value: none
539  */
540 void
541 unmap_pci_registers(pci_t *pci_p)
542 {
543 	int i;
544 
545 	for (i = 0; i < 4; i++) {
546 		if (pci_p->pci_ac[i])
547 			ddi_regs_map_free(&pci_p->pci_ac[i]);
548 	}
549 }
550 
551 uint64_t
552 ib_get_map_reg(ib_mondo_t mondo, uint32_t cpu_id)
553 {
554 	uint32_t agent_id;
555 	uint32_t node_id;
556 
557 	/* ensure that cpu_id is only 10 bits. */
558 	ASSERT((cpu_id & ~0x3ff) == 0);
559 
560 	agent_id = cpu_id & 0x1f;
561 	node_id = (cpu_id >> 5) & 0x1f;
562 
563 	return ((mondo) | (agent_id << COMMON_INTR_MAP_REG_TID_SHIFT) |
564 	    (node_id << SCHIZO_INTR_MAP_REG_NID_SHIFT) |
565 	    COMMON_INTR_MAP_REG_VALID);
566 }
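
/*
 * Worked example of the encoding above (illustrative): for cpu_id
 * 0x12a, agent_id = 0x12a & 0x1f = 0x0a and node_id = (0x12a >> 5) &
 * 0x1f = 0x09, so the two fields land in their separate map register
 * positions via their respective shifts.
 */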
567 
568 uint32_t
569 ib_map_reg_get_cpu(volatile uint64_t reg)
570 {
571 	return (((reg & COMMON_INTR_MAP_REG_TID) >>
572 	    COMMON_INTR_MAP_REG_TID_SHIFT) |
573 	    ((reg & SCHIZO_INTR_MAP_REG_NID) >>
574 	    (SCHIZO_INTR_MAP_REG_NID_SHIFT-5)));
575 }
576 
577 uint64_t *
578 ib_intr_map_reg_addr(ib_t *ib_p, ib_ino_t ino)
579 {
580 	/*
581 	 * Schizo maps all interrupts in one contiguous area.
582 	 * (PCI_CSRBase + 0x00.1000 + INO * 8).
583 	 */
584 	return ((uint64_t *)(ib_p->ib_intr_map_regs) + (ino & 0x3f));
585 }
586 
587 uint64_t *
588 ib_clear_intr_reg_addr(ib_t *ib_p, ib_ino_t ino)	/* XXX - needs work */
589 {
590 	/*
591 	 * Schizo maps clear intr. registers in contiguous area.
592 	 * (PCI_CSRBase + 0x00.1400 + INO * 8).
593 	 */
594 	return ((uint64_t *)(ib_p->ib_slot_clear_intr_regs) + (ino & 0x3f));
595 }
596 
597 /*
598  * Schizo does not have a mapping register per slot, so no sharing
599  * is done.
600  */
601 /*ARGSUSED*/
602 void
603 ib_ino_map_reg_share(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
604 {
605 }
606 
607 /*
608  * return true if there are interrupts using this mapping register
609  */
610 /*ARGSUSED*/
611 int
612 ib_ino_map_reg_unshare(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
613 {
614 	return (ino_p->ino_ipil_size);
615 }
616 
617 void
618 pci_pbm_intr_dist(pbm_t *pbm_p)
619 {
620 	pci_t *pci_p = pbm_p->pbm_pci_p;
621 	ib_t *ib_p = pci_p->pci_ib_p;
622 	ib_ino_t ino = IB_MONDO_TO_INO(pci_p->pci_inos[CBNINTR_CDMA]);
623 
624 	mutex_enter(&pbm_p->pbm_sync_mutex);
625 	ib_intr_dist_nintr(ib_p, ino, ib_intr_map_reg_addr(ib_p, ino));
626 	mutex_exit(&pbm_p->pbm_sync_mutex);
627 }
628 
629 uint32_t
630 pci_xlate_intr(dev_info_t *dip, dev_info_t *rdip, ib_t *ib_p, uint32_t intr)
631 {
632 	return (IB_INO_TO_MONDO(ib_p, intr));
633 }
634 
635 
636 /*
637  * Return the cpuid to be used for an ino.  We have no special cpu
638  * assignment constraints for this nexus, so just call intr_dist_cpuid().
639  */
640 /* ARGSUSED */
641 uint32_t
642 pci_intr_dist_cpuid(ib_t *ib_p, ib_ino_info_t *ino_p)
643 {
644 	return (intr_dist_cpuid());
645 }
646 
647 void
648 pci_cb_teardown(pci_t *pci_p)
649 {
650 	cb_t	*cb_p = pci_p->pci_cb_p;
651 	uint32_t mondo;
652 
653 	if (!pci_buserr_interrupt)
654 		return;
655 
656 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
657 	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
658 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
659 
660 	cb_disable_nintr(cb_p, CBNINTR_BUS_ERROR, IB_INTR_WAIT);
661 	VERIFY(rem_ivintr(mondo, pci_pil[CBNINTR_BUS_ERROR]) == 0);
662 }
663 
664 int
665 cb_register_intr(pci_t *pci_p)
666 {
667 	uint32_t mondo;
668 
669 	if (!pci_buserr_interrupt)
670 		return (DDI_SUCCESS);
671 
672 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
673 	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
674 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
675 
676 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_BUS_ERROR],
677 	    (intrfunc)cb_buserr_intr, (caddr_t)pci_p->pci_cb_p,
678 	    NULL, NULL) == 0);
679 
680 	return (PCI_ATTACH_RETCODE(PCI_CB_OBJ, PCI_OBJ_INTR_ADD, DDI_SUCCESS));
681 }
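
/*
 * Worked example of the mondo composition above, assuming PCI_INO_BITS
 * is 6 (Schizo INOs fit in 6 bits): with cb_ign = 0x7 and
 * pci_inos[CBNINTR_BUS_ERROR] = 0x30, mondo = (0x7 << 6) | 0x30 =
 * 0x1f0 before the CB_MONDO_TO_XMONDO() adjustment.
 */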
682 
683 void
684 cb_enable_intr(pci_t *pci_p)
685 {
686 	if (pci_buserr_interrupt)
687 		cb_enable_nintr(pci_p, CBNINTR_BUS_ERROR);
688 }
689 
690 uint64_t
691 cb_ino_to_map_pa(cb_t *cb_p, ib_ino_t ino)
692 {
693 	return (cb_p->cb_map_pa + (ino << 3));
694 }
695 
696 uint64_t
697 cb_ino_to_clr_pa(cb_t *cb_p, ib_ino_t ino)
698 {
699 	return (cb_p->cb_clr_pa + (ino << 3));
700 }
701 
702 /*
703  * Useful on psycho only.
704  */
705 int
706 cb_remove_xintr(pci_t *pci_p, dev_info_t *dip, dev_info_t *rdip, ib_ino_t ino,
707     ib_mondo_t mondo)
708 {
709 	return (DDI_FAILURE);
710 }
711 
712 void
713 pbm_configure(pbm_t *pbm_p)
714 {
715 	pci_t *pci_p = pbm_p->pbm_pci_p;
716 	dev_info_t *dip = pbm_p->pbm_pci_p->pci_dip;
717 	int instance = ddi_get_instance(dip);
718 	uint64_t l;
719 	uint64_t mask = 1ll << instance;
720 	ushort_t s = 0;
721 
722 	l = *pbm_p->pbm_ctrl_reg;	/* save control register state */
723 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
724 
725 	/*
726 	 * See if any SERR# signals are asserted.  We'll clear them later.
727 	 */
728 	if (l & COMMON_PCI_CTRL_SERR)
729 		cmn_err(CE_WARN, "%s%d: SERR asserted on pci bus\n",
730 		    ddi_driver_name(dip), instance);
731 
732 	/*
733 	 * Determine if PCI bus is running at 33 or 66 mhz.
734 	 */
735 	if (l & COMMON_PCI_CTRL_SPEED)
736 		pbm_p->pbm_speed = PBM_SPEED_66MHZ;
737 	else
738 		pbm_p->pbm_speed = PBM_SPEED_33MHZ;
739 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: %d mhz\n",
740 	    pbm_p->pbm_speed  == PBM_SPEED_66MHZ ? 66 : 33);
741 
742 	if (pci_set_dto_value & mask) {
743 		l &= ~(3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
744 		l |= pci_dto_value << SCHIZO_PCI_CTRL_PTO_SHIFT;
745 	} else if (PCI_CHIP_ID(pci_p) >= TOMATILLO_VER_21) {
746 		l |= (3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
747 	}
748 
749 	/*
750 	 * Enable error interrupts.
751 	 */
752 	if (pci_error_intr_enable & mask)
753 		l |= SCHIZO_PCI_CTRL_ERR_INT_EN;
754 	else
755 		l &= ~SCHIZO_PCI_CTRL_ERR_INT_EN;
756 
757 	/*
758 	 * Enable pci streaming byte errors and error interrupts.
759 	 */
760 	if (pci_sbh_error_intr_enable & mask)
761 		l |= SCHIZO_PCI_CTRL_SBH_INT_EN;
762 	else
763 		l &= ~SCHIZO_PCI_CTRL_SBH_INT_EN;
764 
765 	/*
766 	 * Enable pci discard timeout error interrupt.
767 	 */
768 	if (pci_mmu_error_intr_enable & mask)
769 		l |= SCHIZO_PCI_CTRL_MMU_INT_EN;
770 	else
771 		l &= ~SCHIZO_PCI_CTRL_MMU_INT_EN;
772 
773 	/*
774 	 * Enable PCI-X error interrupts.
775 	 */
776 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
777 
778 		if (xmits_error_intr_enable & mask)
779 			l |= XMITS_PCI_CTRL_X_ERRINT_EN;
780 		else
781 			l &= ~XMITS_PCI_CTRL_X_ERRINT_EN;
782 		/*
783 		 * Panic if older XMITS hardware is found.
784 		 */
785 		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE)
786 			if (PCI_CHIP_ID(pci_p) <= XMITS_VER_10)
787 				cmn_err(CE_PANIC, "%s (%s): PCIX mode "
788 				"unsupported on XMITS version %d\n",
789 				    pbm_p->pbm_nameinst_str,
790 				    pbm_p->pbm_nameaddr_str, CHIP_VER(pci_p));
791 
792 		if (xmits_perr_recov_int_enable) {
793 			if (PCI_CHIP_ID(pci_p) >= XMITS_VER_30) {
794 				uint64_t pcix_err;
795 				/*
796 				 * Enable interrupt on PERR
797 				 */
798 				pcix_err = *pbm_p->pbm_pcix_err_stat_reg;
799 				pcix_err |= XMITS_PCIX_STAT_PERR_RECOV_INT_EN;
800 				pcix_err &= ~XMITS_PCIX_STAT_SERR_ON_PERR;
801 				*pbm_p->pbm_pcix_err_stat_reg = pcix_err;
802 			}
803 		}
804 
805 		/*
806 		 * Enable parity error detection on internal memories
807 		 */
808 		*pbm_p->pbm_pci_ped_ctrl = 0x3fff;
809 	}
810 
811 	/*
812 	 * Enable/disable bus parking.
813 	 */
814 	if ((pci_bus_parking_enable & mask) &&
815 	    !ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
816 	    "no-bus-parking"))
817 		l |= SCHIZO_PCI_CTRL_ARB_PARK;
818 	else
819 		l &= ~SCHIZO_PCI_CTRL_ARB_PARK;
820 
821 	/*
822 	 * Enable arbitration.
823 	 */
824 	l |= PCI_CHIP_ID(pci_p) == XMITS_VER_10 ? XMITS10_PCI_CTRL_ARB_EN_MASK :
825 	    SCHIZO_PCI_CTRL_ARB_EN_MASK;
826 
827 	/*
828 	 * Make sure SERR is clear
829 	 */
830 	l |= COMMON_PCI_CTRL_SERR;
831 
832 
833 	/*
834 	 * Enable DTO interrupt, if desired.
835 	 */
836 
837 	if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_20 || (pci_dto_intr_enable &
838 	    mask))
839 		l |=	 (TOMATILLO_PCI_CTRL_DTO_INT_EN);
840 	else
841 		l &=	 ~(TOMATILLO_PCI_CTRL_DTO_INT_EN);
842 
843 	l |= TOMATILLO_PCI_CTRL_PEN_RD_MLTPL |
844 	    TOMATILLO_PCI_CTRL_PEN_RD_ONE |
845 	    TOMATILLO_PCI_CTRL_PEN_RD_LINE;
846 
847 	/*
848 	 * Now finally write the control register with the appropriate value.
849 	 */
850 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
851 	*pbm_p->pbm_ctrl_reg = l;
852 
853 	/*
854 	 * Enable IO Prefetch on Tomatillo
855 	 */
856 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
857 		volatile uint64_t *ioc_csr_p = pbm_p->pbm_ctrl_reg +
858 		    ((TOMATILLO_IOC_CSR_OFF -
859 		    SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);
860 		*ioc_csr_p = TOMATILLO_WRT_PEN |
861 		    (1 << TOMATILLO_POFFSET_SHIFT) |
862 		    TOMATILLO_C_PEN_RD_MLTPL |
863 		    TOMATILLO_C_PEN_RD_ONE |
864 		    TOMATILLO_C_PEN_RD_LINE;
865 	}
866 
867 	/*
868 	 * Allow DMA write parity errors to generate an interrupt.
869 	 * This is implemented on Schizo 2.5 and greater and XMITS 3.0
870  * and greater.  Setting this on earlier XMITS versions has no
871  * effect.
872 	 */
873 	if (((CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) &&
874 	    PCI_CHIP_ID(pci_p) >= SCHIZO_VER_25) ||
875 	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS)) {
876 		volatile uint64_t *pbm_icd = pbm_p->pbm_ctrl_reg +
877 		    ((SCHIZO_PERF_PCI_ICD_OFFSET -
878 		    SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);
879 
880 		*pbm_icd |= SCHIZO_PERF_PCI_ICD_DMAW_PARITY_INT_ENABLE;
881 	}
882 
883 	/*
884 	 * Clear any PBM errors.
885 	 */
886 	l = (SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_PE_SHIFT) |
887 	    (SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_SE_SHIFT);
888 	*pbm_p->pbm_async_flt_status_reg = l;
889 
890 	/*
891 	 * Allow the diag register to be set based upon variable that
892 	 * can be configured via /etc/system.
893 	 */
894 	l = *pbm_p->pbm_diag_reg;
895 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
896 
897 	/*
898 	 * Enable/disable retry limit.
899 	 */
900 	if (pci_retry_disable & mask)
901 		l |= COMMON_PCI_DIAG_DIS_RETRY;
902 	else
903 		l &= ~COMMON_PCI_DIAG_DIS_RETRY;
904 
905 	/*
906 	 * Enable/disable DMA write/interrupt synchronization.
907 	 */
908 	if (pci_intsync_disable & mask)
909 		l |= COMMON_PCI_DIAG_DIS_INTSYNC;
910 	else
911 		l &= ~COMMON_PCI_DIAG_DIS_INTSYNC;
912 
913 	/*
914 	 * Enable/disable retry arbitration priority.
915 	 */
916 	if (pci_enable_retry_arb & mask)
917 		l &= ~SCHIZO_PCI_DIAG_DIS_RTRY_ARB;
918 	else
919 		l |= SCHIZO_PCI_DIAG_DIS_RTRY_ARB;
920 
921 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
922 	*pbm_p->pbm_diag_reg = l;
923 
924 	/*
925 	 * Enable SERR# and parity reporting via command register.
926 	 */
927 	s = pci_perr_enable & mask ? PCI_COMM_PARITY_DETECT : 0;
928 	s |= pci_serr_enable & mask ? PCI_COMM_SERR_ENABLE : 0;
929 
930 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf command reg=%x\n", s);
931 	pbm_p->pbm_config_header->ch_command_reg = s;
932 
933 	/*
934 	 * Clear error bits in configuration status register.
935 	 */
936 	s = PCI_STAT_PERROR | PCI_STAT_S_PERROR |
937 	    PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB |
938 	    PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR;
939 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf status reg=%x\n", s);
940 	pbm_p->pbm_config_header->ch_status_reg = s;
941 
942 	/*
943 	 * The current versions of the obp are supposed to set the latency
944 	 * timer register but do not.  Bug 1234181 is open against this
945 	 * problem.  Until this bug is fixed we check to see if the obp
946 	 * has attempted to set the latency timer register by checking
947 	 * for the existence of a "latency-timer" property.
948 	 */
949 	if (pci_set_latency_timer_register) {
950 		DEBUG1(DBG_ATTACH, dip,
951 		    "pbm_configure: set schizo latency timer to %x\n",
952 		    pci_latency_timer);
953 		pbm_p->pbm_config_header->ch_latency_timer_reg =
954 		    pci_latency_timer;
955 	}
956 
957 	(void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
958 	    (int)pbm_p->pbm_config_header->ch_latency_timer_reg);
959 
960 	/*
961 	 * Adjust xmits_upper_retry_counter if set in /etc/system
962 	 *
963 	 * NOTE: current implementation resets UPPR_RTRY counter for
964 	 * _all_ XMITS' PBMs and does not support tuning per PBM.
965 	 */
966 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
967 		uint_t xurc = xmits_upper_retry_counter &
968 		    XMITS_UPPER_RETRY_MASK;
969 
970 		if (xurc) {
971 			*pbm_p->pbm_upper_retry_counter_reg = (uint64_t)xurc;
972 			DEBUG1(DBG_ATTACH, dip, "pbm_configure: Setting XMITS"
973 			    " uppr_rtry counter = 0x%lx\n",
974 			    *pbm_p->pbm_upper_retry_counter_reg);
975 		}
976 	}
977 }
978 
979 uint_t
980 pbm_disable_pci_errors(pbm_t *pbm_p)
981 {
982 	pci_t *pci_p = pbm_p->pbm_pci_p;
983 	ib_t *ib_p = pci_p->pci_ib_p;
984 
985 	/*
986 	 * Disable error and streaming byte hole interrupts via the
987 	 * PBM control register.
988 	 */
989 	*pbm_p->pbm_ctrl_reg &=
990 	    ~(SCHIZO_PCI_CTRL_ERR_INT_EN | SCHIZO_PCI_CTRL_SBH_INT_EN |
991 	    SCHIZO_PCI_CTRL_MMU_INT_EN);
992 
993 	/*
994 	 * Disable error interrupts via the interrupt mapping register.
995 	 */
996 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_PBM], IB_INTR_NOWAIT);
997 	return (BF_NONE);
998 }
999 
1000 /*
1001  * Layout of the dvma context bucket bitmap entry:
1002  *
1003  *	63 - 56		55 - 0
1004  *	8-bit lock	56-bit, each represent one context
1005  *	DCB_LOCK_BITS	DCB_BMAP_BITS
1006  */
1007 #define	DCB_LOCK_BITS	8
1008 #define	DCB_BMAP_BITS	(64 - DCB_LOCK_BITS)
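
/*
 * Illustrative helpers (hypothetical, not used by the driver) showing
 * how a context value maps onto this layout: the upper bits select one
 * 64-bit bucket word and the low 6 bits select a context bit within
 * it, matching the (bucket_no << 6) | i encoding used below.
 */
#ifdef PCISCH_EXAMPLES
#define	EXAMPLE_CTX_TO_BUCKET(ctx)	((ctx) >> 6)	/* bucket index */
#define	EXAMPLE_CTX_TO_BIT(ctx)		((ctx) & 0x3f)	/* bit in bucket */
#endif	/* PCISCH_EXAMPLES */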
1009 
1010 dvma_context_t
1011 pci_iommu_get_dvma_context(iommu_t *iommu_p, dvma_addr_t dvma_pg_index)
1012 {
1013 	dvma_context_t ctx;
1014 	int i = (dvma_pg_index >> 6) & 0x1f;	/* 5 bit index within bucket */
1015 	uint64_t ctx_mask, test = 1ull << i;
1016 	uint32_t bucket_no = dvma_pg_index & 0x3f;
1017 	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;
1018 
1019 	uint32_t spl = ddi_enter_critical();	/* block interrupts */
1020 	if (ldstub((uint8_t *)bucket_ptr)) {	/* try lock */
1021 		ddi_exit_critical(spl);		/* unblock interrupt */
1022 		pci_iommu_ctx_lock_failure++;
1023 		return (0);
1024 	}
1025 
1026 	/* clear lock bits */
1027 	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
1028 	ASSERT(*bucket_ptr >> DCB_BMAP_BITS == 0xff);
1029 	ASSERT(ctx_mask >> DCB_BMAP_BITS == 0);
1030 
1031 	if (ctx_mask & test)			/* quick check i bit */
1032 		for (i = 0, test = 1ull; test & ctx_mask; test <<= 1, i++)
1033 			;
1034 	if (i < DCB_BMAP_BITS)
1035 		ctx_mask |= test;
1036 	*bucket_ptr = ctx_mask;			/* unlock */
1037 	ddi_exit_critical(spl);			/* unblock interrupts */
1038 
1039 	ctx = i < DCB_BMAP_BITS ? (bucket_no << 6) | i : 0;
1040 	DEBUG3(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
1041 	    "get_dvma_context: ctx_mask=0x%x.%x ctx=0x%x\n",
1042 	    (uint32_t)(ctx_mask >> 32), (uint32_t)ctx_mask, ctx);
1043 	return (ctx);
1044 }
1045 
1046 void
1047 pci_iommu_free_dvma_context(iommu_t *iommu_p, dvma_context_t ctx)
1048 {
1049 	uint64_t ctx_mask;
1050 	uint32_t spl, bucket_no = ctx >> 6;
1051 	int bit_no = ctx & 0x3f;
1052 	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;
1053 
1054 	DEBUG1(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
1055 	    "free_dvma_context: ctx=0x%x\n", ctx);
1056 
1057 	spl = ddi_enter_critical();			/* block interrupts */
1058 	while (ldstub((uint8_t *)bucket_ptr))		/* spin lock */
1059 		;
1060 	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
1061 							/* clear lock bits */
1062 	ASSERT(ctx_mask & (1ull << bit_no));
1063 	*bucket_ptr = ctx_mask ^ (1ull << bit_no);	/* clear & unlock */
1064 	ddi_exit_critical(spl);				/* unblock interrupt */
1065 }
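
/*
 * Minimal sketch of the ldstub() locking idiom used by the two context
 * routines above: on big-endian SPARC the first byte of the bucket
 * word is bits 63:56, i.e. the DCB_LOCK_BITS field, and ldstub()
 * atomically sets that byte to 0xff while returning its previous
 * value, so a zero return means the lock was acquired.  The function
 * name and guard are hypothetical.
 */
#ifdef PCISCH_EXAMPLES
static void
example_bucket_lock(uint64_t *bucket_ptr)
{
	while (ldstub((uint8_t *)bucket_ptr) != 0)
		;	/* byte was already 0xff: someone holds the lock */
}
#endif	/* PCISCH_EXAMPLES */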
1066 
1067 int
1068 pci_sc_ctx_inv(dev_info_t *dip, sc_t *sc_p, ddi_dma_impl_t *mp)
1069 {
1070 	dvma_context_t ctx = MP2CTX(mp);
1071 	volatile uint64_t *reg_addr = sc_p->sc_ctx_match_reg + ctx;
1072 	uint64_t matchreg;
1073 
1074 	if (!*reg_addr) {
1075 		DEBUG1(DBG_SC, dip, "ctx=%x no match\n", ctx);
1076 		return (DDI_SUCCESS);
1077 	}
1078 
1079 	*sc_p->sc_ctx_invl_reg = ctx;	/* 1st flush write */
1080 	matchreg = *reg_addr;		/* re-fetch after 1st flush */
1081 	if (!matchreg)
1082 		return (DDI_SUCCESS);
1083 
1084 	matchreg = (matchreg << SC_ENT_SHIFT) >> SC_ENT_SHIFT;	/* low 16-bit */
1085 	do {
1086 		if (matchreg & 1)
1087 			*sc_p->sc_ctx_invl_reg = ctx;
1088 		matchreg >>= 1;
1089 	} while (matchreg);
1090 
1091 	if (pci_ctx_no_compat || !*reg_addr)	/* compat: active ctx flush */
1092 		return (DDI_SUCCESS);
1093 
1094 	pci_ctx_unsuccess_count++;
1095 	if (pci_ctx_flush_warn)
1096 		cmn_err(pci_ctx_flush_warn, "%s%d: ctx flush unsuccessful\n",
1097 		    NAMEINST(dip));
1098 	return (DDI_FAILURE);
1099 }
1100 
1101 void
1102 pci_cb_setup(pci_t *pci_p)
1103 {
1104 	dev_info_t *dip = pci_p->pci_dip;
1105 	cb_t *cb_p = pci_p->pci_cb_p;
1106 	uint64_t pa;
1107 	uint32_t chip_id = PCI_CHIP_ID(pci_p);
1108 	DEBUG1(DBG_ATTACH, dip, "cb_create: chip id %d\n", chip_id);
1109 
1110 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
1111 		if ((!tm_mtlb_gc_manual) &&
1112 		    (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_24))
1113 			tm_mtlb_gc = 1;
1114 
1115 		if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_23) {
1116 			/* Workaround for the Tomatillo ASIC Erratum #72 */
1117 			ignore_invalid_vecintr = 1;
1118 			tomatillo_store_store_wrka = 1;
1119 			tomatillo_disallow_bypass = 1;
1120 			if (pci_spurintr_msgs == PCI_SPURINTR_MSG_DEFAULT)
1121 				pci_spurintr_msgs = 0;
1122 		}
1123 	}
1124 
1125 	if (chip_id == TOMATILLO_VER_20 || chip_id == TOMATILLO_VER_21)
1126 		cmn_err(CE_WARN, "Unsupported Tomatillo rev (%x)", chip_id);
1127 
1128 	if (chip_id < SCHIZO_VER_23)
1129 		pci_ctx_no_active_flush = 1;
1130 
1131 	cb_p->cb_node_id = PCI_ID_TO_NODEID(pci_p->pci_id);
1132 	cb_p->cb_ign	 = PCI_ID_TO_IGN(pci_p->pci_id);
1133 
1134 	/*
1135 	 * schizo control status reg bank is on the 2nd "reg" property entry;
1136 	 * interrupt mapping/clear/state regs are on the 1st "reg" entry.
1137 	 *
1138 	 * ALL internal interrupts except pbm interrupts are shared by both
1139 	 * sides, 1st-side-attached is used as *the* owner.
1140 	 */
1141 	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[1]);
1142 	cb_p->cb_base_pa = pa << MMU_PAGESHIFT;
1143 
1144 	pa = pci_p->pci_address[3] ?
1145 	    (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[3]) : 0;
1146 	cb_p->cb_icbase_pa = (pa == PFN_INVALID) ? 0 : pa << MMU_PAGESHIFT;
1147 
1148 	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[0])
1149 	    << MMU_PAGESHIFT;
1150 	cb_p->cb_map_pa = pa + SCHIZO_IB_INTR_MAP_REG_OFFSET;
1151 	cb_p->cb_clr_pa = pa + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
1152 	cb_p->cb_obsta_pa = pa + COMMON_IB_OBIO_INTR_STATE_DIAG_REG;
1153 }
1154 
1155 void
1156 pci_ecc_setup(ecc_t *ecc_p)
1157 {
1158 	ecc_p->ecc_ue.ecc_errpndg_mask = SCHIZO_ECC_UE_AFSR_ERRPNDG;
1159 	ecc_p->ecc_ue.ecc_offset_mask = SCHIZO_ECC_UE_AFSR_QW_OFFSET;
1160 	ecc_p->ecc_ue.ecc_offset_shift = SCHIZO_ECC_UE_AFSR_QW_OFFSET_SHIFT;
1161 	ecc_p->ecc_ue.ecc_size_log2 = 4;
1162 
1163 	ecc_p->ecc_ce.ecc_errpndg_mask = SCHIZO_ECC_CE_AFSR_ERRPNDG;
1164 	ecc_p->ecc_ce.ecc_offset_mask = SCHIZO_ECC_CE_AFSR_QW_OFFSET;
1165 	ecc_p->ecc_ce.ecc_offset_shift = SCHIZO_ECC_CE_AFSR_QW_OFFSET_SHIFT;
1166 	ecc_p->ecc_ce.ecc_size_log2 = 4;
1167 }
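
/*
 * Worked example (illustrative) of the geometry set above: with
 * ecc_size_log2 == 4 the captured quadword offset counts 16-byte
 * units, so an offset field value of 3 corresponds to byte offset
 * 3 << 4 == 0x30 within the faulting transfer.
 */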
1168 
1169 ushort_t
1170 pci_ecc_get_synd(uint64_t afsr)
1171 {
1172 	return ((ushort_t)((afsr & SCHIZO_ECC_CE_AFSR_SYND) >>
1173 	    SCHIZO_ECC_CE_AFSR_SYND_SHIFT));
1174 }
1175 
1176 /*
1177  * overwrite dvma end address (only on virtual-dma systems)
1178  * initialize tsb size
1179  * reset context bits
1180  * return: IOMMU CSR bank base address (VA)
1181  */
1182 
1183 uintptr_t
1184 pci_iommu_setup(iommu_t *iommu_p)
1185 {
1186 	pci_dvma_range_prop_t *dvma_prop;
1187 	int dvma_prop_len;
1188 
1189 	uintptr_t a;
1190 	pci_t *pci_p = iommu_p->iommu_pci_p;
1191 	dev_info_t *dip = pci_p->pci_dip;
1192 	uint_t tsb_size = iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie);
1193 	uint_t tsb_size_prop;
1194 
1195 	/*
1196 	 * Initializations for Tomatillo's micro TLB bug, erratum #82.
1197 	 */
1198 	if (tm_mtlb_gc) {
1199 		iommu_p->iommu_mtlb_nreq = 0;
1200 		iommu_p->iommu_mtlb_npgs = 0;
1201 		iommu_p->iommu_mtlb_maxpgs = tm_mtlb_maxpgs;
1202 		iommu_p->iommu_mtlb_req_p = (dvma_unbind_req_t *)
1203 		    kmem_zalloc(sizeof (dvma_unbind_req_t) *
1204 		    (tm_mtlb_maxpgs + 1), KM_SLEEP);
1205 		mutex_init(&iommu_p->iommu_mtlb_lock, NULL, MUTEX_DRIVER, NULL);
1206 	}
1207 
1208 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1209 	    "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
1210 	    DDI_PROP_SUCCESS)
1211 		goto tsb_done;
1212 
1213 	if (dvma_prop_len != sizeof (pci_dvma_range_prop_t)) {
1214 		cmn_err(CE_WARN, "%s%d: invalid virtual-dma property",
1215 		    ddi_driver_name(dip), ddi_get_instance(dip));
1216 		goto tsb_end;
1217 	}
1218 	iommu_p->iommu_dvma_end = dvma_prop->dvma_base +
1219 	    (dvma_prop->dvma_len - 1);
1220 	tsb_size_prop = IOMMU_BTOP(dvma_prop->dvma_len) * sizeof (uint64_t);
1221 	tsb_size = MIN(tsb_size_prop, tsb_size);
1222 tsb_end:
1223 	kmem_free(dvma_prop, dvma_prop_len);
1224 tsb_done:
1225 	iommu_p->iommu_tsb_size = iommu_tsb_size_encode(tsb_size);
1226 	iommu_p->iommu_ctx_bitmap =
1227 	    kmem_zalloc(IOMMU_CTX_BITMAP_SIZE, KM_SLEEP);
1228 	*iommu_p->iommu_ctx_bitmap = 1ull;	/* reserve context 0 */
1229 
1230 	/*
1231 	 * Determine the virtual address of the register block
1232 	 * containing the iommu control registers and determine
1233 	 * the virtual address of schizo specific iommu registers.
1234 	 */
1235 	a = (uintptr_t)pci_p->pci_address[0];
1236 	iommu_p->iommu_flush_ctx_reg =
1237 	    (uint64_t *)(a + SCHIZO_IOMMU_FLUSH_CTX_REG_OFFSET);
1238 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
1239 		iommu_p->iommu_tfar_reg =
1240 		    (uint64_t *)(a + TOMATILLO_IOMMU_ERR_TFAR_OFFSET);
1241 	return (a);	/* PCICSRBase */
1242 }
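
/*
 * Worked example of the TSB sizing above (illustrative, assuming 8K
 * IOMMU pages): a "virtual-dma" range of 256MB covers 32768 pages,
 * each needing one 8-byte TTE, so tsb_size_prop = 32768 * 8 = 256KB,
 * and the smaller of that and the pre-allocated TSB size is used.
 */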
1243 
1244 void
1245 pci_iommu_teardown(iommu_t *iommu_p)
1246 {
1247 	if (pci_use_contexts)
1248 		iommu_ctx_free(iommu_p);
1249 	if (iommu_p->iommu_mtlb_req_p) {
1250 		kmem_free(iommu_p->iommu_mtlb_req_p,
1251 		    sizeof (dvma_unbind_req_t) * (tm_mtlb_maxpgs + 1));
1252 		mutex_destroy(&iommu_p->iommu_mtlb_lock);
1253 		iommu_p->iommu_mtlb_req_p = NULL;
1254 		iommu_p->iommu_mtlb_nreq = 0;
1255 		iommu_p->iommu_mtlb_npgs = iommu_p->iommu_mtlb_maxpgs = 0;
1256 	}
1257 }
1258 
1259 uintptr_t
1260 get_pbm_reg_base(pci_t *pci_p)
1261 {
1262 	return ((uintptr_t)
1263 	    (pci_p->pci_address[0] + SCHIZO_PCI_CTRL_REG_OFFSET));
1264 }
1265 
1266 /* ARGSUSED */
1267 static boolean_t
1268 pci_pbm_panic_callb(void *arg, int code)
1269 {
1270 	pbm_t *pbm_p = (pbm_t *)arg;
1271 	volatile uint64_t *ctrl_reg_p;
1272 
1273 	if (pbm_p->pbm_quiesce_count > 0) {
1274 		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
1275 		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
1276 	}
1277 
1278 	return (B_TRUE);
1279 }
1280 
1281 static boolean_t
1282 pci_pbm_debug_callb(void *arg, int code)
1283 {
1284 	pbm_t *pbm_p = (pbm_t *)arg;
1285 	volatile uint64_t *ctrl_reg_p;
1286 	uint64_t ctrl_reg;
1287 
1288 	if (pbm_p->pbm_quiesce_count > 0) {
1289 		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
1290 		if (code == 0) {
1291 			*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
1292 		} else {
1293 			ctrl_reg = pbm_p->pbm_saved_ctrl_reg;
1294 			ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
1295 			    SCHIZO_PCI_CTRL_ARB_PARK);
1296 			*ctrl_reg_p = ctrl_reg;
1297 		}
1298 	}
1299 
1300 	return (B_TRUE);
1301 }
1302 
1303 void
1304 pci_pbm_setup(pbm_t *pbm_p)
1305 {
1306 	pci_t *pci_p = pbm_p->pbm_pci_p;
1307 	caddr_t a = pci_p->pci_address[0]; /* PBM block base VA */
1308 	uint64_t pa = va_to_pa(a);
1309 	extern int segkmem_reloc;
1310 
1311 	mutex_init(&pbm_p->pbm_sync_mutex, NULL, MUTEX_DRIVER,
1312 	    (void *)ipltospl(XCALL_PIL));
1313 
1314 	pbm_p->pbm_config_header = (config_header_t *)pci_p->pci_address[2];
1315 	pbm_p->pbm_ctrl_reg = (uint64_t *)(a + SCHIZO_PCI_CTRL_REG_OFFSET);
1316 	pbm_p->pbm_diag_reg = (uint64_t *)(a + SCHIZO_PCI_DIAG_REG_OFFSET);
1317 	pbm_p->pbm_async_flt_status_reg =
1318 	    (uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
1319 	pbm_p->pbm_async_flt_addr_reg =
1320 	    (uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
1321 	pbm_p->pbm_estar_reg = (uint64_t *)(a + SCHIZO_PCI_ESTAR_REG_OFFSET);
1322 	pbm_p->pbm_pcix_err_stat_reg = (uint64_t *)(a +
1323 	    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
1324 	pbm_p->pbm_pci_ped_ctrl = (uint64_t *)(a +
1325 	    XMITS_PARITY_DETECT_REG_OFFSET);
1326 
1327 	/*
1328 	 * Create a property to indicate that this node supports DVMA
1329 	 * page relocation.
1330 	 */
1331 	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO && segkmem_reloc != 0) {
1332 		pci_dvma_remap_enabled = 1;
1333 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
1334 		    pci_p->pci_dip, "dvma-remap-supported");
1335 	}
1336 
1337 	/*
1338 	 * Register a panic callback so we can unquiesce this bus
1339 	 * if it has been placed in the quiesced state.
1340 	 */
1341 	pbm_p->pbm_panic_cb_id = callb_add(pci_pbm_panic_callb,
1342 	    (void *)pbm_p, CB_CL_PANIC, "pci_panic");
1343 	pbm_p->pbm_debug_cb_id = callb_add(pci_pbm_debug_callb,
1344 	    (void *)pbm_p, CB_CL_ENTER_DEBUGGER, "pci_debug_enter");
1345 
1346 	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
1347 		goto non_schizo;
1348 
1349 	if (PCI_CHIP_ID(pci_p) >= SCHIZO_VER_23) {
1350 
1351 		pbm_p->pbm_sync_reg_pa = pa + SCHIZO_PBM_DMA_SYNC_REG_OFFSET;
1352 
1353 		/*
1354 		 * This is a software workaround to fix a schizo hardware bug.
1355 		 * Create a boolean property and its existence means consistent
1356 		 * dma sync should not be done while in prom. The usb polled
1357 		 * code (OHCI,EHCI) will check for this property and will not
1358 		 * do dma sync if this property exists.
1359 		 */
1360 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
1361 		    pci_p->pci_dip, "no-prom-cdma-sync");
1362 	}
1363 	return;
1364 non_schizo:
1365 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
1366 		pci_dvma_sync_before_unmap = 1;
1367 		pa = pci_p->pci_cb_p->cb_icbase_pa;
1368 	}
1369 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS)
1370 		pbm_p->pbm_upper_retry_counter_reg =
1371 		    (uint64_t *)(a + XMITS_UPPER_RETRY_COUNTER_REG_OFFSET);
1372 
1373 	pbm_p->pbm_sync_reg_pa = pa + PBM_DMA_SYNC_PEND_REG_OFFSET;
1374 }
1375 
1376 void
1377 pci_pbm_teardown(pbm_t *pbm_p)
1378 {
1379 	(void) callb_delete(pbm_p->pbm_panic_cb_id);
1380 	(void) callb_delete(pbm_p->pbm_debug_cb_id);
1381 }
1382 
1383 uintptr_t
1384 pci_ib_setup(ib_t *ib_p)
1385 {
1386 	/*
1387 	 * Determine virtual addresses of bridge specific registers,
1388 	 */
1389 	pci_t *pci_p = ib_p->ib_pci_p;
1390 	uintptr_t a = (uintptr_t)pci_p->pci_address[0];
1391 
1392 	ib_p->ib_ign = PCI_ID_TO_IGN(pci_p->pci_id);
1393 	ib_p->ib_max_ino = SCHIZO_MAX_INO;
1394 	ib_p->ib_slot_intr_map_regs = a + SCHIZO_IB_SLOT_INTR_MAP_REG_OFFSET;
1395 	ib_p->ib_intr_map_regs = a + SCHIZO_IB_INTR_MAP_REG_OFFSET;
1396 	ib_p->ib_slot_clear_intr_regs = a + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
1397 	return (a);
1398 }
1399 
1400 void
1401 pci_sc_setup(sc_t *sc_p)
1402 {
1403 	pci_t *pci_p = sc_p->sc_pci_p;
1404 	uintptr_t a;
1405 
1406 	/*
1407 	 * Determine the virtual addresses of the stream cache
1408 	 * control/status and flush registers.
1409 	 */
1410 	a = (uintptr_t)pci_p->pci_address[0];	/* PCICSRBase */
1411 	sc_p->sc_ctrl_reg = (uint64_t *)(a + SCHIZO_SC_CTRL_REG_OFFSET);
1412 	sc_p->sc_invl_reg = (uint64_t *)(a + SCHIZO_SC_INVL_REG_OFFSET);
1413 	sc_p->sc_sync_reg = (uint64_t *)(a + SCHIZO_SC_SYNC_REG_OFFSET);
1414 	sc_p->sc_ctx_invl_reg = (uint64_t *)(a + SCHIZO_SC_CTX_INVL_REG_OFFSET);
1415 	sc_p->sc_ctx_match_reg =
1416 	    (uint64_t *)(a + SCHIZO_SC_CTX_MATCH_REG_OFFSET);
1417 
1418 	/*
1419 	 * Determine the virtual addresses of the streaming cache
1420 	 * diagnostic access registers.
1421 	 */
1422 	sc_p->sc_data_diag_acc = (uint64_t *)(a + SCHIZO_SC_DATA_DIAG_OFFSET);
1423 	sc_p->sc_tag_diag_acc = (uint64_t *)(a + SCHIZO_SC_TAG_DIAG_OFFSET);
1424 	sc_p->sc_ltag_diag_acc = (uint64_t *)(a + SCHIZO_SC_LTAG_DIAG_OFFSET);
1425 }
1426 
1427 /*ARGSUSED*/
1428 int
1429 pci_get_numproxy(dev_info_t *dip)
1430 {
1431 	/*
1432 	 * Schizo does not support interrupt proxies.
1433 	 */
1434 	return (0);
1435 }
1436 
1437 /*
1438  * pcisch error handling 101:
1439  *
1440  * The various functions below are responsible for error handling. Given
1441  * a particular error, they must gather the appropriate state, report all
1442  * errors with correct payload, and attempt recovery where ever possible.
1443  *
1444  * Recovery in the context of this driver is being able notify a leaf device
1445  * of the failed transaction. This leaf device may either be the master or
1446  * target for this transaction and may have already received an error
1447  * notification via a PCI interrupt. Notification is done via DMA and access
1448  * handles. If we capture an address for the transaction then we can map it
1449  * to a handle(if the leaf device is fma-compliant) and fault the handle as
1450  * well as call the device driver registered callback.
1451  *
1452  * The hardware can either interrupt or trap upon detection of an error; in
1453  * some rare cases it also causes a fatal reset.
1454  *
1455  * cb_buserr_intr() is responsible for handling control block
1456  * errors(errors which stem from the host bus side of the bridge). Since
1457  * we support multiple chips and host bus standards, cb_buserr_intr will
1458  * call a bus specific error handler to report and handle the detected
1459  * error. Since this error can affect, or originate from, either of the
1460  * two PCI busses which are connected to the bridge, we need to call
1461  * pci_pbm_err_handler() for each bus as well to report their errors. We
1462  * also need to gather possible errors which have been detected by their
1463  * compliant children (via ndi_fm_handler_dispatch()).
1464  *
1465  * pbm_error_intr() and ecc_intr() are responsible for PCI Block Module
1466  * errors (generic PCI + bridge specific) and ECC errors, respectively. They
1467  * are common between pcisch and pcipsy and therefore exist in pci_pbm.c and
1468  * pci_ecc.c. To support error handling certain chip specific handlers
1469  * must exist and they are defined below.
1470  *
1471  * cpu_deferred_error() and cpu_async_error(), handle the traps that may
1472  * have originated from IO space. They call into the registered IO callbacks
1473  * to report and handle errors that may have caused the trap.
1474  *
1475  * pci_pbm_err_handler() is called by pbm_error_intr() or pci_err_callback()
1476  * (generic fma callback for pcipsy/pcisch, pci_fm.c). pci_err_callback() is
1477  * called when the CPU has trapped because of a possible IO error (TO/BERR/UE).
1478  * It will call pci_pbm_err_handler() to report and handle all PCI/PBM/IOMMU
1479  * related errors which are detected by the chip.
1480  *
1481  * pci_pbm_err_handler() calls a generic interface pbm_afsr_report()(pci_pbm.c)
1482  * to report the pbm specific errors and attempt to map the failed address
1483  * (if captured) to a device instance. pbm_afsr_report() calls a chip specific
1484  * interface to interpret the afsr bits, pci_pbm_classify() (pcisch.c/pcipsy.c).
1485  * pci_pbm_err_handler() also calls iommu_err_handler() to handle IOMMU related
1486  * errors.
1487  *
1488  * iommu_err_handler() can recover from most errors, as long as the requesting
1489  * device is notified and the iommu can be flushed. If an IOMMU error occurs
1490  * due to a UE then it will be passed on to the ecc_err_handler() for
1491  * subsequent handling.
1492  *
1493  * ecc_err_handler()(pci_ecc.c) also calls a chip specific interface to
1494  * interpret the afsr, pci_ecc_classify(). ecc_err_handler() also calls
1495  * pci_pbm_err_handler() to report any pbm errors detected.
1496  *
1497  * To make sure that the trap code and the interrupt code are not going
1498  * to step on each other's toes we have a per chip pci_fm_mutex. This also
1499  * makes it necessary for us to be cautious while we are at a high PIL, so
1500  * that we do not cause a subsequent trap that causes us to hang.
1501  *
1502  * The attempt to commonize code was meant to keep in line with the current
1503  * pci driver implementation and it was not meant to confuse. If you are
1504  * confused then don't worry, I was too.
1505  *
1506  */
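
/*
 * Condensed call graph of the above (illustrative):
 *
 *	cb_buserr_intr() -> safari_err_handler() / jbus_err_handler()
 *	                 -> pci_pbm_err_handler()	(for each bus)
 *	pbm_error_intr() / pci_err_callback() -> pci_pbm_err_handler()
 *	pci_pbm_err_handler() -> pbm_afsr_report() -> pci_pbm_classify()
 *	                      -> iommu_err_handler() -UE-> ecc_err_handler()
 *	ecc_err_handler() -> pci_ecc_classify(), pci_pbm_err_handler()
 */
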
1507 static void
1508 pci_cb_errstate_get(cb_t *cb_p, cb_errstate_t *cb_err_p)
1509 {
1510 	uint64_t pa = cb_p->cb_base_pa;
1511 	int	i;
1512 
1513 	bzero(cb_err_p, sizeof (cb_errstate_t));
1514 
1515 	ASSERT(MUTEX_HELD(&cb_p->cb_pci_cmn_p->pci_fm_mutex));
1516 
1517 	cb_err_p->cb_bridge_type = PCI_BRIDGE_TYPE(cb_p->cb_pci_cmn_p);
1518 
1519 	cb_err_p->cb_csr = lddphysio(pa + SCHIZO_CB_CSR_OFFSET);
1520 	cb_err_p->cb_err = lddphysio(pa + SCHIZO_CB_ERRCTRL_OFFSET);
1521 	cb_err_p->cb_intr = lddphysio(pa + SCHIZO_CB_INTCTRL_OFFSET);
1522 	cb_err_p->cb_elog = lddphysio(pa + SCHIZO_CB_ERRLOG_OFFSET);
1523 	cb_err_p->cb_ecc = lddphysio(pa + SCHIZO_CB_ECCCTRL_OFFSET);
1524 	cb_err_p->cb_ue_afsr = lddphysio(pa + SCHIZO_CB_UEAFSR_OFFSET);
1525 	cb_err_p->cb_ue_afar = lddphysio(pa + SCHIZO_CB_UEAFAR_OFFSET);
1526 	cb_err_p->cb_ce_afsr = lddphysio(pa + SCHIZO_CB_CEAFSR_OFFSET);
1527 	cb_err_p->cb_ce_afar = lddphysio(pa + SCHIZO_CB_CEAFAR_OFFSET);
1528 
1529 	if ((CB_CHIP_TYPE((cb_t *)cb_p)) == PCI_CHIP_XMITS) {
1530 		cb_err_p->cb_first_elog = lddphysio(pa +
1531 		    XMITS_CB_FIRST_ERROR_LOG);
1532 		cb_err_p->cb_first_eaddr = lddphysio(pa +
1533 		    XMITS_CB_FIRST_ERROR_ADDR);
1534 		cb_err_p->cb_leaf_status = lddphysio(pa +
1535 		    XMITS_CB_FIRST_ERROR_ADDR);
1536 	}
1537 
1538 	/* Gather PBM state information for both sides of this chip */
1539 	for (i = 0; i < 2; i++) {
1540 		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
1541 			continue;
1542 		pci_pbm_errstate_get(((cb_t *)cb_p)->cb_pci_cmn_p->
1543 		    pci_p[i], &cb_err_p->cb_pbm[i]);
1544 	}
1545 }
1546 
1547 static void
1548 pci_cb_clear_error(cb_t *cb_p, cb_errstate_t *cb_err_p)
1549 {
1550 	uint64_t pa = ((cb_t *)cb_p)->cb_base_pa;
1551 
1552 	stdphysio(pa + SCHIZO_CB_ERRLOG_OFFSET, cb_err_p->cb_elog);
1553 }
1554 
1555 static cb_fm_err_t safari_err_tbl[] = {
1556 	SAFARI_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
1557 	SAFARI_SSM_DIS,		SCHIZO_CB_ELOG_SSM_DIS,		CB_FATAL,
1558 	SAFARI_BAD_CMD_PCIA,	SCHIZO_CB_ELOG_BAD_CMD_PCIA,	CB_FATAL,
1559 	SAFARI_BAD_CMD_PCIB,	SCHIZO_CB_ELOG_BAD_CMD_PCIB,	CB_FATAL,
1560 	SAFARI_PAR_ERR_INT_PCIB, XMITS_CB_ELOG_PAR_ERR_INT_PCIB, CB_FATAL,
1561 	SAFARI_PAR_ERR_INT_PCIA, XMITS_CB_ELOG_PAR_ERR_INT_PCIA, CB_FATAL,
1562 	SAFARI_PAR_ERR_INT_SAF,	XMITS_CB_ELOG_PAR_ERR_INT_SAF,	CB_FATAL,
1563 	SAFARI_PLL_ERR_PCIB,	XMITS_CB_ELOG_PLL_ERR_PCIB,	CB_FATAL,
1564 	SAFARI_PLL_ERR_PCIA,	XMITS_CB_ELOG_PLL_ERR_PCIA,	CB_FATAL,
1565 	SAFARI_PLL_ERR_SAF,	XMITS_CB_ELOG_PLL_ERR_SAF,	CB_FATAL,
1566 	SAFARI_SAF_CIQ_TO,	SCHIZO_CB_ELOG_SAF_CIQ_TO,	CB_FATAL,
1567 	SAFARI_SAF_LPQ_TO,	SCHIZO_CB_ELOG_SAF_LPQ_TO,	CB_FATAL,
1568 	SAFARI_SAF_SFPQ_TO,	SCHIZO_CB_ELOG_SAF_SFPQ_TO,	CB_FATAL,
1569 	SAFARI_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
1570 	SAFARI_UNMAP_ERR,	SCHIZO_CB_ELOG_UNMAP_ERR,	CB_FATAL,
1571 	SAFARI_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_FATAL,
1572 	SAFARI_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_FATAL,
1573 	SAFARI_DSTAT_ERR,	SCHIZO_CB_ELOG_DSTAT_ERR,	CB_FATAL,
1574 	SAFARI_SAF_UFPQ_TO,	SCHIZO_CB_ELOG_SAF_UFPQ_TO,	CB_FATAL,
1575 	SAFARI_CPU0_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU0_PAR_SINGLE,	CB_FATAL,
1576 	SAFARI_CPU0_PAR_BIDI,	SCHIZO_CB_ELOG_CPU0_PAR_BIDI,	CB_FATAL,
1577 	SAFARI_CPU1_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU1_PAR_SINGLE,	CB_FATAL,
1578 	SAFARI_CPU1_PAR_BIDI,	SCHIZO_CB_ELOG_CPU1_PAR_BIDI,	CB_FATAL,
1579 	NULL,			0,				0,
1580 };
1581 
1582 /*
1583  * Function used to handle and log Safari bus errors.
1584  */
1585 static int
1586 safari_err_handler(dev_info_t *dip, uint64_t fme_ena,
1587     cb_errstate_t *cb_err_p)
1588 {
1589 	int	i;
1590 	int	fatal = 0;
1591 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
1592 	pci_common_t *cmn_p = pci_p->pci_common_p;
1593 
1594 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1595 
1596 	for (i = 0; safari_err_tbl[i].cb_err_class != NULL; i++) {
1597 		if (cb_err_p->cb_elog & safari_err_tbl[i].cb_reg_bit) {
1598 			cb_err_p->cb_err_class = safari_err_tbl[i].cb_err_class;
1599 			cb_ereport_post(dip, fme_ena, cb_err_p);
1600 			fatal += safari_err_tbl[i].cb_fatal;
1601 		}
1602 	}
1603 
1604 	if (fatal)
1605 		return (DDI_FM_FATAL);
1606 	return (DDI_FM_OK);
1607 
1608 }
1609 
1610 /*
1611  * Check pbm va log register for captured errant address, and fail handle
1612  * if in per device cache.
1613  * Called from jbus_err_handler.
1614  */
1615 static int
1616 jbus_check_va_log(cb_t *cb_p, uint64_t fme_ena,
1617     cb_errstate_t *cb_err_p)
1618 {
1619 	int i;
1620 	int ret = DDI_FM_FATAL;
1621 	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;
1622 
1623 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1624 	/*
1625 	 * Check VA log register for address associated with error,
1626 	 * if no address is registered then return failure
1627 	 */
1628 	for (i = 0; i < 2; i++) {
1629 
1630 		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
1631 			continue;
1632 		/*
1633 		 * Look up and fault handle associated with
1634 		 * logged DMA address
1635 		 */
1636 		if (cb_err_p->cb_pbm[i].pbm_va_log) {
1637 			void *addr = (void *)&cb_err_p->cb_pbm[i].pbm_va_log;
1638 			ret = ndi_fmc_error(cb_p->cb_pci_cmn_p->pci_p[i]->
1639 			    pci_dip, NULL, DMA_HANDLE, fme_ena,
1640 			    (void *)addr);
1641 			if (ret == DDI_FM_NONFATAL)
1642 				break;
1643 		}
1644 	}
1645 	return (ret);
1646 }
1647 
1648 static cb_fm_err_t jbus_err_tbl[] = {
1649 	JBUS_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
1650 	JBUS_PWR_DATA_PERR,	TOMATILLO_CB_ELOG_WR_DATA_PAR_ERR, CB_FATAL,
1651 	JBUS_DRD_DATA_PERR,	TOMATILLO_CB_ELOG_RD_DATA_PAR_ERR, CB_NONFATAL,
1652 	JBUS_CTL_PERR,		TOMATILLO_CB_ELOG_CTL_PAR_ERR,	CB_FATAL,
1653 	JBUS_ILL_BYTE_EN,	TOMATILLO_CB_ELOG_ILL_BYTE_EN,	CB_FATAL,
1654 	JBUS_ILL_COH_IN,	TOMATILLO_CB_ELOG_ILL_COH_IN,	CB_FATAL,
1655 	JBUS_SNOOP_ERR_RD,	TOMATILLO_CB_ELOG_SNOOP_ERR_RD,	CB_FATAL,
1656 	JBUS_SNOOP_ERR_RDS,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDS, CB_FATAL,
1657 	JBUS_SNOOP_ERR_RDSA,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDSA, CB_FATAL,
1658 	JBUS_SNOOP_ERR_OWN,	TOMATILLO_CB_ELOG_SNOOP_ERR_OWN, CB_FATAL,
1659 	JBUS_SNOOP_ERR_RDO,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDO, CB_FATAL,
1660 	JBUS_SNOOP_ERR_PCI,	TOMATILLO_CB_ELOG_SNOOP_ERR_PCI, CB_FATAL,
1661 	JBUS_SNOOP_ERR_GR,	TOMATILLO_CB_ELOG_SNOOP_ERR_GR,	CB_FATAL,
1662 	JBUS_SNOOP_ERR,		TOMATILLO_CB_ELOG_SNOOP_ERR,	CB_FATAL,
1663 	JBUS_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
1664 	JBUS_UNMAP_ERR,		SCHIZO_CB_ELOG_UNMAP_ERR,	CB_NONFATAL,
1665 	JBUS_TO_EXP_ERR,	TOMATILLO_CB_ELOG_TO_EXP_ERR,	CB_NONFATAL,
1666 	JBUS_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_NONFATAL,
1667 	JBUS_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_NONFATAL,
1668 	NULL,			0,				0,
1669 };
1670 
1671 /*
1672  * Function used to handle and log Jbus errors.
1673  */
1674 static int
1675 jbus_err_handler(dev_info_t *dip, uint64_t fme_ena,
1676     cb_errstate_t *cb_err_p)
1677 {
1678 	int	fatal = 0;
1679 	int	nonfatal = 0;
1680 	int	i;
1681 	pci_t	*pci_p = get_pci_soft_state(ddi_get_instance(dip));
1682 	cb_t	*cb_p = pci_p->pci_cb_p;
1683 
1684 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
1685 
1686 	for (i = 0; jbus_err_tbl[i].cb_err_class != NULL; i++) {
1687 		if (!(cb_err_p->cb_elog & jbus_err_tbl[i].cb_reg_bit))
1688 			continue;
1689 		cb_err_p->cb_err_class = jbus_err_tbl[i].cb_err_class;
1690 		if (jbus_err_tbl[i].cb_fatal) {
1691 			fatal += jbus_err_tbl[i].cb_fatal;
1692 			continue;
1693 		}
1694 		if (jbus_check_va_log(cb_p, fme_ena, cb_err_p)
1695 		    != DDI_FM_NONFATAL) {
1696 			fatal++;
1697 		}
1698 		cb_ereport_post(dip, fme_ena, cb_err_p);
1699 	}
1700 
1701 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1702 	    DDI_FM_OK));
1703 }
1704 
1705 /*
1706  * Control Block error interrupt handler.
1707  */
1708 uint_t
1709 cb_buserr_intr(caddr_t a)
1710 {
1711 	cb_t *cb_p = (cb_t *)a;
1712 	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;
1713 	pci_t *pci_p = cmn_p->pci_p[0];
1714 	cb_errstate_t cb_err;
1715 	ddi_fm_error_t derr;
1716 	int ret = DDI_FM_FATAL;
1717 	int i;
1718 
1719 	if (pci_p == NULL)
1720 		pci_p = cmn_p->pci_p[1];
1721 
1722 	bzero(&derr, sizeof (ddi_fm_error_t));
1723 	derr.fme_version = DDI_FME_VERSION;
1724 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1725 
1726 	mutex_enter(&cmn_p->pci_fm_mutex);
1727 
1728 	pci_cb_errstate_get(cb_p, &cb_err);
1729 
1730 	if (CB_CHIP_TYPE(cb_p) == PCI_CHIP_TOMATILLO)
1731 		ret = jbus_err_handler(pci_p->pci_dip, derr.fme_ena, &cb_err);
1732 	else if ((CB_CHIP_TYPE(cb_p) == PCI_CHIP_SCHIZO) ||
1733 	    (CB_CHIP_TYPE(cb_p) == PCI_CHIP_XMITS))
1734 		ret = safari_err_handler(pci_p->pci_dip, derr.fme_ena, &cb_err);
1735 
1736 	/*
1737 	 * Check for related errors in PBM and IOMMU. The IOMMU could cause
1738 	 * a timeout on the jbus due to an IOMMU miss, so we need to check and
1739 	 * log the IOMMU error registers.
1740 	 */
1741 	for (i = 0; i < 2; i++) {
1742 		if (cmn_p->pci_p[i] == NULL)
1743 			continue;
1744 		if (pci_pbm_err_handler(cmn_p->pci_p[i]->pci_dip, &derr,
1745 		    (void *)cmn_p->pci_p[i], PCI_CB_CALL) == DDI_FM_FATAL)
1746 			ret = DDI_FM_FATAL;
1747 	}
1748 
1749 	/* Cleanup and reset error bits */
1750 	(void) pci_cb_clear_error(cb_p, &cb_err);
1751 	mutex_exit(&cmn_p->pci_fm_mutex);
1752 
1753 	if (ret == DDI_FM_FATAL) {
1754 		fm_panic("Fatal System Bus Error has occurred\n");
1755 	}
1756 
1757 	return (DDI_INTR_CLAIMED);
1758 }
1759 
1760 static ecc_fm_err_t ecc_err_tbl[] = {
1761 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1762 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_UPA64S, SCH_REG_UPA,
1763 	ACC_HANDLE,
1764 
1765 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1766 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_REG, SCH_REG_PCIA_REG,
1767 	ACC_HANDLE,
1768 
1769 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1770 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_MEM, SCH_REG_PCIA_MEM,
1771 	ACC_HANDLE,
1772 
1773 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1774 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_CFGIO, SCH_REG_PCIA_CFGIO,
1775 	ACC_HANDLE,
1776 
1777 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1778 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_REG, SCH_REG_PCIB_REG,
1779 	ACC_HANDLE,
1780 
1781 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1782 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_MEM, SCH_REG_PCIB_MEM,
1783 	ACC_HANDLE,
1784 
1785 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1786 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_CFGIO, SCH_REG_PCIB_CFGIO,
1787 	ACC_HANDLE,
1788 
1789 	PCI_ECC_PIO_UE, COMMON_ECC_AFSR_E_PIO, CBNINTR_UE,
1790 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_SAFARI_REGS, SCH_REG_SAFARI_REGS,
1791 	ACC_HANDLE,
1792 
1793 	PCI_ECC_SEC_PIO_UE, COMMON_ECC_AFSR_E_PIO,  CBNINTR_UE,
1794 	PBM_SECONDARY, 0, 0, ACC_HANDLE,
1795 
1796 	PCI_ECC_PIO_CE, COMMON_ECC_AFSR_E_PIO,  CBNINTR_CE,
1797 	PBM_PRIMARY, 0, 0, ACC_HANDLE,
1798 
1799 	PCI_ECC_SEC_PIO_CE, COMMON_ECC_AFSR_E_PIO,  CBNINTR_CE,
1800 	PBM_SECONDARY, 0, 0, ACC_HANDLE,
1801 
1802 	PCI_ECC_DRD_UE, COMMON_ECC_AFSR_E_DRD, CBNINTR_UE,
1803 	PBM_PRIMARY, 0, 0, DMA_HANDLE,
1804 
1805 	PCI_ECC_SEC_DRD_UE, COMMON_ECC_AFSR_E_DRD, CBNINTR_UE,
1806 	PBM_SECONDARY, 0, 0, DMA_HANDLE,
1807 
1808 	PCI_ECC_DRD_CE, COMMON_ECC_AFSR_E_DRD, CBNINTR_CE,
1809 	PBM_PRIMARY, 0, 0, DMA_HANDLE,
1810 
1811 	PCI_ECC_SEC_DRD_CE, COMMON_ECC_AFSR_E_DRD, CBNINTR_CE,
1812 	PBM_SECONDARY, 0, 0, DMA_HANDLE,
1813 
1814 	PCI_ECC_DWR_UE, COMMON_ECC_AFSR_E_DWR, CBNINTR_UE,
1815 	PBM_PRIMARY, 0, 0, DMA_HANDLE,
1816 
1817 	PCI_ECC_SEC_DWR_UE, COMMON_ECC_AFSR_E_DWR, CBNINTR_UE,
1818 	PBM_SECONDARY, 0, 0, DMA_HANDLE,
1819 
1820 	PCI_ECC_DWR_CE, COMMON_ECC_AFSR_E_DWR, CBNINTR_CE,
1821 	PBM_PRIMARY, 0, 0, DMA_HANDLE,
1822 
1823 	PCI_ECC_SEC_DWR_CE, COMMON_ECC_AFSR_E_DWR, CBNINTR_CE,
1824 	PBM_SECONDARY, 0, 0, DMA_HANDLE,
1825 
1826 	NULL, 0, 0, 0, 0, 0,
1827 };
1828 
1829 /*
1830  * pci_ecc_classify, called by ecc_handler to classify ecc errors
1831  * and determine if we should panic or not.
1832  */
1833 void
1834 pci_ecc_classify(uint64_t err, ecc_errstate_t *ecc_err_p)
1835 {
1836 	struct async_flt *ecc_p = &ecc_err_p->ecc_aflt;
1837 	uint64_t region, afar = ecc_p->flt_addr;
1838 	int i, j, ret = 0;
1839 	int flag = 0, fatal = 0;
1840 	pci_common_t *cmn_p = ecc_err_p->ecc_ii_p.ecc_p->ecc_pci_cmn_p;
1841 	pci_t *pci_p = cmn_p->pci_p[0];
1842 
1843 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1844 
1845 	ecc_err_p->ecc_bridge_type = PCI_BRIDGE_TYPE(cmn_p);
1846 
1847 	if (pci_p == NULL)
1848 		pci_p = cmn_p->pci_p[1];
1849 
1850 	ecc_err_p->ecc_ctrl = lddphysio(ecc_err_p->ecc_ii_p.ecc_p->ecc_csr_pa);
1851 	ecc_err_p->ecc_err_addr = afar;
1852 	region = afar & SCHIZO_ECC_AFAR_PIOW_MASK;
1853 
1854 	for (i = 0; ecc_err_tbl[i].ecc_err_class != NULL; i++) {
1855 		if (!(err & ecc_err_tbl[i].ecc_reg_bit) ||
1856 		    (ecc_err_p->ecc_ii_p.ecc_type !=
1857 		    ecc_err_tbl[i].ecc_type) ||
1858 		    (ecc_err_p->ecc_pri != ecc_err_tbl[i].ecc_pri))
1859 			continue;
1860 
1861 		ecc_p->flt_erpt_class = ecc_err_tbl[i].ecc_err_class;
1862 		flag = ecc_err_tbl[i].ecc_flag;
1863 
1864 		if (!ecc_err_tbl[i].ecc_pri ||
1865 		    (ecc_err_tbl[i].ecc_type == CBNINTR_CE)) {
1866 			fatal += (ecc_err_tbl[i].ecc_type == CBNINTR_UE) ?
1867 			    1 : 0;
1868 			break;
1869 		}
1870 
1871 		if (flag == ACC_HANDLE &&
1872 		    (region & ecc_err_tbl[i].ecc_region_bits)) {
1873 			ecc_err_p->ecc_region = ecc_err_tbl[i].ecc_region;
1874 			pci_format_ecc_addr(pci_p->pci_dip,
1875 			    &ecc_err_p->ecc_err_addr,
1876 			    ecc_err_p->ecc_region);
1877 		}
1878 
1879 		/*
1880 		 * Look up and fault the errant handle
1881 		 */
1882 		for (j = 0; j < 2; ++j) {
1883 			ret = DDI_FM_UNKNOWN;
1884 			if (cmn_p->pci_p[j] == NULL)
1885 				continue;
1886 			ret = ndi_fmc_error(cmn_p->pci_p[j]->pci_dip, NULL,
1887 			    flag, ecc_err_p->ecc_ena,
1888 			    (void *)&ecc_err_p->ecc_err_addr);
1889 			if (ret == DDI_FM_NONFATAL) {
1890 				fatal = 0;
1891 				break;
1892 			} else
1893 				fatal++;
1894 		}
1895 		break;
1896 	}
1897 
1898 	if (fatal)
1899 		ecc_p->flt_panic = 1;
1900 	else if (flag != ACC_HANDLE)
1901 		ecc_err_p->ecc_pg_ret = 1;
1902 }
1903 
1904 /*
1905  * Tables to define PCI-X Split Completion errors
1906  */
1907 
1908 pcix_err_msg_rec_t pcix_completer_errs[] = {
1909 	{PCIX_CPLT_OUT_OF_RANGE,	"pcix", "oor"	},
1910 };
1911 
1912 pcix_err_tbl_t pcix_split_errs_tbl[] = {
1913 	{PCIX_CLASS_CPLT,
1914 		sizeof (pcix_completer_errs)/sizeof (pcix_err_msg_rec_t),
1915 		pcix_completer_errs		},
1916 };
1917 
1918 /*
1919  * Tables for the PCI-X error status messages
1920  */
1921 pcix_err_msg_rec_t pcix_stat_errs[] = {
1922 	{XMITS_PCIX_STAT_SC_DSCRD,	"pcix", "discard"	},
1923 	{XMITS_PCIX_STAT_SC_TTO,	"xmits.pbmx", "tato"	},
1924 	{XMITS_PCIX_STAT_SMMU,		"xmits.pbmx", "stmmu"	},
1925 	{XMITS_PCIX_STAT_SDSTAT,	"xmits.pbmx", "stdst"	},
1926 	{XMITS_PCIX_STAT_CMMU,		"xmits.pbmx", "cnmmu"	},
1927 	{XMITS_PCIX_STAT_CDSTAT,	"xmits.pbmx", "cndst"	}
1928 };
1929 
1930 pcix_err_tbl_t pcix_stat_errs_tbl =
1931 	{PCIX_NO_CLASS,
1932 		sizeof (pcix_stat_errs)/sizeof (pcix_err_msg_rec_t),
1933 		pcix_stat_errs		};
1934 
1935 
1936 /*
1937  * walk through a table of error messages, posting ereports as appropriate
1938  *
1939  * t - the table of messages to parse
1940  * err - the error to match against
1941  * multi - flag, sometimes multiple error bits may be set/desired
1942  */
1943 static int
1944 pcix_lookup_err_msgs(dev_info_t *dip, uint64_t ena, pcix_err_tbl_t t,
1945     pbm_errstate_t *pbm_err_p)
1946 {
1947 	uint32_t err_bits  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_INDEX_MASK;
1948 	int nerr = 0;
1949 	int j;
1950 	char buf[FM_MAX_CLASS];
1951 
1952 	for (j = 0; j < t.err_rec_num; j++)  {
1953 		uint32_t msg_key = t.err_msg_tbl[j].msg_key;
1954 		if (pbm_err_p->pbm_multi ? !(err_bits & msg_key) : err_bits
1955 		    != msg_key)
1956 			continue;
1957 
1958 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
1959 		    t.err_msg_tbl[j].msg_class,
1960 		    pbm_err_p->pbm_pri ? "" : PCIX_SECONDARY,
1961 		    t.err_msg_tbl[j].msg_str);
1962 
1963 		pbm_err_p->pbm_err_class = buf;
1964 		pcix_ereport_post(dip, ena, pbm_err_p);
1965 		nerr++;
1966 	}
1967 	return (nerr ? DDI_FM_FATAL : DDI_FM_OK);
1968 }
1969 
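/*
 * Illustrative sketch (not part of the driver) of the ereport class
 * string built by pcix_lookup_err_msgs() above: the class is
 * "<msg_class>.<optional secondary marker><msg_str>".  The literal
 * strings here are examples only; PCIX_SECONDARY's real value lives in
 * the headers.
 */
#include <stdio.h>

static void
demo_build_class(char *buf, size_t len, int primary)
{
	(void) snprintf(buf, len, "%s.%s%s",
	    "xmits.pbmx",		/* t.err_msg_tbl[j].msg_class */
	    primary ? "" : "sec-",	/* assumed secondary marker */
	    "tato");			/* t.err_msg_tbl[j].msg_str */
}
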
1970 /*
1971  * Decodes primary(bit 27-24) or secondary(bit 15-12) PCI-X split
1972  * completion error message class and index in PBM AFSR.
1973  */
1974 static void
1975 pcix_log_split_err(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
1976 {
1977 	uint32_t class  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_CLASS_MASK;
1978 	uint32_t num_classes = sizeof (pcix_split_errs_tbl) /
1979 	    sizeof (struct pcix_err_tbl);
1980 	int i;
1981 
1982 	for (i = 0; i < num_classes; i++) {
1983 		if (class == pcix_split_errs_tbl[i].err_class) {
1984 			pbm_err_p->pbm_multi = PCIX_SINGLE_ERR;
1985 			(void) pcix_lookup_err_msgs(dip, ena,
1986 			    pcix_split_errs_tbl[i], pbm_err_p);
1987 			break;
1988 		}
1989 	}
1990 }
1991 
1992 /*
1993  * Report PBM PCI-X Error Status Register if in PCI-X mode
1994  *
1995  * Once a PCI-X fault tree is constructed, the code below may need to
1996  * change.
1997  */
1998 static int
1999 pcix_log_pbm(pci_t *pci_p, uint64_t ena, pbm_errstate_t *pbm_err_p)
2000 {
2001 	int fatal = 0;
2002 	int nonfatal = 0;
2003 	uint32_t e;
2004 
2005 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2006 
2007 	DEBUG3(DBG_ERR_INTR, pci_p->pci_dip, "pcix_log_pbm: chip_type=%d "
2008 	    "ctl_stat=%lx afsr=0x%lx", CHIP_TYPE(pci_p),
2009 	    pbm_err_p->pbm_ctl_stat, pbm_err_p->pbm_afsr);
2010 
2011 	if (!(CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) ||
2012 	    !(pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE))
2013 		return (DDI_FM_OK);
2014 
2015 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
2016 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
2017 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2018 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
2019 		nonfatal++;
2020 	}
2021 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR) {
2022 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
2023 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2024 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
2025 		nonfatal++;
2026 	}
2027 
2028 	e = PBM_PCIX_TO_PRIERR(pbm_err_p->pbm_pcix_stat);
2029 	if (e) {
2030 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2031 		pbm_err_p->pbm_err = e;
2032 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2033 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2034 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2035 			fatal++;
2036 		else
2037 			nonfatal++;
2038 	}
2039 
2040 	e = PBM_PCIX_TO_SECERR(pbm_err_p->pbm_pcix_stat);
2041 	if (e) {
2042 		pbm_err_p->pbm_pri = PBM_SECONDARY;
2043 		pbm_err_p->pbm_err = e;
2044 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2045 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2046 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2047 			fatal++;
2048 		else
2049 			nonfatal++;
2050 	}
2051 
2052 	if (!fatal && !nonfatal)
2053 		return (DDI_FM_OK);
2054 	else if (fatal)
2055 		return (DDI_FM_FATAL);
2056 	return (DDI_FM_NONFATAL);
2057 }
2058 
2059 static pbm_fm_err_t pbm_err_tbl[] = {
2060 	PCI_MA,			SCHIZO_PCI_AFSR_E_MA,	PBM_PRIMARY,
2061 	FM_LOG_PCI,	PCI_TARG_MA,
2062 
2063 	PCI_SEC_MA,		SCHIZO_PCI_AFSR_E_MA,	PBM_SECONDARY,
2064 	FM_LOG_PBM,	NULL,
2065 
2066 	PCI_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_PRIMARY,
2067 	FM_LOG_PCI,	PCI_TARG_REC_TA,
2068 
2069 	PCI_SEC_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_SECONDARY,
2070 	FM_LOG_PBM,	NULL,
2071 
2072 	PCI_PBM_RETRY,		SCHIZO_PCI_AFSR_E_RTRY,	PBM_PRIMARY,
2073 	FM_LOG_PBM,	PCI_PBM_TARG_RETRY,
2074 
2075 	PCI_SEC_PBM_RETRY,	SCHIZO_PCI_AFSR_E_RTRY,	PBM_SECONDARY,
2076 	FM_LOG_PBM,	NULL,
2077 
2078 	PCI_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_PRIMARY,
2079 	FM_LOG_PCI,	PCI_TARG_MDPE,
2080 
2081 	PCI_SEC_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_SECONDARY,
2082 	FM_LOG_PBM,	NULL,
2083 
2084 	PCI_PBM_TTO,		SCHIZO_PCI_AFSR_E_TTO,	PBM_PRIMARY,
2085 	FM_LOG_PBM,	PCI_PBM_TARG_TTO,
2086 
2087 	PCI_SEC_PBM_TTO,	SCHIZO_PCI_AFSR_E_TTO,	PBM_SECONDARY,
2088 	FM_LOG_PBM,	NULL,
2089 
2090 	PCI_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_PRIMARY,
2091 	FM_LOG_PBM,	NULL,
2092 
2093 	PCI_SEC_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_SECONDARY,
2094 	FM_LOG_PBM,	NULL,
2095 
2096 	NULL,			0,			0,
2097 	0,		NULL,
2098 };
2099 
2100 
2101 /*
2102  * pci_pbm_classify, called by pbm_afsr_report to classify PIO write
2103  * (piow) AFSR errors.
2103  */
2104 int
2105 pci_pbm_classify(pbm_errstate_t *pbm_err_p)
2106 {
2107 	uint32_t err;
2108 	int nerr = 0;
2109 	int i;
2110 
2111 	err = pbm_err_p->pbm_pri ? PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr):
2112 	    PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
2113 
2114 	for (i = 0; pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2115 		if ((err & pbm_err_tbl[i].pbm_reg_bit) &&
2116 		    (pbm_err_p->pbm_pri == pbm_err_tbl[i].pbm_pri)) {
2117 			if (pbm_err_tbl[i].pbm_flag == FM_LOG_PCI)
2118 				pbm_err_p->pbm_pci.pci_err_class =
2119 				    pbm_err_tbl[i].pbm_err_class;
2120 			else
2121 				pbm_err_p->pbm_err_class =
2122 				    pbm_err_tbl[i].pbm_err_class;
2123 
2124 			pbm_err_p->pbm_terr_class =
2125 			    pbm_err_tbl[i].pbm_terr_class;
2126 			pbm_err_p->pbm_log = pbm_err_tbl[i].pbm_flag;
2127 			nerr++;
2128 			break;
2129 		}
2130 	}
2131 
2132 	return (nerr);
2133 }
2134 
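/*
 * Illustrative sketch (not part of the driver): pci_pbm_classify() keys
 * its table walk off either the primary or the secondary error field of
 * the AFSR.  The field positions below are hypothetical; the real
 * PBM_AFSR_TO_PRIERR()/PBM_AFSR_TO_SECERR() macros are in the headers.
 */
#include <stdint.h>

#define	DEMO_AFSR_PRI_SHIFT	58	/* assumed field position */
#define	DEMO_AFSR_SEC_SHIFT	52	/* assumed field position */
#define	DEMO_AFSR_ERR_MASK	0x3f	/* assumed field width */

static uint32_t
demo_afsr_to_err(uint64_t afsr, int primary)
{
	int shift = primary ? DEMO_AFSR_PRI_SHIFT : DEMO_AFSR_SEC_SHIFT;

	return ((uint32_t)((afsr >> shift) & DEMO_AFSR_ERR_MASK));
}
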
2135 /*
2136  * Function used to handle and log IOMMU errors. Called by pci_pbm_err_handler,
2137  * with pci_fm_mutex held.
2138  */
2139 static int
2140 iommu_err_handler(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
2141 {
2142 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2143 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2144 	ecc_t *ecc_p = pci_p->pci_ecc_p;
2145 	uint64_t stat;
2146 	ushort_t ta_signalled;
2147 	int err = 0;
2148 	int fatal = 0;
2149 	int nonfatal = 0;
2150 	int ret;
2151 
2152 	ASSERT(MUTEX_HELD(&ecc_p->ecc_pci_cmn_p->pci_fm_mutex));
2153 	if (!((stat = *iommu_p->iommu_ctrl_reg) & TOMATILLO_IOMMU_ERR)) {
2154 		pbm_err_p->pbm_err_class = PCI_SCH_MMU_ERR;
2155 		iommu_ereport_post(dip, ena, pbm_err_p);
2156 		return (DDI_FM_NONFATAL);
2157 	}
2158 
2159 	/*
2160 	 * Need to make sure a Target Abort was signalled to the device if
2161 	 * we have any hope of recovering. Tomatillo does not send a TA for
2162 	 * DMA Writes that result in a Translation Error, thus fooling the
2163 	 * device into believing everything is as it expects. Ignorance
2164 	 * is bliss, but knowledge is power.
2165 	 */
2166 	ta_signalled = pbm_err_p->pbm_pci.pci_cfg_stat &
2167 	    PCI_STAT_S_TARG_AB;
2168 
2169 	if (stat & TOMATILLO_IOMMU_ERR_ILLTSBTBW) {
2170 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_TSBTBW;
2171 		err = 1;
2172 		iommu_ereport_post(dip, ena, pbm_err_p);
2173 		if (!ta_signalled)
2174 			fatal++;
2175 		else
2176 			nonfatal++;
2177 	}
2178 
2179 	if (stat & TOMATILLO_IOMMU_ERR_BAD_VA) {
2180 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_VA;
2181 		err = 1;
2182 		iommu_ereport_post(dip, ena, pbm_err_p);
2183 		if (!ta_signalled)
2184 			fatal++;
2185 		else
2186 			nonfatal++;
2187 	}
2188 
2189 	if (!err) {
2190 		stat = ((stat & TOMATILLO_IOMMU_ERRSTS) >>
2191 		    TOMATILLO_IOMMU_ERRSTS_SHIFT);
2192 		switch (stat) {
2193 		case TOMATILLO_IOMMU_PROTECTION_ERR:
2194 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_PROT_ERR;
2195 			iommu_ereport_post(dip, ena, pbm_err_p);
2196 			fatal++;
2197 			break;
2198 		case TOMATILLO_IOMMU_INVALID_ERR:
2199 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_INVAL_ERR;
2200 			/*
2201 			 * Fault the address in iommu_tfar
2202 			 * register to inform target driver of error
2203 			 */
2204 			ret = ndi_fmc_error(pci_p->pci_dip, NULL, DMA_HANDLE,
2205 			    ena, (void *)&pbm_err_p->pbm_iommu.iommu_tfar);
2206 
2207 			if (ret != DDI_FM_NONFATAL)
2208 				if (ta_signalled)
2209 					nonfatal++;
2210 				else
2211 					fatal++;
2212 			else
2213 				nonfatal++;
2214 
2215 			iommu_ereport_post(dip, ena, pbm_err_p);
2216 			break;
2217 		case TOMATILLO_IOMMU_TIMEOUT_ERR:
2218 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_TO_ERR;
2219 			fatal++;
2220 			iommu_ereport_post(dip, ena, pbm_err_p);
2221 			break;
2222 		case TOMATILLO_IOMMU_ECC_ERR:
2223 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_UE;
2224 			iommu_ereport_post(dip, ena, pbm_err_p);
2225 			break;
2226 		}
2227 	}
2228 
2229 	if (fatal)
2230 		return (DDI_FM_FATAL);
2231 	else if (nonfatal)
2232 		return (DDI_FM_NONFATAL);
2233 
2234 	return (DDI_FM_OK);
2235 }
2236 
2237 int
2238 pci_check_error(pci_t *pci_p)
2239 {
2240 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2241 	uint16_t pci_cfg_stat;
2242 	uint64_t pbm_ctl_stat, pbm_afsr, pbm_pcix_stat;
2243 	caddr_t a = pci_p->pci_address[0];
2244 	uint64_t *pbm_pcix_stat_reg;
2245 
2246 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2247 
2248 	pci_cfg_stat = pbm_p->pbm_config_header->ch_status_reg;
2249 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2250 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2251 
2252 	if ((pci_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
2253 	    PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
2254 	    PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
2255 	    (pbm_ctl_stat & (SCHIZO_PCI_CTRL_BUS_UNUSABLE |
2256 	    TOMATILLO_PCI_CTRL_PCI_DTO_ERR |
2257 	    SCHIZO_PCI_CTRL_PCI_TTO_ERR |
2258 	    SCHIZO_PCI_CTRL_PCI_RTRY_ERR |
2259 	    SCHIZO_PCI_CTRL_PCI_MMU_ERR |
2260 	    COMMON_PCI_CTRL_SBH_ERR |
2261 	    COMMON_PCI_CTRL_SERR)) ||
2262 	    (PBM_AFSR_TO_PRIERR(pbm_afsr)))
2263 		return (1);
2264 
2265 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2266 	    (pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2267 
2268 		pbm_pcix_stat_reg = (uint64_t *)(a +
2269 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2270 
2271 		pbm_pcix_stat = *pbm_pcix_stat_reg;
2272 
2273 		if (PBM_PCIX_TO_PRIERR(pbm_pcix_stat))
2274 			return (1);
2275 
2276 		if (pbm_pcix_stat & XMITS_PCIX_STAT_PERR_RECOV_INT)
2277 			return (1);
2278 	}
2279 
2280 	return (0);
2281 
2282 }
2283 
2284 static pbm_fm_err_t pci_pbm_err_tbl[] = {
2285 	PCI_PBM_RETRY,			SCHIZO_PCI_CTRL_PCI_RTRY_ERR,
2286 	0,	PBM_NONFATAL,	PCI_PBM_TARG_RETRY,
2287 
2288 	PCI_PBM_TTO,			SCHIZO_PCI_CTRL_PCI_TTO_ERR,
2289 	0,	PBM_NONFATAL,	PCI_PBM_TARG_TTO,
2290 
2291 	PCI_SCH_BUS_UNUSABLE_ERR,	SCHIZO_PCI_CTRL_BUS_UNUSABLE,
2292 	0,	PBM_NONFATAL,	NULL,
2293 
2294 	NULL,				0,
2295 	0,	0,		NULL
2296 };
2297 
2298 /*
2299  * Function used to log all PCI/PBM/IOMMU errors found in the system.
2300  * It is called by the pbm_error_intr as well as the pci_err_callback(trap
2301  * It is called by pbm_error_intr as well as pci_err_callback (the trap
2302  * callback). To protect access we hold the pci_fm_mutex when calling
2303  */
2304 int
2305 pci_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
2306     const void *impl_data, int caller)
2307 {
2308 	int fatal = 0;
2309 	int nonfatal = 0;
2310 	int unknown = 0;
2311 	uint32_t prierr, secerr;
2312 	pbm_errstate_t pbm_err;
2313 	char buf[FM_MAX_CLASS];
2314 	pci_t *pci_p = (pci_t *)impl_data;
2315 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2316 	int i, ret = 0;
2317 
2318 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2319 	pci_pbm_errstate_get(pci_p, &pbm_err);
2320 
2321 	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
2322 	    fm_ena_generate(0, FM_ENA_FMT1);
2323 
2324 	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
2325 	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);
2326 
2327 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
2328 		if (caller == PCI_TRAP_CALL) {
2329 			/*
2330 			 * For ddi_caut_get treat all events as nonfatal.
2331 			 * The trampoline will set err_ena = 0, err_status =
2332 			 * NONFATAL. We only really call this function so that
2333 			 * pci_clear_error() and ndi_fm_handler_dispatch() will
2334 			 * get called.
2335 			 */
2336 			derr->fme_status = DDI_FM_NONFATAL;
2337 			nonfatal++;
2338 			goto done;
2339 		} else {
2340 			/*
2341 			 * For ddi_caut_put treat all events as nonfatal. Here
2342 			 * we have the handle and can call ndi_fm_acc_err_set().
2343 			 */
2344 			derr->fme_status = DDI_FM_NONFATAL;
2345 			ndi_fm_acc_err_set(pbm_p->pbm_excl_handle, derr);
2346 			nonfatal++;
2347 			goto done;
2348 		}
2349 	} else if (derr->fme_flag == DDI_FM_ERR_PEEK) {
2350 		/*
2351 		 * For ddi_peek treat all events as nonfatal. We only
2352 		 * really call this function so that pci_clear_error()
2353 		 * and ndi_fm_handler_dispatch() will get called.
2354 		 */
2355 		nonfatal++;
2356 		goto done;
2357 	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
2358 		/*
2359 		 * For ddi_poke we can treat the error as nonfatal if the
2360 		 * following conditions are met:
2361 		 * 1. Make sure the only primary error is MA/TA.
2362 		 * 2. Make sure no secondary error bits are set.
2363 		 * 3. Check the pci config header stat reg to see that MA/TA
2364 		 *    is logged. We cannot verify only MA/TA is recorded
2365 		 *    since it gets much more complicated when a
2366 		 *    PCI-to-PCI bridge is present.
2367 		 */
2368 		if ((prierr == SCHIZO_PCI_AFSR_E_MA) && !secerr &&
2369 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_MAST_AB)) {
2370 			nonfatal++;
2371 			goto done;
2372 		} else if ((*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) &&
2373 		    pcix_ma_behind_bridge(&pbm_err)) {
2374 			/*
2375 			 * MAs behind a PCI-X bridge get sent back to
2376 			 * the host as a Split Completion Error Message.
2377 			 * We handle this the same as the above check.
2378 			 */
2379 			nonfatal++;
2380 			goto done;
2381 		}
2382 		if ((prierr == SCHIZO_PCI_AFSR_E_TA) && !secerr &&
2383 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_TARG_AB)) {
2384 			nonfatal++;
2385 			goto done;
2386 		}
2387 	}
2388 
2389 	DEBUG2(DBG_ERR_INTR, dip, "pci_pbm_err_handler: prierr=0x%x "
2390 	    "secerr=0x%x", prierr, secerr);
2391 
2392 	if (prierr || secerr) {
2393 		ret = pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
2394 		if (ret == DDI_FM_FATAL)
2395 			fatal++;
2396 		else
2397 			nonfatal++;
2398 	}
2399 	if ((ret = pcix_log_pbm(pci_p, derr->fme_ena, &pbm_err))
2400 	    == DDI_FM_FATAL)
2401 		fatal++;
2402 	else if (ret == DDI_FM_NONFATAL)
2403 		nonfatal++;
2404 
2405 	if ((ret = pci_cfg_report(dip, derr, &pbm_err.pbm_pci, caller, prierr))
2406 	    == DDI_FM_FATAL)
2407 		fatal++;
2408 	else if (ret == DDI_FM_NONFATAL)
2409 		nonfatal++;
2410 
2411 	for (i = 0; pci_pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2412 		if ((pbm_err.pbm_ctl_stat & pci_pbm_err_tbl[i].pbm_reg_bit) &&
2413 		    !prierr) {
2414 			pbm_err.pbm_err_class =
2415 			    pci_pbm_err_tbl[i].pbm_err_class;
2416 			pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2417 			if (pci_pbm_err_tbl[i].pbm_flag)
2418 				fatal++;
2419 			else
2420 				nonfatal++;
2421 			if (caller == PCI_TRAP_CALL &&
2422 			    pci_pbm_err_tbl[i].pbm_terr_class)
2423 				pci_target_enqueue(derr->fme_ena,
2424 				    pci_pbm_err_tbl[i].pbm_terr_class,
2425 				    pbm_err.pbm_bridge_type,
2426 				    (uint64_t)derr->fme_bus_specific);
2427 		}
2428 	}
2429 
2430 	if ((pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SBH_ERR) &&
2431 	    (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)) {
2432 		pbm_err.pbm_err_class = PCI_SCH_SBH;
2433 		pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2434 		if (pci_panic_on_sbh_errors)
2435 			fatal++;
2436 		else
2437 			nonfatal++;
2438 	}
2439 
2440 	/*
2441 	 * PBM Received System Error - During any transaction, or
2442 	 * at any point on the bus, some device may detect a critical
2443 	 * error and signal a system error to the system.
2444 	 */
2445 	if (pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SERR) {
2446 		/*
2447 		 * may be expected (master abort from pci-pci bridge during
2448 		 * poke will generate SERR)
2449 		 */
2450 		if (derr->fme_flag != DDI_FM_ERR_POKE) {
2451 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2452 			    PCI_ERROR_SUBCLASS, PCI_REC_SERR);
2453 			DEBUG1(DBG_ERR_INTR, dip, "pci_pbm_err_handler: "
2454 			    "ereport_post: %s", buf);
2455 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2456 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2457 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2458 			    pbm_err.pbm_pci.pci_cfg_stat, PCI_CONFIG_COMMAND,
2459 			    DATA_TYPE_UINT16, pbm_err.pbm_pci.pci_cfg_comm,
2460 			    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2461 		}
2462 		unknown++;
2463 	}
2464 
2465 	/*
2466 	 * PCI Retry Timeout - Device fails to retry deferred
2467 	 * transaction within the timeout. Tomatillo only.
2468 	 */
2469 	if (pbm_err.pbm_ctl_stat & TOMATILLO_PCI_CTRL_PCI_DTO_ERR) {
2470 		if (pci_dto_fault_warn == CE_PANIC)
2471 			fatal++;
2472 		else
2473 			nonfatal++;
2474 
2475 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2476 		    PCI_ERROR_SUBCLASS, PCI_DTO);
2477 		ddi_fm_ereport_post(dip, buf, derr->fme_ena, DDI_NOSLEEP,
2478 		    FM_VERSION, DATA_TYPE_UINT8, 0,
2479 		    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2480 		    pbm_err.pbm_pci.pci_cfg_stat,
2481 		    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
2482 		    pbm_err.pbm_pci.pci_cfg_comm,
2483 		    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2484 	}
2485 
2486 	/*
2487 	 * PBM Detected Data Parity Error - DPE detected during a DMA Write
2488 	 * or PIO Read. Later case is taken care of by cpu_deferred_error
2489 	 * or PIO Read. The latter case is taken care of by cpu_deferred_error
2490 	 */
2491 	if ((pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_PERROR) &&
2492 	    !(pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_S_SYSERR)) {
2493 		/*
2494 		 * If we have an address then fault
2495 		 * it; if not, probe for the errant device
2496 		 */
2497 		ret = DDI_FM_FATAL;
2498 		if (caller != PCI_TRAP_CALL) {
2499 			if (pbm_err.pbm_va_log) {
2500 				ret = ndi_fmc_error(dip, NULL, DMA_HANDLE,
2501 				    derr->fme_ena, (void *)&pbm_err.pbm_va_log);
2502 			}
2503 			if (ret == DDI_FM_NONFATAL)
2504 				nonfatal++;
2505 			else
2506 				fatal++;
2507 		} else
2508 			nonfatal++;
2509 
2510 	}
2511 
2512 	/* PBM Detected IOMMU Error */
2513 	if (pbm_err.pbm_ctl_stat & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2514 		if (iommu_err_handler(dip, derr->fme_ena, &pbm_err)
2515 		    == DDI_FM_FATAL)
2516 			fatal++;
2517 		else
2518 			nonfatal++;
2519 	}
2520 
2521 done:
2522 	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
2523 	if (ret == DDI_FM_FATAL) {
2524 		fatal++;
2525 	} else if (ret == DDI_FM_NONFATAL) {
2526 		nonfatal++;
2527 	} else if (ret == DDI_FM_UNKNOWN) {
2528 		unknown++;
2529 	}
2530 
2531 	/*
2532 	 * RSERR not claimed as nonfatal by a child is considered fatal
2533 	 */
2534 	if (unknown && !fatal && !nonfatal)
2535 		fatal++;
2536 
2537 	/* Cleanup and reset error bits */
2538 	pci_clear_error(pci_p, &pbm_err);
2539 
2540 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2541 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2542 }
2543 
2544 /*
2545  * Function returns TRUE if the primary error is a Split Completion Error
2546  * that indicates a Master Abort occurred behind a PCI-X bridge.
2547  * This function should only be called for buses running in PCI-X mode.
2548  */
2549 static int
2550 pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p)
2551 {
2552 	uint64_t msg;
2553 
2554 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR)
2555 		return (0);
2556 
2557 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
2558 		msg = (pbm_err_p->pbm_afsr >> XMITS_PCI_X_P_MSG_SHIFT) &
2559 		    XMITS_PCIX_MSG_MASK;
2560 		if ((msg & PCIX_CLASS_BRIDGE) &&
2561 		    (msg & PCIX_BRIDGE_MASTER_ABORT)) {
2562 			return (1);
2563 		}
2564 	}
2565 
2566 	return (0);
2567 }
2568 
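/*
 * Illustrative sketch (not part of the driver) of the message decode in
 * pcix_ma_behind_bridge() above: shift the split-completion message out
 * of the AFSR, then test its class and index bits.  Shift and mask
 * values are hypothetical stand-ins for the XMITS_* constants.
 */
#include <stdint.h>

#define	DEMO_P_MSG_SHIFT	44	/* assumed message position */
#define	DEMO_MSG_MASK		0xff	/* assumed message width */
#define	DEMO_CLASS_BRIDGE	0x80	/* assumed class bit */
#define	DEMO_BRIDGE_MA		0x01	/* assumed master-abort bit */

static int
demo_ma_behind_bridge(uint64_t afsr)
{
	uint64_t msg = (afsr >> DEMO_P_MSG_SHIFT) & DEMO_MSG_MASK;

	return ((msg & DEMO_CLASS_BRIDGE) && (msg & DEMO_BRIDGE_MA));
}
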
2569 /*
2570  * Function used to gather PBM/PCI/IOMMU error state for the
2571  * pci_pbm_err_handler and the cb_buserr_intr. This function must be
2572  * called while pci_fm_mutex is held.
2573  */
2574 static void
2575 pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2576 {
2577 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2578 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2579 	caddr_t a = pci_p->pci_address[0];
2580 	uint64_t *pbm_pcix_stat_reg;
2581 
2582 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2583 	bzero(pbm_err_p, sizeof (pbm_errstate_t));
2584 
2585 	/*
2586 	 * Capture all pbm error state for later logging
2587 	 */
2588 	pbm_err_p->pbm_bridge_type = PCI_BRIDGE_TYPE(pci_p->pci_common_p);
2589 
2590 	pbm_err_p->pbm_pci.pci_cfg_stat =
2591 	    pbm_p->pbm_config_header->ch_status_reg;
2592 	pbm_err_p->pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2593 	pbm_err_p->pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2594 	pbm_err_p->pbm_afar = *pbm_p->pbm_async_flt_addr_reg;
2595 	pbm_err_p->pbm_iommu.iommu_stat = *iommu_p->iommu_ctrl_reg;
2596 	pbm_err_p->pbm_pci.pci_cfg_comm =
2597 	    pbm_p->pbm_config_header->ch_command_reg;
2598 	pbm_err_p->pbm_pci.pci_pa = *pbm_p->pbm_async_flt_addr_reg;
2599 
2600 	/*
2601 	 * Record errant slot for Xmits and Schizo
2602 	 * Not stored in Tomatillo
2603 	 */
2604 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS ||
2605 	    CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) {
2606 		pbm_err_p->pbm_err_sl = (pbm_err_p->pbm_ctl_stat &
2607 		    SCHIZO_PCI_CTRL_ERR_SLOT) >>
2608 		    SCHIZO_PCI_CTRL_ERR_SLOT_SHIFT;
2609 
2610 		/*
2611 		 * Bit 51 on XMITS rev 1.0 is the same as
2612 		 * SCHIZO_PCI_CTRL_ERR_SLOT_LOCK on Schizo 2.3, and it
2613 		 * needs to be cleared in order to latch
2614 		 * the slot info on the next fault.
2615 		 * On XMITS rev 2.0, however, this bit indicates a DMA
2616 		 * Write Parity error.
2617 		 */
2618 		if (pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_DMA_WR_PERR) {
2619 			if ((PCI_CHIP_ID(pci_p) == XMITS_VER_10) ||
2620 			    (PCI_CHIP_ID(pci_p) <= SCHIZO_VER_23)) {
2621 				/*
2622 				 * top 32 bits are W1C and we just want to
2623 				 * clear SLOT_LOCK. Leave bottom 32 bits
2624 				 * unchanged
2625 				 */
2626 				*pbm_p->pbm_ctrl_reg =
2627 				    pbm_err_p->pbm_ctl_stat &
2628 				    (SCHIZO_PCI_CTRL_ERR_SLOT_LOCK |
2629 				    0xffffffff);
2630 				pbm_err_p->pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2631 			}
2632 		}
2633 	}
2634 
2635 	/*
2636 	 * Tomatillo specific registers
2637 	 */
2638 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2639 		pbm_err_p->pbm_va_log = (uint64_t)va_to_pa(
2640 		    (void *)(uintptr_t)*(a + TOMATILLO_TGT_ERR_VALOG_OFFSET));
2641 		pbm_err_p->pbm_iommu.iommu_tfar = *iommu_p->iommu_tfar_reg;
2642 	}
2643 
2644 	/*
2645 	 * Xmits PCI-X register
2646 	 */
2647 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2648 	    (pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2649 
2650 		pbm_pcix_stat_reg = (uint64_t *)(a +
2651 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2652 
2653 		pbm_err_p->pbm_pcix_stat = *pbm_pcix_stat_reg;
2654 		pbm_err_p->pbm_pcix_pfar = pbm_err_p->pbm_pcix_stat &
2655 		    XMITS_PCI_X_STATUS_PFAR_MASK;
2656 	}
2657 }
2658 
2659 /*
2660  * Function used to clear PBM/PCI/IOMMU error state after error handling
2661  * is complete. Only clearing error bits which have been logged. Called by
2662  * is complete. Only error bits which have been logged are cleared. Called by
2663  */
2664 static void
2665 pci_clear_error(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2666 {
2667 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2668 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2669 
2670 	ASSERT(MUTEX_HELD(&pbm_p->pbm_pci_p->pci_common_p->pci_fm_mutex));
2671 
2672 	if (*pbm_p->pbm_ctrl_reg & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2673 		iommu_tlb_scrub(pci_p->pci_iommu_p, 1);
2674 	}
2675 	pbm_p->pbm_config_header->ch_status_reg =
2676 	    pbm_err_p->pbm_pci.pci_cfg_stat;
2677 	*pbm_p->pbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
2678 	*pbm_p->pbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
2679 	*iommu_p->iommu_ctrl_reg = pbm_err_p->pbm_iommu.iommu_stat;
2680 }
2681 
2682 void
2683 pbm_clear_error(pbm_t *pbm_p)
2684 {
2685 	uint64_t pbm_afsr, pbm_ctl_stat;
2686 
2687 	/*
2688 	 * for poke() support - called from POKE_FLUSH. Spin waiting
2689 	 * for MA, TA or SERR to be cleared by a pbm_error_intr().
2690 	 * We have to wait for SERR too in case the device is beyond
2691 	 * a pci-pci bridge.
2692 	 */
2693 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2694 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2695 	while (((pbm_afsr >> SCHIZO_PCI_AFSR_PE_SHIFT) &
2696 	    (SCHIZO_PCI_AFSR_E_MA | SCHIZO_PCI_AFSR_E_TA)) ||
2697 	    (pbm_ctl_stat & COMMON_PCI_CTRL_SERR)) {
2698 		pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2699 		pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2700 	}
2701 }
2702 
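/*
 * Illustrative sketch (not part of the driver) of the poke-flush spin in
 * pbm_clear_error() above: re-read a volatile status register until none
 * of the awaited bits remain set.  Names are hypothetical; on real
 * hardware pbm_error_intr() is what eventually clears the bits.
 */
#include <stdint.h>

static void
demo_spin_until_clear(volatile uint64_t *status, uint64_t bits)
{
	while (*status & bits)
		;	/* wait for the error handler to clear them */
}
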
2703 /*
2704  * Function used to convert the 32 bit captured PCI error address
2705  * Function used to convert the 32-bit captured PCI error address
2706  * up in our handle caches.
2707  */
2708 void
2709 pci_format_addr(dev_info_t *dip, uint64_t *afar, uint64_t afsr)
2710 {
2711 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2712 	pci_ranges_t *io_range, *mem_range;
2713 	uint64_t err_pa = 0;
2714 
2715 	if (afsr & SCHIZO_PCI_AFSR_CONF_SPACE) {
2716 		err_pa |= pci_p->pci_ranges->parent_high;
2717 		err_pa = err_pa << 32;
2718 		err_pa |= pci_p->pci_ranges->parent_low;
2719 	} else if (afsr & SCHIZO_PCI_AFSR_IO_SPACE) {
2720 		io_range = pci_p->pci_ranges + 1;
2721 		err_pa |= io_range->parent_high;
2722 		err_pa = err_pa << 32;
2723 		err_pa |= io_range->parent_low;
2724 	} else if (afsr & SCHIZO_PCI_AFSR_MEM_SPACE) {
2725 		mem_range = pci_p->pci_ranges + 2;
2726 		err_pa |= mem_range->parent_high;
2727 		err_pa = err_pa << 32;
2728 		err_pa |= mem_range->parent_low;
2729 	}
2730 	*afar |= err_pa;
2731 }
2732 
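/*
 * Illustrative sketch (not part of the driver) of the address widening
 * in pci_format_addr() above: pick a ranges entry by address space, form
 * the 64-bit parent base from its high/low words, and OR in the captured
 * 32-bit AFAR.  The range layout shown is simplified and the values made
 * up for the example.
 */
#include <stdint.h>

typedef struct demo_range {
	uint32_t parent_high;	/* upper 32 bits of parent address */
	uint32_t parent_low;	/* lower 32 bits of parent address */
} demo_range_t;

static uint64_t
demo_format_addr(uint64_t afar, const demo_range_t *r)
{
	uint64_t err_pa = ((uint64_t)r->parent_high << 32) | r->parent_low;

	return (afar | err_pa);
}
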
2733 static ecc_format_t ecc_format_tbl[] = {
2734 	SCH_REG_UPA,		0,				0,
2735 	SCH_REG_PCIA_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEA,
2736 	SCH_REG_PCIA_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEA,
2737 	SCH_REG_PCIA_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEA,
2738 	SCH_REG_PCIB_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEB,
2739 	SCH_REG_PCIB_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEB,
2740 	SCH_REG_PCIB_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEB,
2741 	SCH_REG_SAFARI_REGS,	0,				0,
2742 	0,			0,				0,
2743 };
2744 
2745 /*
2746  * Function used to convert the 32-bit PIO address captured for a
2747  * Safari Bus UE (during PIO Rd/Wr) to a full Safari Bus Address.
2748  */
2749 static void
2750 pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar, ecc_region_t region)
2751 {
2752 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2753 	pci_common_t *cmn_p = pci_p->pci_common_p;
2754 	cb_t *cb_p = pci_p->pci_cb_p;
2755 	int i, pci_side = 0;
2756 	int swap = 0;
2757 	uint64_t pa = cb_p->cb_base_pa;
2758 	uint64_t flag, schizo_base, pci_csr_base;
2759 
2760 	if (pci_p == NULL)
2761 		return;
2762 
2763 	pci_csr_base = va_to_pa(pci_p->pci_address[0]);
2764 
2765 	/*
2766 	 * Use the csr_base address to determine which side
2767 	 * we are on.
2768 	 */
2769 	if (pci_csr_base & PCI_SIDE_ADDR_MASK)
2770 		pci_side = 1;
2771 	else
2772 		pci_side = 0;
2773 
2774 	schizo_base = pa - PBM_CTRL_OFFSET;
2775 
2776 	for (i = 0; ecc_format_tbl[i].ecc_region != 0; i++) {
2777 		if (region == ecc_format_tbl[i].ecc_region) {
2778 			flag = ecc_format_tbl[i].ecc_space;
2779 			if (ecc_format_tbl[i].ecc_side != pci_side)
2780 				swap = 1;
2781 			if (region == SCH_REG_SAFARI_REGS)
2782 				*afar |= schizo_base;
2783 			break;
2784 		}
2785 	}
2786 
2787 	if (swap) {
2788 		pci_p = cmn_p->pci_p[PCI_OTHER_SIDE(pci_p->pci_side)];
2789 
2790 		if (pci_p == NULL)
2791 			return;
2792 	}
2793 	pci_format_addr(pci_p->pci_dip, afar, flag);
2794 }
2795 
2796 /*
2797  * Function used to post control block specific ereports.
2798  */
2799 static void
2800 cb_ereport_post(dev_info_t *dip, uint64_t ena, cb_errstate_t *cb_err)
2801 {
2802 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2803 	char buf[FM_MAX_CLASS], dev_path[MAXPATHLEN], *ptr;
2804 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
2805 	nvlist_t *ereport, *detector;
2806 	errorq_elem_t *eqep;
2807 	nv_alloc_t *nva;
2808 
2809 	DEBUG1(DBG_ATTACH, dip, "cb_ereport_post: elog 0x%lx",
2810 	    cb_err->cb_elog);
2811 
2812 	/*
2813 	 * We do not use ddi_fm_ereport_post because we need to set a
2814 	 * special detector here. Since we do not have a device path for
2815 	 * the bridge chip we use what we think it should be to aid in
2816 	 * diagnosis.
2817 	 */
2818 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s", DDI_IO_CLASS,
2819 	    cb_err->cb_bridge_type, cb_err->cb_err_class);
2820 
2821 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2822 
2823 	eqep = errorq_reserve(fmhdl->fh_errorq);
2824 	if (eqep == NULL)
2825 		return;
2826 
2827 	ereport = errorq_elem_nvl(fmhdl->fh_errorq, eqep);
2828 	nva = errorq_elem_nva(fmhdl->fh_errorq, eqep);
2829 	detector = fm_nvlist_create(nva);
2830 
2831 	ASSERT(ereport);
2832 	ASSERT(nva);
2833 	ASSERT(detector);
2834 
2835 	ddi_pathname(dip, dev_path);
2836 	ptr = strrchr(dev_path, (int)',');
2837 
2838 	if (ptr)
2839 		*ptr = '\0';
2840 
2841 	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL, dev_path,
2842 	    NULL, NULL);
2843 
2844 	DEBUG1(DBG_ERR_INTR, dip, "cb_ereport_post: ereport_set: %s", buf);
2845 
2846 	if (CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO ||
2847 	    CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
2848 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2849 		    SAFARI_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2850 		    SAFARI_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2851 		    SAFARI_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2852 		    SAFARI_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2853 		    SAFARI_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2854 		    NULL);
2855 	} else if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2856 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2857 		    JBUS_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2858 		    JBUS_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2859 		    JBUS_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2860 		    JBUS_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2861 		    JBUS_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2862 		    NULL);
2863 	}
2864 	errorq_commit(fmhdl->fh_errorq, eqep, ERRORQ_ASYNC);
2865 }
2866 
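/*
 * Illustrative sketch (not part of the driver) of the detector-path trim
 * in cb_ereport_post() above: cut the path at the last ',' so the
 * detector names the bridge node rather than one leaf.  The example
 * path in the comment is hypothetical.
 */
#include <string.h>

static void
demo_trim_detector_path(char *path)
{
	char *ptr = strrchr(path, ',');

	if (ptr != NULL)
		*ptr = '\0';	/* e.g. "/pci@8,600000" -> "/pci@8" */
}
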
2867 /*
2868  * Function used to post IOMMU specific ereports.
2869  */
2870 static void
2871 iommu_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2872 {
2873 	char buf[FM_MAX_CLASS];
2874 
2875 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2876 	    pbm_err->pbm_bridge_type, pbm_err->pbm_err_class);
2877 
2878 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2879 
2880 	DEBUG1(DBG_ERR_INTR, dip, "iommu_ereport_post: ereport_set: %s", buf);
2881 
2882 	ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
2883 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2884 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2885 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2886 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2887 	    PCI_PBM_IOMMU_CTRL, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_stat,
2888 	    PCI_PBM_IOMMU_TFAR, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_tfar,
2889 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2890 	    PCI_PBM_VALOG, DATA_TYPE_UINT64, pbm_err->pbm_va_log,
2891 	    NULL);
2892 }
2893 
2894 /*
2895  * Function used to post PCI-X generic ereports.
2896  * This function needs to be fixed once the Fault Boundary Analysis
2897  * for PCI-X is conducted. The payload should be made more generic.
2898  */
2899 static void
2900 pcix_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2901 {
2902 	char buf[FM_MAX_CLASS];
2903 
2904 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2905 	    pbm_err->pbm_bridge_type, pbm_err->pbm_err_class);
2906 
2907 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2908 
2909 	DEBUG1(DBG_ERR_INTR, dip, "pcix_ereport_post: ereport_post: %s", buf);
2910 
2911 	ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
2912 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2913 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2914 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2915 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2916 	    PCI_PBM_AFSR, DATA_TYPE_UINT64, pbm_err->pbm_afsr,
2917 	    PCI_PBM_AFAR, DATA_TYPE_UINT64, pbm_err->pbm_afar,
2918 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2919 	    PCIX_STAT, DATA_TYPE_UINT64, pbm_err->pbm_pcix_stat,
2920 	    PCIX_PFAR, DATA_TYPE_UINT32, pbm_err->pbm_pcix_pfar,
2921 	    NULL);
2922 }
2923 
2924 static void
2925 iommu_ctx_free(iommu_t *iommu_p)
2926 {
2927 	kmem_free(iommu_p->iommu_ctx_bitmap, IOMMU_CTX_BITMAP_SIZE);
2928 }
2929 
2930 /*
2931  * iommu_tlb_scrub():
2932  *	Examine TLB entries through the TLB diagnostic registers for errors.
2933  *	scrub = 1 : clean up all error bits in the TLB (FAULT_RESET case)
2934  *	scrub = 0 : log all error conditions to the console (FAULT_LOG case)
2935  *	In both cases, it returns the number of errors found in TLB entries.
2936  */
2937 static int
2938 iommu_tlb_scrub(iommu_t *iommu_p, int scrub)
2939 {
2940 	int i, nerr = 0;
2941 	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
2942 	char *neg = "not ";
2943 
2944 	uint64_t base = (uint64_t)iommu_p->iommu_ctrl_reg -
2945 	    COMMON_IOMMU_CTRL_REG_OFFSET;
2946 
2947 	volatile uint64_t *tlb_tag = (volatile uint64_t *)
2948 	    (base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
2949 	volatile uint64_t *tlb_data = (volatile uint64_t *)
2950 	    (base + COMMON_IOMMU_TLB_DATA_DIAG_ACC_OFFSET);
2951 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
2952 		uint64_t tag = tlb_tag[i];
2953 		uint64_t data = tlb_data[i];
2954 		uint32_t errstat;
2955 		iopfn_t pfn;
2956 
2957 		if (!(tag & TLBTAG_ERR_BIT))
2958 			continue;
2959 
2960 		pfn = (iopfn_t)(data & TLBDATA_MEMPA_BITS);
2961 		errstat = (uint32_t)
2962 		    ((tag & TLBTAG_ERRSTAT_BITS) >> TLBTAG_ERRSTAT_SHIFT);
2963 		if (errstat == TLBTAG_ERRSTAT_INVALID) {
2964 			if (scrub)
2965 				tlb_tag[i] = tlb_data[i] = 0ull;
2966 		} else
2967 			nerr++;
2968 
2969 		if (scrub)
2970 			continue;
2971 
2972 		cmn_err(CE_CONT, "%s%d: Error %x on IOMMU TLB entry %x:\n"
2973 		"\tContext=%lx %sWritable %sStreamable\n"
2974 		"\tPCI Page Size=%sk Address in page %lx\n",
2975 		    ddi_driver_name(dip), ddi_get_instance(dip), errstat, i,
2976 		    (tag & TLBTAG_CONTEXT_BITS) >> TLBTAG_CONTEXT_SHIFT,
2977 		    (tag & TLBTAG_WRITABLE_BIT) ? "" : neg,
2978 		    (tag & TLBTAG_STREAM_BIT) ? "" : neg,
2979 		    (tag & TLBTAG_PGSIZE_BIT) ? "64" : "8",
2980 		    (tag & TLBTAG_PCIVPN_BITS) << 13);
2981 		cmn_err(CE_CONT, "Memory: %sValid %sCacheable Page Frame=%lx\n",
2982 		    (data & TLBDATA_VALID_BIT) ? "" : neg,
2983 		    (data & TLBDATA_CACHE_BIT) ? "" : neg, pfn);
2984 	}
2985 	return (nerr);
2986 }
2987 
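/*
 * Illustrative sketch (not part of the driver) of the tag decode in
 * iommu_tlb_scrub() above: the error bit selects entries of interest and
 * the error-status field is shifted out of the tag.  Bit positions are
 * hypothetical stand-ins for the TLBTAG_* constants.
 */
#include <stdint.h>

#define	DEMO_TAG_ERR_BIT	(1ULL << 22)	/* assumed error bit */
#define	DEMO_TAG_ERRSTAT_BITS	(3ULL << 23)	/* assumed status field */
#define	DEMO_TAG_ERRSTAT_SHIFT	23

static int
demo_tag_errstat(uint64_t tag, uint32_t *errstat)
{
	if (!(tag & DEMO_TAG_ERR_BIT))
		return (0);	/* entry recorded no error */

	*errstat = (uint32_t)
	    ((tag & DEMO_TAG_ERRSTAT_BITS) >> DEMO_TAG_ERRSTAT_SHIFT);
	return (1);
}
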
2988 /*
2989  * pci_iommu_disp: calculates the displacement needed in tomatillo's
2990  *	iommu control register and modifies the control value template
2991  *	from the caller. It also clears any error status bits that are new
2992  *	in tomatillo.
2993  * return value: an 8-bit mask to enable corresponding 512 MB segments
2994  *	suitable for tomatillo's target address register.
2995  *	0x00: no programming is needed, use existing value from prom
2996  *	0x60: use segment 5 and 6 to form a 1GB dvma range
2997  */
2998 static uint64_t
2999 pci_iommu_disp(iommu_t *iommu_p, uint64_t *ctl_p)
3000 {
3001 	uint64_t ctl_old;
3002 	if (CHIP_TYPE(iommu_p->iommu_pci_p) != PCI_CHIP_TOMATILLO)
3003 		return (0);
3004 
3005 	ctl_old = *iommu_p->iommu_ctrl_reg;
3006 	/* iommu ctrl reg error bits are W1C */
3007 	if (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT) {
3008 		cmn_err(CE_WARN, "Tomatillo iommu err: %lx", ctl_old);
3009 		*ctl_p |= (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT)
3010 		    << TOMATIILO_IOMMU_ERR_REG_SHIFT;
3011 	}
3012 
3013 	if (iommu_p->iommu_tsb_size != TOMATILLO_IOMMU_TSB_MAX)
3014 		return (0);
3015 
3016 	/* Tomatillo 2.0 and later, and 1GB DVMA range */
3017 	*ctl_p |= 1 << TOMATILLO_IOMMU_SEG_DISP_SHIFT;
3018 	return (3 << (iommu_p->iommu_dvma_base >> (32 - 3)));
3019 }
3020 
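/*
 * Worked example for pci_iommu_disp()'s return value above (the base
 * address is made up): each bit of the target address register enables
 * one 512MB DVMA segment, and a segment index is simply the DVMA base
 * shifted right by 29 bits (32 - 3).  For a 1GB range based at
 * 0xA0000000:
 *
 *	0xA0000000 >> 29 = 5		first segment is #5
 *	3 << 5           = 0x60		enable segments 5 and 6
 *
 * which matches the 0x60 case documented in the block comment above.
 */
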
3021 void
3022 pci_iommu_config(iommu_t *iommu_p, uint64_t iommu_ctl, uint64_t cfgpa)
3023 {
3024 	uintptr_t pbm_regbase = get_pbm_reg_base(iommu_p->iommu_pci_p);
3025 	volatile uint64_t *pbm_csr_p = (volatile uint64_t *)pbm_regbase;
3026 	volatile uint64_t *tgt_space_p = (volatile uint64_t *)(pbm_regbase |
3027 	    (TOMATILLO_TGT_ADDR_SPACE_OFFSET - SCHIZO_PCI_CTRL_REG_OFFSET));
3028 	volatile uint64_t pbm_ctl = *pbm_csr_p;
3029 
3030 	volatile uint64_t *iommu_ctl_p = iommu_p->iommu_ctrl_reg;
3031 	volatile uint64_t tsb_bar_val = iommu_p->iommu_tsb_paddr;
3032 	volatile uint64_t *tsb_bar_p = iommu_p->iommu_tsb_base_addr_reg;
3033 	uint64_t mask = pci_iommu_disp(iommu_p, &iommu_ctl);
3034 
3035 	DEBUG2(DBG_ATTACH, iommu_p->iommu_pci_p->pci_dip,
3036 	    "\npci_iommu_config: pbm_csr_p=%llx pbm_ctl=%llx",
3037 	    pbm_csr_p, pbm_ctl);
3038 	DEBUG2(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3039 	    "\n\tiommu_ctl_p=%llx iommu_ctl=%llx",
3040 	    iommu_ctl_p, iommu_ctl);
3041 	DEBUG4(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3042 	    "\n\tcfgpa=%llx tgt_space_p=%llx mask=%x tsb=%llx\n",
3043 	    cfgpa, tgt_space_p, mask, tsb_bar_val);
3044 
3045 	if (!cfgpa)
3046 		goto reprog;
3047 
3048 	/* disable PBM arbiters - turn off bits 0-7 */
3049 	*pbm_csr_p = (pbm_ctl >> 8) << 8;
3050 
3051 	/*
3052 	 * For non-XMITS, flush any previous writes. This is only
3053 	 * necessary for host bridges that may have a USB keyboard
3054 	 * attached.  XMITS does not require it.
3055 	 */
3056 	if (!(CHIP_TYPE(iommu_p->iommu_pci_p) == PCI_CHIP_XMITS))
3057 		(void) ldphysio(cfgpa);
3058 
3059 reprog:
3060 	if (mask)
3061 		*tgt_space_p = mask;
3062 
3063 	*tsb_bar_p = tsb_bar_val;
3064 	*iommu_ctl_p = iommu_ctl;
3065 
3066 	*pbm_csr_p = pbm_ctl;	/* re-enable bus arbitration */
3067 	pbm_ctl = *pbm_csr_p;	/* flush all prev writes */
3068 }
3069 
3070 
3071 int
3072 pci_get_portid(dev_info_t *dip)
3073 {
3074 	return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3075 	    "portid", -1));
3076 }
3077 
3078 /*
3079  * Schizo Safari Performance Events.
3080  */
3081 pci_kev_mask_t
3082 schizo_saf_events[] = {
3083 	{"saf_bus_cycles", 0x1},	{"saf_pause_asserted_cycles", 0x2},
3084 	{"saf_frn_coherent_cmds", 0x3},	{"saf_frn_coherent_hits", 0x4},
3085 	{"saf_my_coherent_cmds", 0x5},	{"saf_my_coherent_hits", 0x6},
3086 	{"saf_frn_io_cmds", 0x7},	{"saf_frn_io_hits", 0x8},
3087 	{"merge_buffer", 0x9},		{"interrupts", 0xa},
3088 	{"csr_pios", 0xc},		{"upa_pios", 0xd},
3089 	{"pcia_pios", 0xe},		{"pcib_pios", 0xf},
3090 	{"saf_pause_seen_cycles", 0x11}, {"dvma_reads", 0x12},
3091 	{"dvma_writes", 0x13},		{"saf_orq_full_cycles", 0x14},
3092 	{"saf_data_in_cycles", 0x15},	{"saf_data_out_cycles", 0x16},
3093 	{"clear_pic", 0x1f}
3094 };
3095 
3096 
3097 /*
3098  * Schizo PCI Performance Events.
3099  */
3100 pci_kev_mask_t
3101 schizo_pci_events[] = {
3102 	{"dvma_stream_rd", 0x0},	{"dvma_stream_wr", 0x1},
3103 	{"dvma_const_rd", 0x2},		{"dvma_const_wr", 0x3},
3104 	{"dvma_stream_buf_mis", 0x4},	{"dvma_cycles", 0x5},
3105 	{"dvma_wd_xfr", 0x6},		{"pio_cycles", 0x7},
3106 	{"dvma_tlb_misses", 0x10},	{"interrupts", 0x11},
3107 	{"saf_inter_nack", 0x12},	{"pio_reads", 0x13},
3108 	{"pio_writes", 0x14},		{"dvma_rd_buf_timeout", 0x15},
3109 	{"dvma_rd_rtry_stc", 0x16},	{"dvma_wr_rtry_stc", 0x17},
3110 	{"dvma_rd_rtry_nonstc", 0x18},	{"dvma_wr_rtry_nonstc", 0x19},
3111 	{"E*_slow_transitions", 0x1a},	{"E*_slow_cycles_per_64", 0x1b},
3112 	{"clear_pic", 0x1f}
3113 };
3114 
3115 
3116 /*
3117  * Create the picN kstats for the pci
3118  * and safari events.
3119  */
3120 void
3121 pci_kstat_init()
3122 {
3123 	pci_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3124 	    KM_NOSLEEP);
3125 
3126 	if (pci_name_kstat == NULL) {
3127 		cmn_err(CE_WARN, "pcisch: no space for kstat\n");
3128 	} else {
3129 		pci_name_kstat->pic_no_evs =
3130 		    sizeof (schizo_pci_events) / sizeof (pci_kev_mask_t);
3131 		pci_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3132 		pci_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3133 		pci_create_name_kstat("pcis",
3134 		    pci_name_kstat, schizo_pci_events);
3135 	}
3136 
3137 	saf_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3138 	    KM_NOSLEEP);
3139 	if (saf_name_kstat == NULL) {
3140 		cmn_err(CE_WARN, "pcisch: no space for kstat\n");
3141 	} else {
3142 		saf_name_kstat->pic_no_evs =
3143 		    sizeof (schizo_saf_events) / sizeof (pci_kev_mask_t);
3144 		saf_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3145 		saf_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3146 		pci_create_name_kstat("saf", saf_name_kstat, schizo_saf_events);
3147 	}
3148 }
3149 
3150 void
3151 pci_kstat_fini()
3152 {
3153 	if (pci_name_kstat != NULL) {
3154 		pci_delete_name_kstat(pci_name_kstat);
3155 		kmem_free(pci_name_kstat, sizeof (pci_ksinfo_t));
3156 		pci_name_kstat = NULL;
3157 	}
3158 
3159 	if (saf_name_kstat != NULL) {
3160 		pci_delete_name_kstat(saf_name_kstat);
3161 		kmem_free(saf_name_kstat, sizeof (pci_ksinfo_t));
3162 		saf_name_kstat = NULL;
3163 	}
3164 }
3165 
3166 /*
3167  * Create 'counters' kstat for pci events.
3168  */
3169 void
3170 pci_add_pci_kstat(pci_t *pci_p)
3171 {
3172 	pci_cntr_addr_t *cntr_addr_p = &pci_p->pci_ks_addr;
3173 	uintptr_t regbase = (uintptr_t)pci_p->pci_address[0];
3174 
3175 	cntr_addr_p->pcr_addr = (uint64_t *)
3176 	    (regbase + SCHIZO_PERF_PCI_PCR_OFFSET);
3177 	cntr_addr_p->pic_addr = (uint64_t *)
3178 	    (regbase + SCHIZO_PERF_PCI_PIC_OFFSET);
3179 
3180 	pci_p->pci_ksp = pci_create_cntr_kstat(pci_p, "pcis",
3181 	    NUM_OF_PICS, pci_cntr_kstat_update, cntr_addr_p);
3182 
3183 	if (pci_p->pci_ksp == NULL) {
3184 		cmn_err(CE_WARN, "pcisch : cannot create counter kstat");
3185 	}
3186 }
3187 
3188 void
3189 pci_rem_pci_kstat(pci_t *pci_p)
3190 {
3191 	if (pci_p->pci_ksp != NULL)
3192 		kstat_delete(pci_p->pci_ksp);
3193 	pci_p->pci_ksp = NULL;
3194 }
3195 
3196 void
3197 pci_add_upstream_kstat(pci_t *pci_p)
3198 {
3199 	pci_common_t	*cmn_p = pci_p->pci_common_p;
3200 	pci_cntr_pa_t	*cntr_pa_p = &cmn_p->pci_cmn_uks_pa;
3201 	uint64_t regbase = va_to_pa(pci_p->pci_address[1]);
3202 
3203 	cntr_pa_p->pcr_pa =
3204 	    regbase + SCHIZO_PERF_SAF_PCR_OFFSET;
3205 	cntr_pa_p->pic_pa =
3206 	    regbase + SCHIZO_PERF_SAF_PIC_OFFSET;
3207 
3208 	cmn_p->pci_common_uksp = pci_create_cntr_kstat(pci_p, "saf",
3209 	    NUM_OF_PICS, pci_cntr_kstat_pa_update, cntr_pa_p);
3210 }
3211 
3212 /*
3213  * Extract the driver's binding name to identify which chip
3214  * we're binding to.  Whenever a new bus bridge is created, the driver alias
3215  * entry should be added here to identify the device if needed.  If a device
3216  * isn't added, the identity defaults to PCI_CHIP_UNIDENTIFIED.
3217  */
3218 static uint32_t
3219 pci_identity_init(pci_t *pci_p)
3220 {
3221 	dev_info_t *dip = pci_p->pci_dip;
3222 	char *name = ddi_binding_name(dip);
3223 	uint32_t ver = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3224 	    "version#", 0);
3225 
3226 	if (strcmp(name, "pci108e,a801") == 0)
3227 		return (CHIP_ID(PCI_CHIP_TOMATILLO, ver, 0x00));
3228 
3229 	if (strcmp(name, "pci108e,8001") == 0)
3230 		return (CHIP_ID(PCI_CHIP_SCHIZO, ver, 0x00));
3231 
3232 	if (strcmp(name, "pci108e,8002") == 0) {
3233 		uint32_t mod_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3234 		    DDI_PROP_DONTPASS, "module-revision#", 0);
3235 		return (CHIP_ID(PCI_CHIP_XMITS, ver, mod_rev));
3236 	}
3237 
3238 	cmn_err(CE_WARN, "%s%d: Unknown PCI Host bridge %s %x\n",
3239 	    ddi_driver_name(dip), ddi_get_instance(dip), name, ver);
3240 
3241 	return (PCI_CHIP_UNIDENTIFIED);
3242 }
3243 
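/*
 * Illustrative sketch (not part of the driver) of the identity packing
 * returned by pci_identity_init() above: chip type, silicon version and
 * module revision are folded into a single 32-bit word.  The field
 * layout here is an assumption for illustration; the real CHIP_ID() and
 * ID_CHIP_TYPE() macros live in the pcisch headers.
 */
#include <stdint.h>

#define	DEMO_CHIP_ID(t, v, r)						\
	(((uint32_t)(t) << 16) | ((uint32_t)(v) << 8) | (uint32_t)(r))
#define	DEMO_ID_CHIP_TYPE(id)	((uint32_t)(id) >> 16)
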
3244 /*
3245  * Set up a physical pointer to one leaf config space area. This
3246  * is used in several places in order to do a dummy read which
3247  * guarantees the nexus (and not a bus master) has gained control
3248  * of the bus.
3249  */
3250 static void
3251 pci_setup_cfgpa(pci_t *pci_p)
3252 {
3253 	dev_info_t *dip = pci_p->pci_dip;
3254 	dev_info_t *cdip;
3255 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3256 	uint64_t cfgpa = pci_get_cfg_pabase(pci_p);
3257 	uint32_t *reg_p;
3258 	int reg_len;
3259 
3260 	for (cdip = ddi_get_child(dip); cdip != NULL;
3261 	    cdip = ddi_get_next_sibling(cdip)) {
3262 		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
3263 		    "reg", (caddr_t)&reg_p, &reg_len) != DDI_PROP_SUCCESS)
3264 			continue;
3265 		cfgpa += (*reg_p) & (PCI_CONF_ADDR_MASK ^ PCI_REG_REG_M);
3266 		kmem_free(reg_p, reg_len);
3267 		break;
3268 	}
3269 	pbm_p->pbm_anychild_cfgpa = cfgpa;
3270 }
3271 
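/*
 * Worked example of the reg-property masking in pci_setup_cfgpa() above,
 * assuming the usual encoding where PCI_CONF_ADDR_MASK covers the low 24
 * bits of phys.hi and PCI_REG_REG_M is the register-number byte: for a
 * child at bus 0x40, device 2, function 1, register 0x10,
 *
 *	*reg_p = 0x401110
 *	0x401110 & (0xffffff ^ 0xff) = 0x401110 & 0xffff00 = 0x401100
 *
 * i.e. the bus/device/function bits are kept to select the child's
 * config space while the register-offset byte is dropped.
 */
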
3272 void
3273 pci_post_init_child(pci_t *pci_p, dev_info_t *child)
3274 {
3275 	volatile uint64_t *ctrl_reg_p;
3276 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3277 
3278 	pci_setup_cfgpa(pci_p);
3279 
3280 	/*
3281 	 * This is a hack for the skyhawk/cassini combination to address
3282 	 * hardware problems between the request and grant signals which
3283 	 * cause a bus hang.  One workaround, which is applied here,
3284 	 * is to disable bus parking if the child contains the property
3285 	 * pci-req-removal.  Note that if the bus is quiesced we must mask
3286 	 * off the parking bit in the saved control registers, since the
3287 	 * quiesce operation temporarily turns off PCI bus parking.
3288 	 */
3289 	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
3290 	    "pci-req-removal") == 1) {
3291 
3292 		if (pbm_p->pbm_quiesce_count > 0) {
3293 			pbm_p->pbm_saved_ctrl_reg &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3294 		} else {
3295 			ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3296 			*ctrl_reg_p &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3297 		}
3298 	}
3299 
3300 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
3301 		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) {
3302 			int value;
3303 
3304 			/*
3305 			 * Due to an XMITS bug, we need to set the outstanding
3306 			 * split transactions to 1 for all PCI-X functions
3307 			 * behind the leaf.
3308 			 */
3309 			value = (xmits_max_transactions << 4) |
3310 			    (xmits_max_read_bytes << 2);
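			/*
			 * The field placement follows the PCI-X command
			 * register: bits 6:4 hold the maximum outstanding
			 * split transactions and bits 3:2 the maximum
			 * memory read byte count, hence the shifts of 4
			 * and 2 applied to the tunables above.
			 */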
3311 
3312 			DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
3313 			    "Workaround: value = %x\n", value);
3314 
3315 			pcix_set_cmd_reg(child, value);
3316 
3317 			(void) ndi_prop_update_int(DDI_DEV_T_NONE,
3318 			    child, "pcix-update-cmd-reg", value);
3319 		}
3320 
3321 		if (PCI_CHIP_ID(pci_p) >= XMITS_VER_30) {
3322 			uint64_t *pbm_pcix_diag_reg =
3323 			    (uint64_t *)(pci_p->pci_address[0] +
3324 			    XMITS_PCI_X_DIAG_REG_OFFSET);
3325 			uint64_t bugcntl = (*pbm_pcix_diag_reg >>
3326 			    XMITS_PCI_X_DIAG_BUGCNTL_SHIFT) &
3327 			    XMITS_PCI_X_DIAG_BUGCNTL_MASK;
3328 			uint64_t tunable = (*pbm_p->pbm_ctrl_reg &
3329 			    XMITS_PCI_CTRL_X_MODE ?
3330 			    xmits_pcix_diag_bugcntl_pcix :
3331 			    xmits_pcix_diag_bugcntl_pci)
3332 			    & XMITS_PCI_X_DIAG_BUGCNTL_MASK;
3333 
3334 			DEBUG4(DBG_INIT_CLD, pci_p->pci_dip, "%s: XMITS "
3335 			    "pcix diag bugcntl=0x%lx, tunable=0x%lx, mode=%s\n",
3336 			    ddi_driver_name(child), bugcntl, tunable,
3337 			    ((*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE)?
3338 			    "PCI-X":"PCI"));
3339 
3340 			DEBUG2(DBG_INIT_CLD, pci_p->pci_dip, "%s: XMITS "
3341 			    "pcix diag reg=0x%lx (CUR)\n",
3342 			    ddi_driver_name(child), *pbm_pcix_diag_reg);
3343 
3344 			/*
3345 			 * Due to an XMITS 3.x hardware bug, we need to
3346 			 * read the PBM's xmits pci ctrl status register to
3347 			 * determine the mode (PCI or PCI-X) and then update
3348 			 * the PBM's pcix diag register with new BUG_FIX_CNTL
3349 			 * bits (47:32) _if_ they differ from the tunable's
3350 			 * mode-based value.  This update is performed only
3351 			 * once, during the PBM's first child init.
3352 			 *
3353 			 * Per instructions from xmits hw engineering,
3354 			 * non-BUG_FIX_CNTL bits should not be preserved
3355 			 * when updating the pcix diag register. Such bits
3356 			 * should be written as 0s.
3357 			 */
3358 
3359 			if (bugcntl != tunable) {
3360 				*pbm_pcix_diag_reg = tunable <<
3361 				    XMITS_PCI_X_DIAG_BUGCNTL_SHIFT;
3362 
3363 				DEBUG2(DBG_INIT_CLD, pci_p->pci_dip, "%s: XMITS"
3364 				    " pcix diag reg=0x%lx (NEW)\n",
3365 				    ddi_driver_name(child), *pbm_pcix_diag_reg);
3366 			}
3367 		}
3368 	}
3369 }
3370 
3371 void
3372 pci_post_uninit_child(pci_t *pci_p)
3373 {
3374 	pci_setup_cfgpa(pci_p);
3375 }
3376 
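/*
 * Tomatillo delivers its ECC interrupts through an additional mondo
 * (IB_INO_TO_NBMONDO) beyond the regular one; this helper performs the
 * extra add_ivintr()/rem_ivintr() registration on behalf of
 * pci_ecc_add_intr() and pci_ecc_rem_intr() below.
 */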
3377 static int
3378 pci_tom_nbintr_op(pci_t *pci_p, uint32_t inum, intrfunc f, caddr_t arg,
3379     int flag)
3380 {
3381 	uint32_t ino = pci_p->pci_inos[inum];
3382 	uint32_t mondo = IB_INO_TO_NBMONDO(pci_p->pci_ib_p, ino);
3383 	int ret = DDI_SUCCESS;
3384 
3385 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo); /* no op on tom */
3386 
3387 	switch (flag) {
3388 	case PCI_OBJ_INTR_ADD:
3389 		VERIFY(add_ivintr(mondo, pci_pil[inum], f,
3390 		    arg, NULL, NULL) == 0);
3391 		break;
3392 	case PCI_OBJ_INTR_REMOVE:
3393 		VERIFY(rem_ivintr(mondo, pci_pil[inum]) == 0);
3394 		break;
3395 	default:
3396 		ret = DDI_FAILURE;
3397 		break;
3398 	}
3399 
3400 	return (ret);
3401 }
3402 
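/*
 * The ECC mondo is built by hand: the control block's interrupt group
 * number (cb_ign) supplies the upper bits and the per-leaf INO the low
 * PCI_INO_BITS; CB_MONDO_TO_XMONDO() then applies any chip-specific
 * transform.
 */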
3403 int
3404 pci_ecc_add_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3405 {
3406 	uint32_t mondo;
3407 	int	r;
3408 
3409 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3410 	    pci_p->pci_inos[inum]);
3411 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3412 
3413 	VERIFY(add_ivintr(mondo, pci_pil[inum], (intrfunc)ecc_intr,
3414 	    (caddr_t)eii_p, NULL, NULL) == 0);
3415 
3416 	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)
3417 		return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD,
3418 		    DDI_SUCCESS));
3419 
3420 	r = pci_tom_nbintr_op(pci_p, inum, (intrfunc)ecc_intr,
3421 	    (caddr_t)eii_p, PCI_OBJ_INTR_ADD);
3422 	return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD, r));
3423 }
3424 
3425 void
3426 pci_ecc_rem_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3427 {
3428 	uint32_t mondo;
3429 
3430 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3431 	    pci_p->pci_inos[inum]);
3432 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3433 
3434 	VERIFY(rem_ivintr(mondo, pci_pil[inum]) == 0);
3435 
3436 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
3437 		pci_tom_nbintr_op(pci_p, inum, (intrfunc)ecc_intr,
3438 		    (caddr_t)eii_p, PCI_OBJ_INTR_REMOVE);
3439 }
3440 
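/*
 * Consistent-mode DMA flush handshake: the PBM raises this interrupt
 * once a consistent DMA flush completes, and the handler records the
 * completion in pbm_cdma_flag.  The flush path (not shown here) is
 * assumed to set the flag to a pending value and poll for PBM_CDMA_DONE.
 */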
3441 static uint_t
3442 pci_pbm_cdma_intr(caddr_t a)
3443 {
3444 	pbm_t *pbm_p = (pbm_t *)a;
3445 	pbm_p->pbm_cdma_flag = PBM_CDMA_DONE;
3446 #ifdef PBM_CDMA_DEBUG
3447 	pbm_p->pbm_cdma_intr_cnt++;
3448 #endif /* PBM_CDMA_DEBUG */
3449 	return (DDI_INTR_CLAIMED);
3450 }
3451 
3452 int
3453 pci_pbm_add_intr(pci_t *pci_p)
3454 {
3455 	uint32_t mondo;
3456 
3457 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3458 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3459 
3460 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_CDMA],
3461 	    (intrfunc)pci_pbm_cdma_intr, (caddr_t)pci_p->pci_pbm_p,
3462 	    NULL, NULL) == 0);
3463 
3464 	return (DDI_SUCCESS);
3465 }
3466 
3467 void
3468 pci_pbm_rem_intr(pci_t *pci_p)
3469 {
3470 	ib_t		*ib_p = pci_p->pci_ib_p;
3471 	uint32_t	mondo;
3472 
3473 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3474 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3475 
3476 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_CDMA], IB_INTR_NOWAIT);
3477 	VERIFY(rem_ivintr(mondo, pci_pil[CBNINTR_CDMA]) == 0);
3478 }
3479 
3480 void
3481 pci_pbm_suspend(pci_t *pci_p)
3482 {
3483 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3484 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3485 
3486 	/* Save CDMA interrupt state */
3487 	pbm_p->pbm_cdma_imr_save = *ib_intr_map_reg_addr(pci_p->pci_ib_p, ino);
3488 }
3489 
3490 void
3491 pci_pbm_resume(pci_t *pci_p)
3492 {
3493 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3494 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3495 
3496 	/* Restore CDMA interrupt state */
3497 	*ib_intr_map_reg_addr(pci_p->pci_ib_p, ino) = pbm_p->pbm_cdma_imr_save;
3498 }
3499 
3500 /*
3501  * pci_bus_quiesce
3502  *
3503  * This function is called as the corresponding control ops routine
3504  * to a DDI_CTLOPS_QUIESCE command.  Its mission is to halt all DMA
3505  * activity on the bus by disabling arbitration/parking.
3506  */
3507 int
3508 pci_bus_quiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3509 {
3510 	volatile uint64_t *ctrl_reg_p;
3511 	volatile uint64_t ctrl_reg;
3512 	pbm_t *pbm_p;
3513 
3514 	pbm_p = pci_p->pci_pbm_p;
3515 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3516 
3517 	if (pbm_p->pbm_quiesce_count++ == 0) {
3518 
3519 		DEBUG0(DBG_PWR, dip, "quiescing bus\n");
3520 
3521 		ctrl_reg = *ctrl_reg_p;
3522 		pbm_p->pbm_saved_ctrl_reg = ctrl_reg;
3523 		ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
3524 		    SCHIZO_PCI_CTRL_ARB_PARK);
3525 		*ctrl_reg_p = ctrl_reg;
3526 #ifdef	DEBUG
3527 		ctrl_reg = *ctrl_reg_p;
3528 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3529 		    SCHIZO_PCI_CTRL_ARB_PARK)) != 0)
3530 			panic("ctrl_reg didn't quiesce: 0x%lx\n", ctrl_reg);
3531 #endif
3532 		if (pbm_p->pbm_anychild_cfgpa)
3533 			(void) ldphysio(pbm_p->pbm_anychild_cfgpa);
3534 	}
3535 
3536 	return (DDI_SUCCESS);
3537 }
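
/*
 * Quiesce calls nest via pbm_quiesce_count: only the first call saves
 * the control register and disables arbitration/parking, and only the
 * final matching unquiesce restores it.  The dummy config space read
 * through pbm_anychild_cfgpa above is what guarantees the nexus, and
 * not a bus master, owns the bus before this routine returns.  Note
 * that pci_post_init_child() may clear the parking bit in the saved
 * value while the bus is quiesced.
 */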
3538 
3539 /*
3540  * pci_bus_unquiesce
3541  *
3542  * This function is called as the corresponding control ops routine
3543  * to a DDI_CTLOPS_UNQUIESCE command.  Its mission is to resume DMA
3544  * activity on the bus by restoring the saved arbitration/parking state.
3545  */
3546 int
3547 pci_bus_unquiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3548 {
3549 	volatile uint64_t *ctrl_reg_p;
3550 	pbm_t *pbm_p;
3551 #ifdef	DEBUG
3552 	volatile uint64_t ctrl_reg;
3553 #endif
3554 
3555 	pbm_p = pci_p->pci_pbm_p;
3556 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3557 
3558 	ASSERT(pbm_p->pbm_quiesce_count > 0);
3559 	if (--pbm_p->pbm_quiesce_count == 0) {
3560 		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
3561 #ifdef	DEBUG
3562 		ctrl_reg = *ctrl_reg_p;
3563 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3564 		    SCHIZO_PCI_CTRL_ARB_PARK)) == 0)
3565 			panic("ctrl_reg didn't unquiesce: 0x%lx\n", ctrl_reg);
3566 #endif
3567 	}
3568 
3569 	return (DDI_SUCCESS);
3570 }
3571 
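/*
 * Key handed back to the page-relocation framework; the value 0x200 is
 * assumed to be an arbitrary nexus-specific token rather than a
 * hardware-derived quantity.
 */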
3572 int
3573 pci_reloc_getkey(void)
3574 {
3575 	return (0x200);
3576 }
3577 
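/*
 * Deferred DVMA free used when tm_mtlb_gc is set (Tomatillo IOMMU TLB
 * erratum): freed ranges are queued rather than returned to the vmem
 * arena immediately.  Once the queued page count exceeds
 * iommu_mtlb_maxpgs, the TLB tags are read through the diagnostic
 * access window and only ranges with no live TLB entry are actually
 * freed; the survivors are compacted in the request array and retried
 * on a later call.
 */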
3578 static void
3579 tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p, dvma_addr_t dvma_pg,
3580     int npages)
3581 {
3582 	uint32_t dur_max, dur_base;
3583 	dvma_unbind_req_t *req_p, *req_max_p;
3584 	dvma_unbind_req_t *req_base_p = iommu_p->iommu_mtlb_req_p;
3585 	uint32_t tlb_vpn[IOMMU_TLB_ENTRIES];
3586 	caddr_t reg_base;
3587 	volatile uint64_t *tag_p;
3588 	int i, preserv_count = 0;
3589 
3590 	mutex_enter(&iommu_p->iommu_mtlb_lock);
3591 
3592 	iommu_p->iommu_mtlb_npgs += npages;
3593 	req_max_p = req_base_p + iommu_p->iommu_mtlb_nreq++;
3594 	req_max_p->dur_npg = npages;
3595 	req_max_p->dur_base = dvma_pg;
3596 	req_max_p->dur_flags = mp->dmai_flags & DMAI_FLAGS_VMEMCACHE;
3597 
3598 
3599 	if (iommu_p->iommu_mtlb_npgs <= iommu_p->iommu_mtlb_maxpgs)
3600 		goto done;
3601 
3602 	/* read TLB */
3603 	reg_base = iommu_p->iommu_pci_p->pci_address[0];
3604 	tag_p = (volatile uint64_t *)
3605 	    (reg_base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
3606 
3607 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++)
3608 		tlb_vpn[i] = tag_p[i] & SCHIZO_VPN_MASK;
3609 
3610 	/* for each request search the TLB for a matching address */
3611 	for (req_p = req_base_p; req_p <= req_max_p; req_p++) {
3612 		dur_base = req_p->dur_base;
3613 		dur_max = req_p->dur_base + req_p->dur_npg;
3614 
3615 		for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
3616 			uint_t vpn = tlb_vpn[i];
3617 			if (vpn >= dur_base && vpn < dur_max)
3618 				break;
3619 		}
3620 		if (i >= IOMMU_TLB_ENTRIES) {
3621 			pci_vmem_do_free(iommu_p,
3622 			    (void *)IOMMU_PTOB(req_p->dur_base),
3623 			    req_p->dur_npg, req_p->dur_flags);
3624 			iommu_p->iommu_mtlb_npgs -= req_p->dur_npg;
3625 			continue;
3626 		}
3627 		/* still live in the TLB; keep it, compacting toward the base */
3628 		if ((req_p - req_base_p) != preserv_count)
3629 			*(req_base_p + preserv_count) = *req_p;
3630 		preserv_count++;
3631 	}
3632 
3633 	iommu_p->iommu_mtlb_nreq = preserv_count;
3634 done:
3635 	mutex_exit(&iommu_p->iommu_mtlb_lock);
3636 }
3637 
3638 void
3639 pci_vmem_free(iommu_t *iommu_p, ddi_dma_impl_t *mp, void *dvma_addr,
3640     size_t npages)
3641 {
3642 	if (tm_mtlb_gc)
3643 		tm_vmem_free(mp, iommu_p,
3644 		    (dvma_addr_t)IOMMU_BTOP((dvma_addr_t)dvma_addr), npages);
3645 	else
3646 		pci_vmem_do_free(iommu_p, dvma_addr, npages,
3647 		    (mp->dmai_flags & DMAI_FLAGS_VMEMCACHE));
3648 }
3649 
3650 /*
3651  * pci_iommu_bypass_end_configure
3652  *
3653  * Return the end of the iommu bypass range: SAFARI and JBUS support
3654  * a 42-bit bus width for DVMA and iommu bypass transfers.
3655  */
3656 
3657 dma_bypass_addr_t
3658 pci_iommu_bypass_end_configure(void)
3659 {
3660 
3661 	return ((dma_bypass_addr_t)SAFARI_JBUS_IOMMU_BYPASS_END);
3662 }
3663