xref: /illumos-gate/usr/src/uts/sun4u/cpu/us3_common.c (revision 56870e8c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/systm.h>
28 #include <sys/ddi.h>
29 #include <sys/sysmacros.h>
30 #include <sys/archsystm.h>
31 #include <sys/vmsystm.h>
32 #include <sys/machparam.h>
33 #include <sys/machsystm.h>
34 #include <sys/machthread.h>
35 #include <sys/cpu.h>
36 #include <sys/cmp.h>
37 #include <sys/elf_SPARC.h>
38 #include <vm/vm_dep.h>
39 #include <vm/hat_sfmmu.h>
40 #include <vm/seg_kpm.h>
41 #include <sys/cpuvar.h>
42 #include <sys/cheetahregs.h>
43 #include <sys/us3_module.h>
44 #include <sys/async.h>
45 #include <sys/cmn_err.h>
46 #include <sys/debug.h>
47 #include <sys/dditypes.h>
48 #include <sys/prom_debug.h>
49 #include <sys/prom_plat.h>
50 #include <sys/cpu_module.h>
51 #include <sys/sysmacros.h>
52 #include <sys/intreg.h>
53 #include <sys/clock.h>
54 #include <sys/platform_module.h>
55 #include <sys/machtrap.h>
56 #include <sys/ontrap.h>
57 #include <sys/panic.h>
58 #include <sys/memlist.h>
59 #include <sys/bootconf.h>
60 #include <sys/ivintr.h>
61 #include <sys/atomic.h>
62 #include <sys/taskq.h>
63 #include <sys/note.h>
64 #include <sys/ndifm.h>
65 #include <sys/ddifm.h>
66 #include <sys/fm/protocol.h>
67 #include <sys/fm/util.h>
68 #include <sys/fm/cpu/UltraSPARC-III.h>
69 #include <sys/fpras_impl.h>
70 #include <sys/dtrace.h>
71 #include <sys/watchpoint.h>
72 #include <sys/plat_ecc_unum.h>
73 #include <sys/cyclic.h>
74 #include <sys/errorq.h>
75 #include <sys/errclassify.h>
76 #include <sys/pghw.h>
77 #include <sys/clock_impl.h>
78 
79 #ifdef	CHEETAHPLUS_ERRATUM_25
80 #include <sys/xc_impl.h>
81 #endif	/* CHEETAHPLUS_ERRATUM_25 */
82 
83 ch_cpu_logout_t	clop_before_flush;
84 ch_cpu_logout_t	clop_after_flush;
85 uint_t	flush_retries_done = 0;
86 /*
87  * Note that 'Cheetah PRM' refers to:
88  *   SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
89  */
90 
91 /*
92  * Per CPU pointers to physical address of TL>0 logout data areas.
93  * These pointers have to be in the kernel nucleus to avoid MMU
94  * misses.
95  */
96 uint64_t ch_err_tl1_paddrs[NCPU];
97 
98 /*
99  * One statically allocated structure to use during startup/DR
100  * to prevent unnecessary panics.
101  */
102 ch_err_tl1_data_t ch_err_tl1_data;
103 
104 /*
105  * Per CPU pending error at TL>0, used by level15 softint handler
106  */
107 uchar_t ch_err_tl1_pending[NCPU];
108 
109 /*
110  * For deferred CE re-enable after trap.
111  */
112 taskq_t		*ch_check_ce_tq;
113 
114 /*
115  * Internal functions.
116  */
117 static int cpu_async_log_err(void *flt, errorq_elem_t *eqep);
118 static void cpu_log_diag_info(ch_async_flt_t *ch_flt);
119 static void cpu_queue_one_event(ch_async_flt_t *ch_flt, char *reason,
120     ecc_type_to_info_t *eccp, ch_diag_data_t *cdp);
121 static int cpu_flt_in_memory_one_event(ch_async_flt_t *ch_flt,
122     uint64_t t_afsr_bit);
123 static int clear_ecc(struct async_flt *ecc);
124 #if defined(CPU_IMP_ECACHE_ASSOC)
125 static int cpu_ecache_line_valid(ch_async_flt_t *ch_flt);
126 #endif
127 int cpu_ecache_set_size(struct cpu *cp);
128 static int cpu_ectag_line_invalid(int cachesize, uint64_t tag);
129 int cpu_ectag_pa_to_subblk(int cachesize, uint64_t subaddr);
130 uint64_t cpu_ectag_to_pa(int setsize, uint64_t tag);
131 int cpu_ectag_pa_to_subblk_state(int cachesize,
132 				uint64_t subaddr, uint64_t tag);
133 static void cpu_flush_ecache_line(ch_async_flt_t *ch_flt);
134 static int afsr_to_afar_status(uint64_t afsr, uint64_t afsr_bit);
135 static int afsr_to_esynd_status(uint64_t afsr, uint64_t afsr_bit);
136 static int afsr_to_msynd_status(uint64_t afsr, uint64_t afsr_bit);
137 static int afsr_to_synd_status(uint_t cpuid, uint64_t afsr, uint64_t afsr_bit);
138 static int synd_to_synd_code(int synd_status, ushort_t synd, uint64_t afsr_bit);
139 static int cpu_get_mem_unum_synd(int synd_code, struct async_flt *, char *buf);
140 static void cpu_uninit_ecache_scrub_dr(struct cpu *cp);
141 static void cpu_scrubphys(struct async_flt *aflt);
142 static void cpu_payload_add_aflt(struct async_flt *, nvlist_t *, nvlist_t *,
143     int *, int *);
144 static void cpu_payload_add_ecache(struct async_flt *, nvlist_t *);
145 static void cpu_ereport_init(struct async_flt *aflt);
146 static int cpu_check_secondary_errors(ch_async_flt_t *, uint64_t, uint64_t);
147 static uint8_t cpu_flt_bit_to_plat_error(struct async_flt *aflt);
148 static void cpu_log_fast_ecc_error(caddr_t tpc, int priv, int tl, uint64_t ceen,
149     uint64_t nceen, ch_cpu_logout_t *clop);
150 static int cpu_ce_delayed_ec_logout(uint64_t);
151 static int cpu_matching_ecache_line(uint64_t, void *, int, int *);
152 static int cpu_error_is_ecache_data(int, uint64_t);
153 static void cpu_fmri_cpu_set(nvlist_t *, int);
154 static int cpu_error_to_resource_type(struct async_flt *aflt);
155 
156 #ifdef	CHEETAHPLUS_ERRATUM_25
157 static int mondo_recover_proc(uint16_t, int);
158 static void cheetah_nudge_init(void);
159 static void cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr,
160     cyc_time_t *when);
161 static void cheetah_nudge_buddy(void);
162 #endif	/* CHEETAHPLUS_ERRATUM_25 */
163 
164 #if defined(CPU_IMP_L1_CACHE_PARITY)
165 static void cpu_dcache_parity_info(ch_async_flt_t *ch_flt);
166 static void cpu_dcache_parity_check(ch_async_flt_t *ch_flt, int index);
167 static void cpu_record_dc_data_parity(ch_async_flt_t *ch_flt,
168     ch_dc_data_t *dest_dcp, ch_dc_data_t *src_dcp, int way, int word);
169 static void cpu_icache_parity_info(ch_async_flt_t *ch_flt);
170 static void cpu_icache_parity_check(ch_async_flt_t *ch_flt, int index);
171 static void cpu_pcache_parity_info(ch_async_flt_t *ch_flt);
172 static void cpu_pcache_parity_check(ch_async_flt_t *ch_flt, int index);
173 static void cpu_payload_add_dcache(struct async_flt *, nvlist_t *);
174 static void cpu_payload_add_icache(struct async_flt *, nvlist_t *);
175 #endif	/* CPU_IMP_L1_CACHE_PARITY */
176 
177 int (*p2get_mem_info)(int synd_code, uint64_t paddr,
178     uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
179     int *segsp, int *banksp, int *mcidp);
180 
181 /*
182  * This table is used to determine which bit(s) is(are) bad when an ECC
183  * error occurs.  The array is indexed by a 9-bit syndrome.  The entries
184  * of this array have the following semantics:
185  *
186  *      00-127  The number of the bad bit, when only one bit is bad.
187  *      128     ECC bit C0 is bad.
188  *      129     ECC bit C1 is bad.
189  *      130     ECC bit C2 is bad.
190  *      131     ECC bit C3 is bad.
191  *      132     ECC bit C4 is bad.
192  *      133     ECC bit C5 is bad.
193  *      134     ECC bit C6 is bad.
194  *      135     ECC bit C7 is bad.
195  *      136     ECC bit C8 is bad.
196  *	137-143 reserved for Mtag Data and ECC.
197  *      144(M2) Two bits are bad within a nibble.
198  *      145(M3) Three bits are bad within a nibble.
199  *      146(M4) Four bits are bad within a nibble.
200  *      147(M)  Multiple bits (5 or more) are bad.
201  *      148     NO bits are bad.
202  * Based on "Cheetah Programmer's Reference Manual" rev 1.1, Tables 11-4 and 11-5.
203  */
204 
205 #define	C0	128
206 #define	C1	129
207 #define	C2	130
208 #define	C3	131
209 #define	C4	132
210 #define	C5	133
211 #define	C6	134
212 #define	C7	135
213 #define	C8	136
214 #define	MT0	137	/* Mtag Data bit 0 */
215 #define	MT1	138
216 #define	MT2	139
217 #define	MTC0	140	/* Mtag Check bit 0 */
218 #define	MTC1	141
219 #define	MTC2	142
220 #define	MTC3	143
221 #define	M2	144
222 #define	M3	145
223 #define	M4	146
224 #define	M	147
225 #define	NA	148
226 #if defined(JALAPENO) || defined(SERRANO)
227 #define	S003	149	/* Syndrome 0x003 => likely from CPU/EDU:ST/FRU/BP */
228 #define	S003MEM	150	/* Syndrome 0x003 => likely from WDU/WBP */
229 #define	SLAST	S003MEM	/* last special syndrome */
230 #else /* JALAPENO || SERRANO */
231 #define	S003	149	/* Syndrome 0x003 => likely from EDU:ST */
232 #define	S071	150	/* Syndrome 0x071 => likely from WDU/CPU */
233 #define	S11C	151	/* Syndrome 0x11c => likely from BERR/DBERR */
234 #define	SLAST	S11C	/* last special syndrome */
235 #endif /* JALAPENO || SERRANO */
236 #if defined(JALAPENO) || defined(SERRANO)
237 #define	BPAR0	152	/* syndromes 152 through 167 for bus parity */
238 #define	BPAR15	167
239 #endif	/* JALAPENO || SERRANO */
240 
241 static uint8_t ecc_syndrome_tab[] =
242 {
243 NA,  C0,  C1, S003, C2,  M2,  M3,  47,  C3,  M2,  M2,  53,  M2,  41,  29,   M,
244 C4,   M,   M,  50,  M2,  38,  25,  M2,  M2,  33,  24,  M2,  11,   M,  M2,  16,
245 C5,   M,   M,  46,  M2,  37,  19,  M2,   M,  31,  32,   M,   7,  M2,  M2,  10,
246 M2,  40,  13,  M2,  59,   M,  M2,  66,   M,  M2,  M2,   0,  M2,  67,  71,   M,
247 C6,   M,   M,  43,   M,  36,  18,   M,  M2,  49,  15,   M,  63,  M2,  M2,   6,
248 M2,  44,  28,  M2,   M,  M2,  M2,  52,  68,  M2,  M2,  62,  M2,  M3,  M3,  M4,
249 M2,  26, 106,  M2,  64,   M,  M2,   2, 120,   M,  M2,  M3,   M,  M3,  M3,  M4,
250 #if defined(JALAPENO) || defined(SERRANO)
251 116, M2,  M2,  M3,  M2,  M3,   M,  M4,  M2,  58,  54,  M2,   M,  M4,  M4,  M3,
252 #else	/* JALAPENO || SERRANO */
253 116, S071, M2,  M3,  M2,  M3,   M,  M4,  M2,  58,  54,  M2,   M,  M4,  M4,  M3,
254 #endif	/* JALAPENO || SERRANO */
255 C7,  M2,   M,  42,   M,  35,  17,  M2,   M,  45,  14,  M2,  21,  M2,  M2,   5,
256 M,   27,   M,   M,  99,   M,   M,   3, 114,  M2,  M2,  20,  M2,  M3,  M3,   M,
257 M2,  23, 113,  M2, 112,  M2,   M,  51,  95,   M,  M2,  M3,  M2,  M3,  M3,  M2,
258 103,  M,  M2,  M3,  M2,  M3,  M3,  M4,  M2,  48,   M,   M,  73,  M2,   M,  M3,
259 M2,  22, 110,  M2, 109,  M2,   M,   9, 108,  M2,   M,  M3,  M2,  M3,  M3,   M,
260 102, M2,   M,   M,  M2,  M3,  M3,   M,  M2,  M3,  M3,  M2,   M,  M4,   M,  M3,
261 98,   M,  M2,  M3,  M2,   M,  M3,  M4,  M2,  M3,  M3,  M4,  M3,   M,   M,   M,
262 M2,  M3,  M3,   M,  M3,   M,   M,   M,  56,  M4,   M,  M3,  M4,   M,   M,   M,
263 C8,   M,  M2,  39,   M,  34, 105,  M2,   M,  30, 104,   M, 101,   M,   M,   4,
264 #if defined(JALAPENO) || defined(SERRANO)
265 M,    M, 100,   M,  83,   M,  M2,  12,  87,   M,   M,  57,  M2,   M,  M3,   M,
266 #else	/* JALAPENO || SERRANO */
267 M,    M, 100,   M,  83,   M,  M2,  12,  87,   M,   M,  57, S11C,  M,  M3,   M,
268 #endif	/* JALAPENO || SERRANO */
269 M2,  97,  82,  M2,  78,  M2,  M2,   1,  96,   M,   M,   M,   M,   M,  M3,  M2,
270 94,   M,  M2,  M3,  M2,   M,  M3,   M,  M2,   M,  79,   M,  69,   M,  M4,   M,
271 M2,  93,  92,   M,  91,   M,  M2,   8,  90,  M2,  M2,   M,   M,   M,   M,  M4,
272 89,   M,   M,  M3,  M2,  M3,  M3,   M,   M,   M,  M3,  M2,  M3,  M2,   M,  M3,
273 86,   M,  M2,  M3,  M2,   M,  M3,   M,  M2,   M,  M3,   M,  M3,   M,   M,  M3,
274 M,    M,  M3,  M2,  M3,  M2,  M4,   M,  60,   M,  M2,  M3,  M4,   M,   M,  M2,
275 M2,  88,  85,  M2,  84,   M,  M2,  55,  81,  M2,  M2,  M3,  M2,  M3,  M3,  M4,
276 77,   M,   M,   M,  M2,  M3,   M,   M,  M2,  M3,  M3,  M4,  M3,  M2,   M,   M,
277 74,   M,  M2,  M3,   M,   M,  M3,   M,   M,   M,  M3,   M,  M3,   M,  M4,  M3,
278 M2,  70, 107,  M4,  65,  M2,  M2,   M, 127,   M,   M,   M,  M2,  M3,  M3,   M,
279 80,  M2,  M2,  72,   M, 119, 118,   M,  M2, 126,  76,   M, 125,   M,  M4,  M3,
280 M2, 115, 124,   M,  75,   M,   M,  M3,  61,   M,  M4,   M,  M4,   M,   M,   M,
281 M,  123, 122,  M4, 121,  M4,   M,  M3, 117,  M2,  M2,  M3,  M4,  M3,   M,   M,
282 111,  M,   M,   M,  M4,  M3,  M3,   M,   M,   M,  M3,   M,  M3,  M2,   M,   M
283 };
284 
285 #define	ESYND_TBL_SIZE	(sizeof (ecc_syndrome_tab) / sizeof (uint8_t))
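
/*
 * Editor's note -- an illustrative sketch, not part of the original source:
 * a consumer of this table masks the 9-bit E$ syndrome out of the AFSR and
 * uses it as the index.  Values below C0 (128) name a single bad data bit,
 * C0..C8 (128-136) name a bad ECC check bit, and M2/M3/M4/M/NA classify the
 * multi-bit cases.  Assuming the E_SYND field sits in AFSR<8:0> (the real
 * code uses the C_AFSR_E_SYND definitions from <sys/cheetahregs.h>):
 *
 *	ushort_t synd = (ushort_t)(t_afsr & 0x1ff);
 *	uint8_t code = (synd < ESYND_TBL_SIZE) ? ecc_syndrome_tab[synd] : M;
 *
 *	if (code < C0) {
 *		// single-bit error: data bit 'code' (0-127) is bad
 *	} else if (code <= C8) {
 *		// single-bit error: ECC check bit (code - C0) is bad
 *	}
 */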
286 
287 #if !(defined(JALAPENO) || defined(SERRANO))
288 /*
289  * This table is used to determine which bit(s) is(are) bad when an Mtag
290  * error occurs.  The array is indexed by a 4-bit ECC syndrome.  The entries
291  * of this array have the following semantics:
292  *
293  *      -1	Invalid mtag syndrome.
294  *      137     Mtag Data 0 is bad.
295  *      138     Mtag Data 1 is bad.
296  *      139     Mtag Data 2 is bad.
297  *      140     Mtag ECC 0 is bad.
298  *      141     Mtag ECC 1 is bad.
299  *      142     Mtag ECC 2 is bad.
300  *      143     Mtag ECC 3 is bad.
301  * Based on "Cheetah Programmer's Reference Manual" rev 1.1, Table 11-6.
302  */
303 short mtag_syndrome_tab[] =
304 {
305 NA, MTC0, MTC1, M2, MTC2, M2, M2, MT0, MTC3, M2, M2,  MT1, M2, MT2, M2, M2
306 };
307 
308 #define	MSYND_TBL_SIZE	(sizeof (mtag_syndrome_tab) / sizeof (short))
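
/*
 * Editor's note -- an illustrative sketch, not part of the original source:
 * the Mtag table is consumed the same way, indexed by the 4-bit Mtag
 * syndrome (assumed here to have been shifted down out of the AFSR M_SYND
 * field already):
 *
 *	short code = (msynd < MSYND_TBL_SIZE) ? mtag_syndrome_tab[msynd] : -1;
 *	// MT0-MT2 (137-139): bad Mtag data bit; MTC0-MTC3 (140-143):
 *	// bad Mtag check bit; M2: multi-bit error; NA: no bits bad.
 */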
309 
310 #else /* !(JALAPENO || SERRANO) */
311 
312 #define	BSYND_TBL_SIZE	16
313 
314 #endif /* !(JALAPENO || SERRANO) */
315 
316 /*
317  * Virtual Address bit flag in the data cache. This is actually bit 2 in the
318  * dcache data tag.
319  */
320 #define	VA13	INT64_C(0x0000000000000002)
321 
322 /*
323  * Types returned from cpu_error_to_resource_type()
324  */
325 #define	ERRTYPE_UNKNOWN		0
326 #define	ERRTYPE_CPU		1
327 #define	ERRTYPE_MEMORY		2
328 #define	ERRTYPE_ECACHE_DATA	3
329 
330 /*
331  * CE initial classification and subsequent action lookup table
332  */
333 static ce_dispact_t ce_disp_table[CE_INITDISPTBL_SIZE];
334 static int ce_disp_inited;
335 
336 /*
337  * Set to disable leaky and partner check for memory correctables
338  */
339 int ce_xdiag_off;
340 
341 /*
342  * The following are not incremented atomically, so they are indicative only
343  */
344 static int ce_xdiag_drops;
345 static int ce_xdiag_lkydrops;
346 static int ce_xdiag_ptnrdrops;
347 static int ce_xdiag_bad;
348 
349 /*
350  * CE leaky check callback structure
351  */
352 typedef struct {
353 	struct async_flt *lkycb_aflt;
354 	errorq_t *lkycb_eqp;
355 	errorq_elem_t *lkycb_eqep;
356 } ce_lkychk_cb_t;
357 
358 /*
359  * defines for various ecache_flush_flag's
360  */
361 #define	ECACHE_FLUSH_LINE	1
362 #define	ECACHE_FLUSH_ALL	2
363 
364 /*
365  * STICK sync
366  */
367 #define	STICK_ITERATION 10
368 #define	MAX_TSKEW	1
369 #define	EV_A_START	0
370 #define	EV_A_END	1
371 #define	EV_B_START	2
372 #define	EV_B_END	3
373 #define	EVENTS		4
374 
375 static int64_t stick_iter = STICK_ITERATION;
376 static int64_t stick_tsk = MAX_TSKEW;
377 
378 typedef enum {
379 	EVENT_NULL = 0,
380 	SLAVE_START,
381 	SLAVE_CONT,
382 	MASTER_START
383 } event_cmd_t;
384 
385 static volatile event_cmd_t stick_sync_cmd = EVENT_NULL;
386 static int64_t timestamp[EVENTS];
387 static volatile int slave_done;
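
/*
 * Editor's note -- an illustrative reading, not part of the original source:
 * the four EV_* timestamps implement a two-sided handshake between the
 * master and the slave being synchronized.  One hedged way to picture the
 * skew estimate derived from them is
 *
 *	skew ~= ((timestamp[EV_B_START] - timestamp[EV_A_START]) +
 *	    (timestamp[EV_B_END] - timestamp[EV_A_END])) / 2
 *
 * with the exchange repeated up to STICK_ITERATION (stick_iter) times until
 * the estimate falls within MAX_TSKEW (stick_tsk) ticks.  The authoritative
 * computation is in the sticksync master/slave code that consumes these
 * definitions.
 */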
388 
389 #ifdef DEBUG
390 #define	DSYNC_ATTEMPTS 64
391 typedef struct {
392 	int64_t	skew_val[DSYNC_ATTEMPTS];
393 } ss_t;
394 
395 ss_t stick_sync_stats[NCPU];
396 #endif /* DEBUG */
397 
398 uint_t cpu_impl_dual_pgsz = 0;
399 #if defined(CPU_IMP_DUAL_PAGESIZE)
400 uint_t disable_dual_pgsz = 0;
401 #endif	/* CPU_IMP_DUAL_PAGESIZE */
402 
403 /*
404  * Save the cache bootup state for use when internal
405  * caches are to be re-enabled after an error occurs.
406  */
407 uint64_t cache_boot_state;
408 
409 /*
410  * PA[22:0] represent Displacement in Safari configuration space.
411  */
412 uint_t	root_phys_addr_lo_mask = 0x7fffffu;
413 
414 bus_config_eclk_t bus_config_eclk[] = {
415 #if defined(JALAPENO) || defined(SERRANO)
416 	{JBUS_CONFIG_ECLK_1_DIV, JBUS_CONFIG_ECLK_1},
417 	{JBUS_CONFIG_ECLK_2_DIV, JBUS_CONFIG_ECLK_2},
418 	{JBUS_CONFIG_ECLK_32_DIV, JBUS_CONFIG_ECLK_32},
419 #else /* JALAPENO || SERRANO */
420 	{SAFARI_CONFIG_ECLK_1_DIV, SAFARI_CONFIG_ECLK_1},
421 	{SAFARI_CONFIG_ECLK_2_DIV, SAFARI_CONFIG_ECLK_2},
422 	{SAFARI_CONFIG_ECLK_32_DIV, SAFARI_CONFIG_ECLK_32},
423 #endif /* JALAPENO || SERRANO */
424 	{0, 0}
425 };
426 
427 /*
428  * Interval for deferred CEEN reenable
429  */
430 int cpu_ceen_delay_secs = CPU_CEEN_DELAY_SECS;
431 
432 /*
433  * set in /etc/system to control logging of user BERR/TO's
434  */
435 int cpu_berr_to_verbose = 0;
436 
437 /*
438  * set to 0 in /etc/system to defer CEEN reenable for all CEs
439  */
440 uint64_t cpu_ce_not_deferred = CPU_CE_NOT_DEFERRED;
441 uint64_t cpu_ce_not_deferred_ext = CPU_CE_NOT_DEFERRED_EXT;
442 
443 /*
444  * Set of all offline cpus
445  */
446 cpuset_t cpu_offline_set;
447 
448 static void cpu_delayed_check_ce_errors(void *);
449 static void cpu_check_ce_errors(void *);
450 void cpu_error_ecache_flush(ch_async_flt_t *);
451 static int cpu_error_ecache_flush_required(ch_async_flt_t *);
452 static void cpu_log_and_clear_ce(ch_async_flt_t *);
453 void cpu_ce_detected(ch_cpu_errors_t *, int);
454 
455 /*
456  * CE Leaky check timeout in microseconds.  This is chosen to be twice the
457  * memory refresh interval of current DIMMs (64ms), i.e. 128ms.  After the
458  * initial fix, that gives at least one full refresh cycle in which the cell
459  * can leak (whereafter further refreshes simply reinforce any incorrect bit value).
460  */
461 clock_t cpu_ce_lkychk_timeout_usec = 128000;
462 
463 /*
464  * CE partner check partner caching period in seconds
465  */
466 int cpu_ce_ptnr_cachetime_sec = 60;
467 
468 /*
469  * Sets trap table entry ttentry by overwriting eight instructions from ttlabel
470  */
471 #define	CH_SET_TRAP(ttentry, ttlabel)			\
472 		bcopy((const void *)&ttlabel, &ttentry, 32);		\
473 		flush_instr_mem((caddr_t)&ttentry, 32);
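
/*
 * Editor's note -- hypothetical usage, not from this excerpt: the
 * chip-specific cpu_init_trap() routines invoke this along the lines of
 *
 *	extern void fecc_err_instr(void);	// 32 bytes of handler text
 *	CH_SET_TRAP(tt0_fecc, fecc_err_instr);
 *
 * where bcopy() patches the eight instructions (32 bytes) in place and
 * flush_instr_mem() keeps the I$ consistent with the rewritten text.
 */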
474 
475 static int min_ecache_size;
476 static uint_t priv_hcl_1;
477 static uint_t priv_hcl_2;
478 static uint_t priv_hcl_4;
479 static uint_t priv_hcl_8;
480 
481 void
482 cpu_setup(void)
483 {
484 	extern int at_flags;
485 	extern int cpc_has_overflow_intr;
486 
487 	/*
488 	 * Setup chip-specific trap handlers.
489 	 */
490 	cpu_init_trap();
491 
492 	cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);
493 
494 	at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;
495 
496 	/*
497 	 * save the cache bootup state.
498 	 */
499 	cache_boot_state = get_dcu() & DCU_CACHE;
500 
501 	/*
502 	 * Due to the number of entries in the fully-associative tlb
503 	 * this may have to be tuned lower than in spitfire.
504 	 */
505 	pp_slots = MIN(8, MAXPP_SLOTS);
506 
507 	/*
508 	 * Block stores do not invalidate all pages of the d$, pagecopy
509 	 * et al. need virtual translations with virtual coloring taken
510 	 * into consideration.  prefetch/ldd will pollute the d$ on the
511 	 * load side.
512 	 */
513 	pp_consistent_coloring = PPAGE_STORE_VCOLORING | PPAGE_LOADS_POLLUTE;
514 
515 	if (use_page_coloring) {
516 		do_pg_coloring = 1;
517 	}
518 
519 	isa_list =
520 	    "sparcv9+vis2 sparcv9+vis sparcv9 "
521 	    "sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
522 	    "sparcv8 sparcv8-fsmuld sparcv7 sparc";
523 
524 	/*
525 	 * On Panther-based machines, this should
526 	 * also include AV_SPARC_POPC
527 	 */
528 	cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2;
529 
530 	/*
531 	 * On cheetah, there's no hole in the virtual address space
532 	 */
533 	hole_start = hole_end = 0;
534 
535 	/*
536 	 * The kpm mapping window.
537 	 * kpm_size:
538 	 *	The size of a single kpm range.
539 	 *	The overall size will be: kpm_size * vac_colors.
540 	 * kpm_vbase:
541 	 *	The virtual start address of the kpm range within the kernel
542 	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
543 	 */
544 	kpm_size = (size_t)(8ull * 1024 * 1024 * 1024 * 1024); /* 8TB */
545 	kpm_size_shift = 43;
546 	kpm_vbase = (caddr_t)0x8000000000000000ull; /* 8EB */
547 	kpm_smallpages = 1;
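	/*
	 * Editor's note (arithmetic check, not part of the original
	 * source): 8ull * 1024^4 == 1ULL << 43, matching kpm_size_shift
	 * above, and kpm_vbase (0x8000000000000000 == 2^63) is therefore
	 * kpm_size-aligned as the comment requires.
	 */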
548 
549 	/*
550 	 * The traptrace code uses either %tick or %stick for
551 	 * timestamping.  We have %stick so we can use it.
552 	 */
553 	traptrace_use_stick = 1;
554 
555 	/*
556 	 * Cheetah has a performance counter overflow interrupt
557 	 */
558 	cpc_has_overflow_intr = 1;
559 
560 #if defined(CPU_IMP_DUAL_PAGESIZE)
561 	/*
562 	 * Use Cheetah+ and later dual page size support.
563 	 */
564 	if (!disable_dual_pgsz) {
565 		cpu_impl_dual_pgsz = 1;
566 	}
567 #endif	/* CPU_IMP_DUAL_PAGESIZE */
568 
569 	/*
570 	 * Declare that this architecture/cpu combination does fpRAS.
571 	 */
572 	fpras_implemented = 1;
573 
574 	/*
575 	 * Setup CE lookup table
576 	 */
577 	CE_INITDISPTBL_POPULATE(ce_disp_table);
578 	ce_disp_inited = 1;
579 }
580 
581 /*
582  * Called by setcpudelay
583  */
584 void
585 cpu_init_tick_freq(void)
586 {
587 	/*
588 	 * For UltraSPARC III and beyond we want to use the
589 	 * system clock rate as the basis for low level timing,
590 	 * due to support of mixed-speed CPUs and power management.
591 	 */
592 	if (system_clock_freq == 0)
593 		cmn_err(CE_PANIC, "setcpudelay: invalid system_clock_freq");
594 
595 	sys_tick_freq = system_clock_freq;
596 }
597 
598 #ifdef CHEETAHPLUS_ERRATUM_25
599 /*
600  * Tunables
601  */
602 int cheetah_bpe_off = 0;
603 int cheetah_sendmondo_recover = 1;
604 int cheetah_sendmondo_fullscan = 0;
605 int cheetah_sendmondo_recover_delay = 5;
606 
607 #define	CHEETAH_LIVELOCK_MIN_DELAY	1
608 
609 /*
610  * Recovery Statistics
611  */
612 typedef struct cheetah_livelock_entry	{
613 	int cpuid;		/* fallen cpu */
614 	int buddy;		/* cpu that ran recovery */
615 	clock_t lbolt;		/* when recovery started */
616 	hrtime_t recovery_time;	/* time spent in recovery */
617 } cheetah_livelock_entry_t;
618 
619 #define	CHEETAH_LIVELOCK_NENTRY	32
620 
621 cheetah_livelock_entry_t cheetah_livelock_hist[CHEETAH_LIVELOCK_NENTRY];
622 int cheetah_livelock_entry_nxt;
623 
624 #define	CHEETAH_LIVELOCK_ENTRY_NEXT(statp)	{			\
625 	statp = cheetah_livelock_hist + cheetah_livelock_entry_nxt;	\
626 	if (++cheetah_livelock_entry_nxt >= CHEETAH_LIVELOCK_NENTRY) {	\
627 		cheetah_livelock_entry_nxt = 0;				\
628 	}								\
629 }
630 
631 #define	CHEETAH_LIVELOCK_ENTRY_SET(statp, item, val)	statp->item = val
632 
633 struct {
634 	hrtime_t hrt;		/* maximum recovery time */
635 	int recovery;		/* recovered */
636 	int full_claimed;	/* maximum pages claimed in full recovery */
637 	int proc_entry;		/* attempted to claim TSB */
638 	int proc_tsb_scan;	/* tsb scanned */
639 	int proc_tsb_partscan;	/* tsb partially scanned */
640 	int proc_tsb_fullscan;	/* whole tsb scanned */
641 	int proc_claimed;	/* maximum pages claimed in tsb scan */
642 	int proc_user;		/* user thread */
643 	int proc_kernel;	/* kernel thread */
644 	int proc_onflt;		/* bad stack */
645 	int proc_cpu;		/* null cpu */
646 	int proc_thread;	/* null thread */
647 	int proc_proc;		/* null proc */
648 	int proc_as;		/* null as */
649 	int proc_hat;		/* null hat */
650 	int proc_hat_inval;	/* hat contents don't make sense */
651 	int proc_hat_busy;	/* hat is changing TSBs */
652 	int proc_tsb_reloc;	/* TSB skipped because being relocated */
653 	int proc_cnum_bad;	/* cnum out of range */
654 	int proc_cnum;		/* last cnum processed */
655 	tte_t proc_tte;		/* last tte processed */
656 } cheetah_livelock_stat;
657 
658 #define	CHEETAH_LIVELOCK_STAT(item)	cheetah_livelock_stat.item++
659 
660 #define	CHEETAH_LIVELOCK_STATSET(item, value)		\
661 	cheetah_livelock_stat.item = value
662 
663 #define	CHEETAH_LIVELOCK_MAXSTAT(item, value)	{	\
664 	if (value > cheetah_livelock_stat.item)		\
665 		cheetah_livelock_stat.item = value;	\
666 }
667 
668 /*
669  * Attempt to recover a cpu by claiming every cache line saved in
670  * the TSB that the non-responsive cpu is using.  Since we can't
671  * grab any adaptive lock, this is only a best-effort attempt.
672  * Because we don't grab any locks, we must operate under the
673  * protection of on_fault().
674  *
675  * Return 1 if cpuid could be recovered, 0 if failed.
676  */
677 int
678 mondo_recover_proc(uint16_t cpuid, int bn)
679 {
680 	label_t ljb;
681 	cpu_t *cp;
682 	kthread_t *t;
683 	proc_t *p;
684 	struct as *as;
685 	struct hat *hat;
686 	uint_t  cnum;
687 	struct tsb_info *tsbinfop;
688 	struct tsbe *tsbep;
689 	caddr_t tsbp;
690 	caddr_t end_tsbp;
691 	uint64_t paddr;
692 	uint64_t idsr;
693 	u_longlong_t pahi, palo;
694 	int pages_claimed = 0;
695 	tte_t tsbe_tte;
696 	int tried_kernel_tsb = 0;
697 	mmu_ctx_t *mmu_ctxp;
698 
699 	CHEETAH_LIVELOCK_STAT(proc_entry);
700 
701 	if (on_fault(&ljb)) {
702 		CHEETAH_LIVELOCK_STAT(proc_onflt);
703 		goto badstruct;
704 	}
705 
706 	if ((cp = cpu[cpuid]) == NULL) {
707 		CHEETAH_LIVELOCK_STAT(proc_cpu);
708 		goto badstruct;
709 	}
710 
711 	if ((t = cp->cpu_thread) == NULL) {
712 		CHEETAH_LIVELOCK_STAT(proc_thread);
713 		goto badstruct;
714 	}
715 
716 	if ((p = ttoproc(t)) == NULL) {
717 		CHEETAH_LIVELOCK_STAT(proc_proc);
718 		goto badstruct;
719 	}
720 
721 	if ((as = p->p_as) == NULL) {
722 		CHEETAH_LIVELOCK_STAT(proc_as);
723 		goto badstruct;
724 	}
725 
726 	if ((hat = as->a_hat) == NULL) {
727 		CHEETAH_LIVELOCK_STAT(proc_hat);
728 		goto badstruct;
729 	}
730 
731 	if (hat != ksfmmup) {
732 		CHEETAH_LIVELOCK_STAT(proc_user);
733 		if (hat->sfmmu_flags & (HAT_BUSY | HAT_SWAPPED | HAT_SWAPIN)) {
734 			CHEETAH_LIVELOCK_STAT(proc_hat_busy);
735 			goto badstruct;
736 		}
737 		tsbinfop = hat->sfmmu_tsb;
738 		if (tsbinfop == NULL) {
739 			CHEETAH_LIVELOCK_STAT(proc_hat_inval);
740 			goto badstruct;
741 		}
742 		tsbp = tsbinfop->tsb_va;
743 		end_tsbp = tsbp + TSB_BYTES(tsbinfop->tsb_szc);
744 	} else {
745 		CHEETAH_LIVELOCK_STAT(proc_kernel);
746 		tsbinfop = NULL;
747 		tsbp = ktsb_base;
748 		end_tsbp = tsbp + TSB_BYTES(ktsb_sz);
749 	}
750 
751 	/* Verify as */
752 	if (hat->sfmmu_as != as) {
753 		CHEETAH_LIVELOCK_STAT(proc_hat_inval);
754 		goto badstruct;
755 	}
756 
757 	mmu_ctxp = CPU_MMU_CTXP(cp);
758 	ASSERT(mmu_ctxp);
759 	cnum = hat->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
760 	CHEETAH_LIVELOCK_STATSET(proc_cnum, cnum);
761 
762 	if ((cnum < 0) || (cnum == INVALID_CONTEXT) ||
763 	    (cnum >= mmu_ctxp->mmu_nctxs)) {
764 		CHEETAH_LIVELOCK_STAT(proc_cnum_bad);
765 		goto badstruct;
766 	}
767 
768 	do {
769 		CHEETAH_LIVELOCK_STAT(proc_tsb_scan);
770 
771 		/*
772 		 * Skip TSBs being relocated.  This is important because
773 		 * we want to avoid the following deadlock scenario:
774 		 *
775 		 * 1) when we came in we set ourselves to "in recover" state.
776 		 * 2) when we try to touch TSB being relocated the mapping
777 		 *    will be in the suspended state so we'll spin waiting
778 		 *    for it to be unlocked.
779 		 * 3) when the CPU that holds the TSB mapping locked tries to
780 		 *    unlock it, it will send an xtrap which will fail to xcall
781 		 *    us or the CPU we're trying to recover, and will in turn
782 		 *    enter the mondo code.
783 		 * 4) since we are still spinning on the locked mapping
784 		 *    no further progress will be made and the system will
785 		 *    inevitably hard hang.
786 		 *
787 		 * A TSB not being relocated can't begin being relocated
788 		 * while we're accessing it because we check
789 		 * sendmondo_in_recover before relocating TSBs.
790 		 */
791 		if (hat != ksfmmup &&
792 		    (tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
793 			CHEETAH_LIVELOCK_STAT(proc_tsb_reloc);
794 			goto next_tsbinfo;
795 		}
796 
797 		for (tsbep = (struct tsbe *)tsbp;
798 		    tsbep < (struct tsbe *)end_tsbp; tsbep++) {
799 			tsbe_tte = tsbep->tte_data;
800 
801 			if (tsbe_tte.tte_val == 0) {
802 				/*
803 				 * Invalid tte
804 				 */
805 				continue;
806 			}
807 			if (tsbe_tte.tte_se) {
808 				/*
809 				 * Don't want device registers
810 				 */
811 				continue;
812 			}
813 			if (tsbe_tte.tte_cp == 0) {
814 				/*
815 				 * Must be cached in E$
816 				 */
817 				continue;
818 			}
819 			if (tsbep->tte_tag.tag_invalid != 0) {
820 				/*
821 				 * Invalid tag, ignore this entry.
822 				 */
823 				continue;
824 			}
825 			CHEETAH_LIVELOCK_STATSET(proc_tte, tsbe_tte);
826 			idsr = getidsr();
827 			if ((idsr & (IDSR_NACK_BIT(bn) |
828 			    IDSR_BUSY_BIT(bn))) == 0) {
829 				CHEETAH_LIVELOCK_STAT(proc_tsb_partscan);
830 				goto done;
831 			}
832 			pahi = tsbe_tte.tte_pahi;
833 			palo = tsbe_tte.tte_palo;
834 			paddr = (uint64_t)((pahi << 32) |
835 			    (palo << MMU_PAGESHIFT));
836 			claimlines(paddr, TTEBYTES(TTE_CSZ(&tsbe_tte)),
837 			    CH_ECACHE_SUBBLK_SIZE);
838 			if ((idsr & IDSR_BUSY_BIT(bn)) == 0) {
839 				shipit(cpuid, bn);
840 			}
841 			pages_claimed++;
842 		}
843 next_tsbinfo:
844 		if (tsbinfop != NULL)
845 			tsbinfop = tsbinfop->tsb_next;
846 		if (tsbinfop != NULL) {
847 			tsbp = tsbinfop->tsb_va;
848 			end_tsbp = tsbp + TSB_BYTES(tsbinfop->tsb_szc);
849 		} else if (tsbp == ktsb_base) {
850 			tried_kernel_tsb = 1;
851 		} else if (!tried_kernel_tsb) {
852 			tsbp = ktsb_base;
853 			end_tsbp = tsbp + TSB_BYTES(ktsb_sz);
854 			hat = ksfmmup;
855 			tsbinfop = NULL;
856 		}
857 	} while (tsbinfop != NULL ||
858 	    ((tsbp == ktsb_base) && !tried_kernel_tsb));
859 
860 	CHEETAH_LIVELOCK_STAT(proc_tsb_fullscan);
861 	CHEETAH_LIVELOCK_MAXSTAT(proc_claimed, pages_claimed);
862 	no_fault();
863 	idsr = getidsr();
864 	if ((idsr & (IDSR_NACK_BIT(bn) |
865 	    IDSR_BUSY_BIT(bn))) == 0) {
866 		return (1);
867 	} else {
868 		return (0);
869 	}
870 
871 done:
872 	no_fault();
873 	CHEETAH_LIVELOCK_MAXSTAT(proc_claimed, pages_claimed);
874 	return (1);
875 
876 badstruct:
877 	no_fault();
878 	return (0);
879 }
880 
881 /*
882  * Attempt to claim ownership, temporarily, of every cache line that a
883  * non-responsive cpu might be using.  This might kick that cpu out of
884  * this state.
885  *
886  * The return value indicates to the caller if we have exhausted all recovery
887  * techniques. If 1 is returned, it is useless to call this function again
888  * even for a different target CPU.
889  */
890 int
891 mondo_recover(uint16_t cpuid, int bn)
892 {
893 	struct memseg *seg;
894 	uint64_t begin_pa, end_pa, cur_pa;
895 	hrtime_t begin_hrt, end_hrt;
896 	int retval = 0;
897 	int pages_claimed = 0;
898 	cheetah_livelock_entry_t *histp;
899 	uint64_t idsr;
900 
901 	if (atomic_cas_32(&sendmondo_in_recover, 0, 1) != 0) {
902 		/*
903 		 * Wait while recovery takes place
904 		 */
905 		while (sendmondo_in_recover) {
906 			drv_usecwait(1);
907 		}
908 		/*
909 		 * Assume we didn't claim the whole memory. If
910 		 * the target of this caller is not recovered,
911 		 * it will come back.
912 		 */
913 		return (retval);
914 	}
915 
916 	CHEETAH_LIVELOCK_ENTRY_NEXT(histp);
917 	CHEETAH_LIVELOCK_ENTRY_SET(histp, lbolt, LBOLT_WAITFREE);
918 	CHEETAH_LIVELOCK_ENTRY_SET(histp, cpuid, cpuid);
919 	CHEETAH_LIVELOCK_ENTRY_SET(histp, buddy, CPU->cpu_id);
920 
921 	begin_hrt = gethrtime_waitfree();
922 	/*
923 	 * First try to claim the lines in the TSB the target
924 	 * may have been using.
925 	 */
926 	if (mondo_recover_proc(cpuid, bn) == 1) {
927 		/*
928 		 * Didn't claim the whole memory
929 		 */
930 		goto done;
931 	}
932 
933 	/*
934 	 * We tried using the TSB. The target is still
935 	 * not recovered. Check if complete memory scan is
936 	 * enabled.
937 	 */
938 	if (cheetah_sendmondo_fullscan == 0) {
939 		/*
940 		 * Full memory scan is disabled.
941 		 */
942 		retval = 1;
943 		goto done;
944 	}
945 
946 	/*
947 	 * Try claiming the whole memory.
948 	 */
949 	for (seg = memsegs; seg; seg = seg->next) {
950 		begin_pa = (uint64_t)(seg->pages_base) << MMU_PAGESHIFT;
951 		end_pa = (uint64_t)(seg->pages_end) << MMU_PAGESHIFT;
952 		for (cur_pa = begin_pa; cur_pa < end_pa;
953 		    cur_pa += MMU_PAGESIZE) {
954 			idsr = getidsr();
955 			if ((idsr & (IDSR_NACK_BIT(bn) |
956 			    IDSR_BUSY_BIT(bn))) == 0) {
957 				/*
958 				 * Didn't claim all memory
959 				 */
960 				goto done;
961 			}
962 			claimlines(cur_pa, MMU_PAGESIZE,
963 			    CH_ECACHE_SUBBLK_SIZE);
964 			if ((idsr & IDSR_BUSY_BIT(bn)) == 0) {
965 				shipit(cpuid, bn);
966 			}
967 			pages_claimed++;
968 		}
969 	}
970 
971 	/*
972 	 * We did all we could.
973 	 */
974 	retval = 1;
975 
976 done:
977 	/*
978 	 * Update statistics
979 	 */
980 	end_hrt = gethrtime_waitfree();
981 	CHEETAH_LIVELOCK_STAT(recovery);
982 	CHEETAH_LIVELOCK_MAXSTAT(hrt, (end_hrt - begin_hrt));
983 	CHEETAH_LIVELOCK_MAXSTAT(full_claimed, pages_claimed);
984 	CHEETAH_LIVELOCK_ENTRY_SET(histp, recovery_time, \
985 	    (end_hrt -  begin_hrt));
986 
987 	while (atomic_cas_32(&sendmondo_in_recover, 1, 0) != 1)
988 		;
989 
990 	return (retval);
991 }
992 
993 /*
994  * This is called by the cyclic framework when this CPU comes online
995  */
996 /*ARGSUSED*/
997 static void
998 cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
999 {
1000 
1001 	hdlr->cyh_func = (cyc_func_t)cheetah_nudge_buddy;
1002 	hdlr->cyh_level = CY_LOW_LEVEL;
1003 	hdlr->cyh_arg = NULL;
1004 
1005 	/*
1006 	 * Stagger the start time
1007 	 */
1008 	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
1009 	if (cheetah_sendmondo_recover_delay < CHEETAH_LIVELOCK_MIN_DELAY) {
1010 		cheetah_sendmondo_recover_delay = CHEETAH_LIVELOCK_MIN_DELAY;
1011 	}
1012 	when->cyt_interval = cheetah_sendmondo_recover_delay * NANOSEC;
1013 }
1014 
1015 /*
1016  * Create a low-level cyclic to send an xtrap to the next online cpu.
1017  * However, there's no need to have this running on a uniprocessor system.
1018  */
1019 static void
1020 cheetah_nudge_init(void)
1021 {
1022 	cyc_omni_handler_t hdlr;
1023 
1024 	if (max_ncpus == 1) {
1025 		return;
1026 	}
1027 
1028 	hdlr.cyo_online = cheetah_nudge_onln;
1029 	hdlr.cyo_offline = NULL;
1030 	hdlr.cyo_arg = NULL;
1031 
1032 	mutex_enter(&cpu_lock);
1033 	(void) cyclic_add_omni(&hdlr);
1034 	mutex_exit(&cpu_lock);
1035 }
1036 
1037 /*
1038  * Cyclic handler to wake up buddy
1039  */
1040 void
1041 cheetah_nudge_buddy(void)
1042 {
1043 	/*
1044 	 * Disable kernel preemption to protect the cpu list
1045 	 */
1046 	kpreempt_disable();
1047 	if ((CPU->cpu_next_onln != CPU) && (sendmondo_in_recover == 0)) {
1048 		xt_one(CPU->cpu_next_onln->cpu_id, (xcfunc_t *)xt_sync_tl1,
1049 		    0, 0);
1050 	}
1051 	kpreempt_enable();
1052 }
1053 
1054 #endif	/* CHEETAHPLUS_ERRATUM_25 */
1055 
1056 #ifdef SEND_MONDO_STATS
1057 uint32_t x_one_stimes[64];
1058 uint32_t x_one_ltimes[16];
1059 uint32_t x_set_stimes[64];
1060 uint32_t x_set_ltimes[16];
1061 uint32_t x_set_cpus[NCPU];
1062 uint32_t x_nack_stimes[64];
1063 #endif
1064 
1065 /*
1066  * Note: A version of this function is used by the debugger via the KDI,
1067  * and must be kept in sync with this version.  Any changes made to this
1068  * function to support new chips or to accommodate errata must also be included
1069  * in the KDI-specific version.  See us3_kdi.c.
1070  */
1071 void
1072 send_one_mondo(int cpuid)
1073 {
1074 	int busy, nack;
1075 	uint64_t idsr, starttick, endtick, tick, lasttick;
1076 	uint64_t busymask;
1077 #ifdef	CHEETAHPLUS_ERRATUM_25
1078 	int recovered = 0;
1079 #endif
1080 
1081 	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
1082 	starttick = lasttick = gettick();
1083 	shipit(cpuid, 0);
1084 	endtick = starttick + xc_tick_limit;
1085 	busy = nack = 0;
1086 #if defined(JALAPENO) || defined(SERRANO)
1087 	/*
1088 	 * Lower 2 bits of the agent ID determine which BUSY/NACK pair
1089 	 * will be used for dispatching the interrupt.  For now, assume
1090 	 * there are no more than IDSR_BN_SETS CPUs, hence no aliasing
1091 	 * issues with respect to BUSY/NACK pair usage.
1092 	 */
1093 	busymask  = IDSR_BUSY_BIT(cpuid);
1094 #else /* JALAPENO || SERRANO */
1095 	busymask = IDSR_BUSY;
1096 #endif /* JALAPENO || SERRANO */
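	/*
	 * Editor's note -- an illustrative consequence, not part of the
	 * original source: with the pair selected by the low two bits of
	 * the agent ID, targets 5 and 9 would alias onto BUSY/NACK pair 1
	 * (5 & 0x3 == 9 & 0x3 == 1); the IDSR_BN_SETS assumption above is
	 * what rules such aliasing out.
	 */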
1097 	for (;;) {
1098 		idsr = getidsr();
1099 		if (idsr == 0)
1100 			break;
1101 
1102 		tick = gettick();
1103 		/*
1104 		 * If there is a big jump between the current tick
1105 		 * count and lasttick, we have probably hit a breakpoint.
1106 		 * Adjust endtick accordingly to avoid panic.
1107 		 */
1108 		if (tick > (lasttick + xc_tick_jump_limit))
1109 			endtick += (tick - lasttick);
1110 		lasttick = tick;
1111 		if (tick > endtick) {
1112 			if (panic_quiesce)
1113 				return;
1114 #ifdef	CHEETAHPLUS_ERRATUM_25
1115 			if (cheetah_sendmondo_recover && recovered == 0) {
1116 				if (mondo_recover(cpuid, 0)) {
1117 					/*
1118 					 * We claimed the whole memory or
1119 					 * full scan is disabled.
1120 					 */
1121 					recovered++;
1122 				}
1123 				tick = gettick();
1124 				endtick = tick + xc_tick_limit;
1125 				lasttick = tick;
1126 				/*
1127 				 * Recheck idsr
1128 				 */
1129 				continue;
1130 			} else
1131 #endif	/* CHEETAHPLUS_ERRATUM_25 */
1132 			{
1133 				cmn_err(CE_PANIC, "send mondo timeout "
1134 				    "(target 0x%x) [%d NACK %d BUSY]",
1135 				    cpuid, nack, busy);
1136 			}
1137 		}
1138 
1139 		if (idsr & busymask) {
1140 			busy++;
1141 			continue;
1142 		}
1143 		drv_usecwait(1);
1144 		shipit(cpuid, 0);
1145 		nack++;
1146 		busy = 0;
1147 	}
1148 #ifdef SEND_MONDO_STATS
1149 	{
1150 		int n = gettick() - starttick;
1151 		if (n < 8192)
1152 			x_one_stimes[n >> 7]++;
1153 		else
1154 			x_one_ltimes[(n >> 13) & 0xf]++;
1155 	}
1156 #endif
1157 }
1158 
1159 void
1160 syncfpu(void)
1161 {
1162 }
1163 
1164 /*
1165  * Return the size of the processor-specific async error
1166  * structure used.
1167  */
1168 int
1169 cpu_aflt_size(void)
1170 {
1171 	return (sizeof (ch_async_flt_t));
1172 }
1173 
1174 /*
1175  * Tunable to disable the checking of other cpu logout areas during panic for
1176  * potential syndrome 71 generating errors.
1177  */
1178 int enable_check_other_cpus_logout = 1;
1179 
1180 /*
1181  * Check other cpus' logout areas for potential synd 71 generating
1182  * errors.
1183  */
1184 static void
1185 cpu_check_cpu_logout(int cpuid, caddr_t tpc, int tl, int ecc_type,
1186     ch_cpu_logout_t *clop)
1187 {
1188 	struct async_flt *aflt;
1189 	ch_async_flt_t ch_flt;
1190 	uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
1191 
1192 	if (clop == NULL || clop->clo_data.chd_afar == LOGOUT_INVALID) {
1193 		return;
1194 	}
1195 
1196 	bzero(&ch_flt, sizeof (ch_async_flt_t));
1197 
1198 	t_afar = clop->clo_data.chd_afar;
1199 	t_afsr = clop->clo_data.chd_afsr;
1200 	t_afsr_ext = clop->clo_data.chd_afsr_ext;
1201 #if defined(SERRANO)
1202 	ch_flt.afar2 = clop->clo_data.chd_afar2;
1203 #endif	/* SERRANO */
1204 
1205 	/*
1206 	 * In order to simplify code, we maintain this afsr_errs
1207 	 * variable which holds the aggregate of AFSR and AFSR_EXT
1208 	 * sticky bits.
1209 	 */
1210 	t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
1211 	    (t_afsr & C_AFSR_ALL_ERRS);
1212 
1213 	/* Setup the async fault structure */
1214 	aflt = (struct async_flt *)&ch_flt;
1215 	aflt->flt_id = gethrtime_waitfree();
1216 	ch_flt.afsr_ext = t_afsr_ext;
1217 	ch_flt.afsr_errs = t_afsr_errs;
1218 	aflt->flt_stat = t_afsr;
1219 	aflt->flt_addr = t_afar;
1220 	aflt->flt_bus_id = cpuid;
1221 	aflt->flt_inst = cpuid;
1222 	aflt->flt_pc = tpc;
1223 	aflt->flt_prot = AFLT_PROT_NONE;
1224 	aflt->flt_class = CPU_FAULT;
1225 	aflt->flt_priv = ((t_afsr & C_AFSR_PRIV) != 0);
1226 	aflt->flt_tl = tl;
1227 	aflt->flt_status = ecc_type;
1228 	aflt->flt_panic = C_AFSR_PANIC(t_afsr_errs);
1229 
1230 	/*
1231 	 * Queue events on the async event queue, one event per error bit.
1232 	 * If no events are queued, queue an event to complain.
1233 	 */
1234 	if (cpu_queue_events(&ch_flt, NULL, t_afsr_errs, clop) == 0) {
1235 		ch_flt.flt_type = CPU_INV_AFSR;
1236 		cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_INVALID_AFSR,
1237 		    (void *)&ch_flt, sizeof (ch_async_flt_t), ue_queue,
1238 		    aflt->flt_panic);
1239 	}
1240 
1241 	/*
1242 	 * Zero out + invalidate CPU logout.
1243 	 */
1244 	bzero(clop, sizeof (ch_cpu_logout_t));
1245 	clop->clo_data.chd_afar = LOGOUT_INVALID;
1246 }
1247 
1248 /*
1249  * Check the logout areas of all other cpus for unlogged errors.
1250  */
1251 static void
1252 cpu_check_other_cpus_logout(void)
1253 {
1254 	int i, j;
1255 	processorid_t myid;
1256 	struct cpu *cp;
1257 	ch_err_tl1_data_t *cl1p;
1258 
1259 	myid = CPU->cpu_id;
1260 	for (i = 0; i < NCPU; i++) {
1261 		cp = cpu[i];
1262 
1263 		if ((cp == NULL) || !(cp->cpu_flags & CPU_EXISTS) ||
1264 		    (cp->cpu_id == myid) || (CPU_PRIVATE(cp) == NULL)) {
1265 			continue;
1266 		}
1267 
1268 		/*
1269 		 * Check each of the tl>0 logout areas
1270 		 */
1271 		cl1p = CPU_PRIVATE_PTR(cp, chpr_tl1_err_data[0]);
1272 		for (j = 0; j < CH_ERR_TL1_TLMAX; j++, cl1p++) {
1273 			if (cl1p->ch_err_tl1_flags == 0)
1274 				continue;
1275 
1276 			cpu_check_cpu_logout(i, (caddr_t)cl1p->ch_err_tl1_tpc,
1277 			    1, ECC_F_TRAP, &cl1p->ch_err_tl1_logout);
1278 		}
1279 
1280 		/*
1281 		 * Check each of the remaining logout areas
1282 		 */
1283 		cpu_check_cpu_logout(i, NULL, 0, ECC_F_TRAP,
1284 		    CPU_PRIVATE_PTR(cp, chpr_fecctl0_logout));
1285 		cpu_check_cpu_logout(i, NULL, 0, ECC_C_TRAP,
1286 		    CPU_PRIVATE_PTR(cp, chpr_cecc_logout));
1287 		cpu_check_cpu_logout(i, NULL, 0, ECC_D_TRAP,
1288 		    CPU_PRIVATE_PTR(cp, chpr_async_logout));
1289 	}
1290 }
1291 
1292 /*
1293  * The fast_ecc_err handler transfers control here for UCU, UCC events.
1294  * Note that we flush Ecache twice, once in the fast_ecc_err handler to
1295  * flush the error that caused the UCU/UCC, then again here at the end to
1296  * flush the TL=1 trap handler code out of the Ecache, so we can minimize
1297  * the probability of getting a TL>1 Fast ECC trap when we're fielding
1298  * another Fast ECC trap.
1299  *
1300  * Cheetah+ also handles: TSCE: No additional processing required.
1301  * Panther adds L3_UCU and L3_UCC which are reported in AFSR_EXT.
1302  *
1303  * Note that the p_clo_flags input is only valid in cases where the
1304  * cpu_private struct is not yet initialized (since that is the only
1305  * time that information cannot be obtained from the logout struct.)
1306  */
1307 /*ARGSUSED*/
1308 void
1309 cpu_fast_ecc_error(struct regs *rp, ulong_t p_clo_flags)
1310 {
1311 	ch_cpu_logout_t *clop;
1312 	uint64_t ceen, nceen;
1313 
1314 	/*
1315 	 * Get the CPU log out info. If we can't find our CPU private
1316 	 * pointer, then we will have to make do without any detailed
1317 	 * logout information.
1318 	 */
1319 	if (CPU_PRIVATE(CPU) == NULL) {
1320 		clop = NULL;
1321 		ceen = p_clo_flags & EN_REG_CEEN;
1322 		nceen = p_clo_flags & EN_REG_NCEEN;
1323 	} else {
1324 		clop = CPU_PRIVATE_PTR(CPU, chpr_fecctl0_logout);
1325 		ceen = clop->clo_flags & EN_REG_CEEN;
1326 		nceen = clop->clo_flags & EN_REG_NCEEN;
1327 	}
1328 
1329 	cpu_log_fast_ecc_error((caddr_t)rp->r_pc,
1330 	    (rp->r_tstate & TSTATE_PRIV) ? 1 : 0, 0, ceen, nceen, clop);
1331 }
1332 
1333 /*
1334  * Log fast ecc error, called from either Fast ECC at TL=0 or Fast
1335  * ECC at TL>0.  Need to supply either a error register pointer or a
1336  * cpu logout structure pointer.
1337  */
1338 static void
1339 cpu_log_fast_ecc_error(caddr_t tpc, int priv, int tl, uint64_t ceen,
1340     uint64_t nceen, ch_cpu_logout_t *clop)
1341 {
1342 	struct async_flt *aflt;
1343 	ch_async_flt_t ch_flt;
1344 	uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
1345 	char pr_reason[MAX_REASON_STRING];
1346 	ch_cpu_errors_t cpu_error_regs;
1347 
1348 	bzero(&ch_flt, sizeof (ch_async_flt_t));
1349 	/*
1350 	 * If no cpu logout data, then we will have to make do without
1351 	 * any detailed logout information.
1352 	 */
1353 	if (clop == NULL) {
1354 		ch_flt.flt_diag_data.chd_afar = LOGOUT_INVALID;
1355 		get_cpu_error_state(&cpu_error_regs);
1356 		set_cpu_error_state(&cpu_error_regs);
1357 		t_afar = cpu_error_regs.afar;
1358 		t_afsr = cpu_error_regs.afsr;
1359 		t_afsr_ext = cpu_error_regs.afsr_ext;
1360 #if defined(SERRANO)
1361 		ch_flt.afar2 = cpu_error_regs.afar2;
1362 #endif	/* SERRANO */
1363 	} else {
1364 		t_afar = clop->clo_data.chd_afar;
1365 		t_afsr = clop->clo_data.chd_afsr;
1366 		t_afsr_ext = clop->clo_data.chd_afsr_ext;
1367 #if defined(SERRANO)
1368 		ch_flt.afar2 = clop->clo_data.chd_afar2;
1369 #endif	/* SERRANO */
1370 	}
1371 
1372 	/*
1373 	 * In order to simplify code, we maintain this afsr_errs
1374 	 * variable which holds the aggregate of AFSR and AFSR_EXT
1375 	 * sticky bits.
1376 	 */
1377 	t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
1378 	    (t_afsr & C_AFSR_ALL_ERRS);
1379 	pr_reason[0] = '\0';
1380 
1381 	/* Setup the async fault structure */
1382 	aflt = (struct async_flt *)&ch_flt;
1383 	aflt->flt_id = gethrtime_waitfree();
1384 	ch_flt.afsr_ext = t_afsr_ext;
1385 	ch_flt.afsr_errs = t_afsr_errs;
1386 	aflt->flt_stat = t_afsr;
1387 	aflt->flt_addr = t_afar;
1388 	aflt->flt_bus_id = getprocessorid();
1389 	aflt->flt_inst = CPU->cpu_id;
1390 	aflt->flt_pc = tpc;
1391 	aflt->flt_prot = AFLT_PROT_NONE;
1392 	aflt->flt_class = CPU_FAULT;
1393 	aflt->flt_priv = priv;
1394 	aflt->flt_tl = tl;
1395 	aflt->flt_status = ECC_F_TRAP;
1396 	aflt->flt_panic = C_AFSR_PANIC(t_afsr_errs);
1397 
1398 	/*
1399 	 * XXXX - Phenomenal hack to get around Solaris not getting all the
1400 	 * cmn_err messages out to the console.  The situation is a UCU (in
1401 	 * priv mode) which causes a WDU which causes a UE (on the retry).
1402 	 * The messages for the UCU and WDU are enqueued and then pulled off
1403 	 * the async queue via softint and syslogd starts to process them
1404 	 * but doesn't get them to the console.  The UE causes a panic, but
1405 	 * since the UCU/WDU messages are already in transit, those aren't
1406 	 * on the async queue.  The hack is to check if we have a matching
1407 	 * WDU event for the UCU, and if it matches, we're more than likely
1408 	 * going to panic with a UE, unless we're under protection.  So, we
1409 	 * check to see if we got a matching WDU event and if we're under
1410 	 * protection.
1411 	 *
1412 	 * For Cheetah/Cheetah+/Jaguar/Jalapeno, the sequence we care about
1413 	 * looks like this:
1414 	 *    UCU->WDU->UE
1415 	 * For Panther, it could look like either of these:
1416 	 *    UCU---->WDU->L3_WDU->UE
1417 	 *    L3_UCU->WDU->L3_WDU->UE
1418 	 */
1419 	if ((t_afsr_errs & (C_AFSR_UCU | C_AFSR_L3_UCU)) &&
1420 	    aflt->flt_panic == 0 && aflt->flt_priv != 0 &&
1421 	    curthread->t_ontrap == NULL &&
1422 	    curthread->t_lofault == (uintptr_t)NULL) {
1423 		get_cpu_error_state(&cpu_error_regs);
1424 		if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
1425 			aflt->flt_panic |=
1426 			    ((cpu_error_regs.afsr & C_AFSR_WDU) &&
1427 			    (cpu_error_regs.afsr_ext & C_AFSR_L3_WDU) &&
1428 			    (cpu_error_regs.afar == t_afar));
1429 			aflt->flt_panic |= ((clop == NULL) &&
1430 			    (t_afsr_errs & C_AFSR_WDU) &&
1431 			    (t_afsr_errs & C_AFSR_L3_WDU));
1432 		} else {
1433 			aflt->flt_panic |=
1434 			    ((cpu_error_regs.afsr & C_AFSR_WDU) &&
1435 			    (cpu_error_regs.afar == t_afar));
1436 			aflt->flt_panic |= ((clop == NULL) &&
1437 			    (t_afsr_errs & C_AFSR_WDU));
1438 		}
1439 	}
1440 
1441 	/*
1442 	 * Queue events on the async event queue, one event per error bit.
1443 	 * If no events are queued or no Fast ECC events are on in the AFSR,
1444 	 * queue an event to complain.
1445 	 */
1446 	if (cpu_queue_events(&ch_flt, pr_reason, t_afsr_errs, clop) == 0 ||
1447 	    ((t_afsr_errs & (C_AFSR_FECC_ERRS | C_AFSR_EXT_FECC_ERRS)) == 0)) {
1448 		ch_flt.flt_type = CPU_INV_AFSR;
1449 		cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_INVALID_AFSR,
1450 		    (void *)&ch_flt, sizeof (ch_async_flt_t), ue_queue,
1451 		    aflt->flt_panic);
1452 	}
1453 
1454 	/*
1455 	 * Zero out + invalidate CPU logout.
1456 	 */
1457 	if (clop) {
1458 		bzero(clop, sizeof (ch_cpu_logout_t));
1459 		clop->clo_data.chd_afar = LOGOUT_INVALID;
1460 	}
1461 
1462 	/*
1463 	 * We carefully re-enable NCEEN and CEEN and then check if any deferred
1464 	 * or disrupting errors have happened.  We do this because if a
1465 	 * deferred or disrupting error had occurred with NCEEN/CEEN off, the
1466 	 * trap will not be taken when NCEEN/CEEN is re-enabled.  Note that
1467 	 * CEEN works differently on Cheetah than on Spitfire.  Also, we enable
1468 	 * NCEEN/CEEN *before* checking the AFSR to avoid the small window of a
1469 	 * deferred or disrupting error happening between checking the AFSR and
1470 	 * enabling NCEEN/CEEN.
1471 	 *
1472 	 * Note: CEEN and NCEEN are only reenabled if they were on when the
1473 	 * trap was taken.
1474 	 */
1475 	set_error_enable(get_error_enable() | (nceen | ceen));
1476 	if (clear_errors(&ch_flt)) {
1477 		aflt->flt_panic |= ((ch_flt.afsr_errs &
1478 		    (C_AFSR_EXT_ASYNC_ERRS | C_AFSR_ASYNC_ERRS)) != 0);
1479 		(void) cpu_queue_events(&ch_flt, pr_reason, ch_flt.afsr_errs,
1480 		    NULL);
1481 	}
1482 
1483 	/*
1484 	 * Panic here if aflt->flt_panic has been set.  Enqueued errors will
1485 	 * be logged as part of the panic flow.
1486 	 */
1487 	if (aflt->flt_panic)
1488 		fm_panic("%sError(s)", pr_reason);
1489 
1490 	/*
1491 	 * Flushing the Ecache here gets the part of the trap handler that
1492 	 * is run at TL=1 out of the Ecache.
1493 	 */
1494 	cpu_flush_ecache();
1495 }
1496 
1497 /*
1498  * This is called via sys_trap from pil15_interrupt code if the
1499  * corresponding entry in ch_err_tl1_pending is set.  Checks the
1500  * various ch_err_tl1_data structures for valid entries based on the bit
1501  * settings in the ch_err_tl1_flags entry of the structure.
1502  */
1503 /*ARGSUSED*/
1504 void
1505 cpu_tl1_error(struct regs *rp, int panic)
1506 {
1507 	ch_err_tl1_data_t *cl1p, cl1;
1508 	int i, ncl1ps;
1509 	uint64_t me_flags;
1510 	uint64_t ceen, nceen;
1511 
1512 	if (ch_err_tl1_paddrs[CPU->cpu_id] == 0) {
1513 		cl1p = &ch_err_tl1_data;
1514 		ncl1ps = 1;
1515 	} else if (CPU_PRIVATE(CPU) != NULL) {
1516 		cl1p = CPU_PRIVATE_PTR(CPU, chpr_tl1_err_data[0]);
1517 		ncl1ps = CH_ERR_TL1_TLMAX;
1518 	} else {
1519 		ncl1ps = 0;
1520 	}
1521 
1522 	for (i = 0; i < ncl1ps; i++, cl1p++) {
1523 		if (cl1p->ch_err_tl1_flags == 0)
1524 			continue;
1525 
1526 		/*
1527 		 * Grab a copy of the logout data and invalidate
1528 		 * the logout area.
1529 		 */
1530 		cl1 = *cl1p;
1531 		bzero(cl1p, sizeof (ch_err_tl1_data_t));
1532 		cl1p->ch_err_tl1_logout.clo_data.chd_afar = LOGOUT_INVALID;
1533 		me_flags = CH_ERR_ME_FLAGS(cl1.ch_err_tl1_flags);
1534 
1535 		/*
1536 		 * Log "first error" in ch_err_tl1_data.
1537 		 */
1538 		if (cl1.ch_err_tl1_flags & CH_ERR_FECC) {
1539 			ceen = get_error_enable() & EN_REG_CEEN;
1540 			nceen = get_error_enable() & EN_REG_NCEEN;
1541 			cpu_log_fast_ecc_error((caddr_t)cl1.ch_err_tl1_tpc, 1,
1542 			    1, ceen, nceen, &cl1.ch_err_tl1_logout);
1543 		}
1544 #if defined(CPU_IMP_L1_CACHE_PARITY)
1545 		if (cl1.ch_err_tl1_flags & (CH_ERR_IPE | CH_ERR_DPE)) {
1546 			cpu_parity_error(rp, cl1.ch_err_tl1_flags,
1547 			    (caddr_t)cl1.ch_err_tl1_tpc);
1548 		}
1549 #endif	/* CPU_IMP_L1_CACHE_PARITY */
1550 
1551 		/*
1552 		 * Log "multiple events" in ch_err_tl1_data.  Note that
1553 		 * we don't read and clear the AFSR/AFAR in the TL>0 code;
1554 		 * if the structure is busy, we just do the cache flushing
1555 		 * we have to do and then retry.  So the AFSR/AFAR
1556 		 * at this point *should* have some relevant info.  If there
1557 		 * are no valid errors in the AFSR, we'll assume they've
1558 		 * already been picked up and logged.  For I$/D$ parity,
1559 		 * we just log an event with an "Unknown" (NULL) TPC.
1560 		 */
1561 		if (me_flags & CH_ERR_FECC) {
1562 			ch_cpu_errors_t cpu_error_regs;
1563 			uint64_t t_afsr_errs;
1564 
1565 			/*
1566 			 * Get the error registers and see if there's
1567 			 * a pending error.  If not, don't bother
1568 			 * generating an "Invalid AFSR" error event.
1569 			 */
1570 			get_cpu_error_state(&cpu_error_regs);
1571 			t_afsr_errs = (cpu_error_regs.afsr_ext &
1572 			    C_AFSR_EXT_ALL_ERRS) |
1573 			    (cpu_error_regs.afsr & C_AFSR_ALL_ERRS);
1574 			if (t_afsr_errs != 0) {
1575 				ceen = get_error_enable() & EN_REG_CEEN;
1576 				nceen = get_error_enable() & EN_REG_NCEEN;
1577 				cpu_log_fast_ecc_error((caddr_t)NULL, 1,
1578 				    1, ceen, nceen, NULL);
1579 			}
1580 		}
1581 #if defined(CPU_IMP_L1_CACHE_PARITY)
1582 		if (me_flags & (CH_ERR_IPE | CH_ERR_DPE)) {
1583 			cpu_parity_error(rp, me_flags, (caddr_t)NULL);
1584 		}
1585 #endif	/* CPU_IMP_L1_CACHE_PARITY */
1586 	}
1587 }
1588 
1589 /*
1590  * Called from Fast ECC TL>0 handler in case of fatal error.
1591  * cpu_tl1_error should always find an associated ch_err_tl1_data structure,
1592  * but if we don't, we'll panic with something reasonable.
1593  */
1594 /*ARGSUSED*/
1595 void
1596 cpu_tl1_err_panic(struct regs *rp, ulong_t flags)
1597 {
1598 	cpu_tl1_error(rp, 1);
1599 	/*
1600 	 * Should never return, but just in case.
1601 	 */
1602 	fm_panic("Unsurvivable ECC Error at TL>0");
1603 }
1604 
1605 /*
1606  * The ce_err/ce_err_tl1 handlers transfer control here for CE, EMC, EDU:ST,
1607  * EDC, WDU, WDC, CPU, CPC, IVU, IVC events.
1608  * Disrupting errors controlled by NCEEN: EDU:ST, WDU, CPU, IVU
1609  * Disrupting errors controlled by CEEN: CE, EMC, EDC, WDC, CPC, IVC
1610  *
1611  * Cheetah+ also handles (No additional processing required):
1612  *    DUE, DTO, DBERR	(NCEEN controlled)
1613  *    THCE		(CEEN and ET_ECC_en controlled)
1614  *    TUE		(ET_ECC_en controlled)
1615  *
1616  * Panther further adds:
1617  *    IMU, L3_EDU, L3_WDU, L3_CPU		(NCEEN controlled)
1618  *    IMC, L3_EDC, L3_WDC, L3_CPC, L3_THCE	(CEEN controlled)
1619  *    TUE_SH, TUE		(NCEEN and L2_tag_ECC_en controlled)
1620  *    L3_TUE, L3_TUE_SH		(NCEEN and ET_ECC_en controlled)
1621  *    THCE			(CEEN and L2_tag_ECC_en controlled)
1622  *    L3_THCE			(CEEN and ET_ECC_en controlled)
1623  *
1624  * Note that the p_clo_flags input is only valid in cases where the
1625  * cpu_private struct is not yet initialized (since that is the only
1626  * time that information cannot be obtained from the logout struct.)
1627  */
1628 /*ARGSUSED*/
1629 void
1630 cpu_disrupting_error(struct regs *rp, ulong_t p_clo_flags)
1631 {
1632 	struct async_flt *aflt;
1633 	ch_async_flt_t ch_flt;
1634 	char pr_reason[MAX_REASON_STRING];
1635 	ch_cpu_logout_t *clop;
1636 	uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
1637 	ch_cpu_errors_t cpu_error_regs;
1638 
1639 	bzero(&ch_flt, sizeof (ch_async_flt_t));
1640 	/*
1641 	 * Get the CPU log out info. If we can't find our CPU private
1642 	 * pointer, then we will have to make do without any detailed
1643 	 * logout information.
1644 	 */
1645 	if (CPU_PRIVATE(CPU) == NULL) {
1646 		clop = NULL;
1647 		ch_flt.flt_diag_data.chd_afar = LOGOUT_INVALID;
1648 		get_cpu_error_state(&cpu_error_regs);
1649 		set_cpu_error_state(&cpu_error_regs);
1650 		t_afar = cpu_error_regs.afar;
1651 		t_afsr = cpu_error_regs.afsr;
1652 		t_afsr_ext = cpu_error_regs.afsr_ext;
1653 #if defined(SERRANO)
1654 		ch_flt.afar2 = cpu_error_regs.afar2;
1655 #endif	/* SERRANO */
1656 	} else {
1657 		clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
1658 		t_afar = clop->clo_data.chd_afar;
1659 		t_afsr = clop->clo_data.chd_afsr;
1660 		t_afsr_ext = clop->clo_data.chd_afsr_ext;
1661 #if defined(SERRANO)
1662 		ch_flt.afar2 = clop->clo_data.chd_afar2;
1663 #endif	/* SERRANO */
1664 	}
1665 
1666 	/*
1667 	 * In order to simplify code, we maintain this afsr_errs
1668 	 * variable which holds the aggregate of AFSR and AFSR_EXT
1669 	 * sticky bits.
1670 	 */
1671 	t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
1672 	    (t_afsr & C_AFSR_ALL_ERRS);
1673 
1674 	pr_reason[0] = '\0';
1675 	/* Setup the async fault structure */
1676 	aflt = (struct async_flt *)&ch_flt;
1677 	ch_flt.afsr_ext = t_afsr_ext;
1678 	ch_flt.afsr_errs = t_afsr_errs;
1679 	aflt->flt_stat = t_afsr;
1680 	aflt->flt_addr = t_afar;
1681 	aflt->flt_pc = (caddr_t)rp->r_pc;
1682 	aflt->flt_priv = (rp->r_tstate & TSTATE_PRIV) ?  1 : 0;
1683 	aflt->flt_tl = 0;
1684 	aflt->flt_panic = C_AFSR_PANIC(t_afsr_errs);
1685 
1686 	/*
1687 	 * If this trap is a result of one of the errors not masked
1688 	 * by cpu_ce_not_deferred, we don't reenable CEEN.  Instead,
1689 	 * we indicate that a timeout is to be set later.
1690 	 */
1691 	if (!(t_afsr_errs & (cpu_ce_not_deferred | cpu_ce_not_deferred_ext)) &&
1692 	    !aflt->flt_panic)
1693 		ch_flt.flt_trapped_ce = CE_CEEN_DEFER | CE_CEEN_TRAPPED;
1694 	else
1695 		ch_flt.flt_trapped_ce = CE_CEEN_NODEFER | CE_CEEN_TRAPPED;
1696 
1697 	/*
1698 	 * log the CE and clean up
1699 	 */
1700 	cpu_log_and_clear_ce(&ch_flt);
1701 
1702 	/*
1703 	 * We re-enable CEEN (if required) and check if any disrupting errors
1704 	 * have happened.  We do this because if a disrupting error had occurred
1705 	 * with CEEN off, the trap will not be taken when CEEN is re-enabled.
1706 	 * Note that CEEN works differently on Cheetah than on Spitfire.  Also,
1707 	 * we enable CEEN *before* checking the AFSR to avoid the small window
1708 	 * of an error happening between checking the AFSR and enabling CEEN.
1709 	 */
1710 	if (ch_flt.flt_trapped_ce & CE_CEEN_NODEFER)
1711 		set_error_enable(get_error_enable() | EN_REG_CEEN);
1712 	if (clear_errors(&ch_flt)) {
1713 		(void) cpu_queue_events(&ch_flt, pr_reason, ch_flt.afsr_errs,
1714 		    NULL);
1715 	}
1716 
1717 	/*
1718 	 * Panic here if aflt->flt_panic has been set.  Enqueued errors will
1719 	 * be logged as part of the panic flow.
1720 	 */
1721 	if (aflt->flt_panic)
1722 		fm_panic("%sError(s)", pr_reason);
1723 }
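
/*
 * A minimal standalone sketch of the afsr_errs folding used above (and
 * again in cpu_deferred_error() below): the recognized sticky bits of
 * AFSR and AFSR_EXT are merged into one working mask.  The mask values
 * here are illustrative placeholders, not the real C_AFSR_ALL_ERRS and
 * C_AFSR_EXT_ALL_ERRS definitions.
 */
#if 0	/* illustrative sketch, not part of this file's build */
#include <stdint.h>

#define	EX_AFSR_ALL_ERRS	0x00000000ffffffffULL	/* placeholder */
#define	EX_AFSR_EXT_ALL_ERRS	0x000000ff00000000ULL	/* placeholder */

static uint64_t
fold_afsr_errs(uint64_t afsr, uint64_t afsr_ext)
{
	/* Keep only the recognized error bits from each register. */
	return ((afsr_ext & EX_AFSR_EXT_ALL_ERRS) |
	    (afsr & EX_AFSR_ALL_ERRS));
}
#endif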
1724 
1725 /*
1726  * The async_err handler transfers control here for UE, EMU, EDU:BLD,
1727  * L3_EDU:BLD, TO, and BERR events.
1728  * Deferred errors controlled by NCEEN: UE, EMU, EDU:BLD, L3_EDU:BLD, TO, BERR
1729  *
1730  * Cheetah+: No additional errors handled.
1731  *
1732  * Note that the p_clo_flags input is only valid in cases where the
1733  * cpu_private struct is not yet initialized (since that is the only
1734  * time that information cannot be obtained from the logout struct.)
1735  */
1736 /*ARGSUSED*/
1737 void
1738 cpu_deferred_error(struct regs *rp, ulong_t p_clo_flags)
1739 {
1740 	ushort_t ttype, tl;
1741 	ch_async_flt_t ch_flt;
1742 	struct async_flt *aflt;
1743 	int trampolined = 0;
1744 	char pr_reason[MAX_REASON_STRING];
1745 	ch_cpu_logout_t *clop;
1746 	uint64_t ceen, clo_flags;
1747 	uint64_t log_afsr;
1748 	uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
1749 	ch_cpu_errors_t cpu_error_regs;
1750 	int expected = DDI_FM_ERR_UNEXPECTED;
1751 	ddi_acc_hdl_t *hp;
1752 
1753 	/*
1754 	 * We need to look at p_flag to determine if the thread detected an
1755 	 * error while dumping core.  We can't grab p_lock here, but it's ok
1756 	 * because we just need a consistent snapshot and we know that everyone
1757 	 * else will store a consistent set of bits while holding p_lock.  We
1758 	 * don't have to worry about a race because SDOCORE is set once prior
1759 	 * to doing i/o from the process's address space and is never cleared.
1760 	 */
1761 	uint_t pflag = ttoproc(curthread)->p_flag;
1762 
1763 	bzero(&ch_flt, sizeof (ch_async_flt_t));
1764 	/*
1765 	 * Get the CPU log out info. If we can't find our CPU private
1766 	 * pointer, then we will have to make do without any detailed
1767 	 * logout information.
1768 	 */
1769 	if (CPU_PRIVATE(CPU) == NULL) {
1770 		clop = NULL;
1771 		ch_flt.flt_diag_data.chd_afar = LOGOUT_INVALID;
1772 		get_cpu_error_state(&cpu_error_regs);
1773 		set_cpu_error_state(&cpu_error_regs);
1774 		t_afar = cpu_error_regs.afar;
1775 		t_afsr = cpu_error_regs.afsr;
1776 		t_afsr_ext = cpu_error_regs.afsr_ext;
1777 #if defined(SERRANO)
1778 		ch_flt.afar2 = cpu_error_regs.afar2;
1779 #endif	/* SERRANO */
1780 		clo_flags = p_clo_flags;
1781 	} else {
1782 		clop = CPU_PRIVATE_PTR(CPU, chpr_async_logout);
1783 		t_afar = clop->clo_data.chd_afar;
1784 		t_afsr = clop->clo_data.chd_afsr;
1785 		t_afsr_ext = clop->clo_data.chd_afsr_ext;
1786 #if defined(SERRANO)
1787 		ch_flt.afar2 = clop->clo_data.chd_afar2;
1788 #endif	/* SERRANO */
1789 		clo_flags = clop->clo_flags;
1790 	}
1791 
1792 	/*
1793 	 * In order to simplify code, we maintain this afsr_errs
1794 	 * variable which holds the aggregate of AFSR and AFSR_EXT
1795 	 * sticky bits.
1796 	 */
1797 	t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
1798 	    (t_afsr & C_AFSR_ALL_ERRS);
1799 	pr_reason[0] = '\0';
1800 
1801 	/*
1802 	 * Grab information encoded into our clo_flags field.
1803 	 */
1804 	ceen = clo_flags & EN_REG_CEEN;
1805 	tl = (clo_flags & CLO_FLAGS_TL_MASK) >> CLO_FLAGS_TL_SHIFT;
1806 	ttype = (clo_flags & CLO_FLAGS_TT_MASK) >> CLO_FLAGS_TT_SHIFT;
1807 
1808 	/*
1809 	 * handle the specific error
1810 	 */
1811 	aflt = (struct async_flt *)&ch_flt;
1812 	aflt->flt_id = gethrtime_waitfree();
1813 	aflt->flt_bus_id = getprocessorid();
1814 	aflt->flt_inst = CPU->cpu_id;
1815 	ch_flt.afsr_ext = t_afsr_ext;
1816 	ch_flt.afsr_errs = t_afsr_errs;
1817 	aflt->flt_stat = t_afsr;
1818 	aflt->flt_addr = t_afar;
1819 	aflt->flt_pc = (caddr_t)rp->r_pc;
1820 	aflt->flt_prot = AFLT_PROT_NONE;
1821 	aflt->flt_class = CPU_FAULT;
1822 	aflt->flt_priv = (rp->r_tstate & TSTATE_PRIV) ?  1 : 0;
1823 	aflt->flt_tl = (uchar_t)tl;
1824 	aflt->flt_panic = ((tl != 0) || (aft_testfatal != 0) ||
1825 	    C_AFSR_PANIC(t_afsr_errs));
1826 	aflt->flt_core = (pflag & SDOCORE) ? 1 : 0;
1827 	aflt->flt_status = ((ttype == T_DATA_ERROR) ? ECC_D_TRAP : ECC_I_TRAP);
1828 
1829 	/*
1830 	 * If the trap occurred in privileged mode at TL=0, we need to check to
1831 	 * see if we were executing in the kernel under on_trap() or t_lofault
1832 	 * protection.  If so, modify the saved registers so that we return
1833 	 * from the trap to the appropriate trampoline routine.
1834 	 */
1835 	if (aflt->flt_priv && tl == 0) {
1836 		if (curthread->t_ontrap != NULL) {
1837 			on_trap_data_t *otp = curthread->t_ontrap;
1838 
1839 			if (otp->ot_prot & OT_DATA_EC) {
1840 				aflt->flt_prot = AFLT_PROT_EC;
1841 				otp->ot_trap |= OT_DATA_EC;
1842 				rp->r_pc = otp->ot_trampoline;
1843 				rp->r_npc = rp->r_pc + 4;
1844 				trampolined = 1;
1845 			}
1846 
1847 			if ((t_afsr & (C_AFSR_TO | C_AFSR_BERR)) &&
1848 			    (otp->ot_prot & OT_DATA_ACCESS)) {
1849 				aflt->flt_prot = AFLT_PROT_ACCESS;
1850 				otp->ot_trap |= OT_DATA_ACCESS;
1851 				rp->r_pc = otp->ot_trampoline;
1852 				rp->r_npc = rp->r_pc + 4;
1853 				trampolined = 1;
1854 				/*
1855 				 * for peeks and caut_gets errors are expected
1856 				 * for peeks and caut_gets, errors are expected
1857 				hp = (ddi_acc_hdl_t *)otp->ot_handle;
1858 				if (!hp)
1859 					expected = DDI_FM_ERR_PEEK;
1860 				else if (hp->ah_acc.devacc_attr_access ==
1861 				    DDI_CAUTIOUS_ACC)
1862 					expected = DDI_FM_ERR_EXPECTED;
1863 			}
1864 
1865 		} else if (curthread->t_lofault) {
1866 			aflt->flt_prot = AFLT_PROT_COPY;
1867 			rp->r_g1 = EFAULT;
1868 			rp->r_pc = curthread->t_lofault;
1869 			rp->r_npc = rp->r_pc + 4;
1870 			trampolined = 1;
1871 		}
1872 	}
1873 
1874 	/*
1875 	 * If we're in user mode or we're doing a protected copy, we either
1876 	 * want the ASTON code below to send a signal to the user process
1877 	 * or we want to panic if aft_panic is set.
1878 	 *
1879 	 * If we're in privileged mode and we're not doing a copy, then we
1880 	 * need to check if we've trampolined.  If we haven't trampolined,
1881 	 * we should panic.
1882 	 */
1883 	if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
1884 		if (t_afsr_errs &
1885 		    ((C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS) &
1886 		    ~(C_AFSR_BERR | C_AFSR_TO)))
1887 			aflt->flt_panic |= aft_panic;
1888 	} else if (!trampolined) {
1889 		aflt->flt_panic = 1;
1890 	}
1891 
1892 	/*
1893 	 * If we've trampolined due to a privileged TO or BERR, or if an
1894 	 * unprivileged TO or BERR occurred, we don't want to enqueue an
1895 	 * event for that TO or BERR.  Queue all other events (if any) besides
1896  * the TO/BERR.  Since we may not be enqueuing any events, we need to
1897 	 * ignore the number of events queued.  If we haven't trampolined due
1898 	 * to a TO or BERR, just enqueue events normally.
1899 	 */
1900 	log_afsr = t_afsr_errs;
1901 	if (trampolined) {
1902 		log_afsr &= ~(C_AFSR_TO | C_AFSR_BERR);
1903 	} else if (!aflt->flt_priv) {
1904 		/*
1905 		 * User mode, suppress messages if
1906 		 * cpu_berr_to_verbose is not set.
1907 		 */
1908 		if (!cpu_berr_to_verbose)
1909 			log_afsr &= ~(C_AFSR_TO | C_AFSR_BERR);
1910 	}
1911 
1912 	/*
1913 	 * Log any errors that occurred
1914 	 */
1915 	if (((log_afsr &
1916 	    ((C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS) & ~C_AFSR_ME)) &&
1917 	    cpu_queue_events(&ch_flt, pr_reason, log_afsr, clop) == 0) ||
1918 	    (t_afsr_errs & (C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS)) == 0) {
1919 		ch_flt.flt_type = CPU_INV_AFSR;
1920 		cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_INVALID_AFSR,
1921 		    (void *)&ch_flt, sizeof (ch_async_flt_t), ue_queue,
1922 		    aflt->flt_panic);
1923 	}
1924 
1925 	/*
1926 	 * Zero out + invalidate CPU logout.
1927 	 */
1928 	if (clop) {
1929 		bzero(clop, sizeof (ch_cpu_logout_t));
1930 		clop->clo_data.chd_afar = LOGOUT_INVALID;
1931 	}
1932 
1933 #if defined(JALAPENO) || defined(SERRANO)
1934 	/*
1935 	 * UE/RUE/BERR/TO: Call our bus nexus friends to check for
1936 	 * IO errors that may have resulted in this trap.
1937 	 */
1938 	if (t_afsr & (C_AFSR_UE|C_AFSR_RUE|C_AFSR_TO|C_AFSR_BERR)) {
1939 		cpu_run_bus_error_handlers(aflt, expected);
1940 	}
1941 
1942 	/*
1943 	 * UE/RUE: If UE or RUE is in memory, we need to flush the bad
1944 	 * line from the Ecache.  We also need to query the bus nexus for
1945 	 * fatal errors.  Attempts to do diagnostic read on caches may
1946 	 * introduce more errors (especially when the module is bad).
1947 	 */
1948 	if (t_afsr & (C_AFSR_UE|C_AFSR_RUE)) {
1949 		/*
1950 		 * Ask our bus nexus friends if they have any fatal errors.  If
1951 		 * so, they will log appropriate error messages.
1952 		 */
1953 		if (bus_func_invoke(BF_TYPE_UE) == BF_FATAL)
1954 			aflt->flt_panic = 1;
1955 
1956 		/*
1957 		 * We got a UE or RUE and are panicking, save the fault PA in
1958 		 * a known location so that the platform specific panic code
1959 		 * can check for copyback errors.
1960 		 */
1961 		if (aflt->flt_panic && cpu_flt_in_memory(&ch_flt, C_AFSR_UE)) {
1962 			panic_aflt = *aflt;
1963 		}
1964 	}
1965 
1966 	/*
1967 	 * Flush Ecache line or entire Ecache
1968 	 */
1969 	if (t_afsr & (C_AFSR_UE | C_AFSR_RUE | C_AFSR_EDU | C_AFSR_BERR))
1970 		cpu_error_ecache_flush(&ch_flt);
1971 #else /* JALAPENO || SERRANO */
1972 	/*
1973 	 * UE/BERR/TO: Call our bus nexus friends to check for
1974 	 * IO errors that may have resulted in this trap.
1975 	 */
1976 	if (t_afsr & (C_AFSR_UE|C_AFSR_TO|C_AFSR_BERR)) {
1977 		cpu_run_bus_error_handlers(aflt, expected);
1978 	}
1979 
1980 	/*
1981 	 * UE: If the UE is in memory, we need to flush the bad
1982 	 * line from the Ecache.  We also need to query the bus nexus for
1983 	 * fatal errors.  Attempts to do diagnostic read on caches may
1984 	 * introduce more errors (especially when the module is bad).
1985 	 */
1986 	if (t_afsr & C_AFSR_UE) {
1987 		/*
1988 		 * Ask our legacy bus nexus friends if they have any fatal
1989 		 * errors.  If so, they will log appropriate error messages.
1990 		 */
1991 		if (bus_func_invoke(BF_TYPE_UE) == BF_FATAL)
1992 			aflt->flt_panic = 1;
1993 
1994 		/*
1995 		 * We got a UE and are panicking, save the fault PA in a known
1996 		 * location so that the platform specific panic code can check
1997 		 * for copyback errors.
1998 		 */
1999 		if (aflt->flt_panic && cpu_flt_in_memory(&ch_flt, C_AFSR_UE)) {
2000 			panic_aflt = *aflt;
2001 		}
2002 	}
2003 
2004 	/*
2005 	 * Flush Ecache line or entire Ecache
2006 	 */
2007 	if (t_afsr_errs &
2008 	    (C_AFSR_UE | C_AFSR_EDU | C_AFSR_BERR | C_AFSR_L3_EDU))
2009 		cpu_error_ecache_flush(&ch_flt);
2010 #endif /* JALAPENO || SERRANO */
2011 
2012 	/*
2013 	 * We carefully re-enable NCEEN and CEEN and then check if any deferred
2014 	 * or disrupting errors have happened.  We do this because if a
2015 	 * deferred or disrupting error had occurred with NCEEN/CEEN off, the
2016 	 * trap will not be taken when NCEEN/CEEN is re-enabled.  Note that
2017 	 * CEEN works differently on Cheetah than on Spitfire.  Also, we enable
2018 	 * NCEEN/CEEN *before* checking the AFSR to avoid the small window of a
2019 	 * deferred or disrupting error happening between checking the AFSR and
2020 	 * enabling NCEEN/CEEN.
2021 	 *
2022 	 * Note: CEEN reenabled only if it was on when trap taken.
2023 	 */
2024 	set_error_enable(get_error_enable() | (EN_REG_NCEEN | ceen));
2025 	if (clear_errors(&ch_flt)) {
2026 		/*
2027 		 * Check for secondary errors, and avoid panicking if we
2028 		 * have them
2029 		 */
2030 		if (cpu_check_secondary_errors(&ch_flt, t_afsr_errs,
2031 		    t_afar) == 0) {
2032 			aflt->flt_panic |= ((ch_flt.afsr_errs &
2033 			    (C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS)) != 0);
2034 		}
2035 		(void) cpu_queue_events(&ch_flt, pr_reason, ch_flt.afsr_errs,
2036 		    NULL);
2037 	}
2038 
2039 	/*
2040 	 * Panic here if aflt->flt_panic has been set.  Enqueued errors will
2041 	 * be logged as part of the panic flow.
2042 	 */
2043 	if (aflt->flt_panic)
2044 		fm_panic("%sError(s)", pr_reason);
2045 
2046 	/*
2047 	 * If we queued an error and we are going to return from the trap and
2048 	 * the error was in user mode or inside of a copy routine, set AST flag
2049 	 * so the queue will be drained before returning to user mode.  The
2050 	 * AST processing will also act on our failure policy.
2051 	 */
2052 	if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
2053 		int pcb_flag = 0;
2054 
2055 		if (t_afsr_errs &
2056 		    ((C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS) &
2057 		    ~(C_AFSR_BERR | C_AFSR_TO)))
2058 			pcb_flag |= ASYNC_HWERR;
2059 
2060 		if (t_afsr & C_AFSR_BERR)
2061 			pcb_flag |= ASYNC_BERR;
2062 
2063 		if (t_afsr & C_AFSR_TO)
2064 			pcb_flag |= ASYNC_BTO;
2065 
2066 		ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
2067 		aston(curthread);
2068 	}
2069 }
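
/*
 * The async-error masks combined in the tests above are sensitive to
 * parenthesization: in C, '&' binds more tightly than '|', so
 * (A | B & ~C) is A | (B & ~C) rather than (A | B) & ~C.  A standalone
 * check with placeholder bit values:
 */
#if 0	/* illustrative sketch, not part of this file's build */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t A = 0x3, B = 0xc, C = 0x1;

	assert((A | B & ~C) == (A | (B & ~C)));	/* 0xf: C not masked */
	assert(((A | B) & ~C) == 0xe);		/* C masked as intended */
	return (0);
}
#endif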
2070 
2071 #if defined(CPU_IMP_L1_CACHE_PARITY)
2072 /*
2073  * Handling of data and instruction parity errors (traps 0x71, 0x72).
2074  *
2075  * For Panther, P$ data parity errors during floating point load hits
2076  * are also detected (reported as TT 0x71) and handled by this trap
2077  * handler.
2078  *
2079  * AFSR/AFAR are not set for parity errors, only TPC (a virtual address)
2080  * is available.
2081  */
2082 /*ARGSUSED*/
2083 void
2084 cpu_parity_error(struct regs *rp, uint_t flags, caddr_t tpc)
2085 {
2086 	ch_async_flt_t ch_flt;
2087 	struct async_flt *aflt;
2088 	uchar_t tl = ((flags & CH_ERR_TL) != 0);
2089 	uchar_t iparity = ((flags & CH_ERR_IPE) != 0);
2090 	uchar_t panic = ((flags & CH_ERR_PANIC) != 0);
2091 	char *error_class;
2092 	int index, way, word;
2093 	ch_dc_data_t tmp_dcp;
2094 	int dc_set_size = dcache_size / CH_DCACHE_NWAY;
2095 	uint64_t parity_bits, pbits;
2096 	/* Parity (sum mod 2) of each possible 2-bit parity-pair value */
2097 	static int parity_bits_popc[] = { 0, 1, 1, 0 };
2098 
2099 	/*
2100 	 * Log the error.
2101 	 * For icache parity errors the fault address is the trap PC.
2102 	 * For dcache/pcache parity errors the instruction would have to
2103 	 * be decoded to determine the address and that isn't possible
2104 	 * at high PIL.
2105 	 */
2106 	bzero(&ch_flt, sizeof (ch_async_flt_t));
2107 	aflt = (struct async_flt *)&ch_flt;
2108 	aflt->flt_id = gethrtime_waitfree();
2109 	aflt->flt_bus_id = getprocessorid();
2110 	aflt->flt_inst = CPU->cpu_id;
2111 	aflt->flt_pc = tpc;
2112 	aflt->flt_addr = iparity ? (uint64_t)tpc : AFLT_INV_ADDR;
2113 	aflt->flt_prot = AFLT_PROT_NONE;
2114 	aflt->flt_class = CPU_FAULT;
2115 	aflt->flt_priv = (tl || (rp->r_tstate & TSTATE_PRIV)) ?  1 : 0;
2116 	aflt->flt_tl = tl;
2117 	aflt->flt_panic = panic;
2118 	aflt->flt_status = iparity ? ECC_IP_TRAP : ECC_DP_TRAP;
2119 	ch_flt.flt_type = iparity ? CPU_IC_PARITY : CPU_DC_PARITY;
2120 
2121 	if (iparity) {
2122 		cpu_icache_parity_info(&ch_flt);
2123 		if (ch_flt.parity_data.ipe.cpl_off != -1)
2124 			error_class = FM_EREPORT_CPU_USIII_IDSPE;
2125 		else if (ch_flt.parity_data.ipe.cpl_way != -1)
2126 			error_class = FM_EREPORT_CPU_USIII_ITSPE;
2127 		else
2128 			error_class = FM_EREPORT_CPU_USIII_IPE;
2129 		aflt->flt_payload = FM_EREPORT_PAYLOAD_ICACHE_PE;
2130 	} else {
2131 		cpu_dcache_parity_info(&ch_flt);
2132 		if (ch_flt.parity_data.dpe.cpl_off != -1) {
2133 			/*
2134 			 * A true ddspe can only occur in way 0.  At TL 0 on
2135 			 * Jalapeno with a nonzero reported way, recheck the
2136 			 * data array; on a real mismatch, fix the D$ and return.
2137 			 */
2138 			way = ch_flt.parity_data.dpe.cpl_way;
2139 			if ((tl == 0) && (way != 0) &&
2140 			    IS_JALAPENO(cpunodes[CPU->cpu_id].implementation)) {
2141 				for (index = 0; index < dc_set_size;
2142 				    index += dcache_linesize) {
2143 					get_dcache_dtag(index + way *
2144 					    dc_set_size,
2145 					    (uint64_t *)&tmp_dcp);
2146 					/*
2147 					 * Check data array for even parity.
2148 					 * The 8 parity bits are grouped into
2149 					 * 4 pairs, each of which covers a
2150 					 * 64-bit word.  The endianness is
2151 					 * reversed -- the low-order parity
2152 					 * bits cover the high-order data words.
2153 					 */
2154 					parity_bits = tmp_dcp.dc_utag >> 8;
2155 					for (word = 0; word < 4; word++) {
2156 						pbits = (parity_bits >>
2157 						    (6 - word * 2)) & 3;
2158 						if (((popc64(
2159 						    tmp_dcp.dc_data[word]) +
2160 						    parity_bits_popc[pbits]) &
2161 						    1) && (tmp_dcp.dc_tag &
2162 						    VA13)) {
2163 							/* cleanup */
2164 							correct_dcache_parity(
2165 							    dcache_size,
2166 							    dcache_linesize);
2167 							if (cache_boot_state &
2168 							    DCU_DC) {
2169 								flush_dcache();
2170 							}
2171 
2172 							set_dcu(get_dcu() |
2173 							    cache_boot_state);
2174 							return;
2175 						}
2176 					}
2177 				}
2178 			} /* (tl == 0) && (way != 0) && IS JALAPENO */
2179 			error_class = FM_EREPORT_CPU_USIII_DDSPE;
2180 		} else if (ch_flt.parity_data.dpe.cpl_way != -1)
2181 			error_class = FM_EREPORT_CPU_USIII_DTSPE;
2182 		else
2183 			error_class = FM_EREPORT_CPU_USIII_DPE;
2184 		aflt->flt_payload = FM_EREPORT_PAYLOAD_DCACHE_PE;
2185 		/*
2186 		 * For panther we also need to check the P$ for parity errors.
2187 		 */
2188 		if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2189 			cpu_pcache_parity_info(&ch_flt);
2190 			if (ch_flt.parity_data.dpe.cpl_cache == CPU_PC_PARITY) {
2191 				error_class = FM_EREPORT_CPU_USIII_PDSPE;
2192 				aflt->flt_payload =
2193 				    FM_EREPORT_PAYLOAD_PCACHE_PE;
2194 			}
2195 		}
2196 	}
2197 
2198 	cpu_errorq_dispatch(error_class, (void *)&ch_flt,
2199 	    sizeof (ch_async_flt_t), ue_queue, aflt->flt_panic);
2200 
2201 	if (iparity) {
2202 		/*
2203 		 * Invalidate entire I$.
2204 		 * This is required due to the use of diagnostic ASI
2205 		 * accesses that may result in a loss of I$ coherency.
2206 		 */
2207 		if (cache_boot_state & DCU_IC) {
2208 			flush_icache();
2209 		}
2210 		/*
2211 		 * According to section P.3.1 of the Panther PRM, we
2212 		 * need to do a little more for recovery on those
2213 		 * CPUs after encountering an I$ parity error.
2214 		 */
2215 		if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2216 			flush_ipb();
2217 			correct_dcache_parity(dcache_size,
2218 			    dcache_linesize);
2219 			flush_pcache();
2220 		}
2221 	} else {
2222 		/*
2223 		 * Since the valid bit is ignored when checking parity, the
2224 		 * D$ data and tag must also be corrected.  Set D$ data bits
2225 		 * to zero and set utag to 0, 1, 2, 3.
2226 		 */
2227 		correct_dcache_parity(dcache_size, dcache_linesize);
2228 
2229 		/*
2230 		 * According to section P.3.3 of the Panther PRM, we
2231 		 * need to do a little more for recovery on those
2232 		 * CPUs after encountering a D$ or P$ parity error.
2233 		 *
2234 		 * As far as clearing P$ parity errors, it is enough to
2235 		 * simply invalidate all entries in the P$ since P$ parity
2236 		 * error traps are only generated for floating point load
2237 		 * hits.
2238 		 */
2239 		if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2240 			flush_icache();
2241 			flush_ipb();
2242 			flush_pcache();
2243 		}
2244 	}
2245 
2246 	/*
2247 	 * Invalidate entire D$ if it was enabled.
2248 	 * This is done to avoid stale data in the D$ which might
2249 	 * occur with the D$ disabled and the trap handler doing
2250 	 * stores affecting lines already in the D$.
2251 	 */
2252 	if (cache_boot_state & DCU_DC) {
2253 		flush_dcache();
2254 	}
2255 
2256 	/*
2257 	 * Restore caches to their bootup state.
2258 	 */
2259 	set_dcu(get_dcu() | cache_boot_state);
2260 
2261 	/*
2262 	 * Panic here if aflt->flt_panic has been set.  Enqueued errors will
2263 	 * be logged as part of the panic flow.
2264 	 */
2265 	if (aflt->flt_panic)
2266 		fm_panic("%sError(s)", iparity ? "IPE " : "DPE ");
2267 
2268 	/*
2269 	 * If this error occurred at TL>0 then flush the E$ here to reduce
2270 	 * the chance of getting an unrecoverable Fast ECC error.  This
2271 	 * flush will evict the part of the parity trap handler that is run
2272 	 * at TL>1.
2273 	 */
2274 	if (tl) {
2275 		cpu_flush_ecache();
2276 	}
2277 }
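
/*
 * A standalone sketch of the even-parity check performed in the
 * Jalapeno way != 0 path above: each 64-bit D$ data word is covered by
 * a pair of parity bits, and the population count of the word plus the
 * parity of its bit pair must sum to an even value.  popc64() is
 * modeled here with a GCC builtin; the bit layout mirrors the loop
 * above.
 */
#if 0	/* illustrative sketch, not part of this file's build */
#include <stdint.h>

/* Parity (popcount mod 2) of each possible 2-bit value. */
static const int parity_pair_popc[] = { 0, 1, 1, 0 };

/* Return 1 if word 'w' (0..3) of dc_data fails even parity. */
static int
dc_word_parity_bad(const uint64_t dc_data[4], uint64_t parity_bits, int w)
{
	uint64_t pbits = (parity_bits >> (6 - w * 2)) & 3;

	return ((__builtin_popcountll(dc_data[w]) +
	    parity_pair_popc[pbits]) & 1);
}
#endif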
2278 
2279 /*
2280  * On an I$ parity error, mark the appropriate entries in the ch_async_flt_t
2281  * to indicate which portions of the captured data should be in the ereport.
2282  */
2283 void
2284 cpu_async_log_ic_parity_err(ch_async_flt_t *ch_flt)
2285 {
2286 	int way = ch_flt->parity_data.ipe.cpl_way;
2287 	int offset = ch_flt->parity_data.ipe.cpl_off;
2288 	int tag_index;
2289 	struct async_flt *aflt = (struct async_flt *)ch_flt;
2290 
2291 
2292 	if ((offset != -1) || (way != -1)) {
2293 		/*
2294 		 * Parity error in I$ tag or data
2295 		 */
2296 		tag_index = ch_flt->parity_data.ipe.cpl_ic[way].ic_idx;
2297 		if (IS_PANTHER(cpunodes[aflt->flt_inst].implementation))
2298 			ch_flt->parity_data.ipe.cpl_ic[way].ic_way =
2299 			    PN_ICIDX_TO_WAY(tag_index);
2300 		else
2301 			ch_flt->parity_data.ipe.cpl_ic[way].ic_way =
2302 			    CH_ICIDX_TO_WAY(tag_index);
2303 		ch_flt->parity_data.ipe.cpl_ic[way].ic_logflag =
2304 		    IC_LOGFLAG_MAGIC;
2305 	} else {
2306 		/*
2307 		 * Parity error was not identified.
2308 		 * Log tags and data for all ways.
2309 		 */
2310 		for (way = 0; way < CH_ICACHE_NWAY; way++) {
2311 			tag_index = ch_flt->parity_data.ipe.cpl_ic[way].ic_idx;
2312 			if (IS_PANTHER(cpunodes[aflt->flt_inst].implementation))
2313 				ch_flt->parity_data.ipe.cpl_ic[way].ic_way =
2314 				    PN_ICIDX_TO_WAY(tag_index);
2315 			else
2316 				ch_flt->parity_data.ipe.cpl_ic[way].ic_way =
2317 				    CH_ICIDX_TO_WAY(tag_index);
2318 			ch_flt->parity_data.ipe.cpl_ic[way].ic_logflag =
2319 			    IC_LOGFLAG_MAGIC;
2320 		}
2321 	}
2322 }
2323 
2324 /*
2325  * On an D$ parity error, mark the appropriate entries in the ch_async_flt_t
2326  * to indicate which portions of the captured data should be in the ereport.
2327  */
2328 void
2329 cpu_async_log_dc_parity_err(ch_async_flt_t *ch_flt)
2330 {
2331 	int way = ch_flt->parity_data.dpe.cpl_way;
2332 	int offset = ch_flt->parity_data.dpe.cpl_off;
2333 	int tag_index;
2334 
2335 	if (offset != -1) {
2336 		/*
2337 		 * Parity error in D$ or P$ data array.
2338 		 *
2339 		 * First check to see whether the parity error is in D$ or P$
2340 		 * since P$ data parity errors are reported in Panther using
2341 		 * the same trap.
2342 		 */
2343 		if (ch_flt->parity_data.dpe.cpl_cache == CPU_PC_PARITY) {
2344 			tag_index = ch_flt->parity_data.dpe.cpl_pc[way].pc_idx;
2345 			ch_flt->parity_data.dpe.cpl_pc[way].pc_way =
2346 			    CH_PCIDX_TO_WAY(tag_index);
2347 			ch_flt->parity_data.dpe.cpl_pc[way].pc_logflag =
2348 			    PC_LOGFLAG_MAGIC;
2349 		} else {
2350 			tag_index = ch_flt->parity_data.dpe.cpl_dc[way].dc_idx;
2351 			ch_flt->parity_data.dpe.cpl_dc[way].dc_way =
2352 			    CH_DCIDX_TO_WAY(tag_index);
2353 			ch_flt->parity_data.dpe.cpl_dc[way].dc_logflag =
2354 			    DC_LOGFLAG_MAGIC;
2355 		}
2356 	} else if (way != -1) {
2357 		/*
2358 		 * Parity error in D$ tag.
2359 		 */
2360 		tag_index = ch_flt->parity_data.dpe.cpl_dc[way].dc_idx;
2361 		ch_flt->parity_data.dpe.cpl_dc[way].dc_way =
2362 		    CH_DCIDX_TO_WAY(tag_index);
2363 		ch_flt->parity_data.dpe.cpl_dc[way].dc_logflag =
2364 		    DC_LOGFLAG_MAGIC;
2365 	}
2366 }
2367 #endif	/* CPU_IMP_L1_CACHE_PARITY */
2368 
2369 /*
2370  * The cpu_async_log_err() function is called via the [uc]e_drain() function to
2371  * post-process CPU events that are dequeued.  As such, it can be invoked
2372  * from softint context, from AST processing in the trap() flow, or from the
2373  * panic flow.  We decode the CPU-specific data, and take appropriate actions.
2374  * Historically this entry point was used to log the actual cmn_err(9F) text;
2375  * now with FMA it is used to prepare 'flt' to be converted into an ereport.
2376  * With FMA this function now also returns a flag which indicates to the
2377  * caller whether the ereport should be posted (1) or suppressed (0).
2378  */
2379 static int
2380 cpu_async_log_err(void *flt, errorq_elem_t *eqep)
2381 {
2382 	ch_async_flt_t *ch_flt = (ch_async_flt_t *)flt;
2383 	struct async_flt *aflt = (struct async_flt *)flt;
2384 	uint64_t errors;
2385 	extern void memscrub_induced_error(void);
2386 
2387 	switch (ch_flt->flt_type) {
2388 	case CPU_INV_AFSR:
2389 		/*
2390 		 * If it is a disrupting trap and the AFSR is zero, then
2391 		 * the event has probably already been noted. Do not post
2392 		 * an ereport.
2393 		 */
2394 		if ((aflt->flt_status & ECC_C_TRAP) &&
2395 		    (!(aflt->flt_stat & C_AFSR_MASK)))
2396 			return (0);
2397 		else
2398 			return (1);
2399 	case CPU_TO:
2400 	case CPU_BERR:
2401 	case CPU_FATAL:
2402 	case CPU_FPUERR:
2403 		return (1);
2404 
2405 	case CPU_UE_ECACHE_RETIRE:
2406 		cpu_log_err(aflt);
2407 		cpu_page_retire(ch_flt);
2408 		return (1);
2409 
2410 	/*
2411 	 * Cases where we may want to suppress logging or perform
2412 	 * extended diagnostics.
2413 	 */
2414 	case CPU_CE:
2415 	case CPU_EMC:
2416 		/*
2417 		 * We want to skip logging and further classification
2418 		 * only if ALL the following conditions are true:
2419 		 *
2420 		 *	1. There is only one error
2421 		 *	2. That error is a correctable memory error
2422 		 *	3. The error is caused by the memory scrubber (in
2423 		 *	   which case the error will have occurred under
2424 		 *	   on_trap protection)
2425 		 *	4. The error is on a retired page
2426 		 *
2427 		 * Note: AFLT_PROT_EC is used places other than the memory
2428 		 * scrubber.  However, none of those errors should occur
2429 		 * on a retired page.
2430 		 */
2431 		if ((ch_flt->afsr_errs &
2432 		    (C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS)) == C_AFSR_CE &&
2433 		    aflt->flt_prot == AFLT_PROT_EC) {
2434 
2435 			if (page_retire_check(aflt->flt_addr, NULL) == 0) {
2436 				if (ch_flt->flt_trapped_ce & CE_CEEN_DEFER) {
2437 
2438 				/*
2439 				 * Since we're skipping logging, we'll need
2440 				 * to schedule the re-enabling of CEEN
2441 				 */
2442 				(void) timeout(cpu_delayed_check_ce_errors,
2443 				    (void *)(uintptr_t)aflt->flt_inst,
2444 				    drv_usectohz((clock_t)cpu_ceen_delay_secs
2445 				    * MICROSEC));
2446 				}
2447 
2448 				/*
2449 				 * Inform memscrubber - scrubbing induced
2450 				 * CE on a retired page.
2451 				 */
2452 				memscrub_induced_error();
2453 				return (0);
2454 			}
2455 		}
2456 
2457 		/*
2458 		 * Perform/schedule further classification actions, but
2459 		 * only if the page is healthy (we don't want bad
2460 		 * pages inducing too much diagnostic activity).  If we could
2461 		 * not find a page pointer then we also skip this.  If
2462 		 * ce_scrub_xdiag_recirc returns nonzero then it has chosen
2463 		 * to copy and recirculate the event (for further diagnostics)
2464 		 * and we should not proceed to log it here.
2465 		 *
2466 		 * This must be the last step here before the cpu_log_err()
2467 		 * below - if an event recirculates cpu_ce_log_err() will
2468 		 * not call the current function but will proceed directly
2469 		 * to cpu_ereport_post(), skipping the cpu_log_err() below.
2470 		 *
2471 		 * Note: Check cpu_impl_async_log_err if changing this
2472 		 */
2473 		if (page_retire_check(aflt->flt_addr, &errors) == EINVAL) {
2474 			CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
2475 			    CE_XDIAG_SKIP_NOPP);
2476 		} else {
2477 			if (errors != PR_OK) {
2478 				CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
2479 				    CE_XDIAG_SKIP_PAGEDET);
2480 			} else if (ce_scrub_xdiag_recirc(aflt, ce_queue, eqep,
2481 			    offsetof(ch_async_flt_t, cmn_asyncflt))) {
2482 				return (0);
2483 			}
2484 		}
2485 		/*FALLTHRU*/
2486 
2487 	/*
2488 	 * Cases where we just want to report the error and continue.
2489 	 */
2490 	case CPU_CE_ECACHE:
2491 	case CPU_UE_ECACHE:
2492 	case CPU_IV:
2493 	case CPU_ORPH:
2494 		cpu_log_err(aflt);
2495 		return (1);
2496 
2497 	/*
2498 	 * Cases where we want to fall through to handle panicking.
2499 	 */
2500 	case CPU_UE:
2501 		/*
2502 		 * We want to skip logging in the same conditions as the
2503 		 * CE case.  In addition, we want to make sure we're not
2504 		 * panicking.
2505 		 */
2506 		if (!panicstr && (ch_flt->afsr_errs &
2507 		    (C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS)) == C_AFSR_UE &&
2508 		    aflt->flt_prot == AFLT_PROT_EC) {
2509 			if (page_retire_check(aflt->flt_addr, NULL) == 0) {
2510 				/* Zero the address to clear the error */
2511 				softcall(ecc_page_zero, (void *)aflt->flt_addr);
2512 				/*
2513 				 * Inform memscrubber - scrubbing induced
2514 				 * UE on a retired page.
2515 				 */
2516 				memscrub_induced_error();
2517 				return (0);
2518 			}
2519 		}
2520 		cpu_log_err(aflt);
2521 		break;
2522 
2523 	default:
2524 		/*
2525 		 * If the us3_common.c code doesn't know the flt_type, it may
2526 		 * be an implementation-specific code.  Call into the impldep
2527 		 * backend to find out what to do: if it tells us to continue,
2528 		 * break and handle as if falling through from a UE; if not,
2529 		 * the impldep backend has handled the error and we're done.
2530 		 */
2531 		switch (cpu_impl_async_log_err(flt, eqep)) {
2532 		case CH_ASYNC_LOG_DONE:
2533 			return (1);
2534 		case CH_ASYNC_LOG_RECIRC:
2535 			return (0);
2536 		case CH_ASYNC_LOG_CONTINUE:
2537 			break; /* continue on to handle UE-like error */
2538 		default:
2539 			cmn_err(CE_WARN, "discarding error 0x%p with "
2540 			    "invalid fault type (0x%x)",
2541 			    (void *)aflt, ch_flt->flt_type);
2542 			return (0);
2543 		}
2544 	}
2545 
2546 	/* ... fall through from the UE case */
2547 
2548 	if (aflt->flt_addr != AFLT_INV_ADDR && aflt->flt_in_memory) {
2549 		if (!panicstr) {
2550 			cpu_page_retire(ch_flt);
2551 		} else {
2552 			/*
2553 			 * Clear UEs on panic so that we don't
2554 			 * get haunted by them during panic or
2555 			 * after reboot
2556 			 */
2557 			cpu_clearphys(aflt);
2558 			(void) clear_errors(NULL);
2559 		}
2560 	}
2561 
2562 	return (1);
2563 }
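
/*
 * The four CE "skip logging" conditions enumerated in the comment above
 * reduce to a single predicate; a condensed sketch using the same
 * fields and helpers this function already relies on (only the
 * packaging into a separate function is editorial):
 */
#if 0	/* illustrative sketch, not part of this file's build */
static int
ce_skip_logging(ch_async_flt_t *ch_flt, struct async_flt *aflt)
{
	return ((ch_flt->afsr_errs &
	    (C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS)) == C_AFSR_CE &&
	    aflt->flt_prot == AFLT_PROT_EC &&
	    page_retire_check(aflt->flt_addr, NULL) == 0);
}
#endif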
2564 
2565 /*
2566  * Retire the bad page that may contain the flushed error.
2567  */
2568 void
2569 cpu_page_retire(ch_async_flt_t *ch_flt)
2570 {
2571 	struct async_flt *aflt = (struct async_flt *)ch_flt;
2572 	(void) page_retire(aflt->flt_addr, PR_UE);
2573 }
2574 
2575 /*
2576  * Return true if the error specified in the AFSR indicates
2577  * an E$ data error (L2$ for Cheetah/Cheetah+/Jaguar, L3$
2578  * for Panther, none for Jalapeno/Serrano).
2579  */
2580 /* ARGSUSED */
2581 static int
2582 cpu_error_is_ecache_data(int cpuid, uint64_t t_afsr)
2583 {
2584 #if defined(JALAPENO) || defined(SERRANO)
2585 	return (0);
2586 #elif defined(CHEETAH_PLUS)
2587 	if (IS_PANTHER(cpunodes[cpuid].implementation))
2588 		return ((t_afsr & C_AFSR_EXT_L3_DATA_ERRS) != 0);
2589 	return ((t_afsr & C_AFSR_EC_DATA_ERRS) != 0);
2590 #else	/* CHEETAH_PLUS */
2591 	return ((t_afsr & C_AFSR_EC_DATA_ERRS) != 0);
2592 #endif
2593 }
2594 
2595 /*
2596  * The cpu_log_err() function is called by cpu_async_log_err() to perform the
2597  * generic event post-processing for correctable and uncorrectable memory,
2598  * E$, and MTag errors.  Historically this entry point was used to log bits of
2599  * common cmn_err(9F) text; now with FMA it is used to prepare 'flt' to be
2600  * converted into an ereport.  In addition, it transmits the error to any
2601  * platform-specific service-processor FRU logging routines, if available.
2602  */
2603 void
2604 cpu_log_err(struct async_flt *aflt)
2605 {
2606 	char unum[UNUM_NAMLEN];
2607 	int synd_status, synd_code, afar_status;
2608 	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
2609 
2610 	if (cpu_error_is_ecache_data(aflt->flt_inst, ch_flt->flt_bit))
2611 		aflt->flt_status |= ECC_ECACHE;
2612 	else
2613 		aflt->flt_status &= ~ECC_ECACHE;
2614 	/*
2615 	 * Determine syndrome status.
2616 	 */
2617 	synd_status = afsr_to_synd_status(aflt->flt_inst,
2618 	    ch_flt->afsr_errs, ch_flt->flt_bit);
2619 
2620 	/*
2621 	 * Determine afar status.
2622 	 */
2623 	if (pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT))
2624 		afar_status = afsr_to_afar_status(ch_flt->afsr_errs,
2625 		    ch_flt->flt_bit);
2626 	else
2627 		afar_status = AFLT_STAT_INVALID;
2628 
2629 	synd_code = synd_to_synd_code(synd_status,
2630 	    aflt->flt_synd, ch_flt->flt_bit);
2631 
2632 	/*
2633 	 * If afar status is not invalid do a unum lookup.
2634 	 */
2635 	if (afar_status != AFLT_STAT_INVALID) {
2636 		(void) cpu_get_mem_unum_synd(synd_code, aflt, unum);
2637 	} else {
2638 		unum[0] = '\0';
2639 	}
2640 
2641 	/*
2642 	 * Do not send the fruid message (plat_ecc_error_data_t)
2643 	 * to the SC if it can handle the enhanced error information
2644 	 * (plat_ecc_error2_data_t) or when the tunable
2645 	 * ecc_log_fruid_enable is set to 0.
2646 	 */
2647 
2648 	if (&plat_ecc_capability_sc_get &&
2649 	    plat_ecc_capability_sc_get(PLAT_ECC_ERROR_MESSAGE)) {
2650 		if (&plat_log_fruid_error)
2651 			plat_log_fruid_error(synd_code, aflt, unum,
2652 			    ch_flt->flt_bit);
2653 	}
2654 
2655 	if (aflt->flt_func != NULL)
2656 		aflt->flt_func(aflt, unum);
2657 
2658 	if (afar_status != AFLT_STAT_INVALID)
2659 		cpu_log_diag_info(ch_flt);
2660 
2661 	/*
2662 	 * If we have a CEEN error, we do not reenable CEEN until after
2663 	 * we exit the trap handler. Otherwise, another error may
2664 	 * occur causing the handler to be entered recursively.
2665 	 * We set a timeout to trigger in cpu_ceen_delay_secs seconds,
2666 	 * to try and ensure that the CPU makes progress in the face
2667 	 * of a CE storm.
2668 	 */
2669 	if (ch_flt->flt_trapped_ce & CE_CEEN_DEFER) {
2670 		(void) timeout(cpu_delayed_check_ce_errors,
2671 		    (void *)(uintptr_t)aflt->flt_inst,
2672 		    drv_usectohz((clock_t)cpu_ceen_delay_secs * MICROSEC));
2673 	}
2674 }
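
/*
 * The deferred CEEN re-enable above relies on two standard conversions:
 * seconds to microseconds via MICROSEC, and microseconds to clock ticks
 * via drv_usectohz(9F).  A sketch of scheduling that callout in
 * isolation, mirroring the timeout() calls in this file:
 */
#if 0	/* illustrative sketch, not part of this file's build */
static void
schedule_ceen_reenable(uint_t cpuid, clock_t secs)
{
	(void) timeout(cpu_delayed_check_ce_errors,
	    (void *)(uintptr_t)cpuid, drv_usectohz(secs * MICROSEC));
}
#endif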
2675 
2676 /*
2677  * Invoked by error_init() early in startup and therefore before
2678  * startup_errorq() is called to drain any error Q -
2679  *
2680  * startup()
2681  *   startup_end()
2682  *     error_init()
2683  *       cpu_error_init()
2684  * errorq_init()
2685  *   errorq_drain()
2686  * start_other_cpus()
2687  *
2688  * The purpose of this routine is to create error-related taskqs.  Taskqs
2689  * are used for this purpose because cpu_lock can't be grabbed from interrupt
2690  * context.
2691  */
2692 void
2693 cpu_error_init(int items)
2694 {
2695 	/*
2696 	 * Create taskq(s) to reenable CE
2697 	 */
2698 	ch_check_ce_tq = taskq_create("cheetah_check_ce", 1, minclsyspri,
2699 	    items, items, TASKQ_PREPOPULATE);
2700 }
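
/*
 * Work is later handed to this taskq with taskq_dispatch(9F), which is
 * safe from contexts that cannot acquire cpu_lock.  A sketch of such a
 * dispatch; the function and argument here are placeholders for the CE
 * re-enable work queued elsewhere in this file:
 */
#if 0	/* illustrative sketch, not part of this file's build */
static void
queue_ce_work(task_func_t *func, void *arg)
{
	/* TQ_NOSLEEP: fail rather than block if no entries are free. */
	(void) taskq_dispatch(ch_check_ce_tq, func, arg, TQ_NOSLEEP);
}
#endif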
2701 
2702 void
2703 cpu_ce_log_err(struct async_flt *aflt, errorq_elem_t *eqep)
2704 {
2705 	char unum[UNUM_NAMLEN];
2706 	int len;
2707 
2708 	switch (aflt->flt_class) {
2709 	case CPU_FAULT:
2710 		cpu_ereport_init(aflt);
2711 		if (cpu_async_log_err(aflt, eqep))
2712 			cpu_ereport_post(aflt);
2713 		break;
2714 
2715 	case BUS_FAULT:
2716 		if (aflt->flt_func != NULL) {
2717 			(void) cpu_get_mem_unum_aflt(AFLT_STAT_VALID, aflt,
2718 			    unum, UNUM_NAMLEN, &len);
2719 			aflt->flt_func(aflt, unum);
2720 		}
2721 		break;
2722 
2723 	case RECIRC_CPU_FAULT:
2724 		aflt->flt_class = CPU_FAULT;
2725 		cpu_log_err(aflt);
2726 		cpu_ereport_post(aflt);
2727 		break;
2728 
2729 	case RECIRC_BUS_FAULT:
2730 		ASSERT(aflt->flt_class != RECIRC_BUS_FAULT);
2731 		/*FALLTHRU*/
2732 	default:
2733 		cmn_err(CE_WARN, "discarding CE error 0x%p with invalid "
2734 		    "fault class (0x%x)", (void *)aflt, aflt->flt_class);
2735 		return;
2736 	}
2737 }
2738 
2739 /*
2740  * Scrub and classify a CE.  This function must not modify the
2741  * fault structure passed to it but instead should return the classification
2742  * information.
2743  */
2744 
2745 static uchar_t
2746 cpu_ce_scrub_mem_err_common(struct async_flt *ecc, boolean_t logout_tried)
2747 {
2748 	uchar_t disp = CE_XDIAG_EXTALG;
2749 	on_trap_data_t otd;
2750 	uint64_t orig_err;
2751 	ch_cpu_logout_t *clop;
2752 
2753 	/*
2754 	 * Clear CEEN.  CPU CE TL > 0 trap handling will already have done
2755 	 * this, but our other callers have not.  Disable preemption to
2756 	 * avoid CPU migration so that we restore CEEN on the correct
2757 	 * cpu later.
2758 	 *
2759 	 * CEEN is cleared so that further CEs that our instruction and
2760 	 * data footprint induce do not cause us either to creep down
2761 	 * the kernel stack to the point of overflow, or do so much CE
2762 	 * notification as to make little real forward progress.
2763 	 *
2764 	 * NCEEN must not be cleared.  However it is possible that
2765 	 * our accesses to the flt_addr may provoke a bus error or timeout
2766 	 * if the offending address has just been unconfigured as part of
2767 	 * a DR action.  So we must operate under on_trap protection.
2768 	 */
2769 	kpreempt_disable();
2770 	orig_err = get_error_enable();
2771 	if (orig_err & EN_REG_CEEN)
2772 		set_error_enable(orig_err & ~EN_REG_CEEN);
2773 
2774 	/*
2775 	 * Our classification algorithm includes the line state before
2776 	 * the scrub; we'd like this captured after the detection and
2777 	 * before the algorithm below - the earlier the better.
2778 	 *
2779 	 * If we've come from a cpu CE trap then this info already exists
2780 	 * in the cpu logout area.
2781 	 *
2782 	 * For a CE detected by memscrub for which there was no trap
2783 	 * (running with CEEN off) cpu_log_and_clear_ce has called
2784 	 * cpu_ce_delayed_ec_logout to capture some cache data, and
2785 	 * marked the fault structure as incomplete as a flag to later
2786 	 * logging code.
2787 	 *
2788 	 * If called directly from an IO detected CE there has been
2789 	 * no line data capture.  In this case we logout to the cpu logout
2790 	 * area - that's appropriate since it's the cpu cache data we need
2791 	 * for classification.  We thus borrow the cpu logout area for a
2792 	 * short time, and cpu_ce_delayed_ec_logout will mark it as busy
2793 	 * during this time (we will invalidate it again below).
2794 	 *
2795 	 * If called from the partner check xcall handler then this cpu
2796 	 * (the partner) has not necessarily experienced a CE at this
2797 	 * address.  But we want to capture line state before its scrub
2798 	 * attempt since we use that in our classification.
2799 	 */
2800 	if (logout_tried == B_FALSE) {
2801 		if (!cpu_ce_delayed_ec_logout(ecc->flt_addr))
2802 			disp |= CE_XDIAG_NOLOGOUT;
2803 	}
2804 
2805 	/*
2806 	 * Scrub memory, then check AFSR for errors.  The AFAR we scrub may
2807 	 * no longer be valid (if DR'd since the initial event) so we
2808 	 * perform this scrub under on_trap protection.  If this access is
2809 	 * ok then further accesses below will also be ok - DR cannot
2810 	 * proceed while this thread is active (preemption is disabled);
2811 	 * to be safe we'll nonetheless use on_trap again below.
2812 	 */
2813 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
2814 		cpu_scrubphys(ecc);
2815 	} else {
2816 		no_trap();
2817 		if (orig_err & EN_REG_CEEN)
2818 			set_error_enable(orig_err);
2819 		kpreempt_enable();
2820 		return (disp);
2821 	}
2822 	no_trap();
2823 
2824 	/*
2825 	 * Did the casx read of the scrub log a CE that matches the AFAR?
2826 	 * Note that it's quite possible that the read sourced the data from
2827 	 * another cpu.
2828 	 */
2829 	if (clear_ecc(ecc))
2830 		disp |= CE_XDIAG_CE1;
2831 
2832 	/*
2833 	 * Read the data again.  This time the read is very likely to
2834 	 * come from memory since the scrub induced a writeback to memory.
2835 	 */
2836 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
2837 		(void) lddphys(P2ALIGN(ecc->flt_addr, 8));
2838 	} else {
2839 		no_trap();
2840 		if (orig_err & EN_REG_CEEN)
2841 			set_error_enable(orig_err);
2842 		kpreempt_enable();
2843 		return (disp);
2844 	}
2845 	no_trap();
2846 
2847 	/* Did that read induce a CE that matches the AFAR? */
2848 	if (clear_ecc(ecc))
2849 		disp |= CE_XDIAG_CE2;
2850 
2851 	/*
2852 	 * Look at the logout information and record whether we found the
2853 	 * line in l2/l3 cache.  For Panther we are interested in whether
2854 	 * we found it in either cache (it won't reside in both but
2855 	 * it is possible to read it that way given the moving target).
2856 	 */
2857 	clop = CPU_PRIVATE(CPU) ? CPU_PRIVATE_PTR(CPU, chpr_cecc_logout) : NULL;
2858 	if (!(disp & CE_XDIAG_NOLOGOUT) && clop &&
2859 	    clop->clo_data.chd_afar != LOGOUT_INVALID) {
2860 		int hit, level;
2861 		int state;
2862 		int totalsize;
2863 		ch_ec_data_t *ecp;
2864 
2865 		/*
2866 		 * If hit is nonzero then a match was found and hit will
2867 		 * be one greater than the index which hit.  For Panther we
2868 		 * also need to pay attention to level to see which of l2$ or
2869 		 * l3$ it hit in.
2870 		 */
2871 		hit = cpu_matching_ecache_line(ecc->flt_addr, &clop->clo_data,
2872 		    0, &level);
2873 
2874 		if (hit) {
2875 			--hit;
2876 			disp |= CE_XDIAG_AFARMATCH;
2877 
2878 			if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2879 				if (level == 2)
2880 					ecp = &clop->clo_data.chd_l2_data[hit];
2881 				else
2882 					ecp = &clop->clo_data.chd_ec_data[hit];
2883 			} else {
2884 				ASSERT(level == 2);
2885 				ecp = &clop->clo_data.chd_ec_data[hit];
2886 			}
2887 			totalsize = cpunodes[CPU->cpu_id].ecache_size;
2888 			state = cpu_ectag_pa_to_subblk_state(totalsize,
2889 			    ecc->flt_addr, ecp->ec_tag);
2890 
2891 			/*
2892 			 * Cheetah variants use different state encodings -
2893 			 * the CH_ECSTATE_* defines vary depending on the
2894 			 * module we're compiled for.  Translate into our
2895 			 * one true version.  Conflate Owner-Shared state
2896 			 * of SSM mode with Owner as victimisation of such
2897 			 * lines may cause a writeback.
2898 			 */
2899 			switch (state) {
2900 			case CH_ECSTATE_MOD:
2901 				disp |= EC_STATE_M;
2902 				break;
2903 
2904 			case CH_ECSTATE_OWN:
2905 			case CH_ECSTATE_OWS:
2906 				disp |= EC_STATE_O;
2907 				break;
2908 
2909 			case CH_ECSTATE_EXL:
2910 				disp |= EC_STATE_E;
2911 				break;
2912 
2913 			case CH_ECSTATE_SHR:
2914 				disp |= EC_STATE_S;
2915 				break;
2916 
2917 			default:
2918 				disp |= EC_STATE_I;
2919 				break;
2920 			}
2921 		}
2922 
2923 		/*
2924 		 * If we initiated the delayed logout then we are responsible
2925 		 * for invalidating the logout area.
2926 		 */
2927 		if (logout_tried == B_FALSE) {
2928 			bzero(clop, sizeof (ch_cpu_logout_t));
2929 			clop->clo_data.chd_afar = LOGOUT_INVALID;
2930 		}
2931 	}
2932 
2933 	/*
2934 	 * Re-enable CEEN if we turned it off.
2935 	 */
2936 	if (orig_err & EN_REG_CEEN)
2937 		set_error_enable(orig_err);
2938 	kpreempt_enable();
2939 
2940 	return (disp);
2941 }
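
/*
 * Both the scrub and the re-read above use the same on_trap(9F)
 * pattern: arm OT_DATA_ACCESS protection, touch the possibly-DR'd
 * physical address, then disarm.  The pattern in isolation (lddphys()
 * and the 8-byte alignment follow the calls above):
 */
#if 0	/* illustrative sketch, not part of this file's build */
static int
protected_read_phys(uint64_t pa)
{
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		(void) lddphys(P2ALIGN(pa, 8));	/* may fault if DR'd out */
		no_trap();
		return (0);	/* access succeeded */
	}
	no_trap();
	return (-1);	/* bus error or timeout was trampolined */
}
#endif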
2942 
2943 /*
2944  * Scrub a correctable memory error and collect data for classification
2945  * of CE type.  This function is called in the detection path, ie tl0 handling
2946  * of a correctable error trap (cpus) or interrupt (IO) at high PIL.
2947  */
2948 void
2949 cpu_ce_scrub_mem_err(struct async_flt *ecc, boolean_t logout_tried)
2950 {
2951 	/*
2952 	 * Cheetah CE classification does not set any bits in flt_status.
2953 	 * Instead we will record classification datapoints in flt_disp.
2954 	 */
2955 	ecc->flt_status &= ~(ECC_INTERMITTENT | ECC_PERSISTENT | ECC_STICKY);
2956 
2957 	/*
2958 	 * Mark an IO-detected error as a memory error so that clear_ecc()
2959 	 * can determine whether it is persistent, sticky or intermittent.
2960 	 */
2961 	if (ecc->flt_status & ECC_IOBUS)
2962 		ecc->flt_stat = C_AFSR_MEMORY;
2963 
2964 	/*
2965 	 * Record information from this first part of the algorithm in
2966 	 * flt_disp.
2967 	 */
2968 	ecc->flt_disp = cpu_ce_scrub_mem_err_common(ecc, logout_tried);
2969 }
2970 
2971 /*
2972  * Select a partner to perform a further CE classification check from.
2973  * Must be called with kernel preemption disabled (to stop the cpu list
2974  * from changing).  The detecting cpu we are partnering has cpuid
2975  * aflt->flt_inst; we might not be running on the detecting cpu.
2976  *
2977  * Restrict choice to active cpus in the same cpu partition as ourselves in
2978  * an effort to stop bad cpus in one partition causing other partitions to
2979  * perform excessive diagnostic activity.  Actually since the errorq drain
2980  * is run from a softint most of the time and that is a global mechanism,
2981  * this isolation is only partial.  Return NULL if we fail to find a
2982  * suitable partner.
2983  *
2984  * We prefer a partner that is in a different latency group to ourselves as
2985  * we will share fewer datapaths.  If such a partner is unavailable then
2986  * choose one in the same lgroup but prefer a different chip and only allow
2987  * a sibling core if flags includes PTNR_SIBLINGOK.  If all else fails and
2988  * flags includes PTNR_SELFOK then permit selection of the original detector.
2989  *
2990  * We keep a cache of the last partner selected for a cpu, and we'll try to
2991  * use that previous partner if no more than cpu_ce_ptnr_cachetime_sec seconds
2992  * have passed since that selection was made.  This provides the benefit
2993  * of the point-of-view of different partners over time but without
2994  * requiring frequent cpu list traversals.
2995  */
2996 
2997 #define	PTNR_SIBLINGOK	0x1	/* Allow selection of sibling core */
2998 #define	PTNR_SELFOK	0x2	/* Allow selection of cpu to "partner" itself */
2999 
3000 static cpu_t *
3001 ce_ptnr_select(struct async_flt *aflt, int flags, int *typep)
3002 {
3003 	cpu_t *sp, *dtcr, *ptnr, *locptnr, *sibptnr;
3004 	hrtime_t lasttime, thistime;
3005 
3006 	ASSERT(curthread->t_preempt > 0 || getpil() >= DISP_LEVEL);
3007 
3008 	dtcr = cpu[aflt->flt_inst];
3009 
3010 	/*
3011 	 * Short-circuit for the following cases:
3012 	 *	. the dtcr is not flagged active
3013 	 *	. there is just one cpu present
3014 	 *	. the detector has disappeared
3015 	 *	. we were given a bad flt_inst cpuid; this should not happen
3016 	 *	  (eg PCI code now fills flt_inst) but if it does it is no
3017 	 *	  reason to panic.
3018 	 *	. there is just one cpu left online in the cpu partition
3019 	 *
3020 	 * If we return NULL after this point then we do not update the
3021 	 * chpr_ceptnr_seltime which will cause us to perform a full lookup
3022 	 * again next time; this is the case where the only other cpu online
3023 	 * in the detector's partition is on the same chip as the detector
3024 	 * and since CEEN re-enable is throttled even that case should not
3025 	 * hurt performance.
3026 	 */
3027 	if (dtcr == NULL || !cpu_flagged_active(dtcr->cpu_flags)) {
3028 		return (NULL);
3029 	}
3030 	if (ncpus == 1 || dtcr->cpu_part->cp_ncpus == 1) {
3031 		if (flags & PTNR_SELFOK) {
3032 			*typep = CE_XDIAG_PTNR_SELF;
3033 			return (dtcr);
3034 		} else {
3035 			return (NULL);
3036 		}
3037 	}
3038 
3039 	thistime = gethrtime();
3040 	lasttime = CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime);
3041 
3042 	/*
3043 	 * Select a starting point.
3044 	 */
3045 	if (!lasttime) {
3046 		/*
3047 		 * We've never selected a partner for this detector before.
3048 		 * Start the scan at the next online cpu in the same cpu
3049 		 * partition.
3050 		 */
3051 		sp = dtcr->cpu_next_part;
3052 	} else if (thistime - lasttime < cpu_ce_ptnr_cachetime_sec * NANOSEC) {
3053 		/*
3054 		 * Our last selection has not aged yet.  If this partner:
3055 		 *	. is still a valid cpu,
3056 		 *	. is still in the same partition as the detector
3057 		 *	. is still marked active
3058 		 *	. satisfies the 'flags' argument criteria
3059 		 * then select it again without updating the timestamp.
3060 		 */
3061 		sp = cpu[CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id)];
3062 		if (sp == NULL || sp->cpu_part != dtcr->cpu_part ||
3063 		    !cpu_flagged_active(sp->cpu_flags) ||
3064 		    (sp == dtcr && !(flags & PTNR_SELFOK)) ||
3065 		    (pg_plat_cpus_share(sp, dtcr, PGHW_CHIP) &&
3066 		    !(flags & PTNR_SIBLINGOK))) {
3067 			sp = dtcr->cpu_next_part;
3068 		} else {
3069 			if (sp->cpu_lpl->lpl_lgrp != dtcr->cpu_lpl->lpl_lgrp) {
3070 				*typep = CE_XDIAG_PTNR_REMOTE;
3071 			} else if (sp == dtcr) {
3072 				*typep = CE_XDIAG_PTNR_SELF;
3073 			} else if (pg_plat_cpus_share(sp, dtcr, PGHW_CHIP)) {
3074 				*typep = CE_XDIAG_PTNR_SIBLING;
3075 			} else {
3076 				*typep = CE_XDIAG_PTNR_LOCAL;
3077 			}
3078 			return (sp);
3079 		}
3080 	} else {
3081 		/*
3082 		 * Our last selection has aged.  If it is nonetheless still a
3083 		 * valid cpu then start the scan at the next cpu in the
3084 		 * partition after our last partner.  If the last selection
3085 		 * is no longer a valid cpu then go with our default.  In
3086 		 * this way we slowly cycle through possible partners to
3087 		 * obtain multiple viewpoints over time.
3088 		 */
3089 		sp = cpu[CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id)];
3090 		if (sp == NULL) {
3091 			sp = dtcr->cpu_next_part;
3092 		} else {
3093 			sp = sp->cpu_next_part;		/* may be dtcr */
3094 			if (sp->cpu_part != dtcr->cpu_part)
3095 				sp = dtcr;
3096 		}
3097 	}
3098 
3099 	/*
3100 	 * We have a proposed starting point for our search, but if this
3101 	 * cpu is offline then its cpu_next_part will point to itself
3102 	 * so we can't use that to iterate over cpus in this partition in
3103 	 * the loop below.  We still want to avoid iterating over cpus not
3104 	 * in our partition, so in the case that our starting point is offline
3105 	 * we will repoint it to be the detector itself;  and if the detector
3106 	 * happens to be offline we'll return NULL from the following loop.
3107 	 */
3108 	if (!cpu_flagged_active(sp->cpu_flags)) {
3109 		sp = dtcr;
3110 	}
3111 
3112 	ptnr = sp;
3113 	locptnr = NULL;
3114 	sibptnr = NULL;
3115 	do {
3116 		if (ptnr == dtcr || !cpu_flagged_active(ptnr->cpu_flags))
3117 			continue;
3118 		if (ptnr->cpu_lpl->lpl_lgrp != dtcr->cpu_lpl->lpl_lgrp) {
3119 			CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = ptnr->cpu_id;
3120 			CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
3121 			*typep = CE_XDIAG_PTNR_REMOTE;
3122 			return (ptnr);
3123 		}
3124 		if (pg_plat_cpus_share(ptnr, dtcr, PGHW_CHIP)) {
3125 			if (sibptnr == NULL)
3126 				sibptnr = ptnr;
3127 			continue;
3128 		}
3129 		if (locptnr == NULL)
3130 			locptnr = ptnr;
3131 	} while ((ptnr = ptnr->cpu_next_part) != sp);
3132 
3133 	/*
3134 	 * A foreign partner has already been returned if one was available.
3135 	 *
3136 	 * If locptnr is not NULL it is a cpu in the same lgroup as the
3137 	 * detector, is active, and is not a sibling of the detector.
3138 	 *
3139 	 * If sibptnr is not NULL it is a sibling of the detector, and is
3140 	 * active.
3141 	 *
3142 	 * If we have to resort to using the detector itself we have already
3143 	 * checked that it is active.
3144 	 */
3145 	if (locptnr) {
3146 		CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = locptnr->cpu_id;
3147 		CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
3148 		*typep = CE_XDIAG_PTNR_LOCAL;
3149 		return (locptnr);
3150 	} else if (sibptnr && flags & PTNR_SIBLINGOK) {
3151 		CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = sibptnr->cpu_id;
3152 		CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
3153 		*typep = CE_XDIAG_PTNR_SIBLING;
3154 		return (sibptnr);
3155 	} else if (flags & PTNR_SELFOK) {
3156 		CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = dtcr->cpu_id;
3157 		CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
3158 		*typep = CE_XDIAG_PTNR_SELF;
3159 		return (dtcr);
3160 	}
3161 
3162 	return (NULL);
3163 }
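
/*
 * The partner cache consulted above ages out after
 * cpu_ce_ptnr_cachetime_sec seconds; gethrtime() returns nanoseconds,
 * hence the NANOSEC scaling.  The freshness test in isolation:
 */
#if 0	/* illustrative sketch, not part of this file's build */
static int
ptnr_cache_fresh(hrtime_t lasttime, hrtime_t thistime, hrtime_t secs)
{
	/* lasttime == 0 means no partner has ever been selected. */
	return (lasttime != 0 && thistime - lasttime < secs * NANOSEC);
}
#endif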
3164 
3165 /*
3166  * Cross call handler that is requested to run on the designated partner of
3167  * a cpu that experienced a possibly sticky or possibly persistent CE.
3168  */
3169 static void
3170 ce_ptnrchk_xc(struct async_flt *aflt, uchar_t *dispp)
3171 {
3172 	*dispp = cpu_ce_scrub_mem_err_common(aflt, B_FALSE);
3173 }
3174 
3175 /*
3176  * The associated errorqs are never destroyed so we do not need to deal with
3177  * them disappearing before this timeout fires.  If the affected memory
3178  * has been DR'd out since the original event the scrub algorithm will catch
3179  * any errors and return null disposition info.  If the original detecting
3180  * cpu has been DR'd out then the ereport detector info will not be able to
3181  * look up the CPU type; with a small timeout this is unlikely.
3182  */
3183 static void
3184 ce_lkychk_cb(ce_lkychk_cb_t *cbarg)
3185 {
3186 	struct async_flt *aflt = cbarg->lkycb_aflt;
3187 	uchar_t disp;
3188 	cpu_t *cp;
3189 	int ptnrtype;
3190 
3191 	kpreempt_disable();
3192 	if ((cp = ce_ptnr_select(aflt, PTNR_SIBLINGOK | PTNR_SELFOK,
3193 	    &ptnrtype)) != NULL) {
3194 		xc_one(cp->cpu_id, (xcfunc_t *)ce_ptnrchk_xc, (uint64_t)aflt,
3195 		    (uint64_t)&disp);
3196 		CE_XDIAG_SETLKYINFO(aflt->flt_disp, disp);
3197 		CE_XDIAG_SETPTNRID(aflt->flt_disp, cp->cpu_id);
3198 		CE_XDIAG_SETPTNRTYPE(aflt->flt_disp, ptnrtype);
3199 	} else {
3200 		ce_xdiag_lkydrops++;
3201 		if (ncpus > 1)
3202 			CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
3203 			    CE_XDIAG_SKIP_NOPTNR);
3204 	}
3205 	kpreempt_enable();
3206 
3207 	errorq_commit(cbarg->lkycb_eqp, cbarg->lkycb_eqep, ERRORQ_ASYNC);
3208 	kmem_free(cbarg, sizeof (ce_lkychk_cb_t));
3209 }
3210 
3211 /*
3212  * Called from errorq drain code when processing a CE error, both from
3213  * CPU and PCI drain functions.  Decide what further classification actions,
3214  * if any, we will perform.  Perform immediate actions now, and schedule
3215  * delayed actions as required.  Note that we are no longer necessarily running
3216  * on the detecting cpu, and that the async_flt structure will not persist on
3217  * return from this function.
3218  *
3219  * Calls to this function should aim to be self-throttling in some way.  With
3220  * the delayed re-enable of CEEN the absolute rate of calls should not
3221  * be excessive.  Callers should also avoid performing in-depth classification
3222  * for events in pages that are already known to be suspect.
3223  *
3224  * We return nonzero to indicate that the event has been copied and
3225  * recirculated for further testing.  The caller should not log the event
3226  * in this case - it will be logged when further test results are available.
3227  *
3228  * Our possible contexts are that of errorq_drain: below lock level or from
3229  * panic context.  We can assume that the cpu we are running on is online.
3230  */
3231 
3232 
3233 #ifdef DEBUG
3234 static int ce_xdiag_forceaction;
3235 #endif
3236 
3237 int
3238 ce_scrub_xdiag_recirc(struct async_flt *aflt, errorq_t *eqp,
3239     errorq_elem_t *eqep, size_t afltoffset)
3240 {
3241 	ce_dispact_t dispact, action;
3242 	cpu_t *cp;
3243 	uchar_t dtcrinfo, disp;
3244 	int ptnrtype;
3245 
3246 	if (!ce_disp_inited || panicstr || ce_xdiag_off) {
3247 		ce_xdiag_drops++;
3248 		return (0);
3249 	} else if (!aflt->flt_in_memory) {
3250 		ce_xdiag_drops++;
3251 		CE_XDIAG_SETSKIPCODE(aflt->flt_disp, CE_XDIAG_SKIP_NOTMEM);
3252 		return (0);
3253 	}
3254 
3255 	dtcrinfo = CE_XDIAG_DTCRINFO(aflt->flt_disp);
3256 
3257 	/*
3258 	 * Some correctable events are not scrubbed/classified, such as those
3259 	 * noticed at the tail of cpu_deferred_error.  So if there is no
3260 	 * initial detector classification, go no further.
3261 	 */
3262 	if (!CE_XDIAG_EXT_ALG_APPLIED(dtcrinfo)) {
3263 		ce_xdiag_drops++;
3264 		CE_XDIAG_SETSKIPCODE(aflt->flt_disp, CE_XDIAG_SKIP_NOSCRUB);
3265 		return (0);
3266 	}
3267 
3268 	dispact = CE_DISPACT(ce_disp_table,
3269 	    CE_XDIAG_AFARMATCHED(dtcrinfo),
3270 	    CE_XDIAG_STATE(dtcrinfo),
3271 	    CE_XDIAG_CE1SEEN(dtcrinfo),
3272 	    CE_XDIAG_CE2SEEN(dtcrinfo));
3273 
3274 
3275 	action = CE_ACT(dispact);	/* bad lookup caught below */
3276 #ifdef DEBUG
3277 	if (ce_xdiag_forceaction != 0)
3278 		action = ce_xdiag_forceaction;
3279 #endif
3280 
3281 	switch (action) {
3282 	case CE_ACT_LKYCHK: {
3283 		caddr_t ndata;
3284 		errorq_elem_t *neqep;
3285 		struct async_flt *ecc;
3286 		ce_lkychk_cb_t *cbargp;
3287 
3288 		if ((ndata = errorq_elem_dup(eqp, eqep, &neqep)) == NULL) {
3289 			ce_xdiag_lkydrops++;
3290 			CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
3291 			    CE_XDIAG_SKIP_DUPFAIL);
3292 			break;
3293 		}
3294 		ecc = (struct async_flt *)(ndata + afltoffset);
3295 
3296 		ASSERT(ecc->flt_class == CPU_FAULT ||
3297 		    ecc->flt_class == BUS_FAULT);
3298 		ecc->flt_class = (ecc->flt_class == CPU_FAULT) ?
3299 		    RECIRC_CPU_FAULT : RECIRC_BUS_FAULT;
3300 
3301 		cbargp = kmem_alloc(sizeof (ce_lkychk_cb_t), KM_SLEEP);
3302 		cbargp->lkycb_aflt = ecc;
3303 		cbargp->lkycb_eqp = eqp;
3304 		cbargp->lkycb_eqep = neqep;
3305 
3306 		(void) timeout((void (*)(void *))ce_lkychk_cb,
3307 		    (void *)cbargp, drv_usectohz(cpu_ce_lkychk_timeout_usec));
3308 		return (1);
3309 	}
3310 
3311 	case CE_ACT_PTNRCHK:
3312 		kpreempt_disable();	/* stop cpu list changing */
3313 		if ((cp = ce_ptnr_select(aflt, 0, &ptnrtype)) != NULL) {
3314 			xc_one(cp->cpu_id, (xcfunc_t *)ce_ptnrchk_xc,
3315 			    (uint64_t)aflt, (uint64_t)&disp);
3316 			CE_XDIAG_SETPTNRINFO(aflt->flt_disp, disp);
3317 			CE_XDIAG_SETPTNRID(aflt->flt_disp, cp->cpu_id);
3318 			CE_XDIAG_SETPTNRTYPE(aflt->flt_disp, ptnrtype);
3319 		} else if (ncpus > 1) {
3320 			ce_xdiag_ptnrdrops++;
3321 			CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
3322 			    CE_XDIAG_SKIP_NOPTNR);
3323 		} else {
3324 			ce_xdiag_ptnrdrops++;
3325 			CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
3326 			    CE_XDIAG_SKIP_UNIPROC);
3327 		}
3328 		kpreempt_enable();
3329 		break;
3330 
3331 	case CE_ACT_DONE:
3332 		break;
3333 
3334 	case CE_DISP_BAD:
3335 	default:
3336 #ifdef DEBUG
3337 		cmn_err(CE_PANIC, "ce_scrub_xdiag_recirc: Bad action '%d'",
		    action);
3338 #endif
3339 		ce_xdiag_bad++;
3340 		CE_XDIAG_SETSKIPCODE(aflt->flt_disp, CE_XDIAG_SKIP_ACTBAD);
3341 		break;
3342 	}
3343 
3344 	return (0);
3345 }
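
/*
 * A minimal sketch of how an errorq drain routine might honor the
 * return convention above; example_ce_drain() and ce_log() are
 * hypothetical names, and an afltoffset of 0 assumes the async_flt
 * sits at the start of the errorq element data.
 */
#if 0
static void
example_ce_drain(struct async_flt *aflt, errorq_t *eqp, errorq_elem_t *eqep)
{
	/*
	 * Nonzero means the event was duplicated and recirculated for
	 * further testing; it is logged when those results arrive, so
	 * it must not be logged here.
	 */
	if (ce_scrub_xdiag_recirc(aflt, eqp, eqep, 0) != 0)
		return;

	ce_log(aflt);		/* hypothetical logging step */
}
#endif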
3346 
3347 /*
3348  * We route all errors through a single switch statement.
3349  */
3350 void
3351 cpu_ue_log_err(struct async_flt *aflt)
3352 {
3353 	switch (aflt->flt_class) {
3354 	case CPU_FAULT:
3355 		cpu_ereport_init(aflt);
3356 		if (cpu_async_log_err(aflt, NULL))
3357 			cpu_ereport_post(aflt);
3358 		break;
3359 
3360 	case BUS_FAULT:
3361 		bus_async_log_err(aflt);
3362 		break;
3363 
3364 	default:
3365 		cmn_err(CE_WARN, "discarding async error %p with invalid "
3366 		    "fault class (0x%x)", (void *)aflt, aflt->flt_class);
3367 		return;
3368 	}
3369 }
3370 
3371 /*
3372  * Routine for panic hook callback from panic_idle().
3373  */
3374 void
3375 cpu_async_panic_callb(void)
3376 {
3377 	ch_async_flt_t ch_flt;
3378 	struct async_flt *aflt;
3379 	ch_cpu_errors_t cpu_error_regs;
3380 	uint64_t afsr_errs;
3381 
3382 	get_cpu_error_state(&cpu_error_regs);
3383 
3384 	afsr_errs = (cpu_error_regs.afsr & C_AFSR_ALL_ERRS) |
3385 	    (cpu_error_regs.afsr_ext & C_AFSR_EXT_ALL_ERRS);
3386 
3387 	if (afsr_errs) {
3388 
3389 		bzero(&ch_flt, sizeof (ch_async_flt_t));
3390 		aflt = (struct async_flt *)&ch_flt;
3391 		aflt->flt_id = gethrtime_waitfree();
3392 		aflt->flt_bus_id = getprocessorid();
3393 		aflt->flt_inst = CPU->cpu_id;
3394 		aflt->flt_stat = cpu_error_regs.afsr;
3395 		aflt->flt_addr = cpu_error_regs.afar;
3396 		aflt->flt_prot = AFLT_PROT_NONE;
3397 		aflt->flt_class = CPU_FAULT;
3398 		aflt->flt_priv = ((cpu_error_regs.afsr & C_AFSR_PRIV) != 0);
3399 		aflt->flt_panic = 1;
3400 		ch_flt.afsr_ext = cpu_error_regs.afsr_ext;
3401 		ch_flt.afsr_errs = afsr_errs;
3402 #if defined(SERRANO)
3403 		ch_flt.afar2 = cpu_error_regs.afar2;
3404 #endif	/* SERRANO */
3405 		(void) cpu_queue_events(&ch_flt, NULL, afsr_errs, NULL);
3406 	}
3407 }
3408 
3409 /*
3410  * Routine to convert a syndrome into a syndrome code.
3411  */
3412 static int
3413 synd_to_synd_code(int synd_status, ushort_t synd, uint64_t afsr_bit)
3414 {
3415 	if (synd_status == AFLT_STAT_INVALID)
3416 		return (-1);
3417 
3418 	/*
3419 	 * Use the syndrome to index the appropriate syndrome table,
3420 	 * to get the code indicating which bit(s) are bad.
3421 	 */
3422 	if (afsr_bit &
3423 	    (C_AFSR_MSYND_ERRS | C_AFSR_ESYND_ERRS | C_AFSR_EXT_ESYND_ERRS)) {
3424 		if (afsr_bit & C_AFSR_MSYND_ERRS) {
3425 #if defined(JALAPENO) || defined(SERRANO)
3426 			if ((synd == 0) || (synd >= BSYND_TBL_SIZE))
3427 				return (-1);
3428 			else
3429 				return (BPAR0 + synd);
3430 #else /* JALAPENO || SERRANO */
3431 			if ((synd == 0) || (synd >= MSYND_TBL_SIZE))
3432 				return (-1);
3433 			else
3434 				return (mtag_syndrome_tab[synd]);
3435 #endif /* JALAPENO || SERRANO */
3436 		} else {
3437 			if ((synd == 0) || (synd >= ESYND_TBL_SIZE))
3438 				return (-1);
3439 			else
3440 				return (ecc_syndrome_tab[synd]);
3441 		}
3442 	} else {
3443 		return (-1);
3444 	}
3445 }
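
/*
 * An illustrative caller (hypothetical helper), assuming a valid
 * e-cache data syndrome; a return of -1 means no single-bit syndrome
 * code exists and unum lookup proceeds without one.
 */
#if 0
static int
example_synd_code(struct async_flt *aflt)
{
	return (synd_to_synd_code(AFLT_STAT_VALID, aflt->flt_synd,
	    C_AFSR_CE));
}
#endif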
3446 
3447 int
3448 cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
3449 {
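	/*
	 * plat_get_mem_sid() is an optional platform routine; its
	 * address is nonzero only if the platform module defines it,
	 * hence the address test before calling it.
	 */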
3450 	if (&plat_get_mem_sid)
3451 		return (plat_get_mem_sid(unum, buf, buflen, lenp));
3452 	else
3453 		return (ENOTSUP);
3454 }
3455 
3456 int
3457 cpu_get_mem_offset(uint64_t flt_addr, uint64_t *offp)
3458 {
3459 	if (&plat_get_mem_offset)
3460 		return (plat_get_mem_offset(flt_addr, offp));
3461 	else
3462 		return (ENOTSUP);
3463 }
3464 
3465 int
3466 cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
3467 {
3468 	if (&plat_get_mem_addr)
3469 		return (plat_get_mem_addr(unum, sid, offset, addrp));
3470 	else
3471 		return (ENOTSUP);
3472 }
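
/*
 * The three wrappers above all test the address of an optional plat_*
 * routine.  A rough sketch of a platform module supplying one, with a
 * purely illustrative body:
 */
#if 0
int
plat_get_mem_offset(uint64_t flt_addr, uint64_t *offp)
{
	/* hypothetical: translate flt_addr to a board-relative offset */
	*offp = flt_addr;
	return (0);
}
#endif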
3473 
3474 /*
3475  * Routine to return a string identifying the physical name
3476  * associated with a memory/cache error.
3477  */
3478 int
3479 cpu_get_mem_unum(int synd_status, ushort_t flt_synd, uint64_t flt_stat,
3480     uint64_t flt_addr, int flt_bus_id, int flt_in_memory,
3481     ushort_t flt_status, char *buf, int buflen, int *lenp)
3482 {
3483 	int synd_code;
3484 	int ret;
3485 
3486 	/*
3487 	 * An AFSR of -1 defaults to a memory syndrome.
3488 	 */
3489 	if (flt_stat == (uint64_t)-1)
3490 		flt_stat = C_AFSR_CE;
3491 
3492 	synd_code = synd_to_synd_code(synd_status, flt_synd, flt_stat);
3493 
3494 	/*
3495 	 * Syndrome code must be either a single-bit error code
3496 	 * (0...143) or -1 for unum lookup.
3497 	 */
3498 	if (synd_code < 0 || synd_code >= M2)
3499 		synd_code = -1;
3500 	if (&plat_get_mem_unum) {
3501 		if ((ret = plat_get_mem_unum(synd_code, flt_addr, flt_bus_id,
3502 		    flt_in_memory, flt_status, buf, buflen, lenp)) != 0) {
3503 			buf[0] = '\0';
3504 			*lenp = 0;
3505 		}
3506 
3507 		return (ret);
3508 	}
3509 
3510 	return (ENOTSUP);
3511 }
3512 
3513 /*
3514  * Wrapper for cpu_get_mem_unum() routine that takes an
3515  * async_flt struct rather than explicit arguments.
3516  */
3517 int
3518 cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt,
3519     char *buf, int buflen, int *lenp)
3520 {
3521 	/*
3522 	 * If we come through here for an IO bus error, aflt->flt_stat will
3523 	 * not be the CPU AFSR, and we pass in a -1 to cpu_get_mem_unum()
3524 	 * so it will interpret this as a memory error.
3525 	 */
3526 	return (cpu_get_mem_unum(synd_status, aflt->flt_synd,
3527 	    (aflt->flt_class == BUS_FAULT) ?
3528 	    (uint64_t)-1 : ((ch_async_flt_t *)aflt)->flt_bit,
3529 	    aflt->flt_addr, aflt->flt_bus_id, aflt->flt_in_memory,
3530 	    aflt->flt_status, buf, buflen, lenp));
3531 }
3532 
3533 /*
3534  * Return the unum string for the given synd_code and async_flt
3535  * in buf, which must be at least UNUM_NAMLEN bytes.
3536  */
3537 static int
3538 cpu_get_mem_unum_synd(int synd_code, struct async_flt *aflt, char *buf)
3539 {
3540 	int ret, len;
3541 
3542 	/*
3543 	 * Syndrome code must be either a single-bit error code
3544 	 * (0...143) or -1 for unum lookup.
3545 	 */
3546 	if (synd_code < 0 || synd_code >= M2)
3547 		synd_code = -1;
3548 	if (&plat_get_mem_unum) {
3549 		if ((ret = plat_get_mem_unum(synd_code, aflt->flt_addr,
3550 		    aflt->flt_bus_id, aflt->flt_in_memory,
3551 		    aflt->flt_status, buf, UNUM_NAMLEN, &len)) != 0) {
3552 			buf[0] = '\0';
3553 		}
3554 		return (ret);
3555 	}
3556 
3557 	buf[0] = '\0';
3558 	return (ENOTSUP);
3559 }
3560 
3561 /*
3562  * This routine is a more generic interface to cpu_get_mem_unum()
3563  * that may be used by other modules (e.g. the 'mm' driver, through
3564  * the 'MEM_NAME' ioctl, which is used by fmd to resolve unums
3565  * for Jalapeno/Serrano FRC/RCE or FRU/RUE paired events).
3566  */
3567 int
3568 cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
3569     char *buf, int buflen, int *lenp)
3570 {
3571 	int synd_status, flt_in_memory, ret;
3572 	ushort_t flt_status = 0;
3573 	char unum[UNUM_NAMLEN];
3574 	uint64_t t_afsr_errs;
3575 
3576 	/*
3577 	 * Check for an invalid address.
3578 	 */
3579 	if (afar == (uint64_t)-1)
3580 		return (ENXIO);
3581 
3582 	if (synd == (uint64_t)-1)
3583 		synd_status = AFLT_STAT_INVALID;
3584 	else
3585 		synd_status = AFLT_STAT_VALID;
3586 
3587 	flt_in_memory = (*afsr & C_AFSR_MEMORY) &&
3588 	    pf_is_memory(afar >> MMU_PAGESHIFT);
3589 
3590 	/*
3591 	 * Get aggregate AFSR for call to cpu_error_is_ecache_data.
3592 	 */
3593 	if (*afsr == (uint64_t)-1)
3594 		t_afsr_errs = C_AFSR_CE;
3595 	else {
3596 		t_afsr_errs = (*afsr & C_AFSR_ALL_ERRS);
3597 #if defined(CHEETAH_PLUS)
3598 		if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
3599 			t_afsr_errs |= (*(afsr + 1) & C_AFSR_EXT_ALL_ERRS);
3600 #endif	/* CHEETAH_PLUS */
3601 	}
3602 
3603 	/*
3604 	 * Turn on ECC_ECACHE if error type is E$ Data.
3605 	 */
3606 	if (cpu_error_is_ecache_data(CPU->cpu_id, t_afsr_errs))
3607 		flt_status |= ECC_ECACHE;
3608 
3609 	ret = cpu_get_mem_unum(synd_status, (ushort_t)synd, t_afsr_errs, afar,
3610 	    CPU->cpu_id, flt_in_memory, flt_status, unum, UNUM_NAMLEN, lenp);
3611 	if (ret != 0)
3612 		return (ret);
3613 
3614 	if (*lenp >= buflen)
3615 		return (ENAMETOOLONG);
3616 
3617 	(void) strncpy(buf, unum, buflen);
3618 
3619 	return (0);
3620 }
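
/*
 * A minimal sketch of a caller such as the 'mm' driver mentioned
 * above; example_print_unum() is a hypothetical name.
 */
#if 0
static void
example_print_unum(uint64_t synd, uint64_t *afsr, uint64_t afar)
{
	char unum[UNUM_NAMLEN];
	int len;

	/* afsr[0] holds the AFSR; on Panther afsr[1] holds the ext AFSR */
	if (cpu_get_mem_name(synd, afsr, afar, unum, sizeof (unum),
	    &len) == 0)
		cmn_err(CE_NOTE, "!memory unum: %s", unum);
}
#endif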
3621 
3622 /*
3623  * Routine to return memory information associated
3624  * with a physical address and syndrome.
3625  */
3626 int
3627 cpu_get_mem_info(uint64_t synd, uint64_t afar,
3628     uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
3629     int *segsp, int *banksp, int *mcidp)
3630 {
3631 	int synd_status, synd_code;
3632 
3633 	if (afar == (uint64_t)-1)
3634 		return (ENXIO);
3635 
3636 	if (synd == (uint64_t)-1)
3637 		synd_status = AFLT_STAT_INVALID;
3638 	else
3639 		synd_status = AFLT_STAT_VALID;
3640 
3641 	synd_code = synd_to_synd_code(synd_status, synd, C_AFSR_CE);
3642 
3643 	if (p2get_mem_info != NULL)
3644 		return ((p2get_mem_info)(synd_code, afar,
3645 		    mem_sizep, seg_sizep, bank_sizep,
3646 		    segsp, banksp, mcidp));
3647 	else
3648 		return (ENOTSUP);
3649 }
3650 
3651 /*
3652  * Routine to return a string identifying the physical
3653  * name associated with a cpuid.
3654  */
3655 int
3656 cpu_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
3657 {
3658 	int ret;
3659 	char unum[UNUM_NAMLEN];
3660 
3661 	if (&plat_get_cpu_unum) {
3662 		if ((ret = plat_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, lenp))
3663 		    != 0)
3664 			return (ret);
3665 	} else {
3666 		return (ENOTSUP);
3667 	}
3668 
3669 	if (*lenp >= buflen)
3670 		return (ENAMETOOLONG);
3671 
3672 	(void) strncpy(buf, unum, buflen);
3673 
3674 	return (0);
3675 }
3676 
3677 /*
3678  * This routine exports the name buffer size.
3679  */
3680 size_t
3681 cpu_get_name_bufsize()
3682 {
3683 	return (UNUM_NAMLEN);
3684 }
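
/*
 * Callers pair this with their allocation; a minimal hypothetical
 * sketch:
 */
#if 0
static void
example_unum_alloc(int cpuid)
{
	size_t sz = cpu_get_name_bufsize();
	char *unum = kmem_alloc(sz, KM_SLEEP);
	int len;

	(void) cpu_get_cpu_unum(cpuid, unum, sz, &len);
	kmem_free(unum, sz);
}
#endif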
3685 
3686 /*
3687  * Historical function, apparently not used.
3688  */
3689 /* ARGSUSED */
3690 void
3691 cpu_read_paddr(struct async_flt *ecc, short verbose, short