xref: /illumos-gate/usr/src/uts/sun4u/cpu/us3_common.c (revision 56870e8c)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51e2e7a75Shuah  * Common Development and Distribution License (the "License").
61e2e7a75Shuah  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22f60f9424SChristopher Baumbauer - Sun Microsystems - San Diego United States  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #include <sys/types.h>
277c478bd9Sstevel@tonic-gate #include <sys/systm.h>
287c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
297c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
307c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
317c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h>
327c478bd9Sstevel@tonic-gate #include <sys/machparam.h>
337c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
347c478bd9Sstevel@tonic-gate #include <sys/machthread.h>
357c478bd9Sstevel@tonic-gate #include <sys/cpu.h>
367c478bd9Sstevel@tonic-gate #include <sys/cmp.h>
377c478bd9Sstevel@tonic-gate #include <sys/elf_SPARC.h>
387c478bd9Sstevel@tonic-gate #include <vm/vm_dep.h>
397c478bd9Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
407c478bd9Sstevel@tonic-gate #include <vm/seg_kpm.h>
417c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
427c478bd9Sstevel@tonic-gate #include <sys/cheetahregs.h>
437c478bd9Sstevel@tonic-gate #include <sys/us3_module.h>
447c478bd9Sstevel@tonic-gate #include <sys/async.h>
457c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
467c478bd9Sstevel@tonic-gate #include <sys/debug.h>
477c478bd9Sstevel@tonic-gate #include <sys/dditypes.h>
487c478bd9Sstevel@tonic-gate #include <sys/prom_debug.h>
497c478bd9Sstevel@tonic-gate #include <sys/prom_plat.h>
507c478bd9Sstevel@tonic-gate #include <sys/cpu_module.h>
517c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
527c478bd9Sstevel@tonic-gate #include <sys/intreg.h>
537c478bd9Sstevel@tonic-gate #include <sys/clock.h>
547c478bd9Sstevel@tonic-gate #include <sys/platform_module.h>
557c478bd9Sstevel@tonic-gate #include <sys/machtrap.h>
567c478bd9Sstevel@tonic-gate #include <sys/ontrap.h>
577c478bd9Sstevel@tonic-gate #include <sys/panic.h>
587c478bd9Sstevel@tonic-gate #include <sys/memlist.h>
597c478bd9Sstevel@tonic-gate #include <sys/bootconf.h>
607c478bd9Sstevel@tonic-gate #include <sys/ivintr.h>
617c478bd9Sstevel@tonic-gate #include <sys/atomic.h>
627c478bd9Sstevel@tonic-gate #include <sys/taskq.h>
637c478bd9Sstevel@tonic-gate #include <sys/note.h>
647c478bd9Sstevel@tonic-gate #include <sys/ndifm.h>
657c478bd9Sstevel@tonic-gate #include <sys/ddifm.h>
667c478bd9Sstevel@tonic-gate #include <sys/fm/protocol.h>
677c478bd9Sstevel@tonic-gate #include <sys/fm/util.h>
687c478bd9Sstevel@tonic-gate #include <sys/fm/cpu/UltraSPARC-III.h>
697c478bd9Sstevel@tonic-gate #include <sys/fpras_impl.h>
707c478bd9Sstevel@tonic-gate #include <sys/dtrace.h>
717c478bd9Sstevel@tonic-gate #include <sys/watchpoint.h>
727c478bd9Sstevel@tonic-gate #include <sys/plat_ecc_unum.h>
737c478bd9Sstevel@tonic-gate #include <sys/cyclic.h>
747c478bd9Sstevel@tonic-gate #include <sys/errorq.h>
757c478bd9Sstevel@tonic-gate #include <sys/errclassify.h>
76fb2f18f8Sesaxe #include <sys/pghw.h>
77d3d50737SRafael Vanoni #include <sys/clock_impl.h>
787c478bd9Sstevel@tonic-gate 
797c478bd9Sstevel@tonic-gate #ifdef	CHEETAHPLUS_ERRATUM_25
807c478bd9Sstevel@tonic-gate #include <sys/xc_impl.h>
817c478bd9Sstevel@tonic-gate #endif	/* CHEETAHPLUS_ERRATUM_25 */
827c478bd9Sstevel@tonic-gate 
837bebe46cSjc ch_cpu_logout_t	clop_before_flush;
847bebe46cSjc ch_cpu_logout_t	clop_after_flush;
857bebe46cSjc uint_t	flush_retries_done = 0;
867c478bd9Sstevel@tonic-gate /*
877c478bd9Sstevel@tonic-gate  * Note that 'Cheetah PRM' refers to:
887c478bd9Sstevel@tonic-gate  *   SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
897c478bd9Sstevel@tonic-gate  */
907c478bd9Sstevel@tonic-gate 
917c478bd9Sstevel@tonic-gate /*
927c478bd9Sstevel@tonic-gate  * Per CPU pointers to physical address of TL>0 logout data areas.
937c478bd9Sstevel@tonic-gate  * These pointers have to be in the kernel nucleus to avoid MMU
947c478bd9Sstevel@tonic-gate  * misses.
957c478bd9Sstevel@tonic-gate  */
967c478bd9Sstevel@tonic-gate uint64_t ch_err_tl1_paddrs[NCPU];
977c478bd9Sstevel@tonic-gate 
987c478bd9Sstevel@tonic-gate /*
997c478bd9Sstevel@tonic-gate  * One statically allocated structure to use during startup/DR
1007c478bd9Sstevel@tonic-gate  * to prevent unnecessary panics.
1017c478bd9Sstevel@tonic-gate  */
1027c478bd9Sstevel@tonic-gate ch_err_tl1_data_t ch_err_tl1_data;
1037c478bd9Sstevel@tonic-gate 
1047c478bd9Sstevel@tonic-gate /*
1057c478bd9Sstevel@tonic-gate  * Per CPU pending error at TL>0, used by level15 softint handler
1067c478bd9Sstevel@tonic-gate  */
1077c478bd9Sstevel@tonic-gate uchar_t ch_err_tl1_pending[NCPU];
1087c478bd9Sstevel@tonic-gate 
1097c478bd9Sstevel@tonic-gate /*
1107c478bd9Sstevel@tonic-gate  * For deferred CE re-enable after trap.
1117c478bd9Sstevel@tonic-gate  */
1127c478bd9Sstevel@tonic-gate taskq_t		*ch_check_ce_tq;
1137c478bd9Sstevel@tonic-gate 
1147c478bd9Sstevel@tonic-gate /*
1157c478bd9Sstevel@tonic-gate  * Internal functions.
1167c478bd9Sstevel@tonic-gate  */
1177c478bd9Sstevel@tonic-gate static int cpu_async_log_err(void *flt, errorq_elem_t *eqep);
1187c478bd9Sstevel@tonic-gate static void cpu_log_diag_info(ch_async_flt_t *ch_flt);
1197c478bd9Sstevel@tonic-gate static void cpu_queue_one_event(ch_async_flt_t *ch_flt, char *reason,
1207c478bd9Sstevel@tonic-gate     ecc_type_to_info_t *eccp, ch_diag_data_t *cdp);
12193743541Smb static int cpu_flt_in_memory_one_event(ch_async_flt_t *ch_flt,
12293743541Smb     uint64_t t_afsr_bit);
1237c478bd9Sstevel@tonic-gate static int clear_ecc(struct async_flt *ecc);
1247c478bd9Sstevel@tonic-gate #if defined(CPU_IMP_ECACHE_ASSOC)
1257c478bd9Sstevel@tonic-gate static int cpu_ecache_line_valid(ch_async_flt_t *ch_flt);
1267c478bd9Sstevel@tonic-gate #endif
1277bebe46cSjc int cpu_ecache_set_size(struct cpu *cp);
1287c478bd9Sstevel@tonic-gate static int cpu_ectag_line_invalid(int cachesize, uint64_t tag);
1297bebe46cSjc int cpu_ectag_pa_to_subblk(int cachesize, uint64_t subaddr);
1307bebe46cSjc uint64_t cpu_ectag_to_pa(int setsize, uint64_t tag);
1317bebe46cSjc int cpu_ectag_pa_to_subblk_state(int cachesize,
1327c478bd9Sstevel@tonic-gate 				uint64_t subaddr, uint64_t tag);
1337c478bd9Sstevel@tonic-gate static void cpu_flush_ecache_line(ch_async_flt_t *ch_flt);
1347c478bd9Sstevel@tonic-gate static int afsr_to_afar_status(uint64_t afsr, uint64_t afsr_bit);
1357c478bd9Sstevel@tonic-gate static int afsr_to_esynd_status(uint64_t afsr, uint64_t afsr_bit);
1367c478bd9Sstevel@tonic-gate static int afsr_to_msynd_status(uint64_t afsr, uint64_t afsr_bit);
1377c478bd9Sstevel@tonic-gate static int afsr_to_synd_status(uint_t cpuid, uint64_t afsr, uint64_t afsr_bit);
1387c478bd9Sstevel@tonic-gate static int synd_to_synd_code(int synd_status, ushort_t synd, uint64_t afsr_bit);
13993743541Smb static int cpu_get_mem_unum_synd(int synd_code, struct async_flt *, char *buf);
1407c478bd9Sstevel@tonic-gate static void cpu_uninit_ecache_scrub_dr(struct cpu *cp);
1417c478bd9Sstevel@tonic-gate static void cpu_scrubphys(struct async_flt *aflt);
1427c478bd9Sstevel@tonic-gate static void cpu_payload_add_aflt(struct async_flt *, nvlist_t *, nvlist_t *,
1437c478bd9Sstevel@tonic-gate     int *, int *);
1447c478bd9Sstevel@tonic-gate static void cpu_payload_add_ecache(struct async_flt *, nvlist_t *);
1457c478bd9Sstevel@tonic-gate static void cpu_ereport_init(struct async_flt *aflt);
1467c478bd9Sstevel@tonic-gate static int cpu_check_secondary_errors(ch_async_flt_t *, uint64_t, uint64_t);
1477c478bd9Sstevel@tonic-gate static uint8_t cpu_flt_bit_to_plat_error(struct async_flt *aflt);
1487c478bd9Sstevel@tonic-gate static void cpu_log_fast_ecc_error(caddr_t tpc, int priv, int tl, uint64_t ceen,
1494fd7ecabSdilpreet     uint64_t nceen, ch_cpu_logout_t *clop);
1507c478bd9Sstevel@tonic-gate static int cpu_ce_delayed_ec_logout(uint64_t);
1517c478bd9Sstevel@tonic-gate static int cpu_matching_ecache_line(uint64_t, void *, int, int *);
15238e9bdffSmikechr static int cpu_error_is_ecache_data(int, uint64_t);
15338e9bdffSmikechr static void cpu_fmri_cpu_set(nvlist_t *, int);
15438e9bdffSmikechr static int cpu_error_to_resource_type(struct async_flt *aflt);
1557c478bd9Sstevel@tonic-gate 
1567c478bd9Sstevel@tonic-gate #ifdef	CHEETAHPLUS_ERRATUM_25
1577c478bd9Sstevel@tonic-gate static int mondo_recover_proc(uint16_t, int);
1587c478bd9Sstevel@tonic-gate static void cheetah_nudge_init(void);
1597c478bd9Sstevel@tonic-gate static void cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr,
1607c478bd9Sstevel@tonic-gate     cyc_time_t *when);
1617c478bd9Sstevel@tonic-gate static void cheetah_nudge_buddy(void);
1627c478bd9Sstevel@tonic-gate #endif	/* CHEETAHPLUS_ERRATUM_25 */
1637c478bd9Sstevel@tonic-gate 
1647c478bd9Sstevel@tonic-gate #if defined(CPU_IMP_L1_CACHE_PARITY)
1657c478bd9Sstevel@tonic-gate static void cpu_dcache_parity_info(ch_async_flt_t *ch_flt);
1667c478bd9Sstevel@tonic-gate static void cpu_dcache_parity_check(ch_async_flt_t *ch_flt, int index);
1677c478bd9Sstevel@tonic-gate static void cpu_record_dc_data_parity(ch_async_flt_t *ch_flt,
1687c478bd9Sstevel@tonic-gate     ch_dc_data_t *dest_dcp, ch_dc_data_t *src_dcp, int way, int word);
1697c478bd9Sstevel@tonic-gate static void cpu_icache_parity_info(ch_async_flt_t *ch_flt);
1707c478bd9Sstevel@tonic-gate static void cpu_icache_parity_check(ch_async_flt_t *ch_flt, int index);
1717c478bd9Sstevel@tonic-gate static void cpu_pcache_parity_info(ch_async_flt_t *ch_flt);
1727c478bd9Sstevel@tonic-gate static void cpu_pcache_parity_check(ch_async_flt_t *ch_flt, int index);
1737c478bd9Sstevel@tonic-gate static void cpu_payload_add_dcache(struct async_flt *, nvlist_t *);
1747c478bd9Sstevel@tonic-gate static void cpu_payload_add_icache(struct async_flt *, nvlist_t *);
1757c478bd9Sstevel@tonic-gate #endif	/* CPU_IMP_L1_CACHE_PARITY */
1767c478bd9Sstevel@tonic-gate 
1777c478bd9Sstevel@tonic-gate int (*p2get_mem_info)(int synd_code, uint64_t paddr,
1787c478bd9Sstevel@tonic-gate     uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
1797c478bd9Sstevel@tonic-gate     int *segsp, int *banksp, int *mcidp);
1807c478bd9Sstevel@tonic-gate 
1817c478bd9Sstevel@tonic-gate /*
1827c478bd9Sstevel@tonic-gate  * This table is used to determine which bit(s) is(are) bad when an ECC
1837c478bd9Sstevel@tonic-gate  * error occurs.  The array is indexed by an 9-bit syndrome.  The entries
1847c478bd9Sstevel@tonic-gate  * of this array have the following semantics:
1857c478bd9Sstevel@tonic-gate  *
1867c478bd9Sstevel@tonic-gate  *      00-127  The number of the bad bit, when only one bit is bad.
1877c478bd9Sstevel@tonic-gate  *      128     ECC bit C0 is bad.
1887c478bd9Sstevel@tonic-gate  *      129     ECC bit C1 is bad.
1897c478bd9Sstevel@tonic-gate  *      130     ECC bit C2 is bad.
1907c478bd9Sstevel@tonic-gate  *      131     ECC bit C3 is bad.
1917c478bd9Sstevel@tonic-gate  *      132     ECC bit C4 is bad.
1927c478bd9Sstevel@tonic-gate  *      133     ECC bit C5 is bad.
1937c478bd9Sstevel@tonic-gate  *      134     ECC bit C6 is bad.
1947c478bd9Sstevel@tonic-gate  *      135     ECC bit C7 is bad.
1957c478bd9Sstevel@tonic-gate  *      136     ECC bit C8 is bad.
1967c478bd9Sstevel@tonic-gate  *	137-143 reserved for Mtag Data and ECC.
1977c478bd9Sstevel@tonic-gate  *      144(M2) Two bits are bad within a nibble.
1987c478bd9Sstevel@tonic-gate  *      145(M3) Three bits are bad within a nibble.
1997c478bd9Sstevel@tonic-gate  *      146(M3) Four bits are bad within a nibble.
2007c478bd9Sstevel@tonic-gate  *      147(M)  Multiple bits (5 or more) are bad.
2017c478bd9Sstevel@tonic-gate  *      148     NO bits are bad.
2027c478bd9Sstevel@tonic-gate  * Based on "Cheetah Programmer's Reference Manual" rev 1.1, Tables 11-4,11-5.
2037c478bd9Sstevel@tonic-gate  */
2047c478bd9Sstevel@tonic-gate 
2057c478bd9Sstevel@tonic-gate #define	C0	128
2067c478bd9Sstevel@tonic-gate #define	C1	129
2077c478bd9Sstevel@tonic-gate #define	C2	130
2087c478bd9Sstevel@tonic-gate #define	C3	131
2097c478bd9Sstevel@tonic-gate #define	C4	132
2107c478bd9Sstevel@tonic-gate #define	C5	133
2117c478bd9Sstevel@tonic-gate #define	C6	134
2127c478bd9Sstevel@tonic-gate #define	C7	135
2137c478bd9Sstevel@tonic-gate #define	C8	136
2147c478bd9Sstevel@tonic-gate #define	MT0	137	/* Mtag Data bit 0 */
2157c478bd9Sstevel@tonic-gate #define	MT1	138
2167c478bd9Sstevel@tonic-gate #define	MT2	139
2177c478bd9Sstevel@tonic-gate #define	MTC0	140	/* Mtag Check bit 0 */
2187c478bd9Sstevel@tonic-gate #define	MTC1	141
2197c478bd9Sstevel@tonic-gate #define	MTC2	142
2207c478bd9Sstevel@tonic-gate #define	MTC3	143
2217c478bd9Sstevel@tonic-gate #define	M2	144
2227c478bd9Sstevel@tonic-gate #define	M3	145
2237c478bd9Sstevel@tonic-gate #define	M4	146
2247c478bd9Sstevel@tonic-gate #define	M	147
2257c478bd9Sstevel@tonic-gate #define	NA	148
2267c478bd9Sstevel@tonic-gate #if defined(JALAPENO) || defined(SERRANO)
2277c478bd9Sstevel@tonic-gate #define	S003	149	/* Syndrome 0x003 => likely from CPU/EDU:ST/FRU/BP */
2287c478bd9Sstevel@tonic-gate #define	S003MEM	150	/* Syndrome 0x003 => likely from WDU/WBP */
2297c478bd9Sstevel@tonic-gate #define	SLAST	S003MEM	/* last special syndrome */
2307c478bd9Sstevel@tonic-gate #else /* JALAPENO || SERRANO */
2317c478bd9Sstevel@tonic-gate #define	S003	149	/* Syndrome 0x003 => likely from EDU:ST */
2327c478bd9Sstevel@tonic-gate #define	S071	150	/* Syndrome 0x071 => likely from WDU/CPU */
2337c478bd9Sstevel@tonic-gate #define	S11C	151	/* Syndrome 0x11c => likely from BERR/DBERR */
2347c478bd9Sstevel@tonic-gate #define	SLAST	S11C	/* last special syndrome */
2357c478bd9Sstevel@tonic-gate #endif /* JALAPENO || SERRANO */
2367c478bd9Sstevel@tonic-gate #if defined(JALAPENO) || defined(SERRANO)
2377c478bd9Sstevel@tonic-gate #define	BPAR0	152	/* syndrom 152 through 167 for bus parity */
2387c478bd9Sstevel@tonic-gate #define	BPAR15	167
2397c478bd9Sstevel@tonic-gate #endif	/* JALAPENO || SERRANO */
2407c478bd9Sstevel@tonic-gate 
2417c478bd9Sstevel@tonic-gate static uint8_t ecc_syndrome_tab[] =
2427c478bd9Sstevel@tonic-gate {
2437c478bd9Sstevel@tonic-gate NA,  C0,  C1, S003, C2,  M2,  M3,  47,  C3,  M2,  M2,  53,  M2,  41,  29,   M,
2447c478bd9Sstevel@tonic-gate C4,   M,   M,  50,  M2,  38,  25,  M2,  M2,  33,  24,  M2,  11,   M,  M2,  16,
2457c478bd9Sstevel@tonic-gate C5,   M,   M,  46,  M2,  37,  19,  M2,   M,  31,  32,   M,   7,  M2,  M2,  10,
2467c478bd9Sstevel@tonic-gate M2,  40,  13,  M2,  59,   M,  M2,  66,   M,  M2,  M2,   0,  M2,  67,  71,   M,
2477c478bd9Sstevel@tonic-gate C6,   M,   M,  43,   M,  36,  18,   M,  M2,  49,  15,   M,  63,  M2,  M2,   6,
2487c478bd9Sstevel@tonic-gate M2,  44,  28,  M2,   M,  M2,  M2,  52,  68,  M2,  M2,  62,  M2,  M3,  M3,  M4,
2497c478bd9Sstevel@tonic-gate M2,  26, 106,  M2,  64,   M,  M2,   2, 120,   M,  M2,  M3,   M,  M3,  M3,  M4,
2507c478bd9Sstevel@tonic-gate #if defined(JALAPENO) || defined(SERRANO)
2517c478bd9Sstevel@tonic-gate 116, M2,  M2,  M3,  M2,  M3,   M,  M4,  M2,  58,  54,  M2,   M,  M4,  M4,  M3,
2527c478bd9Sstevel@tonic-gate #else	/* JALAPENO || SERRANO */
2537c478bd9Sstevel@tonic-gate 116, S071, M2,  M3,  M2,  M3,   M,  M4,  M2,  58,  54,  M2,   M,  M4,  M4,  M3,
2547c478bd9Sstevel@tonic-gate #endif	/* JALAPENO || SERRANO */
2557c478bd9Sstevel@tonic-gate C7,  M2,   M,  42,   M,  35,  17,  M2,   M,  45,  14,  M2,  21,  M2,  M2,   5,
2567c478bd9Sstevel@tonic-gate M,   27,   M,   M,  99,   M,   M,   3, 114,  M2,  M2,  20,  M2,  M3,  M3,   M,
2577c478bd9Sstevel@tonic-gate M2,  23, 113,  M2, 112,  M2,   M,  51,  95,   M,  M2,  M3,  M2,  M3,  M3,  M2,
2587c478bd9Sstevel@tonic-gate 103,  M,  M2,  M3,  M2,  M3,  M3,  M4,  M2,  48,   M,   M,  73,  M2,   M,  M3,
2597c478bd9Sstevel@tonic-gate M2,  22, 110,  M2, 109,  M2,   M,   9, 108,  M2,   M,  M3,  M2,  M3,  M3,   M,
2607c478bd9Sstevel@tonic-gate 102, M2,   M,   M,  M2,  M3,  M3,   M,  M2,  M3,  M3,  M2,   M,  M4,   M,  M3,
2617c478bd9Sstevel@tonic-gate 98,   M,  M2,  M3,  M2,   M,  M3,  M4,  M2,  M3,  M3,  M4,  M3,   M,   M,   M,
2627c478bd9Sstevel@tonic-gate M2,  M3,  M3,   M,  M3,   M,   M,   M,  56,  M4,   M,  M3,  M4,   M,   M,   M,
2637c478bd9Sstevel@tonic-gate C8,   M,  M2,  39,   M,  34, 105,  M2,   M,  30, 104,   M, 101,   M,   M,   4,
2647c478bd9Sstevel@tonic-gate #if defined(JALAPENO) || defined(SERRANO)
2657c478bd9Sstevel@tonic-gate M,    M, 100,   M,  83,   M,  M2,  12,  87,   M,   M,  57,  M2,   M,  M3,   M,
2667c478bd9Sstevel@tonic-gate #else	/* JALAPENO || SERRANO */
2677c478bd9Sstevel@tonic-gate M,    M, 100,   M,  83,   M,  M2,  12,  87,   M,   M,  57, S11C,  M,  M3,   M,
2687c478bd9Sstevel@tonic-gate #endif	/* JALAPENO || SERRANO */
2697c478bd9Sstevel@tonic-gate M2,  97,  82,  M2,  78,  M2,  M2,   1,  96,   M,   M,   M,   M,   M,  M3,  M2,
2707c478bd9Sstevel@tonic-gate 94,   M,  M2,  M3,  M2,   M,  M3,   M,  M2,   M,  79,   M,  69,   M,  M4,   M,
2717c478bd9Sstevel@tonic-gate M2,  93,  92,   M,  91,   M,  M2,   8,  90,  M2,  M2,   M,   M,   M,   M,  M4,
2727c478bd9Sstevel@tonic-gate 89,   M,   M,  M3,  M2,  M3,  M3,   M,   M,   M,  M3,  M2,  M3,  M2,   M,  M3,
2737c478bd9Sstevel@tonic-gate 86,   M,  M2,  M3,  M2,   M,  M3,   M,  M2,   M,  M3,   M,  M3,   M,   M,  M3,
2747c478bd9Sstevel@tonic-gate M,    M,  M3,  M2,  M3,  M2,  M4,   M,  60,   M,  M2,  M3,  M4,   M,   M,  M2,
2757c478bd9Sstevel@tonic-gate M2,  88,  85,  M2,  84,   M,  M2,  55,  81,  M2,  M2,  M3,  M2,  M3,  M3,  M4,
2767c478bd9Sstevel@tonic-gate 77,   M,   M,   M,  M2,  M3,   M,   M,  M2,  M3,  M3,  M4,  M3,  M2,   M,   M,
2777c478bd9Sstevel@tonic-gate 74,   M,  M2,  M3,   M,   M,  M3,   M,   M,   M,  M3,   M,  M3,   M,  M4,  M3,
2787c478bd9Sstevel@tonic-gate M2,  70, 107,  M4,  65,  M2,  M2,   M, 127,   M,   M,   M,  M2,  M3,  M3,   M,
2797c478bd9Sstevel@tonic-gate 80,  M2,  M2,  72,   M, 119, 118,   M,  M2, 126,  76,   M, 125,   M,  M4,  M3,
2807c478bd9Sstevel@tonic-gate M2, 115, 124,   M,  75,   M,   M,  M3,  61,   M,  M4,   M,  M4,   M,   M,   M,
2817c478bd9Sstevel@tonic-gate M,  123, 122,  M4, 121,  M4,   M,  M3, 117,  M2,  M2,  M3,  M4,  M3,   M,   M,
2827c478bd9Sstevel@tonic-gate 111,  M,   M,   M,  M4,  M3,  M3,   M,   M,   M,  M3,   M,  M3,  M2,   M,   M
2837c478bd9Sstevel@tonic-gate };
2847c478bd9Sstevel@tonic-gate 
2857c478bd9Sstevel@tonic-gate #define	ESYND_TBL_SIZE	(sizeof (ecc_syndrome_tab) / sizeof (uint8_t))
2867c478bd9Sstevel@tonic-gate 
2877c478bd9Sstevel@tonic-gate #if !(defined(JALAPENO) || defined(SERRANO))
2887c478bd9Sstevel@tonic-gate /*
2897c478bd9Sstevel@tonic-gate  * This table is used to determine which bit(s) is(are) bad when a Mtag
2907c478bd9Sstevel@tonic-gate  * error occurs.  The array is indexed by an 4-bit ECC syndrome. The entries
2917c478bd9Sstevel@tonic-gate  * of this array have the following semantics:
2927c478bd9Sstevel@tonic-gate  *
2937c478bd9Sstevel@tonic-gate  *      -1	Invalid mtag syndrome.
2947c478bd9Sstevel@tonic-gate  *      137     Mtag Data 0 is bad.
2957c478bd9Sstevel@tonic-gate  *      138     Mtag Data 1 is bad.
2967c478bd9Sstevel@tonic-gate  *      139     Mtag Data 2 is bad.
2977c478bd9Sstevel@tonic-gate  *      140     Mtag ECC 0 is bad.
2987c478bd9Sstevel@tonic-gate  *      141     Mtag ECC 1 is bad.
2997c478bd9Sstevel@tonic-gate  *      142     Mtag ECC 2 is bad.
3007c478bd9Sstevel@tonic-gate  *      143     Mtag ECC 3 is bad.
3017c478bd9Sstevel@tonic-gate  * Based on "Cheetah Programmer's Reference Manual" rev 1.1, Tables 11-6.
3027c478bd9Sstevel@tonic-gate  */
3037c478bd9Sstevel@tonic-gate short mtag_syndrome_tab[] =
3047c478bd9Sstevel@tonic-gate {
3057c478bd9Sstevel@tonic-gate NA, MTC0, MTC1, M2, MTC2, M2, M2, MT0, MTC3, M2, M2,  MT1, M2, MT2, M2, M2
3067c478bd9Sstevel@tonic-gate };
3077c478bd9Sstevel@tonic-gate 
3087c478bd9Sstevel@tonic-gate #define	MSYND_TBL_SIZE	(sizeof (mtag_syndrome_tab) / sizeof (short))
3097c478bd9Sstevel@tonic-gate 
3107c478bd9Sstevel@tonic-gate #else /* !(JALAPENO || SERRANO) */
3117c478bd9Sstevel@tonic-gate 
3127c478bd9Sstevel@tonic-gate #define	BSYND_TBL_SIZE	16
3137c478bd9Sstevel@tonic-gate 
3147c478bd9Sstevel@tonic-gate #endif /* !(JALAPENO || SERRANO) */
3157c478bd9Sstevel@tonic-gate 
316f60f9424SChristopher Baumbauer - Sun Microsystems - San Diego United States /*
317f60f9424SChristopher Baumbauer - Sun Microsystems - San Diego United States  * Virtual Address bit flag in the data cache. This is actually bit 2 in the
318f60f9424SChristopher Baumbauer - Sun Microsystems - San Diego United States  * dcache data tag.
319f60f9424SChristopher Baumbauer - Sun Microsystems - San Diego United States  */
320f60f9424SChristopher Baumbauer - Sun Microsystems - San Diego United States #define	VA13	INT64_C(0x0000000000000002)
321f60f9424SChristopher Baumbauer - Sun Microsystems - San Diego United States 
32238e9bdffSmikechr /*
32338e9bdffSmikechr  * Types returned from cpu_error_to_resource_type()
32438e9bdffSmikechr  */
32538e9bdffSmikechr #define	ERRTYPE_UNKNOWN		0
32638e9bdffSmikechr #define	ERRTYPE_CPU		1
32738e9bdffSmikechr #define	ERRTYPE_MEMORY		2
32838e9bdffSmikechr #define	ERRTYPE_ECACHE_DATA	3
32938e9bdffSmikechr 
3307c478bd9Sstevel@tonic-gate /*
3317c478bd9Sstevel@tonic-gate  * CE initial classification and subsequent action lookup table
3327c478bd9Sstevel@tonic-gate  */
3337c478bd9Sstevel@tonic-gate static ce_dispact_t ce_disp_table[CE_INITDISPTBL_SIZE];
3347c478bd9Sstevel@tonic-gate static int ce_disp_inited;
3357c478bd9Sstevel@tonic-gate 
3367c478bd9Sstevel@tonic-gate /*
3377c478bd9Sstevel@tonic-gate  * Set to disable leaky and partner check for memory correctables
3387c478bd9Sstevel@tonic-gate  */
3397c478bd9Sstevel@tonic-gate int ce_xdiag_off;
3407c478bd9Sstevel@tonic-gate 
3417c478bd9Sstevel@tonic-gate /*
3427c478bd9Sstevel@tonic-gate  * The following are not incremented atomically so are indicative only
3437c478bd9Sstevel@tonic-gate  */
3447c478bd9Sstevel@tonic-gate static int ce_xdiag_drops;
3457c478bd9Sstevel@tonic-gate static int ce_xdiag_lkydrops;
3467c478bd9Sstevel@tonic-gate static int ce_xdiag_ptnrdrops;
3477c478bd9Sstevel@tonic-gate static int ce_xdiag_bad;
3487c478bd9Sstevel@tonic-gate 
3497c478bd9Sstevel@tonic-gate /*
3507c478bd9Sstevel@tonic-gate  * CE leaky check callback structure
3517c478bd9Sstevel@tonic-gate  */
3527c478bd9Sstevel@tonic-gate typedef struct {
3537c478bd9Sstevel@tonic-gate 	struct async_flt *lkycb_aflt;
3547c478bd9Sstevel@tonic-gate 	errorq_t *lkycb_eqp;
3557c478bd9Sstevel@tonic-gate 	errorq_elem_t *lkycb_eqep;
3567c478bd9Sstevel@tonic-gate } ce_lkychk_cb_t;
3577c478bd9Sstevel@tonic-gate 
3587c478bd9Sstevel@tonic-gate /*
3597c478bd9Sstevel@tonic-gate  * defines for various ecache_flush_flag's
3607c478bd9Sstevel@tonic-gate  */
3617c478bd9Sstevel@tonic-gate #define	ECACHE_FLUSH_LINE	1
3627c478bd9Sstevel@tonic-gate #define	ECACHE_FLUSH_ALL	2
3637c478bd9Sstevel@tonic-gate 
3647c478bd9Sstevel@tonic-gate /*
3657c478bd9Sstevel@tonic-gate  * STICK sync
3667c478bd9Sstevel@tonic-gate  */
3677c478bd9Sstevel@tonic-gate #define	STICK_ITERATION 10
3687c478bd9Sstevel@tonic-gate #define	MAX_TSKEW	1
3697c478bd9Sstevel@tonic-gate #define	EV_A_START	0
3707c478bd9Sstevel@tonic-gate #define	EV_A_END	1
3717c478bd9Sstevel@tonic-gate #define	EV_B_START	2
3727c478bd9Sstevel@tonic-gate #define	EV_B_END	3
3737c478bd9Sstevel@tonic-gate #define	EVENTS		4
3747c478bd9Sstevel@tonic-gate 
3757c478bd9Sstevel@tonic-gate static int64_t stick_iter = STICK_ITERATION;
3767c478bd9Sstevel@tonic-gate static int64_t stick_tsk = MAX_TSKEW;
3777c478bd9Sstevel@tonic-gate 
3787c478bd9Sstevel@tonic-gate typedef enum {
3797c478bd9Sstevel@tonic-gate 	EVENT_NULL = 0,
3807c478bd9Sstevel@tonic-gate 	SLAVE_START,
3817c478bd9Sstevel@tonic-gate 	SLAVE_CONT,
3827c478bd9Sstevel@tonic-gate 	MASTER_START
3837c478bd9Sstevel@tonic-gate } event_cmd_t;
3847c478bd9Sstevel@tonic-gate 
3857c478bd9Sstevel@tonic-gate static volatile event_cmd_t stick_sync_cmd = EVENT_NULL;
3867c478bd9Sstevel@tonic-gate static int64_t timestamp[EVENTS];
3877c478bd9Sstevel@tonic-gate static volatile int slave_done;
3887c478bd9Sstevel@tonic-gate 
3897c478bd9Sstevel@tonic-gate #ifdef DEBUG
3907c478bd9Sstevel@tonic-gate #define	DSYNC_ATTEMPTS 64
3917c478bd9Sstevel@tonic-gate typedef struct {
3927c478bd9Sstevel@tonic-gate 	int64_t	skew_val[DSYNC_ATTEMPTS];
3937c478bd9Sstevel@tonic-gate } ss_t;
3947c478bd9Sstevel@tonic-gate 
3957c478bd9Sstevel@tonic-gate ss_t stick_sync_stats[NCPU];
3967c478bd9Sstevel@tonic-gate #endif /* DEBUG */
3977c478bd9Sstevel@tonic-gate 
3981e2e7a75Shuah uint_t cpu_impl_dual_pgsz = 0;
3997c478bd9Sstevel@tonic-gate #if defined(CPU_IMP_DUAL_PAGESIZE)
4007c478bd9Sstevel@tonic-gate uint_t disable_dual_pgsz = 0;
4017c478bd9Sstevel@tonic-gate #endif	/* CPU_IMP_DUAL_PAGESIZE */
4027c478bd9Sstevel@tonic-gate 
4037c478bd9Sstevel@tonic-gate /*
4047c478bd9Sstevel@tonic-gate  * Save the cache bootup state for use when internal
4057c478bd9Sstevel@tonic-gate  * caches are to be re-enabled after an error occurs.
4067c478bd9Sstevel@tonic-gate  */
4077c478bd9Sstevel@tonic-gate uint64_t cache_boot_state;
4087c478bd9Sstevel@tonic-gate 
4097c478bd9Sstevel@tonic-gate /*
4107c478bd9Sstevel@tonic-gate  * PA[22:0] represent Displacement in Safari configuration space.
4117c478bd9Sstevel@tonic-gate  */
4127c478bd9Sstevel@tonic-gate uint_t	root_phys_addr_lo_mask = 0x7fffffu;
4137c478bd9Sstevel@tonic-gate 
4147c478bd9Sstevel@tonic-gate bus_config_eclk_t bus_config_eclk[] = {
4157c478bd9Sstevel@tonic-gate #if defined(JALAPENO) || defined(SERRANO)
4167c478bd9Sstevel@tonic-gate 	{JBUS_CONFIG_ECLK_1_DIV, JBUS_CONFIG_ECLK_1},
4177c478bd9Sstevel@tonic-gate 	{JBUS_CONFIG_ECLK_2_DIV, JBUS_CONFIG_ECLK_2},
4187c478bd9Sstevel@tonic-gate 	{JBUS_CONFIG_ECLK_32_DIV, JBUS_CONFIG_ECLK_32},
4197c478bd9Sstevel@tonic-gate #else /* JALAPENO || SERRANO */
4207c478bd9Sstevel@tonic-gate 	{SAFARI_CONFIG_ECLK_1_DIV, SAFARI_CONFIG_ECLK_1},
4217c478bd9Sstevel@tonic-gate 	{SAFARI_CONFIG_ECLK_2_DIV, SAFARI_CONFIG_ECLK_2},
4227c478bd9Sstevel@tonic-gate 	{SAFARI_CONFIG_ECLK_32_DIV, SAFARI_CONFIG_ECLK_32},
4237c478bd9Sstevel@tonic-gate #endif /* JALAPENO || SERRANO */
4247c478bd9Sstevel@tonic-gate 	{0, 0}
4257c478bd9Sstevel@tonic-gate };
4267c478bd9Sstevel@tonic-gate 
4277c478bd9Sstevel@tonic-gate /*
4287c478bd9Sstevel@tonic-gate  * Interval for deferred CEEN reenable
4297c478bd9Sstevel@tonic-gate  */
4307c478bd9Sstevel@tonic-gate int cpu_ceen_delay_secs = CPU_CEEN_DELAY_SECS;
4317c478bd9Sstevel@tonic-gate 
4327c478bd9Sstevel@tonic-gate /*
4337c478bd9Sstevel@tonic-gate  * set in /etc/system to control logging of user BERR/TO's
4347c478bd9Sstevel@tonic-gate  */
4357c478bd9Sstevel@tonic-gate int cpu_berr_to_verbose = 0;
4367c478bd9Sstevel@tonic-gate 
4377c478bd9Sstevel@tonic-gate /*
4387c478bd9Sstevel@tonic-gate  * set to 0 in /etc/system to defer CEEN reenable for all CEs
4397c478bd9Sstevel@tonic-gate  */
4407c478bd9Sstevel@tonic-gate uint64_t cpu_ce_not_deferred = CPU_CE_NOT_DEFERRED;
4417c478bd9Sstevel@tonic-gate uint64_t cpu_ce_not_deferred_ext = CPU_CE_NOT_DEFERRED_EXT;
4427c478bd9Sstevel@tonic-gate 
4437c478bd9Sstevel@tonic-gate /*
4447c478bd9Sstevel@tonic-gate  * Set of all offline cpus
4457c478bd9Sstevel@tonic-gate  */
4467c478bd9Sstevel@tonic-gate cpuset_t cpu_offline_set;
4477c478bd9Sstevel@tonic-gate 
4487c478bd9Sstevel@tonic-gate static void cpu_delayed_check_ce_errors(void *);
4497c478bd9Sstevel@tonic-gate static void cpu_check_ce_errors(void *);
4507c478bd9Sstevel@tonic-gate void cpu_error_ecache_flush(ch_async_flt_t *);
4517c478bd9Sstevel@tonic-gate static int cpu_error_ecache_flush_required(ch_async_flt_t *);
4527c478bd9Sstevel@tonic-gate static void cpu_log_and_clear_ce(ch_async_flt_t *);
4537c478bd9Sstevel@tonic-gate void cpu_ce_detected(ch_cpu_errors_t *, int);
4547c478bd9Sstevel@tonic-gate 
4557c478bd9Sstevel@tonic-gate /*
4567c478bd9Sstevel@tonic-gate  * CE Leaky check timeout in microseconds.  This is chosen to be twice the
4577c478bd9Sstevel@tonic-gate  * memory refresh interval of current DIMMs (64ms).  After initial fix that
4587c478bd9Sstevel@tonic-gate  * gives at least one full refresh cycle in which the cell can leak
4597c478bd9Sstevel@tonic-gate  * (whereafter further refreshes simply reinforce any incorrect bit value).
4607c478bd9Sstevel@tonic-gate  */
4617c478bd9Sstevel@tonic-gate clock_t cpu_ce_lkychk_timeout_usec = 128000;
4627c478bd9Sstevel@tonic-gate 
4637c478bd9Sstevel@tonic-gate /*
4647c478bd9Sstevel@tonic-gate  * CE partner check partner caching period in seconds
4657c478bd9Sstevel@tonic-gate  */
4667c478bd9Sstevel@tonic-gate int cpu_ce_ptnr_cachetime_sec = 60;
4677c478bd9Sstevel@tonic-gate 
4687c478bd9Sstevel@tonic-gate /*
4697c478bd9Sstevel@tonic-gate  * Sets trap table entry ttentry by overwriting eight instructions from ttlabel
4707c478bd9Sstevel@tonic-gate  */
4717c478bd9Sstevel@tonic-gate #define	CH_SET_TRAP(ttentry, ttlabel)			\
4727c478bd9Sstevel@tonic-gate 		bcopy((const void *)&ttlabel, &ttentry, 32);		\
4737c478bd9Sstevel@tonic-gate 		flush_instr_mem((caddr_t)&ttentry, 32);
4747c478bd9Sstevel@tonic-gate 
4757c478bd9Sstevel@tonic-gate static int min_ecache_size;
4767c478bd9Sstevel@tonic-gate static uint_t priv_hcl_1;
4777c478bd9Sstevel@tonic-gate static uint_t priv_hcl_2;
4787c478bd9Sstevel@tonic-gate static uint_t priv_hcl_4;
4797c478bd9Sstevel@tonic-gate static uint_t priv_hcl_8;
4807c478bd9Sstevel@tonic-gate 
4817c478bd9Sstevel@tonic-gate void
cpu_setup(void)4827c478bd9Sstevel@tonic-gate cpu_setup(void)
4837c478bd9Sstevel@tonic-gate {
4847c478bd9Sstevel@tonic-gate 	extern int at_flags;
4857c478bd9Sstevel@tonic-gate 	extern int cpc_has_overflow_intr;
4867c478bd9Sstevel@tonic-gate 
4877c478bd9Sstevel@tonic-gate 	/*
4887c478bd9Sstevel@tonic-gate 	 * Setup chip-specific trap handlers.
4897c478bd9Sstevel@tonic-gate 	 */
4907c478bd9Sstevel@tonic-gate 	cpu_init_trap();
4917c478bd9Sstevel@tonic-gate 
4927c478bd9Sstevel@tonic-gate 	cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);
4937c478bd9Sstevel@tonic-gate 
4947c478bd9Sstevel@tonic-gate 	at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;
4957c478bd9Sstevel@tonic-gate 
4967c478bd9Sstevel@tonic-gate 	/*
4977c478bd9Sstevel@tonic-gate 	 * save the cache bootup state.
4987c478bd9Sstevel@tonic-gate 	 */
4997c478bd9Sstevel@tonic-gate 	cache_boot_state = get_dcu() & DCU_CACHE;
5007c478bd9Sstevel@tonic-gate 
5017c478bd9Sstevel@tonic-gate 	/*
5027c478bd9Sstevel@tonic-gate 	 * Due to the number of entries in the fully-associative tlb
5037c478bd9Sstevel@tonic-gate 	 * this may have to be tuned lower than in spitfire.
5047c478bd9Sstevel@tonic-gate 	 */
5057c478bd9Sstevel@tonic-gate 	pp_slots = MIN(8, MAXPP_SLOTS);
5067c478bd9Sstevel@tonic-gate 
5077c478bd9Sstevel@tonic-gate 	/*
5087c478bd9Sstevel@tonic-gate 	 * Block stores do not invalidate all pages of the d$, pagecopy
5097c478bd9Sstevel@tonic-gate 	 * et. al. need virtual translations with virtual coloring taken
5107c478bd9Sstevel@tonic-gate 	 * into consideration.  prefetch/ldd will pollute the d$ on the
5117c478bd9Sstevel@tonic-gate 	 * load side.
5127c478bd9Sstevel@tonic-gate 	 */
5137c478bd9Sstevel@tonic-gate 	pp_consistent_coloring = PPAGE_STORE_VCOLORING | PPAGE_LOADS_POLLUTE;
5147c478bd9Sstevel@tonic-gate 
5157c478bd9Sstevel@tonic-gate 	if (use_page_coloring) {
5167c478bd9Sstevel@tonic-gate 		do_pg_coloring = 1;
5177c478bd9Sstevel@tonic-gate 	}
5187c478bd9Sstevel@tonic-gate 
5197c478bd9Sstevel@tonic-gate 	isa_list =
5207c478bd9Sstevel@tonic-gate 	    "sparcv9+vis2 sparcv9+vis sparcv9 "
5217c478bd9Sstevel@tonic-gate 	    "sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
5227c478bd9Sstevel@tonic-gate 	    "sparcv8 sparcv8-fsmuld sparcv7 sparc";
5237c478bd9Sstevel@tonic-gate 
5247c478bd9Sstevel@tonic-gate 	/*
5257c478bd9Sstevel@tonic-gate 	 * On Panther-based machines, this should
5267c478bd9Sstevel@tonic-gate 	 * also include AV_SPARC_POPC too
5277c478bd9Sstevel@tonic-gate 	 */
5287c478bd9Sstevel@tonic-gate 	cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2;
5297c478bd9Sstevel@tonic-gate 
5307c478bd9Sstevel@tonic-gate 	/*
5317c478bd9Sstevel@tonic-gate 	 * On cheetah, there's no hole in the virtual address space
5327c478bd9Sstevel@tonic-gate 	 */
5337c478bd9Sstevel@tonic-gate 	hole_start = hole_end = 0;
5347c478bd9Sstevel@tonic-gate 
5357c478bd9Sstevel@tonic-gate 	/*
5367c478bd9Sstevel@tonic-gate 	 * The kpm mapping window.
5377c478bd9Sstevel@tonic-gate 	 * kpm_size:
5387c478bd9Sstevel@tonic-gate 	 *	The size of a single kpm range.
5397c478bd9Sstevel@tonic-gate 	 *	The overall size will be: kpm_size * vac_colors.
5407c478bd9Sstevel@tonic-gate 	 * kpm_vbase:
5417c478bd9Sstevel@tonic-gate 	 *	The virtual start address of the kpm range within the kernel
5427c478bd9Sstevel@tonic-gate 	 *	virtual address space. kpm_vbase has to be kpm_size aligned.
5437c478bd9Sstevel@tonic-gate 	 */
5447c478bd9Sstevel@tonic-gate 	kpm_size = (size_t)(8ull * 1024 * 1024 * 1024 * 1024); /* 8TB */
5457c478bd9Sstevel@tonic-gate 	kpm_size_shift = 43;
5467c478bd9Sstevel@tonic-gate 	kpm_vbase = (caddr_t)0x8000000000000000ull; /* 8EB */
5477c478bd9Sstevel@tonic-gate 	kpm_smallpages = 1;
5487c478bd9Sstevel@tonic-gate 
5497c478bd9Sstevel@tonic-gate 	/*
5507c478bd9Sstevel@tonic-gate 	 * The traptrace code uses either %tick or %stick for
5517c478bd9Sstevel@tonic-gate 	 * timestamping.  We have %stick so we can use it.
5527c478bd9Sstevel@tonic-gate 	 */
5537c478bd9Sstevel@tonic-gate 	traptrace_use_stick = 1;
5547c478bd9Sstevel@tonic-gate 
5557c478bd9Sstevel@tonic-gate 	/*
5567c478bd9Sstevel@tonic-gate 	 * Cheetah has a performance counter overflow interrupt
5577c478bd9Sstevel@tonic-gate 	 */
5587c478bd9Sstevel@tonic-gate 	cpc_has_overflow_intr = 1;
5597c478bd9Sstevel@tonic-gate 
5607c478bd9Sstevel@tonic-gate #if defined(CPU_IMP_DUAL_PAGESIZE)
5617c478bd9Sstevel@tonic-gate 	/*
5627c478bd9Sstevel@tonic-gate 	 * Use Cheetah+ and later dual page size support.
5637c478bd9Sstevel@tonic-gate 	 */
5647c478bd9Sstevel@tonic-gate 	if (!disable_dual_pgsz) {
5651e2e7a75Shuah 		cpu_impl_dual_pgsz = 1;
5667c478bd9Sstevel@tonic-gate 	}
5677c478bd9Sstevel@tonic-gate #endif	/* CPU_IMP_DUAL_PAGESIZE */
5687c478bd9Sstevel@tonic-gate 
5697c478bd9Sstevel@tonic-gate 	/*
5707c478bd9Sstevel@tonic-gate 	 * Declare that this architecture/cpu combination does fpRAS.
5717c478bd9Sstevel@tonic-gate 	 */
5727c478bd9Sstevel@tonic-gate 	fpras_implemented = 1;
5737c478bd9Sstevel@tonic-gate 
5747c478bd9Sstevel@tonic-gate 	/*
5757c478bd9Sstevel@tonic-gate 	 * Setup CE lookup table
5767c478bd9Sstevel@tonic-gate 	 */
5777c478bd9Sstevel@tonic-gate 	CE_INITDISPTBL_POPULATE(ce_disp_table);
5787c478bd9Sstevel@tonic-gate 	ce_disp_inited = 1;
5797c478bd9Sstevel@tonic-gate }
5807c478bd9Sstevel@tonic-gate 
5817c478bd9Sstevel@tonic-gate /*
5827c478bd9Sstevel@tonic-gate  * Called by setcpudelay
5837c478bd9Sstevel@tonic-gate  */
5847c478bd9Sstevel@tonic-gate void
cpu_init_tick_freq(void)5857c478bd9Sstevel@tonic-gate cpu_init_tick_freq(void)
5867c478bd9Sstevel@tonic-gate {
5877c478bd9Sstevel@tonic-gate 	/*
5887c478bd9Sstevel@tonic-gate 	 * For UltraSPARC III and beyond we want to use the
5897c478bd9Sstevel@tonic-gate 	 * system clock rate as the basis for low level timing,
5907c478bd9Sstevel@tonic-gate 	 * due to support of mixed speed CPUs and power managment.
5917c478bd9Sstevel@tonic-gate 	 */
5927c478bd9Sstevel@tonic-gate 	if (system_clock_freq == 0)
5937c478bd9Sstevel@tonic-gate 		cmn_err(CE_PANIC, "setcpudelay: invalid system_clock_freq");
5947c478bd9Sstevel@tonic-gate 
5957c478bd9Sstevel@tonic-gate 	sys_tick_freq = system_clock_freq;
5967c478bd9Sstevel@tonic-gate }
5977c478bd9Sstevel@tonic-gate 
5987c478bd9Sstevel@tonic-gate #ifdef CHEETAHPLUS_ERRATUM_25
5997c478bd9Sstevel@tonic-gate /*
6007c478bd9Sstevel@tonic-gate  * Tunables
6017c478bd9Sstevel@tonic-gate  */
6027c478bd9Sstevel@tonic-gate int cheetah_bpe_off = 0;
6037c478bd9Sstevel@tonic-gate int cheetah_sendmondo_recover = 1;
6047c478bd9Sstevel@tonic-gate int cheetah_sendmondo_fullscan = 0;
6057c478bd9Sstevel@tonic-gate int cheetah_sendmondo_recover_delay = 5;
6067c478bd9Sstevel@tonic-gate 
6077c478bd9Sstevel@tonic-gate #define	CHEETAH_LIVELOCK_MIN_DELAY	1
6087c478bd9Sstevel@tonic-gate 
6097c478bd9Sstevel@tonic-gate /*
6107c478bd9Sstevel@tonic-gate  * Recovery Statistics
6117c478bd9Sstevel@tonic-gate  */
6127c478bd9Sstevel@tonic-gate typedef struct cheetah_livelock_entry	{
6137c478bd9Sstevel@tonic-gate 	int cpuid;		/* fallen cpu */
6147c478bd9Sstevel@tonic-gate 	int buddy;		/* cpu that ran recovery */
6157c478bd9Sstevel@tonic-gate 	clock_t lbolt;		/* when recovery started */
6167c478bd9Sstevel@tonic-gate 	hrtime_t recovery_time;	/* time spent in recovery */
6177c478bd9Sstevel@tonic-gate } cheetah_livelock_entry_t;
6187c478bd9Sstevel@tonic-gate 
6197c478bd9Sstevel@tonic-gate #define	CHEETAH_LIVELOCK_NENTRY	32
6207c478bd9Sstevel@tonic-gate 
6217c478bd9Sstevel@tonic-gate cheetah_livelock_entry_t cheetah_livelock_hist[CHEETAH_LIVELOCK_NENTRY];
6227c478bd9Sstevel@tonic-gate int cheetah_livelock_entry_nxt;
6237c478bd9Sstevel@tonic-gate 
6247c478bd9Sstevel@tonic-gate #define	CHEETAH_LIVELOCK_ENTRY_NEXT(statp)	{			\
6257c478bd9Sstevel@tonic-gate 	statp = cheetah_livelock_hist + cheetah_livelock_entry_nxt;	\
6267c478bd9Sstevel@tonic-gate 	if (++cheetah_livelock_entry_nxt >= CHEETAH_LIVELOCK_NENTRY) {	\
6277c478bd9Sstevel@tonic-gate 		cheetah_livelock_entry_nxt = 0;				\
6287c478bd9Sstevel@tonic-gate 	}								\
6297c478bd9Sstevel@tonic-gate }
6307c478bd9Sstevel@tonic-gate 
6317c478bd9Sstevel@tonic-gate #define	CHEETAH_LIVELOCK_ENTRY_SET(statp, item, val)	statp->item = val
6327c478bd9Sstevel@tonic-gate 
6337c478bd9Sstevel@tonic-gate struct {
6347c478bd9Sstevel@tonic-gate 	hrtime_t hrt;		/* maximum recovery time */
6357c478bd9Sstevel@tonic-gate 	int recovery;		/* recovered */
6367c478bd9Sstevel@tonic-gate 	int full_claimed;	/* maximum pages claimed in full recovery */
6377c478bd9Sstevel@tonic-gate 	int proc_entry;		/* attempted to claim TSB */
6387c478bd9Sstevel@tonic-gate 	int proc_tsb_scan;	/* tsb scanned */
6397c478bd9Sstevel@tonic-gate 	int proc_tsb_partscan;	/* tsb partially scanned */
6407c478bd9Sstevel@tonic-gate 	int proc_tsb_fullscan;	/* whole tsb scanned */
6417c478bd9Sstevel@tonic-gate 	int proc_claimed;	/* maximum pages claimed in tsb scan */
6427c478bd9Sstevel@tonic-gate 	int proc_user;		/* user thread */
6437c478bd9Sstevel@tonic-gate 	int proc_kernel;	/* kernel thread */
6447c478bd9Sstevel@tonic-gate 	int proc_onflt;		/* bad stack */
6457c478bd9Sstevel@tonic-gate 	int proc_cpu;		/* null cpu */
6467c478bd9Sstevel@tonic-gate 	int proc_thread;	/* null thread */
6477c478bd9Sstevel@tonic-gate 	int proc_proc;		/* null proc */
6487c478bd9Sstevel@tonic-gate 	int proc_as;		/* null as */
6497c478bd9Sstevel@tonic-gate 	int proc_hat;		/* null hat */
6507c478bd9Sstevel@tonic-gate 	int proc_hat_inval;	/* hat contents don't make sense */
6517c478bd9Sstevel@tonic-gate 	int proc_hat_busy;	/* hat is changing TSBs */
6527c478bd9Sstevel@tonic-gate 	int proc_tsb_reloc;	/* TSB skipped because being relocated */
6537c478bd9Sstevel@tonic-gate 	int proc_cnum_bad;	/* cnum out of range */
6547c478bd9Sstevel@tonic-gate 	int proc_cnum;		/* last cnum processed */
6557c478bd9Sstevel@tonic-gate 	tte_t proc_tte;		/* last tte processed */
6567c478bd9Sstevel@tonic-gate } cheetah_livelock_stat;
6577c478bd9Sstevel@tonic-gate 
6587c478bd9Sstevel@tonic-gate #define	CHEETAH_LIVELOCK_STAT(item)	cheetah_livelock_stat.item++
6597c478bd9Sstevel@tonic-gate 
6607c478bd9Sstevel@tonic-gate #define	CHEETAH_LIVELOCK_STATSET(item, value)		\
6617c478bd9Sstevel@tonic-gate 	cheetah_livelock_stat.item = value
6627c478bd9Sstevel@tonic-gate 
6637c478bd9Sstevel@tonic-gate #define	CHEETAH_LIVELOCK_MAXSTAT(item, value)	{	\
6647c478bd9Sstevel@tonic-gate 	if (value > cheetah_livelock_stat.item)		\
6657c478bd9Sstevel@tonic-gate 		cheetah_livelock_stat.item = value;	\
6667c478bd9Sstevel@tonic-gate }
6677c478bd9Sstevel@tonic-gate 
6687c478bd9Sstevel@tonic-gate /*
6697c478bd9Sstevel@tonic-gate  * Attempt to recover a cpu by claiming every cache line as saved
6707c478bd9Sstevel@tonic-gate  * in the TSB that the non-responsive cpu is using. Since we can't
6717c478bd9Sstevel@tonic-gate  * grab any adaptive lock, this is at best an attempt to do so. Because
6727c478bd9Sstevel@tonic-gate  * we don't grab any locks, we must operate under the protection of
6737c478bd9Sstevel@tonic-gate  * on_fault().
6747c478bd9Sstevel@tonic-gate  *
6757c478bd9Sstevel@tonic-gate  * Return 1 if cpuid could be recovered, 0 if failed.
6767c478bd9Sstevel@tonic-gate  */
6777c478bd9Sstevel@tonic-gate int
mondo_recover_proc(uint16_t cpuid,int bn)6787c478bd9Sstevel@tonic-gate mondo_recover_proc(uint16_t cpuid, int bn)
6797c478bd9Sstevel@tonic-gate {
6807c478bd9Sstevel@tonic-gate 	label_t ljb;
6817c478bd9Sstevel@tonic-gate 	cpu_t *cp;
6827c478bd9Sstevel@tonic-gate 	kthread_t *t;
6837c478bd9Sstevel@tonic-gate 	proc_t *p;
6847c478bd9Sstevel@tonic-gate 	struct as *as;
6857c478bd9Sstevel@tonic-gate 	struct hat *hat;
6861e2e7a75Shuah 	uint_t  cnum;
6877c478bd9Sstevel@tonic-gate 	struct tsb_info *tsbinfop;
6887c478bd9Sstevel@tonic-gate 	struct tsbe *tsbep;
6897c478bd9Sstevel@tonic-gate 	caddr_t tsbp;
6907c478bd9Sstevel@tonic-gate 	caddr_t end_tsbp;
6917c478bd9Sstevel@tonic-gate 	uint64_t paddr;
6927c478bd9Sstevel@tonic-gate 	uint64_t idsr;
6937c478bd9Sstevel@tonic-gate 	u_longlong_t pahi, palo;
6947c478bd9Sstevel@tonic-gate 	int pages_claimed = 0;
6957c478bd9Sstevel@tonic-gate 	tte_t tsbe_tte;
6967c478bd9Sstevel@tonic-gate 	int tried_kernel_tsb = 0;
6971e2e7a75Shuah 	mmu_ctx_t *mmu_ctxp;
6987c478bd9Sstevel@tonic-gate 
6997c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_STAT(proc_entry);
7007c478bd9Sstevel@tonic-gate 
7017c478bd9Sstevel@tonic-gate 	if (on_fault(&ljb)) {
7027c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_onflt);
7037c478bd9Sstevel@tonic-gate 		goto badstruct;
7047c478bd9Sstevel@tonic-gate 	}
7057c478bd9Sstevel@tonic-gate 
7067c478bd9Sstevel@tonic-gate 	if ((cp = cpu[cpuid]) == NULL) {
7077c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_cpu);
7087c478bd9Sstevel@tonic-gate 		goto badstruct;
7097c478bd9Sstevel@tonic-gate 	}
7107c478bd9Sstevel@tonic-gate 
7117c478bd9Sstevel@tonic-gate 	if ((t = cp->cpu_thread) == NULL) {
7127c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_thread);
7137c478bd9Sstevel@tonic-gate 		goto badstruct;
7147c478bd9Sstevel@tonic-gate 	}
7157c478bd9Sstevel@tonic-gate 
7167c478bd9Sstevel@tonic-gate 	if ((p = ttoproc(t)) == NULL) {
7177c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_proc);
7187c478bd9Sstevel@tonic-gate 		goto badstruct;
7197c478bd9Sstevel@tonic-gate 	}
7207c478bd9Sstevel@tonic-gate 
7217c478bd9Sstevel@tonic-gate 	if ((as = p->p_as) == NULL) {
7227c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_as);
7237c478bd9Sstevel@tonic-gate 		goto badstruct;
7247c478bd9Sstevel@tonic-gate 	}
7257c478bd9Sstevel@tonic-gate 
7267c478bd9Sstevel@tonic-gate 	if ((hat = as->a_hat) == NULL) {
7277c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_hat);
7287c478bd9Sstevel@tonic-gate 		goto badstruct;
7297c478bd9Sstevel@tonic-gate 	}
7307c478bd9Sstevel@tonic-gate 
7317c478bd9Sstevel@tonic-gate 	if (hat != ksfmmup) {
7327c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_user);
7337c478bd9Sstevel@tonic-gate 		if (hat->sfmmu_flags & (HAT_BUSY | HAT_SWAPPED | HAT_SWAPIN)) {
7347c478bd9Sstevel@tonic-gate 			CHEETAH_LIVELOCK_STAT(proc_hat_busy);
7357c478bd9Sstevel@tonic-gate 			goto badstruct;
7367c478bd9Sstevel@tonic-gate 		}
7377c478bd9Sstevel@tonic-gate 		tsbinfop = hat->sfmmu_tsb;
7387c478bd9Sstevel@tonic-gate 		if (tsbinfop == NULL) {
7397c478bd9Sstevel@tonic-gate 			CHEETAH_LIVELOCK_STAT(proc_hat_inval);
7407c478bd9Sstevel@tonic-gate 			goto badstruct;
7417c478bd9Sstevel@tonic-gate 		}
7427c478bd9Sstevel@tonic-gate 		tsbp = tsbinfop->tsb_va;
7437c478bd9Sstevel@tonic-gate 		end_tsbp = tsbp + TSB_BYTES(tsbinfop->tsb_szc);
7447c478bd9Sstevel@tonic-gate 	} else {
7457c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_kernel);
7467c478bd9Sstevel@tonic-gate 		tsbinfop = NULL;
7477c478bd9Sstevel@tonic-gate 		tsbp = ktsb_base;
7487c478bd9Sstevel@tonic-gate 		end_tsbp = tsbp + TSB_BYTES(ktsb_sz);
7497c478bd9Sstevel@tonic-gate 	}
7507c478bd9Sstevel@tonic-gate 
7517c478bd9Sstevel@tonic-gate 	/* Verify as */
7527c478bd9Sstevel@tonic-gate 	if (hat->sfmmu_as != as) {
7537c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_hat_inval);
7547c478bd9Sstevel@tonic-gate 		goto badstruct;
7557c478bd9Sstevel@tonic-gate 	}
7567c478bd9Sstevel@tonic-gate 
7571e2e7a75Shuah 	mmu_ctxp = CPU_MMU_CTXP(cp);
7581e2e7a75Shuah 	ASSERT(mmu_ctxp);
7591e2e7a75Shuah 	cnum = hat->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
7607c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_STATSET(proc_cnum, cnum);
7617c478bd9Sstevel@tonic-gate 
7621e2e7a75Shuah 	if ((cnum < 0) || (cnum == INVALID_CONTEXT) ||
7631e2e7a75Shuah 	    (cnum >= mmu_ctxp->mmu_nctxs)) {
7647c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_cnum_bad);
7657c478bd9Sstevel@tonic-gate 		goto badstruct;
7667c478bd9Sstevel@tonic-gate 	}
7677c478bd9Sstevel@tonic-gate 
7687c478bd9Sstevel@tonic-gate 	do {
7697c478bd9Sstevel@tonic-gate 		CHEETAH_LIVELOCK_STAT(proc_tsb_scan);
7707c478bd9Sstevel@tonic-gate 
7717c478bd9Sstevel@tonic-gate 		/*
7727c478bd9Sstevel@tonic-gate 		 * Skip TSBs being relocated.  This is important because
7737c478bd9Sstevel@tonic-gate 		 * we want to avoid the following deadlock scenario:
7747c478bd9Sstevel@tonic-gate 		 *
7757c478bd9Sstevel@tonic-gate 		 * 1) when we came in we set ourselves to "in recover" state.
7767c478bd9Sstevel@tonic-gate 		 * 2) when we try to touch TSB being relocated the mapping
7777c478bd9Sstevel@tonic-gate 		 *    will be in the suspended state so we'll spin waiting
7787c478bd9Sstevel@tonic-gate 		 *    for it to be unlocked.
7797c478bd9Sstevel@tonic-gate 		 * 3) when the CPU that holds the TSB mapping locked tries to
7807c478bd9Sstevel@tonic-gate 		 *    unlock it it will send a xtrap which will fail to xcall
7817c478bd9Sstevel@tonic-gate 		 *    us or the CPU we're trying to recover, and will in turn
7827c478bd9Sstevel@tonic-gate 		 *    enter the mondo code.
7837c478bd9Sstevel@tonic-gate 		 * 4) since we are still spinning on the locked mapping
7847c478bd9Sstevel@tonic-gate 		 *    no further progress will be made and the system will
7857c478bd9Sstevel@tonic-gate 		 *    inevitably hard hang.
7867c478bd9Sstevel@tonic-gate 		 *
7877c478bd9Sstevel@tonic-gate 		 * A TSB not being relocated can't begin being relocated
7887c478bd9Sstevel@tonic-gate 		 * while we're accessing it because we check
7897c478bd9Sstevel@tonic-gate 		 * sendmondo_in_recover before relocating TSBs.
7907c478bd9Sstevel@tonic-gate 		 */
7917c478bd9Sstevel@tonic-gate 		if (hat != ksfmmup &&
7927c478bd9Sstevel@tonic-gate 		    (tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
7937c478bd9Sstevel@tonic-gate 			CHEETAH_LIVELOCK_STAT(proc_tsb_reloc);
7947c478bd9Sstevel@tonic-gate 			goto next_tsbinfo;
7957c478bd9Sstevel@tonic-gate 		}
7967c478bd9Sstevel@tonic-gate 
7977c478bd9Sstevel@tonic-gate 		for (tsbep = (struct tsbe *)tsbp;
7987c478bd9Sstevel@tonic-gate 		    tsbep < (struct tsbe *)end_tsbp; tsbep++) {
7997c478bd9Sstevel@tonic-gate 			tsbe_tte = tsbep->tte_data;
8007c478bd9Sstevel@tonic-gate 
8017c478bd9Sstevel@tonic-gate 			if (tsbe_tte.tte_val == 0) {
8027c478bd9Sstevel@tonic-gate 				/*
8037c478bd9Sstevel@tonic-gate 				 * Invalid tte
8047c478bd9Sstevel@tonic-gate 				 */
8057c478bd9Sstevel@tonic-gate 				continue;
8067c478bd9Sstevel@tonic-gate 			}
8077c478bd9Sstevel@tonic-gate 			if (tsbe_tte.tte_se) {
8087c478bd9Sstevel@tonic-gate 				/*
8097c478bd9Sstevel@tonic-gate 				 * Don't want device registers
8107c478bd9Sstevel@tonic-gate 				 */
8117c478bd9Sstevel@tonic-gate 				continue;
8127c478bd9Sstevel@tonic-gate 			}
8137c478bd9Sstevel@tonic-gate 			if (tsbe_tte.tte_cp == 0) {
8147c478bd9Sstevel@tonic-gate 				/*
8157c478bd9Sstevel@tonic-gate 				 * Must be cached in E$
8167c478bd9Sstevel@tonic-gate 				 */
8177c478bd9Sstevel@tonic-gate 				continue;
8187c478bd9Sstevel@tonic-gate 			}
819953394f3Sjesusm 			if (tsbep->tte_tag.tag_invalid != 0) {
820953394f3Sjesusm 				/*
821953394f3Sjesusm 				 * Invalid tag, ingnore this entry.
822953394f3Sjesusm 				 */
823953394f3Sjesusm 				continue;
824953394f3Sjesusm 			}
8257c478bd9Sstevel@tonic-gate 			CHEETAH_LIVELOCK_STATSET(proc_tte, tsbe_tte);
8267c478bd9Sstevel@tonic-gate 			idsr = getidsr();
8277c478bd9Sstevel@tonic-gate 			if ((idsr & (IDSR_NACK_BIT(bn) |
8287c478bd9Sstevel@tonic-gate 			    IDSR_BUSY_BIT(bn))) == 0) {
8297c478bd9Sstevel@tonic-gate 				CHEETAH_LIVELOCK_STAT(proc_tsb_partscan);
8307c478bd9Sstevel@tonic-gate 				goto done;
8317c478bd9Sstevel@tonic-gate 			}
8327c478bd9Sstevel@tonic-gate 			pahi = tsbe_tte.tte_pahi;
8337c478bd9Sstevel@tonic-gate 			palo = tsbe_tte.tte_palo;
8347c478bd9Sstevel@tonic-gate 			paddr = (uint64_t)((pahi << 32) |
8357c478bd9Sstevel@tonic-gate 			    (palo << MMU_PAGESHIFT));
8367c478bd9Sstevel@tonic-gate 			claimlines(paddr, TTEBYTES(TTE_CSZ(&tsbe_tte)),
8377c478bd9Sstevel@tonic-gate 			    CH_ECACHE_SUBBLK_SIZE);
8387c478bd9Sstevel@tonic-gate 			if ((idsr & IDSR_BUSY_BIT(bn)) == 0) {
8397c478bd9Sstevel@tonic-gate 				shipit(cpuid, bn);
8407c478bd9Sstevel@tonic-gate 			}
8417c478bd9Sstevel@tonic-gate 			pages_claimed++;
8427c478bd9Sstevel@tonic-gate 		}
8437c478bd9Sstevel@tonic-gate next_tsbinfo:
8447c478bd9Sstevel@tonic-gate 		if (tsbinfop != NULL)
8457c478bd9Sstevel@tonic-gate 			tsbinfop = tsbinfop->tsb_next;
8467c478bd9Sstevel@tonic-gate 		if (tsbinfop != NULL) {
8477c478bd9Sstevel@tonic-gate 			tsbp = tsbinfop->tsb_va;
8487c478bd9Sstevel@tonic-gate 			end_tsbp = tsbp + TSB_BYTES(tsbinfop->tsb_szc);
8497c478bd9Sstevel@tonic-gate 		} else if (tsbp == ktsb_base) {
8507c478bd9Sstevel@tonic-gate 			tried_kernel_tsb = 1;
8517c478bd9Sstevel@tonic-gate 		} else if (!tried_kernel_tsb) {
8527c478bd9Sstevel@tonic-gate 			tsbp = ktsb_base;
8537c478bd9Sstevel@tonic-gate 			end_tsbp = tsbp + TSB_BYTES(ktsb_sz);
8547c478bd9Sstevel@tonic-gate 			hat = ksfmmup;
8557c478bd9Sstevel@tonic-gate 			tsbinfop = NULL;
8567c478bd9Sstevel@tonic-gate 		}
8577c478bd9Sstevel@tonic-gate 	} while (tsbinfop != NULL ||
858953394f3Sjesusm 	    ((tsbp == ktsb_base) && !tried_kernel_tsb));
8597c478bd9Sstevel@tonic-gate 
8607c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_STAT(proc_tsb_fullscan);
8617c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_MAXSTAT(proc_claimed, pages_claimed);
8627c478bd9Sstevel@tonic-gate 	no_fault();
8637c478bd9Sstevel@tonic-gate 	idsr = getidsr();
8647c478bd9Sstevel@tonic-gate 	if ((idsr & (IDSR_NACK_BIT(bn) |
8657c478bd9Sstevel@tonic-gate 	    IDSR_BUSY_BIT(bn))) == 0) {
8667c478bd9Sstevel@tonic-gate 		return (1);
8677c478bd9Sstevel@tonic-gate 	} else {
8687c478bd9Sstevel@tonic-gate 		return (0);
8697c478bd9Sstevel@tonic-gate 	}
8707c478bd9Sstevel@tonic-gate 
8717c478bd9Sstevel@tonic-gate done:
8727c478bd9Sstevel@tonic-gate 	no_fault();
8737c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_MAXSTAT(proc_claimed, pages_claimed);
8747c478bd9Sstevel@tonic-gate 	return (1);
8757c478bd9Sstevel@tonic-gate 
8767c478bd9Sstevel@tonic-gate badstruct:
8777c478bd9Sstevel@tonic-gate 	no_fault();
8787c478bd9Sstevel@tonic-gate 	return (0);
8797c478bd9Sstevel@tonic-gate }
8807c478bd9Sstevel@tonic-gate 
8817c478bd9Sstevel@tonic-gate /*
8827c478bd9Sstevel@tonic-gate  * Attempt to claim ownership, temporarily, of every cache line that a
8837c478bd9Sstevel@tonic-gate  * non-responsive cpu might be using.  This might kick that cpu out of
8847c478bd9Sstevel@tonic-gate  * this state.
8857c478bd9Sstevel@tonic-gate  *
8867c478bd9Sstevel@tonic-gate  * The return value indicates to the caller if we have exhausted all recovery
8877c478bd9Sstevel@tonic-gate  * techniques. If 1 is returned, it is useless to call this function again
8887c478bd9Sstevel@tonic-gate  * even for a different target CPU.
8897c478bd9Sstevel@tonic-gate  */
8907c478bd9Sstevel@tonic-gate int
mondo_recover(uint16_t cpuid,int bn)8917c478bd9Sstevel@tonic-gate mondo_recover(uint16_t cpuid, int bn)
8927c478bd9Sstevel@tonic-gate {
8937c478bd9Sstevel@tonic-gate 	struct memseg *seg;
8947c478bd9Sstevel@tonic-gate 	uint64_t begin_pa, end_pa, cur_pa;
8957c478bd9Sstevel@tonic-gate 	hrtime_t begin_hrt, end_hrt;
8967c478bd9Sstevel@tonic-gate 	int retval = 0;
8977c478bd9Sstevel@tonic-gate 	int pages_claimed = 0;
8987c478bd9Sstevel@tonic-gate 	cheetah_livelock_entry_t *histp;
8997c478bd9Sstevel@tonic-gate 	uint64_t idsr;
9007c478bd9Sstevel@tonic-gate 
90175d94465SJosef 'Jeff' Sipek 	if (atomic_cas_32(&sendmondo_in_recover, 0, 1) != 0) {
9027c478bd9Sstevel@tonic-gate 		/*
9037c478bd9Sstevel@tonic-gate 		 * Wait while recovery takes place
9047c478bd9Sstevel@tonic-gate 		 */
9057c478bd9Sstevel@tonic-gate 		while (sendmondo_in_recover) {
9067c478bd9Sstevel@tonic-gate 			drv_usecwait(1);
9077c478bd9Sstevel@tonic-gate 		}
9087c478bd9Sstevel@tonic-gate 		/*
9097c478bd9Sstevel@tonic-gate 		 * Assume we didn't claim the whole memory. If
9107c478bd9Sstevel@tonic-gate 		 * the target of this caller is not recovered,
9117c478bd9Sstevel@tonic-gate 		 * it will come back.
9127c478bd9Sstevel@tonic-gate 		 */
9137c478bd9Sstevel@tonic-gate 		return (retval);
9147c478bd9Sstevel@tonic-gate 	}
9157c478bd9Sstevel@tonic-gate 
916d3d50737SRafael Vanoni 	CHEETAH_LIVELOCK_ENTRY_NEXT(histp);
917d3d50737SRafael Vanoni 	CHEETAH_LIVELOCK_ENTRY_SET(histp, lbolt, LBOLT_WAITFREE);
9187c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_ENTRY_SET(histp, cpuid, cpuid);
9197c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_ENTRY_SET(histp, buddy, CPU->cpu_id);
9207c478bd9Sstevel@tonic-gate 
9217c478bd9Sstevel@tonic-gate 	begin_hrt = gethrtime_waitfree();
9227c478bd9Sstevel@tonic-gate 	/*
9237c478bd9Sstevel@tonic-gate 	 * First try to claim the lines in the TSB the target
9247c478bd9Sstevel@tonic-gate 	 * may have been using.
9257c478bd9Sstevel@tonic-gate 	 */
9267c478bd9Sstevel@tonic-gate 	if (mondo_recover_proc(cpuid, bn) == 1) {
9277c478bd9Sstevel@tonic-gate 		/*
9287c478bd9Sstevel@tonic-gate 		 * Didn't claim the whole memory
9297c478bd9Sstevel@tonic-gate 		 */
9307c478bd9Sstevel@tonic-gate 		goto done;
9317c478bd9Sstevel@tonic-gate 	}
9327c478bd9Sstevel@tonic-gate 
9337c478bd9Sstevel@tonic-gate 	/*
9347c478bd9Sstevel@tonic-gate 	 * We tried using the TSB. The target is still
9357c478bd9Sstevel@tonic-gate 	 * not recovered. Check if complete memory scan is
9367c478bd9Sstevel@tonic-gate 	 * enabled.
9377c478bd9Sstevel@tonic-gate 	 */
9387c478bd9Sstevel@tonic-gate 	if (cheetah_sendmondo_fullscan == 0) {
9397c478bd9Sstevel@tonic-gate 		/*
9407c478bd9Sstevel@tonic-gate 		 * Full memory scan is disabled.
9417c478bd9Sstevel@tonic-gate 		 */
9427c478bd9Sstevel@tonic-gate 		retval = 1;
9437c478bd9Sstevel@tonic-gate 		goto done;
9447c478bd9Sstevel@tonic-gate 	}
9457c478bd9Sstevel@tonic-gate 
9467c478bd9Sstevel@tonic-gate 	/*
9477c478bd9Sstevel@tonic-gate 	 * Try claiming the whole memory.
9487c478bd9Sstevel@tonic-gate 	 */
9497c478bd9Sstevel@tonic-gate 	for (seg = memsegs; seg; seg = seg->next) {
9507c478bd9Sstevel@tonic-gate 		begin_pa = (uint64_t)(seg->pages_base) << MMU_PAGESHIFT;
9517c478bd9Sstevel@tonic-gate 		end_pa = (uint64_t)(seg->pages_end) << MMU_PAGESHIFT;
9527c478bd9Sstevel@tonic-gate 		for (cur_pa = begin_pa; cur_pa < end_pa;
9537c478bd9Sstevel@tonic-gate 		    cur_pa += MMU_PAGESIZE) {
9547c478bd9Sstevel@tonic-gate 			idsr = getidsr();
9557c478bd9Sstevel@tonic-gate 			if ((idsr & (IDSR_NACK_BIT(bn) |
9567c478bd9Sstevel@tonic-gate 			    IDSR_BUSY_BIT(bn))) == 0) {
9577c478bd9Sstevel@tonic-gate 				/*
9587c478bd9Sstevel@tonic-gate 				 * Didn't claim all memory
9597c478bd9Sstevel@tonic-gate 				 */
9607c478bd9Sstevel@tonic-gate 				goto done;
9617c478bd9Sstevel@tonic-gate 			}
9627c478bd9Sstevel@tonic-gate 			claimlines(cur_pa, MMU_PAGESIZE,
9637c478bd9Sstevel@tonic-gate 			    CH_ECACHE_SUBBLK_SIZE);
9647c478bd9Sstevel@tonic-gate 			if ((idsr & IDSR_BUSY_BIT(bn)) == 0) {
9657c478bd9Sstevel@tonic-gate 				shipit(cpuid, bn);
9667c478bd9Sstevel@tonic-gate 			}
9677c478bd9Sstevel@tonic-gate 			pages_claimed++;
9687c478bd9Sstevel@tonic-gate 		}
9697c478bd9Sstevel@tonic-gate 	}
9707c478bd9Sstevel@tonic-gate 
9717c478bd9Sstevel@tonic-gate 	/*
9727c478bd9Sstevel@tonic-gate 	 * We did all we could.
9737c478bd9Sstevel@tonic-gate 	 */
9747c478bd9Sstevel@tonic-gate 	retval = 1;
9757c478bd9Sstevel@tonic-gate 
9767c478bd9Sstevel@tonic-gate done:
9777c478bd9Sstevel@tonic-gate 	/*
9787c478bd9Sstevel@tonic-gate 	 * Update statistics
9797c478bd9Sstevel@tonic-gate 	 */
9807c478bd9Sstevel@tonic-gate 	end_hrt = gethrtime_waitfree();
9817c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_STAT(recovery);
9827c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_MAXSTAT(hrt, (end_hrt - begin_hrt));
9837c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_MAXSTAT(full_claimed, pages_claimed);
9847c478bd9Sstevel@tonic-gate 	CHEETAH_LIVELOCK_ENTRY_SET(histp, recovery_time, \
9857c478bd9Sstevel@tonic-gate 	    (end_hrt -  begin_hrt));
9867c478bd9Sstevel@tonic-gate 
98775d94465SJosef 'Jeff' Sipek 	while (atomic_cas_32(&sendmondo_in_recover, 1, 0) != 1)
988953394f3Sjesusm 		;
9897c478bd9Sstevel@tonic-gate 
9907c478bd9Sstevel@tonic-gate 	return (retval);
9917c478bd9Sstevel@tonic-gate }
9927c478bd9Sstevel@tonic-gate 
9937c478bd9Sstevel@tonic-gate /*
9947c478bd9Sstevel@tonic-gate  * This is called by the cyclic framework when this CPU becomes online
9957c478bd9Sstevel@tonic-gate  */
9967c478bd9Sstevel@tonic-gate /*ARGSUSED*/
9977c478bd9Sstevel@tonic-gate static void
cheetah_nudge_onln(void * arg,cpu_t * cpu,cyc_handler_t * hdlr,cyc_time_t * when)9987c478bd9Sstevel@tonic-gate cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
9997c478bd9Sstevel@tonic-gate {
10007c478bd9Sstevel@tonic-gate 
10017c478bd9Sstevel@tonic-gate 	hdlr->cyh_func = (cyc_func_t)cheetah_nudge_buddy;
10027c478bd9Sstevel@tonic-gate 	hdlr->cyh_level = CY_LOW_LEVEL;
10037c478bd9Sstevel@tonic-gate 	hdlr->cyh_arg = NULL;
10047c478bd9Sstevel@tonic-gate 
10057c478bd9Sstevel@tonic-gate 	/*
10067c478bd9Sstevel@tonic-gate 	 * Stagger the start time
10077c478bd9Sstevel@tonic-gate 	 */
10087c478bd9Sstevel@tonic-gate 	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
10097c478bd9Sstevel@tonic-gate 	if (cheetah_sendmondo_recover_delay < CHEETAH_LIVELOCK_MIN_DELAY) {
10107c478bd9Sstevel@tonic-gate 		cheetah_sendmondo_recover_delay = CHEETAH_LIVELOCK_MIN_DELAY;
10117c478bd9Sstevel@tonic-gate 	}
10127c478bd9Sstevel@tonic-gate 	when->cyt_interval = cheetah_sendmondo_recover_delay * NANOSEC;
10137c478bd9Sstevel@tonic-gate }
10147c478bd9Sstevel@tonic-gate 
10157c478bd9Sstevel@tonic-gate /*
10167c478bd9Sstevel@tonic-gate  * Create a low level cyclic to send a xtrap to the next cpu online.
10177c478bd9Sstevel@tonic-gate  * However, there's no need to have this running on a uniprocessor system.
10187c478bd9Sstevel@tonic-gate  */
10197c478bd9Sstevel@tonic-gate static void
cheetah_nudge_init(void)10207c478bd9Sstevel@tonic-gate cheetah_nudge_init(void)
10217c478bd9Sstevel@tonic-gate {
10227c478bd9Sstevel@tonic-gate 	cyc_omni_handler_t hdlr;
10237c478bd9Sstevel@tonic-gate 
10247c478bd9Sstevel@tonic-gate 	if (max_ncpus == 1) {
10257c478bd9Sstevel@tonic-gate 		return;
10267c478bd9Sstevel@tonic-gate 	}
10277c478bd9Sstevel@tonic-gate 
10287c478bd9Sstevel@tonic-gate 	hdlr.cyo_online = cheetah_nudge_onln;
10297c478bd9Sstevel@tonic-gate 	hdlr.cyo_offline = NULL;
10307c478bd9Sstevel@tonic-gate 	hdlr.cyo_arg = NULL;
10317c478bd9Sstevel@tonic-gate 
10327c478bd9Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
10337c478bd9Sstevel@tonic-gate 	(void) cyclic_add_omni(&hdlr);
10347c478bd9Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
10357c478bd9Sstevel@tonic-gate }
10367c478bd9Sstevel@tonic-gate 
10377c478bd9Sstevel@tonic-gate /*
10387c478bd9Sstevel@tonic-gate  * Cyclic handler to wake up buddy
10397c478bd9Sstevel@tonic-gate  */
10407c478bd9Sstevel@tonic-gate void
cheetah_nudge_buddy(void)10417c478bd9Sstevel@tonic-gate cheetah_nudge_buddy(void)
10427c478bd9Sstevel@tonic-gate {
10437c478bd9Sstevel@tonic-gate 	/*
10447c478bd9Sstevel@tonic-gate 	 * Disable kernel preemption to protect the cpu list
10457c478bd9Sstevel@tonic-gate 	 */
10467c478bd9Sstevel@tonic-gate 	kpreempt_disable();
10477c478bd9Sstevel@tonic-gate 	if ((CPU->cpu_next_onln != CPU) && (sendmondo_in_recover == 0)) {
10487c478bd9Sstevel@tonic-gate 		xt_one(CPU->cpu_next_onln->cpu_id, (xcfunc_t *)xt_sync_tl1,
10497c478bd9Sstevel@tonic-gate 		    0, 0);
10507c478bd9Sstevel@tonic-gate 	}
10517c478bd9Sstevel@tonic-gate 	kpreempt_enable();
10527c478bd9Sstevel@tonic-gate }
10537c478bd9Sstevel@tonic-gate 
10547c478bd9Sstevel@tonic-gate #endif	/* CHEETAHPLUS_ERRATUM_25 */
10557c478bd9Sstevel@tonic-gate 
10567c478bd9Sstevel@tonic-gate #ifdef SEND_MONDO_STATS
10577c478bd9Sstevel@tonic-gate uint32_t x_one_stimes[64];
10587c478bd9Sstevel@tonic-gate uint32_t x_one_ltimes[16];
10597c478bd9Sstevel@tonic-gate uint32_t x_set_stimes[64];
10607c478bd9Sstevel@tonic-gate uint32_t x_set_ltimes[16];
10617c478bd9Sstevel@tonic-gate uint32_t x_set_cpus[NCPU];
10627c478bd9Sstevel@tonic-gate uint32_t x_nack_stimes[64];
10637c478bd9Sstevel@tonic-gate #endif
10647c478bd9Sstevel@tonic-gate 
10657c478bd9Sstevel@tonic-gate /*
10667c478bd9Sstevel@tonic-gate  * Note: A version of this function is used by the debugger via the KDI,
10677c478bd9Sstevel@tonic-gate  * and must be kept in sync with this version.  Any changes made to this
10687c478bd9Sstevel@tonic-gate  * function to support new chips or to accomodate errata must also be included
10697c478bd9Sstevel@tonic-gate  * in the KDI-specific version.  See us3_kdi.c.
10707c478bd9Sstevel@tonic-gate  */
10717c478bd9Sstevel@tonic-gate void
send_one_mondo(int cpuid)10727c478bd9Sstevel@tonic-gate send_one_mondo(int cpuid)
10737c478bd9Sstevel@tonic-gate {
10747c478bd9Sstevel@tonic-gate 	int busy, nack;
10757c478bd9Sstevel@tonic-gate 	uint64_t idsr, starttick, endtick, tick, lasttick;
10767c478bd9Sstevel@tonic-gate 	uint64_t busymask;
10777c478bd9Sstevel@tonic-gate #ifdef	CHEETAHPLUS_ERRATUM_25
10787c478bd9Sstevel@tonic-gate 	int recovered = 0;
10797c478bd9Sstevel@tonic-gate #endif
10807c478bd9Sstevel@tonic-gate 
10817c478bd9Sstevel@tonic-gate 	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
10827c478bd9Sstevel@tonic-gate 	starttick = lasttick = gettick();
10837c478bd9Sstevel@tonic-gate 	shipit(cpuid, 0);
10847c478bd9Sstevel@tonic-gate 	endtick = starttick + xc_tick_limit;
10857c478bd9Sstevel@tonic-gate 	busy = nack = 0;
10867c478bd9Sstevel@tonic-gate #if defined(JALAPENO) || defined(SERRANO)
10877c478bd9Sstevel@tonic-gate 	/*
10887c478bd9Sstevel@tonic-gate 	 * Lower 2 bits of the agent ID determine which BUSY/NACK pair
10897c478bd9Sstevel@tonic-gate 	 * will be used for dispatching interrupt. For now, assume
10907c478bd9Sstevel@tonic-gate 	 * there are no more than IDSR_BN_SETS CPUs, hence no aliasing
10917c478bd9Sstevel@tonic-gate 	 * issues with respect to BUSY/NACK pair usage.
10927c478bd9Sstevel@tonic-gate 	 */
10937c478bd9Sstevel@tonic-gate 	busymask  = IDSR_BUSY_BIT(cpuid);
10947c478bd9Sstevel@tonic-gate #else /* JALAPENO || SERRANO */
10957c478bd9Sstevel@tonic-gate 	busymask = IDSR_BUSY;
10967c478bd9Sstevel@tonic-gate #endif /* JALAPENO || SERRANO */
10977c478bd9Sstevel@tonic-gate 	for (;;) {
10987c478bd9Sstevel@tonic-gate 		idsr = getidsr();
10997c478bd9Sstevel@tonic-gate 		if (idsr == 0)
11007c478bd9Sstevel@tonic-gate 			break;
11017c478bd9Sstevel@tonic-gate 
11027c478bd9Sstevel@tonic-gate 		tick = gettick();
11037c478bd9Sstevel@tonic-gate 		/*
11047c478bd9Sstevel@tonic-gate 		 * If there is a big jump between the current tick
11057c478bd9Sstevel@tonic-gate 		 * count and lasttick, we have probably hit a break
11067c478bd9Sstevel@tonic-gate 		 * point.  Adjust endtick accordingly to avoid panic.
11077c478bd9Sstevel@tonic-gate 		 */
11087c478bd9Sstevel@tonic-gate 		if (tick > (lasttick + xc_tick_jump_limit))
11097c478bd9Sstevel@tonic-gate 			endtick += (tick - lasttick);
11107c478bd9Sstevel@tonic-gate 		lasttick = tick;
11117c478bd9Sstevel@tonic-gate 		if (tick > endtick) {
11127c478bd9Sstevel@tonic-gate 			if (panic_quiesce)
11137c478bd9Sstevel@tonic-gate 				return;
11147c478bd9Sstevel@tonic-gate #ifdef	CHEETAHPLUS_ERRATUM_25
11157c478bd9Sstevel@tonic-gate 			if (cheetah_sendmondo_recover && recovered == 0) {
11167c478bd9Sstevel@tonic-gate 				if (mondo_recover(cpuid, 0)) {
11177c478bd9Sstevel@tonic-gate 					/*
11187c478bd9Sstevel@tonic-gate 					 * We claimed the whole memory or
11197c478bd9Sstevel@tonic-gate 					 * full scan is disabled.
11207c478bd9Sstevel@tonic-gate 					 */
11217c478bd9Sstevel@tonic-gate 					recovered++;
11227c478bd9Sstevel@tonic-gate 				}
11237c478bd9Sstevel@tonic-gate 				tick = gettick();
11247c478bd9Sstevel@tonic-gate 				endtick = tick + xc_tick_limit;
11257c478bd9Sstevel@tonic-gate 				lasttick = tick;
11267c478bd9Sstevel@tonic-gate 				/*
11277c478bd9Sstevel@tonic-gate 				 * Recheck idsr
11287c478bd9Sstevel@tonic-gate 				 */
11297c478bd9Sstevel@tonic-gate 				continue;
11307c478bd9Sstevel@tonic-gate 			} else
11317c478bd9Sstevel@tonic-gate #endif	/* CHEETAHPLUS_ERRATUM_25 */
11327c478bd9Sstevel@tonic-gate 			{
11337c478bd9Sstevel@tonic-gate 				cmn_err(CE_PANIC, "send mondo timeout "
11347c478bd9Sstevel@tonic-gate 				    "(target 0x%x) [%d NACK %d BUSY]",
11357c478bd9Sstevel@tonic-gate 				    cpuid, nack, busy);
11367c478bd9Sstevel@tonic-gate 			}
11377c478bd9Sstevel@tonic-gate 		}
11387c478bd9Sstevel@tonic-gate 
11397c478bd9Sstevel@tonic-gate 		if (idsr & busymask) {
11407c478bd9Sstevel@tonic-gate 			busy++;
11417c478bd9Sstevel@tonic-gate 			continue;
11427c478bd9Sstevel@tonic-gate 		}
11437c478bd9Sstevel@tonic-gate 		drv_usecwait(1);
11447c478bd9Sstevel@tonic-gate 		shipit(cpuid, 0);
11457c478bd9Sstevel@tonic-gate 		nack++;
11467c478bd9Sstevel@tonic-gate 		busy = 0;
11477c478bd9Sstevel@tonic-gate 	}
11487c478bd9Sstevel@tonic-gate #ifdef SEND_MONDO_STATS
11497c478bd9Sstevel@tonic-gate 	{
11507c478bd9Sstevel@tonic-gate 		int n = gettick() - starttick;
11517c478bd9Sstevel@tonic-gate 		if (n < 8192)
11527c478bd9Sstevel@tonic-gate 			x_one_stimes[n >> 7]++;
11537c478bd9Sstevel@tonic-gate 		else
11547c478bd9Sstevel@tonic-gate 			x_one_ltimes[(n >> 13) & 0xf]++;
11557c478bd9Sstevel@tonic-gate 	}
11567c478bd9Sstevel@tonic-gate #endif
11577c478bd9Sstevel@tonic-gate }
11587c478bd9Sstevel@tonic-gate 
11597c478bd9Sstevel@tonic-gate void
syncfpu(void)11607c478bd9Sstevel@tonic-gate syncfpu(void)
11617c478bd9Sstevel@tonic-gate {
11627c478bd9Sstevel@tonic-gate }
11637c478bd9Sstevel@tonic-gate 
11647c478bd9Sstevel@tonic-gate /*
11657c478bd9Sstevel@tonic-gate  * Return processor specific async error structure
11667c478bd9Sstevel@tonic-gate  * size used.
11677c478bd9Sstevel@tonic-gate  */
11687c478bd9Sstevel@tonic-gate int
cpu_aflt_size(void)11697c478bd9Sstevel@tonic-gate cpu_aflt_size(void)
11707c478bd9Sstevel@tonic-gate {
11717c478bd9Sstevel@tonic-gate 	return (sizeof (ch_async_flt_t));
11727c478bd9Sstevel@tonic-gate }
11737c478bd9Sstevel@tonic-gate 
1174