xref: /illumos-gate/usr/src/uts/sun4/os/machdep.c (revision 088d69f878cf3fb57556357236ef8e1c8f9d893e)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5a8599265Selowe  * Common Development and Distribution License (the "License").
6a8599265Selowe  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22bd28a477SPrashanth Sreenivasa  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23*088d69f8SJerry Jelinek  * Copyright (c) 2017, Joyent, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #include <sys/types.h>
277c478bd9Sstevel@tonic-gate #include <sys/kstat.h>
287c478bd9Sstevel@tonic-gate #include <sys/param.h>
297c478bd9Sstevel@tonic-gate #include <sys/stack.h>
307c478bd9Sstevel@tonic-gate #include <sys/regset.h>
317c478bd9Sstevel@tonic-gate #include <sys/thread.h>
327c478bd9Sstevel@tonic-gate #include <sys/proc.h>
337c478bd9Sstevel@tonic-gate #include <sys/procfs_isa.h>
347c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
357c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
367c478bd9Sstevel@tonic-gate #include <sys/systm.h>
377c478bd9Sstevel@tonic-gate #include <sys/machpcb.h>
387c478bd9Sstevel@tonic-gate #include <sys/machasi.h>
397c478bd9Sstevel@tonic-gate #include <sys/vis.h>
407c478bd9Sstevel@tonic-gate #include <sys/fpu/fpusystm.h>
417c478bd9Sstevel@tonic-gate #include <sys/cpu_module.h>
427c478bd9Sstevel@tonic-gate #include <sys/privregs.h>
437c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
447c478bd9Sstevel@tonic-gate #include <sys/atomic.h>
457c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
467c478bd9Sstevel@tonic-gate #include <sys/time.h>
477c478bd9Sstevel@tonic-gate #include <sys/clock.h>
487c478bd9Sstevel@tonic-gate #include <sys/cmp.h>
497c478bd9Sstevel@tonic-gate #include <sys/platform_module.h>
507c478bd9Sstevel@tonic-gate #include <sys/bl.h>
517c478bd9Sstevel@tonic-gate #include <sys/nvpair.h>
527c478bd9Sstevel@tonic-gate #include <sys/kdi_impl.h>
537c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
547c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
557c478bd9Sstevel@tonic-gate #include <sys/promif.h>
567c478bd9Sstevel@tonic-gate #include <sys/pool_pset.h>
57ae115bc7Smrj #include <sys/mem.h>
58ae115bc7Smrj #include <sys/dumphdr.h>
59a8599265Selowe #include <vm/seg_kmem.h>
60ae115bc7Smrj #include <sys/hold_page.h>
61ae115bc7Smrj #include <sys/cpu.h>
62d3d50737SRafael Vanoni #include <sys/ivintr.h>
63d3d50737SRafael Vanoni #include <sys/clock_impl.h>
64bd28a477SPrashanth Sreenivasa #include <sys/machclock.h>
657c478bd9Sstevel@tonic-gate 
667c478bd9Sstevel@tonic-gate int maxphys = MMU_PAGESIZE * 16;	/* 128k */
677c478bd9Sstevel@tonic-gate int klustsize = MMU_PAGESIZE * 16;	/* 128k */
687c478bd9Sstevel@tonic-gate 
697c478bd9Sstevel@tonic-gate /*
707c478bd9Sstevel@tonic-gate  * Initialize kernel thread's stack.
717c478bd9Sstevel@tonic-gate  */
727c478bd9Sstevel@tonic-gate caddr_t
737c478bd9Sstevel@tonic-gate thread_stk_init(caddr_t stk)
747c478bd9Sstevel@tonic-gate {
757c478bd9Sstevel@tonic-gate 	kfpu_t *fp;
767c478bd9Sstevel@tonic-gate 	ulong_t align;
777c478bd9Sstevel@tonic-gate 
787c478bd9Sstevel@tonic-gate 	/* allocate extra space for floating point state */
797c478bd9Sstevel@tonic-gate 	stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
807c478bd9Sstevel@tonic-gate 	align = (uintptr_t)stk & 0x3f;
817c478bd9Sstevel@tonic-gate 	stk -= align;		/* force v9_fpu to be 16 byte aligned */
827c478bd9Sstevel@tonic-gate 	fp = (kfpu_t *)stk;
837c478bd9Sstevel@tonic-gate 	fp->fpu_fprs = 0;
847c478bd9Sstevel@tonic-gate 
857c478bd9Sstevel@tonic-gate 	stk -= SA(MINFRAME);
867c478bd9Sstevel@tonic-gate 	return (stk);
877c478bd9Sstevel@tonic-gate }
887c478bd9Sstevel@tonic-gate 
89a8599265Selowe #define	WIN32_SIZE	(MAXWIN * sizeof (struct rwindow32))
90a8599265Selowe #define	WIN64_SIZE	(MAXWIN * sizeof (struct rwindow64))
91a8599265Selowe 
92a8599265Selowe kmem_cache_t	*wbuf32_cache;
93a8599265Selowe kmem_cache_t	*wbuf64_cache;
94a8599265Selowe 
/*
 * Create the kmem caches that back per-lwp register window buffers,
 * one cache for each data model (32-bit and 64-bit rwindows).
 */
void
lwp_stk_cache_init(void)
{
	/*
	 * Window buffers are allocated from the static arena
	 * because they are accessed at TL>0. We also must use
	 * KMC_NOHASH to prevent them from straddling page
	 * boundaries as they are accessed by physical address.
	 */
	wbuf32_cache = kmem_cache_create("wbuf32_cache", WIN32_SIZE,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
	wbuf64_cache = kmem_cache_create("wbuf64_cache", WIN64_SIZE,
	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
}
109a8599265Selowe 
/*
 * Initialize lwp's kernel stack.
 * Note that now that the floating point register save area (kfpu_t)
 * has been broken out from machpcb and aligned on a 64 byte boundary so that
 * we can do block load/stores to/from it, there are a couple of potential
 * optimizations to save stack space. 1. The floating point register save
 * area could be aligned on a 16 byte boundary, and the floating point code
 * changed to (a) check the alignment and (b) use different save/restore
 * macros depending upon the alignment. 2. The lwp_stk_init code below
 * could be changed to calculate if less space would be wasted if machpcb
 * was first instead of second. However there is a REGOFF macro used in
 * locore, syscall_trap, machdep and mlsetup that assumes that the saved
 * register area is a fixed distance from the %sp, and would have to be
 * changed to a pointer or something...JJ said later.
 */
caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	struct machpcb *mpcb;
	kfpu_t *fp;
	uintptr_t aln;

	/* carve out the FP save area; 64-byte align it for block stores */
	stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
	aln = (uintptr_t)stk & 0x3F;
	stk -= aln;
	fp = (kfpu_t *)stk;
	/* the machpcb sits immediately below the FP save area */
	stk -= SA(sizeof (struct machpcb));
	mpcb = (struct machpcb *)stk;
	bzero(mpcb, sizeof (struct machpcb));
	bzero(fp, sizeof (kfpu_t) + GSR_SIZE);
	lwp->lwp_regs = (void *)&mpcb->mpcb_regs;
	lwp->lwp_fpu = (void *)fp;
	mpcb->mpcb_fpu = fp;
	mpcb->mpcb_fpu->fpu_q = mpcb->mpcb_fpu_q;
	mpcb->mpcb_thread = lwp->lwp_thread;
	mpcb->mpcb_wbcnt = 0;
	/*
	 * Pick the window buffer cache that matches the process's
	 * data model; 32-bit processes need rwindow32-sized slots.
	 */
	if (lwp->lwp_procp->p_model == DATAMODEL_ILP32) {
		mpcb->mpcb_wstate = WSTATE_USER32;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
	} else {
		mpcb->mpcb_wstate = WSTATE_USER64;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
	}
	ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
	/* cache physical addresses for use at TL>0 */
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
	mpcb->mpcb_pa = va_to_pa(mpcb);
	return (stk);
}
1587c478bd9Sstevel@tonic-gate 
1597c478bd9Sstevel@tonic-gate void
1607c478bd9Sstevel@tonic-gate lwp_stk_fini(klwp_t *lwp)
1617c478bd9Sstevel@tonic-gate {
1627c478bd9Sstevel@tonic-gate 	struct machpcb *mpcb = lwptompcb(lwp);
1637c478bd9Sstevel@tonic-gate 
1647c478bd9Sstevel@tonic-gate 	/*
1657c478bd9Sstevel@tonic-gate 	 * there might be windows still in the wbuf due to unmapped
1667c478bd9Sstevel@tonic-gate 	 * stack, misaligned stack pointer, etc.  We just free it.
1677c478bd9Sstevel@tonic-gate 	 */
1687c478bd9Sstevel@tonic-gate 	mpcb->mpcb_wbcnt = 0;
1697c478bd9Sstevel@tonic-gate 	if (mpcb->mpcb_wstate == WSTATE_USER32)
170a8599265Selowe 		kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
1717c478bd9Sstevel@tonic-gate 	else
172a8599265Selowe 		kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
1737c478bd9Sstevel@tonic-gate 	mpcb->mpcb_wbuf = NULL;
1747c478bd9Sstevel@tonic-gate 	mpcb->mpcb_wbuf_pa = -1;
1757c478bd9Sstevel@tonic-gate }
1767c478bd9Sstevel@tonic-gate 
177*088d69f8SJerry Jelinek /*ARGSUSED*/
178*088d69f8SJerry Jelinek void
179*088d69f8SJerry Jelinek lwp_fp_init(klwp_t *lwp)
180*088d69f8SJerry Jelinek {
181*088d69f8SJerry Jelinek }
1827c478bd9Sstevel@tonic-gate 
/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
	kthread_t *t, *pt = lwptot(lwp);
	struct machpcb *mpcb = lwptompcb(clwp);
	struct machpcb *pmpcb = lwptompcb(lwp);
	kfpu_t *fp, *pfp = lwptofpu(lwp);
	caddr_t wbuf;
	uint_t wstate;

	t = mpcb->mpcb_thread;
	/*
	 * remember child's fp and wbuf since they will get erased during
	 * the bcopy.
	 */
	fp = mpcb->mpcb_fpu;
	wbuf = mpcb->mpcb_wbuf;
	wstate = mpcb->mpcb_wstate;
	/*
	 * Don't copy mpcb_frame since we hand-crafted it
	 * in thread_load().
	 */
	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct machpcb) - REGOFF);
	/* restore the child's identity clobbered by the bcopy above */
	mpcb->mpcb_thread = t;
	mpcb->mpcb_fpu = fp;
	fp->fpu_q = mpcb->mpcb_fpu_q;

	/*
	 * It is theoretically possible for the lwp's wstate to
	 * be different from its value assigned in lwp_stk_init,
	 * since lwp_stk_init assumed the data model of the process.
	 * Here, we took on the data model of the cloned lwp.
	 */
	if (mpcb->mpcb_wstate != wstate) {
		/* swap the window buffer for one of the right size */
		if (wstate == WSTATE_USER32) {
			kmem_cache_free(wbuf32_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
			wstate = WSTATE_USER64;
		} else {
			kmem_cache_free(wbuf64_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
			wstate = WSTATE_USER32;
		}
	}

	/* recompute the child's own physical addresses */
	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf = wbuf;
	mpcb->mpcb_wbuf_pa = va_to_pa(wbuf);

	ASSERT(mpcb->mpcb_wstate == wstate);

	/* copy any user windows still buffered in the parent */
	if (mpcb->mpcb_wbcnt != 0) {
		bcopy(pmpcb->mpcb_wbuf, mpcb->mpcb_wbuf,
		    mpcb->mpcb_wbcnt * ((mpcb->mpcb_wstate == WSTATE_USER32) ?
		    sizeof (struct rwindow32) : sizeof (struct rwindow64)));
	}

	/*
	 * If the parent is running here, its saved %fprs may be
	 * stale; refresh it from the hardware before checking
	 * whether the FPU state needs to be propagated.
	 */
	if (pt == curthread)
		pfp->fpu_fprs = _fp_read_fprs();
	if ((pfp->fpu_en) || (pfp->fpu_fprs & FPRS_FEF)) {
		if (pt == curthread && fpu_exists) {
			save_gsr(clwp->lwp_fpu);
		} else {
			uint64_t gsr;
			gsr = get_gsr(lwp->lwp_fpu);
			set_gsr(gsr, clwp->lwp_fpu);
		}
		fp_fork(lwp, clwp);
	}
}
2567c478bd9Sstevel@tonic-gate 
/*
 * Free lwp fpu regs.
 */
void
lwp_freeregs(klwp_t *lwp, int isexec)
{
	kfpu_t *fp = lwptofpu(lwp);

	/* refresh the saved %fprs if this lwp is running on this CPU */
	if (lwptot(lwp) == curthread)
		fp->fpu_fprs = _fp_read_fprs();
	/* only release FPU state if the lwp ever used the FPU */
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF))
		fp_free(fp, isexec);
}
2707c478bd9Sstevel@tonic-gate 
/*
 * These functions are currently unused on sparc.
 */
2749acbbeafSnn /*ARGSUSED*/
2759acbbeafSnn void
2769acbbeafSnn lwp_attach_brand_hdlrs(klwp_t *lwp)
2779acbbeafSnn {}
2789acbbeafSnn 
279fd9e7635Sedp /*ARGSUSED*/
280fd9e7635Sedp void
281fd9e7635Sedp lwp_detach_brand_hdlrs(klwp_t *lwp)
282fd9e7635Sedp {}
283fd9e7635Sedp 
2847c478bd9Sstevel@tonic-gate /*
2857c478bd9Sstevel@tonic-gate  * fill in the extra register state area specified with the
2867c478bd9Sstevel@tonic-gate  * specified lwp's platform-dependent non-floating-point extra
2877c478bd9Sstevel@tonic-gate  * register state information
2887c478bd9Sstevel@tonic-gate  */
2897c478bd9Sstevel@tonic-gate /* ARGSUSED */
2907c478bd9Sstevel@tonic-gate void
2917c478bd9Sstevel@tonic-gate xregs_getgfiller(klwp_id_t lwp, caddr_t xrp)
2927c478bd9Sstevel@tonic-gate {
2937c478bd9Sstevel@tonic-gate 	/* for sun4u nothing to do here, added for symmetry */
2947c478bd9Sstevel@tonic-gate }
2957c478bd9Sstevel@tonic-gate 
/*
 * fill in the extra register state area specified with the specified lwp's
 * platform-dependent floating-point extra register state information.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
xregs_getfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr;

	/*
	 * fp_fksave() does not flush the GSR register into
	 * the lwp area, so do it now
	 */
	kpreempt_disable();
	if (ttolwp(curthread) == lwp && fpu_exists) {
		fp->fpu_fprs = _fp_read_fprs();
		/* enable the FPU first if it is not already enabled */
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		save_gsr(fp);
	}
	/* read the (now current) saved GSR out of the lwp area */
	gsr = get_gsr(fp);
	kpreempt_enable();
	PRXREG_GSR(xregs) = gsr;
}
3277c478bd9Sstevel@tonic-gate 
3287c478bd9Sstevel@tonic-gate /*
3297c478bd9Sstevel@tonic-gate  * set the specified lwp's platform-dependent non-floating-point
3307c478bd9Sstevel@tonic-gate  * extra register state based on the specified input
3317c478bd9Sstevel@tonic-gate  */
3327c478bd9Sstevel@tonic-gate /* ARGSUSED */
3337c478bd9Sstevel@tonic-gate void
3347c478bd9Sstevel@tonic-gate xregs_setgfiller(klwp_id_t lwp, caddr_t xrp)
3357c478bd9Sstevel@tonic-gate {
3367c478bd9Sstevel@tonic-gate 	/* for sun4u nothing to do here, added for symmetry */
3377c478bd9Sstevel@tonic-gate }
3387c478bd9Sstevel@tonic-gate 
/*
 * set the specified lwp's platform-dependent floating-point
 * extra register state based on the specified input
 */
void
xregs_setfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr = PRXREG_GSR(xregs);

	kpreempt_disable();
	/* store the new GSR value into the lwp's save area */
	set_gsr(gsr, lwptofpu(lwp));

	/*
	 * If the target lwp is running here, push the new value
	 * into the hardware GSR as well, enabling the FPU first
	 * if necessary.
	 */
	if ((lwp == ttolwp(curthread)) && fpu_exists) {
		fp->fpu_fprs = _fp_read_fprs();
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		restore_gsr(lwptofpu(lwp));
	}
	kpreempt_enable();
}
3647c478bd9Sstevel@tonic-gate 
3657c478bd9Sstevel@tonic-gate /*
3667c478bd9Sstevel@tonic-gate  * fill in the sun4u asrs, ie, the lwp's platform-dependent
3677c478bd9Sstevel@tonic-gate  * non-floating-point extra register state information
3687c478bd9Sstevel@tonic-gate  */
3697c478bd9Sstevel@tonic-gate /* ARGSUSED */
3707c478bd9Sstevel@tonic-gate void
3717c478bd9Sstevel@tonic-gate getasrs(klwp_t *lwp, asrset_t asr)
3727c478bd9Sstevel@tonic-gate {
3737c478bd9Sstevel@tonic-gate 	/* for sun4u nothing to do here, added for symmetry */
3747c478bd9Sstevel@tonic-gate }
3757c478bd9Sstevel@tonic-gate 
/*
 * fill in the sun4u asrs, ie, the lwp's platform-dependent
 * floating-point extra register state information
 */
void
getfpasrs(klwp_t *lwp, asrset_t asr)
{
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

	kpreempt_disable();
	/* if the target lwp is running here, its saved %fprs may be stale */
	if (ttolwp(curthread) == lwp)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		if (fpu_exists && ttolwp(curthread) == lwp) {
			/* enable the FPU so the live GSR can be saved */
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				_fp_write_fprs(fprs);
				fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
			}
			save_gsr(fp);
		}
		asr[ASR_GSR] = (int64_t)get_gsr(fp);
	}
	kpreempt_enable();
}
4017c478bd9Sstevel@tonic-gate 
4027c478bd9Sstevel@tonic-gate /*
4037c478bd9Sstevel@tonic-gate  * set the sun4u asrs, ie, the lwp's platform-dependent
4047c478bd9Sstevel@tonic-gate  * non-floating-point extra register state information
4057c478bd9Sstevel@tonic-gate  */
4067c478bd9Sstevel@tonic-gate /* ARGSUSED */
4077c478bd9Sstevel@tonic-gate void
4087c478bd9Sstevel@tonic-gate setasrs(klwp_t *lwp, asrset_t asr)
4097c478bd9Sstevel@tonic-gate {
4107c478bd9Sstevel@tonic-gate 	/* for sun4u nothing to do here, added for symmetry */
4117c478bd9Sstevel@tonic-gate }
4127c478bd9Sstevel@tonic-gate 
/*
 * Set the sun4u floating-point asrs (currently just the GSR) for
 * the given lwp from the supplied asr set.
 */
void
setfpasrs(klwp_t *lwp, asrset_t asr)
{
	kfpu_t *fp = lwptofpu(lwp);
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

	kpreempt_disable();
	/* if the target lwp is running here, its saved %fprs may be stale */
	if (ttolwp(curthread) == lwp)
		fp->fpu_fprs = _fp_read_fprs();
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		set_gsr(asr[ASR_GSR], fp);
		/* push the new GSR into the hardware if lwp runs here */
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				_fp_write_fprs(fprs);
				fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
			}
			restore_gsr(fp);
		}
	}
	kpreempt_enable();
}
4347c478bd9Sstevel@tonic-gate 
4357c478bd9Sstevel@tonic-gate /*
4367c478bd9Sstevel@tonic-gate  * Create interrupt kstats for this CPU.
4377c478bd9Sstevel@tonic-gate  */
4387c478bd9Sstevel@tonic-gate void
4397c478bd9Sstevel@tonic-gate cpu_create_intrstat(cpu_t *cp)
4407c478bd9Sstevel@tonic-gate {
4417c478bd9Sstevel@tonic-gate 	int		i;
4427c478bd9Sstevel@tonic-gate 	kstat_t		*intr_ksp;
4437c478bd9Sstevel@tonic-gate 	kstat_named_t	*knp;
4447c478bd9Sstevel@tonic-gate 	char		name[KSTAT_STRLEN];
4457c478bd9Sstevel@tonic-gate 	zoneid_t	zoneid;
4467c478bd9Sstevel@tonic-gate 
4477c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
4487c478bd9Sstevel@tonic-gate 
4497c478bd9Sstevel@tonic-gate 	if (pool_pset_enabled())
4507c478bd9Sstevel@tonic-gate 		zoneid = GLOBAL_ZONEID;
4517c478bd9Sstevel@tonic-gate 	else
4527c478bd9Sstevel@tonic-gate 		zoneid = ALL_ZONES;
4537c478bd9Sstevel@tonic-gate 
4547c478bd9Sstevel@tonic-gate 	intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
4557c478bd9Sstevel@tonic-gate 	    KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);
4567c478bd9Sstevel@tonic-gate 
4577c478bd9Sstevel@tonic-gate 	/*
4587c478bd9Sstevel@tonic-gate 	 * Initialize each PIL's named kstat
4597c478bd9Sstevel@tonic-gate 	 */
4607c478bd9Sstevel@tonic-gate 	if (intr_ksp != NULL) {
4617c478bd9Sstevel@tonic-gate 		intr_ksp->ks_update = cpu_kstat_intrstat_update;
4627c478bd9Sstevel@tonic-gate 		knp = (kstat_named_t *)intr_ksp->ks_data;
4637c478bd9Sstevel@tonic-gate 		intr_ksp->ks_private = cp;
4647c478bd9Sstevel@tonic-gate 		for (i = 0; i < PIL_MAX; i++) {
4657c478bd9Sstevel@tonic-gate 			(void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
4667c478bd9Sstevel@tonic-gate 			    i + 1);
4677c478bd9Sstevel@tonic-gate 			kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
4687c478bd9Sstevel@tonic-gate 			(void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
4697c478bd9Sstevel@tonic-gate 			    i + 1);
4707c478bd9Sstevel@tonic-gate 			kstat_named_init(&knp[(i * 2) + 1], name,
4717c478bd9Sstevel@tonic-gate 			    KSTAT_DATA_UINT64);
4727c478bd9Sstevel@tonic-gate 		}
4737c478bd9Sstevel@tonic-gate 		kstat_install(intr_ksp);
4747c478bd9Sstevel@tonic-gate 	}
4757c478bd9Sstevel@tonic-gate }
4767c478bd9Sstevel@tonic-gate 
/*
 * Delete interrupt kstats for this CPU.
 */
void
cpu_delete_intrstat(cpu_t *cp)
{
	kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}
4857c478bd9Sstevel@tonic-gate 
/*
 * Convert interrupt statistics from CPU ticks to nanoseconds and
 * update kstat.
 */
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
	kstat_named_t	*knp = ksp->ks_data;
	cpu_t		*cpup = (cpu_t *)ksp->ks_private;
	int		i;

	/* these statistics are read-only */
	if (rw == KSTAT_WRITE)
		return (EACCES);

	/*
	 * We use separate passes to copy and convert the statistics to
	 * nanoseconds. This assures that the snapshot of the data is as
	 * self-consistent as possible.
	 */

	/* pass 1: snapshot raw tick totals and interrupt counts */
	for (i = 0; i < PIL_MAX; i++) {
		knp[i * 2].value.ui64 = cpup->cpu_m.intrstat[i + 1][0];
		knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
	}

	/* pass 2: convert the time totals from ticks to nanoseconds */
	for (i = 0; i < PIL_MAX; i++) {
		knp[i * 2].value.ui64 =
		    (uint64_t)tick2ns((hrtime_t)knp[i * 2].value.ui64,
		    cpup->cpu_id);
	}

	return (0);
}
5197c478bd9Sstevel@tonic-gate 
/*
 * Called by common/os/cpu.c for psrinfo(1m) kstats.
 * Returns the FRU FMRI string recorded in the per-CPU cpunodes table.
 */
char *
cpu_fru_fmri(cpu_t *cp)
{
	return (cpunodes[cp->cpu_id].fru_fmri);
}
5287c478bd9Sstevel@tonic-gate 
/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp. This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler. intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp. Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * It can also happen if an interrupt thread in intr_thread() calls
	 * preempt. It will have already taken care of updating stats. In
	 * this event, the interrupt thread will be runnable.
	 */
	if (t->t_intr_start) {
		/*
		 * Atomically claim the accumulated interval: retry if
		 * t_intr_start changed between the read and the swap
		 * to zero.
		 */
		do {
			start = t->t_intr_start;
			interval = CLOCK_TICK_COUNTER() - start;
		} while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		/*
		 * Scale the measured ticks by the CPU's clock divisor
		 * when one is in effect (presumably the tick counter
		 * runs at a reduced rate then -- confirm against
		 * cpu_m.divisor's definition).
		 */
		if (cpu->cpu_m.divisor > 1)
			interval *= cpu->cpu_m.divisor;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		/* also charge the interval to the CPU's microstate */
		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}
5707c478bd9Sstevel@tonic-gate 
5717c478bd9Sstevel@tonic-gate 
/*
 * An interrupt thread is returning from swtch(). Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * Install a fresh tick-counter timestamp atomically; retry
	 * if t_intr_start changed underneath us.
	 */
	do {
		ts = t->t_intr_start;
	} while (atomic_cas_64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) !=
	    ts);
}
5897c478bd9Sstevel@tonic-gate 
5907c478bd9Sstevel@tonic-gate 
5917c478bd9Sstevel@tonic-gate int
5927c478bd9Sstevel@tonic-gate blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
5937c478bd9Sstevel@tonic-gate {
5947c478bd9Sstevel@tonic-gate 	if (&plat_blacklist)
5957c478bd9Sstevel@tonic-gate 		return (plat_blacklist(cmd, scheme, fmri, class));
5967c478bd9Sstevel@tonic-gate 
5977c478bd9Sstevel@tonic-gate 	return (ENOTSUP);
5987c478bd9Sstevel@tonic-gate }
5997c478bd9Sstevel@tonic-gate 
6007c478bd9Sstevel@tonic-gate int
6017c478bd9Sstevel@tonic-gate kdi_pread(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
6027c478bd9Sstevel@tonic-gate {
6037c478bd9Sstevel@tonic-gate 	extern void kdi_flush_caches(void);
6047c478bd9Sstevel@tonic-gate 	size_t nread = 0;
6057c478bd9Sstevel@tonic-gate 	uint32_t word;
6067c478bd9Sstevel@tonic-gate 	int slop, i;
6077c478bd9Sstevel@tonic-gate 
6087c478bd9Sstevel@tonic-gate 	kdi_flush_caches();
6097c478bd9Sstevel@tonic-gate 	membar_enter();
6107c478bd9Sstevel@tonic-gate 
6117c478bd9Sstevel@tonic-gate 	/* We might not begin on a word boundary. */
6127c478bd9Sstevel@tonic-gate 	if ((slop = addr & 3) != 0) {
6137c478bd9Sstevel@tonic-gate 		word = ldphys(addr & ~3);
6147c478bd9Sstevel@tonic-gate 		for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nread++)
6157c478bd9Sstevel@tonic-gate 			*buf++ = ((uchar_t *)&word)[i];
6167c478bd9Sstevel@tonic-gate 		addr = roundup(addr, 4);
6177c478bd9Sstevel@tonic-gate 	}
6187c478bd9Sstevel@tonic-gate 
6197c478bd9Sstevel@tonic-gate 	while (nbytes > 0) {
6207c478bd9Sstevel@tonic-gate 		word = ldphys(addr);
6217c478bd9Sstevel@tonic-gate 		for (i = 0; i < 4 && nbytes > 0; i++, nbytes--, nread++, addr++)
6227c478bd9Sstevel@tonic-gate 			*buf++ = ((uchar_t *)&word)[i];
6237c478bd9Sstevel@tonic-gate 	}
6247c478bd9Sstevel@tonic-gate 
6257c478bd9Sstevel@tonic-gate 	kdi_flush_caches();
6267c478bd9Sstevel@tonic-gate 
6277c478bd9Sstevel@tonic-gate 	*ncopiedp = nread;
6287c478bd9Sstevel@tonic-gate 	return (0);
6297c478bd9Sstevel@tonic-gate }
6307c478bd9Sstevel@tonic-gate 
6317c478bd9Sstevel@tonic-gate int
6327c478bd9Sstevel@tonic-gate kdi_pwrite(caddr_t buf, size_t nbytes, uint64_t addr, size_t *ncopiedp)
6337c478bd9Sstevel@tonic-gate {
6347c478bd9Sstevel@tonic-gate 	extern void kdi_flush_caches(void);
6357c478bd9Sstevel@tonic-gate 	size_t nwritten = 0;
6367c478bd9Sstevel@tonic-gate 	uint32_t word;
6377c478bd9Sstevel@tonic-gate 	int slop, i;
6387c478bd9Sstevel@tonic-gate 
6397c478bd9Sstevel@tonic-gate 	kdi_flush_caches();
6407c478bd9Sstevel@tonic-gate 
6417c478bd9Sstevel@tonic-gate 	/* We might not begin on a word boundary. */
6427c478bd9Sstevel@tonic-gate 	if ((slop = addr & 3) != 0) {
6437c478bd9Sstevel@tonic-gate 		word = ldphys(addr & ~3);
6447c478bd9Sstevel@tonic-gate 		for (i = slop; i < 4 && nbytes > 0; i++, nbytes--, nwritten++)
6457c478bd9Sstevel@tonic-gate 			((uchar_t *)&word)[i] = *buf++;
6467c478bd9Sstevel@tonic-gate 		stphys(addr & ~3, word);
6477c478bd9Sstevel@tonic-gate 		addr = roundup(addr, 4);
6487c478bd9Sstevel@tonic-gate 	}
6497c478bd9Sstevel@tonic-gate 
6507c478bd9Sstevel@tonic-gate 	while (nbytes > 3) {
6517c478bd9Sstevel@tonic-gate 		for (word = 0, i = 0; i < 4; i++, nbytes--, nwritten++)
6527c478bd9Sstevel@tonic-gate 			((uchar_t *)&word)[i] = *buf++;
6537c478bd9Sstevel@tonic-gate 		stphys(addr, word);
6547c478bd9Sstevel@tonic-gate 		addr += 4;
6557c478bd9Sstevel@tonic-gate 	}
6567c478bd9Sstevel@tonic-gate 
6577c478bd9Sstevel@tonic-gate 	/* We might not end with a whole word. */
6587c478bd9Sstevel@tonic-gate 	if (nbytes > 0) {
6597c478bd9Sstevel@tonic-gate 		word = ldphys(addr);
6607c478bd9Sstevel@tonic-gate 		for (i = 0; nbytes > 0; i++, nbytes--, nwritten++)
6617c478bd9Sstevel@tonic-gate 			((uchar_t *)&word)[i] = *buf++;
6627c478bd9Sstevel@tonic-gate 		stphys(addr, word);
6637c478bd9Sstevel@tonic-gate 	}
6647c478bd9Sstevel@tonic-gate 
6657c478bd9Sstevel@tonic-gate 	membar_enter();
6667c478bd9Sstevel@tonic-gate 	kdi_flush_caches();
6677c478bd9Sstevel@tonic-gate 
6687c478bd9Sstevel@tonic-gate 	*ncopiedp = nwritten;
6697c478bd9Sstevel@tonic-gate 	return (0);
6707c478bd9Sstevel@tonic-gate }
6717c478bd9Sstevel@tonic-gate 
6727c478bd9Sstevel@tonic-gate static void
6737c478bd9Sstevel@tonic-gate kdi_kernpanic(struct regs *regs, uint_t tt)
6747c478bd9Sstevel@tonic-gate {
6757c478bd9Sstevel@tonic-gate 	sync_reg_buf = *regs;
6767c478bd9Sstevel@tonic-gate 	sync_tt = tt;
6777c478bd9Sstevel@tonic-gate 
6787c478bd9Sstevel@tonic-gate 	sync_handler();
6797c478bd9Sstevel@tonic-gate }
6807c478bd9Sstevel@tonic-gate 
6817c478bd9Sstevel@tonic-gate static void
6827c478bd9Sstevel@tonic-gate kdi_plat_call(void (*platfn)(void))
6837c478bd9Sstevel@tonic-gate {
6847c478bd9Sstevel@tonic-gate 	if (platfn != NULL) {
6857c478bd9Sstevel@tonic-gate 		prom_suspend_prepost();
6867c478bd9Sstevel@tonic-gate 		platfn();
6877c478bd9Sstevel@tonic-gate 		prom_resume_prepost();
6887c478bd9Sstevel@tonic-gate 	}
6897c478bd9Sstevel@tonic-gate }
6907c478bd9Sstevel@tonic-gate 
/*
 * kdi_system_claim and release are defined here for all sun4 platforms and
 * pointed to by mach_kdi_init() to provide default callbacks for such systems.
 * Specific sun4u or sun4v platforms may implement their own claim and release
 * routines, at which point their respective callbacks will be updated.
 */
static void
kdi_system_claim(void)
{
	/* Tell the lbolt code the debugger is taking over the system. */
	lbolt_debug_entry();
}
702d3d50737SRafael Vanoni 
/*
 * Default sun4 system-release callback: counterpart to kdi_system_claim(),
 * invoked when the debugger returns control to the system.
 */
static void
kdi_system_release(void)
{
	/* Tell the lbolt code the debugger is done with the system. */
	lbolt_debug_return();
}
708d3d50737SRafael Vanoni 
7097c478bd9Sstevel@tonic-gate void
7107c478bd9Sstevel@tonic-gate mach_kdi_init(kdi_t *kdi)
7117c478bd9Sstevel@tonic-gate {
7127c478bd9Sstevel@tonic-gate 	kdi->kdi_plat_call = kdi_plat_call;
713ae115bc7Smrj 	kdi->kdi_kmdb_enter = kmdb_enter;
714d3d50737SRafael Vanoni 	kdi->pkdi_system_claim = kdi_system_claim;
715d3d50737SRafael Vanoni 	kdi->pkdi_system_release = kdi_system_release;
7167c478bd9Sstevel@tonic-gate 	kdi->mkdi_cpu_index = kdi_cpu_index;
7177c478bd9Sstevel@tonic-gate 	kdi->mkdi_trap_vatotte = kdi_trap_vatotte;
7187c478bd9Sstevel@tonic-gate 	kdi->mkdi_kernpanic = kdi_kernpanic;
7197c478bd9Sstevel@tonic-gate }
720eda89462Sesolom 
721eda89462Sesolom 
722eda89462Sesolom /*
723eda89462Sesolom  * get_cpu_mstate() is passed an array of timestamps, NCMSTATES
724eda89462Sesolom  * long, and it fills in the array with the time spent on cpu in
725eda89462Sesolom  * each of the mstates, where time is returned in nsec.
726eda89462Sesolom  *
727eda89462Sesolom  * No guarantee is made that the returned values in times[] will
728eda89462Sesolom  * monotonically increase on sequential calls, although this will
729eda89462Sesolom  * be true in the long run. Any such guarantee must be handled by
730eda89462Sesolom  * the caller, if needed. This can happen if we fail to account
731eda89462Sesolom  * for elapsed time due to a generation counter conflict, yet we
732eda89462Sesolom  * did account for it on a prior call (see below).
733eda89462Sesolom  *
734eda89462Sesolom  * The complication is that the cpu in question may be updating
735eda89462Sesolom  * its microstate at the same time that we are reading it.
736eda89462Sesolom  * Because the microstate is only updated when the CPU's state
737eda89462Sesolom  * changes, the values in cpu_intracct[] can be indefinitely out
738eda89462Sesolom  * of date. To determine true current values, it is necessary to
739eda89462Sesolom  * compare the current time with cpu_mstate_start, and add the
740eda89462Sesolom  * difference to times[cpu_mstate].
741eda89462Sesolom  *
742eda89462Sesolom  * This can be a problem if those values are changing out from
743eda89462Sesolom  * under us. Because the code path in new_cpu_mstate() is
744eda89462Sesolom  * performance critical, we have not added a lock to it. Instead,
745eda89462Sesolom  * we have added a generation counter. Before beginning
746eda89462Sesolom  * modifications, the counter is set to 0. After modifications,
747eda89462Sesolom  * it is set to the old value plus one.
748eda89462Sesolom  *
749eda89462Sesolom  * get_cpu_mstate() will not consider the values of cpu_mstate
750eda89462Sesolom  * and cpu_mstate_start to be usable unless the value of
751eda89462Sesolom  * cpu_mstate_gen is both non-zero and unchanged, both before and
752eda89462Sesolom  * after reading the mstate information. Note that we must
753eda89462Sesolom  * protect against out-of-order loads around accesses to the
754eda89462Sesolom  * generation counter. Also, this is a best effort approach in
755eda89462Sesolom  * that we do not retry should the counter be found to have
756eda89462Sesolom  * changed.
757eda89462Sesolom  *
758eda89462Sesolom  * cpu_intracct[] is used to identify time spent in each CPU
759eda89462Sesolom  * mstate while handling interrupts. Such time should be reported
760eda89462Sesolom  * against system time, and so is subtracted out from its
761eda89462Sesolom  * corresponding cpu_acct[] time and added to
762eda89462Sesolom  * cpu_acct[CMS_SYSTEM]. Additionally, intracct time is stored in
763eda89462Sesolom  * %ticks, but acct time may be stored as %sticks, thus requiring
764eda89462Sesolom  * different conversions before they can be compared.
765eda89462Sesolom  */
766eda89462Sesolom 
void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
	int i;
	hrtime_t now, start;
	uint16_t gen;
	uint16_t state;
	hrtime_t intracct[NCMSTATES];

	/*
	 * Load all volatile state under the protection of membar.
	 * cpu_acct[cpu_mstate] must be loaded to avoid double counting
	 * of (now - cpu_mstate_start) by a change in CPU mstate that
	 * arrives after we make our last check of cpu_mstate_gen.
	 */

	now = gethrtime_unscaled();
	gen = cpu->cpu_mstate_gen;

	membar_consumer();	/* guarantee load ordering */
	start = cpu->cpu_mstate_start;
	state = cpu->cpu_mstate;
	for (i = 0; i < NCMSTATES; i++) {
		intracct[i] = cpu->cpu_intracct[i];
		times[i] = cpu->cpu_acct[i];
	}
	membar_consumer();	/* guarantee load ordering */

	/*
	 * Credit the current mstate with the time elapsed since it was
	 * entered, but only if the generation counter is non-zero (no
	 * update in progress) and unchanged across our reads; see the
	 * block comment above for why no retry is attempted.
	 */
	if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start)
		times[state] += now - start;

	/*
	 * Convert to nsec: acct times via scalehrtime(), intracct times
	 * (kept in %tick units) via tick2ns().
	 */
	for (i = 0; i < NCMSTATES; i++) {
		scalehrtime(&times[i]);
		intracct[i] = tick2ns((hrtime_t)intracct[i], cpu->cpu_id);
	}

	/*
	 * Move interrupt-handling time out of each state and into
	 * CMS_SYSTEM, clamping so no state reports a negative time.
	 */
	for (i = 0; i < NCMSTATES; i++) {
		if (i == CMS_SYSTEM)
			continue;
		times[i] -= intracct[i];
		if (times[i] < 0) {
			/* only credit CMS_SYSTEM with what was available */
			intracct[i] += times[i];
			times[i] = 0;
		}
		times[CMS_SYSTEM] += intracct[i];
	}
}
814ae115bc7Smrj 
815ae115bc7Smrj void
816ae115bc7Smrj mach_cpu_pause(volatile char *safe)
817ae115bc7Smrj {
818ae115bc7Smrj 	/*
819ae115bc7Smrj 	 * This cpu is now safe.
820ae115bc7Smrj 	 */
821ae115bc7Smrj 	*safe = PAUSE_WAIT;
822ae115bc7Smrj 	membar_enter(); /* make sure stores are flushed */
823ae115bc7Smrj 
824ae115bc7Smrj 	/*
825ae115bc7Smrj 	 * Now we wait.  When we are allowed to continue, safe
826ae115bc7Smrj 	 * will be set to PAUSE_IDLE.
827ae115bc7Smrj 	 */
828ae115bc7Smrj 	while (*safe != PAUSE_IDLE)
829ae115bc7Smrj 		SMT_PAUSE();
830ae115bc7Smrj }
831ae115bc7Smrj 
/*ARGSUSED*/
int
plat_mem_do_mmio(struct uio *uio, enum uio_rw rw)
{
	/* Common sun4 code provides no mmio path; report unsupported. */
	return (ENOTSUP);
}
838ae115bc7Smrj 
/*
 * cpu threshold for compressed dumps: platform-specific default,
 * chosen per sun4v vs. sun4u at compile time.
 */
#ifdef sun4v
uint_t dump_plat_mincpu_default = DUMP_PLAT_SUN4V_MINCPU;
#else
uint_t dump_plat_mincpu_default = DUMP_PLAT_SUN4U_MINCPU;
#endif
845ca3e8d88SDave Plauger 
/*
 * Stub: sun4 contributes no extra platform-private dump pages
 * (returns a count of 0).
 */
int
dump_plat_addr()
{
	return (0);
}
851ae115bc7Smrj 
/*
 * Stub: no platform-specific dump pfn setup needed on sun4.
 */
void
dump_plat_pfn()
{
}
856ae115bc7Smrj 
/* ARGSUSED */
/*
 * Stub: no platform-specific dump data on sun4 (0 bytes contributed).
 */
int
dump_plat_data(void *dump_cdata)
{
	return (0);
}
863ae115bc7Smrj 
/* ARGSUSED */
/*
 * Default page-hold hook: always grants the hold (PLAT_HOLD_OK)
 * without locking the page or filling in *pp_ret.
 */
int
plat_hold_page(pfn_t pfn, int lock, page_t **pp_ret)
{
	return (PLAT_HOLD_OK);
}
870ae115bc7Smrj 
/* ARGSUSED */
/*
 * Counterpart to plat_hold_page(); the default hold takes nothing,
 * so there is nothing to release.
 */
void
plat_release_page(page_t *pp)
{
}
87648633f18SJan Setje-Eilers 
/* ARGSUSED */
/*
 * Stub: sun4 common code has nothing to do when the boot progress
 * bar is aborted by a keystroke.
 */
void
progressbar_key_abort(ldi_ident_t li)
{
}
882d3d50737SRafael Vanoni 
883d3d50737SRafael Vanoni /*
884d3d50737SRafael Vanoni  * We need to post a soft interrupt to reprogram the lbolt cyclic when
885d3d50737SRafael Vanoni  * switching from event to cyclic driven lbolt. The following code adds
886d3d50737SRafael Vanoni  * and posts the softint for sun4 platforms.
887d3d50737SRafael Vanoni  */
/* Soft interrupt number registered by lbolt_softint_add(). */
static uint64_t lbolt_softint_inum;

/*
 * Register the LOCK_LEVEL soft interrupt whose handler,
 * lbolt_ev_to_cyclic(), switches lbolt from event to cyclic mode.
 */
void
lbolt_softint_add(void)
{
	lbolt_softint_inum = add_softintr(LOCK_LEVEL,
	    (softintrfunc)lbolt_ev_to_cyclic, NULL, SOFTINT_MT);
}
896d3d50737SRafael Vanoni 
/*
 * Post the soft interrupt registered by lbolt_softint_add(), triggering
 * the switch to cyclic-driven lbolt.
 */
void
lbolt_softint_post(void)
{
	setsoftint(lbolt_softint_inum);
}
902