/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */
#include "assym.h"
#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>
/*
 * sfmmu related subroutines
 */
/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being invalidated
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 * Note %g1 is the only input argument used by this xcall handler.
 */
	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT ||
	!     current CPU tsbmiss->usfmmup == victim sfmmup) {
	!       if (shctx_on) {
	!               shctx = INVALID;
	!       }
	!	if (sec-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	!	if (pri-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to pri-ctx
	!	}
	! }
	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn	%xcc, ptl1_panic		/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2

	cmp	%g1, INVALID_CONTEXT
	be,pn	%xcc, 0f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5	/* load usfmmup */
	cmp	%g5, %g1			/* hat toBe-invalid running? */
	bne,pt	%xcc, 3f
	  nop

0:
	sethi	%hi(shctx_on), %g5
	ld	[%g5 + %lo(shctx_on)], %g5
	brz	%g5, 1f
	  mov	MMU_SHARED_CONTEXT, %g5
	sethi	%hi(FLUSH_ADDR), %g4
	stxa	%g0, [%g5]ASI_MMU_CTX		/* invalidate shared ctx reg */
	flush	%g4

1:
	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
	set	CTXREG_CTX_MASK, %g4
	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invald ctx? */
	ble,pn	%xcc, 2f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

2:
	ldxa	[%g7]ASI_MMU_CTX, %g3		/* get pgz | pri-ctx */
	and	%g3, %g4, %g5			/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invald ctx? */
	ble,pn	%xcc, 3f			/* yes, no need to change */
	  srlx	%g3, CTXREG_NEXT_SHIFT, %g3	/* %g3 = nucleus pgsz */
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3	/* need to preserve nucleus pgsz */
	or	%g3, %g2, %g2			/* %g2 = nucleus pgsz | INVALID_CONTEXT */

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
3:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)
1291e2e7a7huah	/*
1301e2e7a7huah	 * %o0 = virtual address
1311e2e7a7huah	 * %o1 = address of TTE to be loaded
1321e2e7a7huah	 */
1331e2e7a7huah	ENTRY_NP(sfmmu_itlb_ld_kva)
1347c478bdstevel@tonic-gate	rdpr	%pstate, %o3
1357c478bdstevel@tonic-gate#ifdef DEBUG
1361e2e7a7huah	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
1377c478bdstevel@tonic-gate#endif /* DEBUG */
1387c478bdstevel@tonic-gate	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
1397c478bdstevel@tonic-gate	srln	%o0, MMU_PAGESHIFT, %o0
1407c478bdstevel@tonic-gate	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset
1421e2e7a7huah	ldx	[%o1], %g1
1437c478bdstevel@tonic-gate	set	MMU_TAG_ACCESS, %o5
1447c478bdstevel@tonic-gate#ifdef	CHEETAHPLUS_ERRATUM_34
1457c478bdstevel@tonic-gate	!
1467c478bdstevel@tonic-gate	! If this is Cheetah or derivative and the specified TTE is locked
1477c478bdstevel@tonic-gate	! and hence to be loaded into the T16, fully-associative TLB, we
1487c478bdstevel@tonic-gate	! must avoid Cheetah+ erratum 34.  In Cheetah+ erratum 34, under
1497c478bdstevel@tonic-gate	! certain conditions an ITLB locked index 0 TTE will erroneously be
1507c478bdstevel@tonic-gate	! displaced when a new TTE is loaded via ASI_ITLB_IN.  To avoid
1517c478bdstevel@tonic-gate	! this erratum, we scan the T16 top down for an unlocked TTE and
1527c478bdstevel@tonic-gate	! explicitly load the specified TTE into that index.
1537c478bdstevel@tonic-gate	!
1547c478bdstevel@tonic-gate	GET_CPU_IMPL(%g2)
1557c478bdstevel@tonic-gate	cmp	%g2, CHEETAH_IMPL
1567c478bdstevel@tonic-gate	bl,pn	%icc, 0f
1577c478bdstevel@tonic-gate	  nop
1597c478bdstevel@tonic-gate	andcc	%g1, TTE_LCK_INT, %g0
1607c478bdstevel@tonic-gate	bz	%icc, 0f			! Lock bit is not set;
1617c478bdstevel@tonic-gate						!   load normally.
1627c478bdstevel@tonic-gate	  or	%g0, (15 << 3), %g3		! Start searching from the
1637c478bdstevel@tonic-gate						!   top down.
1667c478bdstevel@tonic-gate	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16
1687c478bdstevel@tonic-gate	!
1697c478bdstevel@tonic-gate	! If this entry isn't valid, we'll choose to displace it (regardless
1707c478bdstevel@tonic-gate	! of the lock bit).
1717c478bdstevel@tonic-gate	!
1727c478bdstevel@tonic-gate	cmp	%g4, %g0
1737c478bdstevel@tonic-gate	bge	%xcc, 2f			! TTE is > 0 iff not valid
1747c478bdstevel@tonic-gate	  andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
1757c478bdstevel@tonic-gate	bz	%icc, 2f			! If unlocked, go displace
1767c478bdstevel@tonic-gate	  nop
1777c478bdstevel@tonic-gate	sub	%g3, (1 << 3), %g3
1787c478bdstevel@tonic-gate	brgz	%g3, 1b				! Still more TLB entries
1797c478bdstevel@tonic-gate	  nop					! to search
1817c478bdstevel@tonic-gate	sethi   %hi(sfmmu_panic5), %o0          ! We searched all entries and
1827c478bdstevel@tonic-gate	call    panic                           ! found no unlocked TTE so
1837c478bdstevel@tonic-gate	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.
1877c478bdstevel@tonic-gate	!
1887c478bdstevel@tonic-gate	! We have found an unlocked or non-valid entry; we'll explicitly load
1897c478bdstevel@tonic-gate	! our locked entry here.
1907c478bdstevel@tonic-gate	!
1917c478bdstevel@tonic-gate	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
1927c478bdstevel@tonic-gate	stxa	%o0, [%o5]ASI_IMMU
1937c478bdstevel@tonic-gate	stxa	%g1, [%g3]ASI_ITLB_ACCESS
1947c478bdstevel@tonic-gate	flush	%o1				! Flush required for I-MMU
1957c478bdstevel@tonic-gate	ba	3f				! Delay slot of ba is empty
1961e2e7a7huah	  nop					!   per Erratum 64
1997c478bdstevel@tonic-gate#endif	/* CHEETAHPLUS_ERRATUM_34 */
2007c478bdstevel@tonic-gate	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
2017c478bdstevel@tonic-gate	stxa	%o0, [%o5]ASI_IMMU
2027c478bdstevel@tonic-gate	stxa	%g1, [%g0]ASI_ITLB_IN
2037c478bdstevel@tonic-gate	flush	%o1				! Flush required for I-MMU
2057c478bdstevel@tonic-gate	retl
2067c478bdstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		! Enable interrupts
2071e2e7a7huah	SET_SIZE(sfmmu_itlb_ld_kva)
2097c478bdstevel@tonic-gate	/*
2107c478bdstevel@tonic-gate	 * Load an entry into the DTLB.
2117c478bdstevel@tonic-gate	 *
2127c478bdstevel@tonic-gate	 * Special handling is required for locked entries since there
2137c478bdstevel@tonic-gate	 * are some TLB slots that are reserved for the kernel but not
2147c478bdstevel@tonic-gate	 * always held locked.  We want to avoid loading locked TTEs
2157c478bdstevel@tonic-gate	 * into those slots since they could be displaced.
2161e2e7a7huah	 *
2171e2e7a7huah	 * %o0 = virtual address
2181e2e7a7huah	 * %o1 = address of TTE to be loaded
2197c478bdstevel@tonic-gate	 */
2201e2e7a7huah	ENTRY_NP(sfmmu_dtlb_ld_kva)
2217c478bdstevel@tonic-gate	rdpr	%pstate, %o3
2227c478bdstevel@tonic-gate#ifdef DEBUG
2231e2e7a7huah	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
2247c478bdstevel@tonic-gate#endif /* DEBUG */
2257c478bdstevel@tonic-gate	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
2267c478bdstevel@tonic-gate	srln	%o0, MMU_PAGESHIFT, %o0
2277c478bdstevel@tonic-gate	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset
2291e2e7a7huah	ldx	[%o1], %g1
2317c478bdstevel@tonic-gate	set	MMU_TAG_ACCESS, %o5
2331e2e7a7huah	set	cpu_impl_dual_pgsz, %o2
2341e2e7a7huah	ld	[%o2], %o2
2351e2e7a7huah	brz	%o2, 1f
2361e2e7a7huah	  nop
2381e2e7a7huah	sethi	%hi(ksfmmup), %o2
2391e2e7a7huah	ldx	[%o2 + %lo(ksfmmup)], %o2
2401e2e7a7huah	ldub    [%o2 + SFMMU_CEXT], %o2
2411e2e7a7huah        sll     %o2, TAGACCEXT_SHIFT, %o2
2437c478bdstevel@tonic-gate	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
2447c478bdstevel@tonic-gate	stxa	%o2,[%o4]ASI_DMMU
2457c478bdstevel@tonic-gate	membar	#Sync
2477c478bdstevel@tonic-gate	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
2487c478bdstevel@tonic-gate	bnz,pn	%icc, 2f			! special handling
2497c478bdstevel@tonic-gate	  sethi	%hi(dtlb_resv_ttenum), %g3
2507c478bdstevel@tonic-gate	stxa	%o0,[%o5]ASI_DMMU		! Load unlocked TTE
2517c478bdstevel@tonic-gate	stxa	%g1,[%g0]ASI_DTLB_IN		! via DTLB_IN
2527c478bdstevel@tonic-gate	membar	#Sync
2537c478bdstevel@tonic-gate	retl
2547c478bdstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		! enable interrupts
25619f938djfrank#ifdef	CHEETAHPLUS_ERRATUM_34
25719f938djfrank	GET_CPU_IMPL(%g2)
2597c478bdstevel@tonic-gate	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
2607c478bdstevel@tonic-gate	sll	%g3, 3, %g3			! First reserved idx in TLB 0
2617c478bdstevel@tonic-gate	sub	%g3, (1 << 3), %g3		! Decrement idx
26219f938djfrank	! Erratum 15 workaround due to ld [%g3 + %lo(dtlb_resv_ttenum)], %g3
26319f938djfrank	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
2657c478bdstevel@tonic-gate	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
2667c478bdstevel@tonic-gate	!
2677c478bdstevel@tonic-gate	! If this entry isn't valid, we'll choose to displace it (regardless
2687c478bdstevel@tonic-gate	! of the lock bit).
2697c478bdstevel@tonic-gate	!
2707c478bdstevel@tonic-gate	brgez,pn %g4, 4f			! TTE is > 0 iff not valid
2717c478bdstevel@tonic-gate	  nop
2727c478bdstevel@tonic-gate	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
2737c478bdstevel@tonic-gate	bz,pn	%icc, 4f			! If unlocked, go displace
2747c478bdstevel@tonic-gate	  nop
2757c478bdstevel@tonic-gate	sub	%g3, (1 << 3), %g3		! Decrement idx
27619f938djfrank#ifdef	CHEETAHPLUS_ERRATUM_34
27719f938djfrank	!
27819f938djfrank	! If this is a Cheetah or derivative, we must work around Erratum 34
27919f938djfrank	! for the DTLB.  Erratum 34 states that under certain conditions,
28019f938djfrank	! a locked entry 0 TTE may be improperly displaced.  To avoid this,
28119f938djfrank	! we do not place a locked TTE in entry 0.
28219f938djfrank	!
28319f938djfrank	brgz	%g3, 3b
28419f938djfrank	  nop
28519f938djfrank	cmp	%g2, CHEETAH_IMPL
28619f938djfrank	bge,pt	%icc, 5f
28719f938djfrank	  nop
28819f938djfrank	brz	%g3, 3b
28919f938djfrank	 nop
29019f938djfrank#else	/* CHEETAHPLUS_ERRATUM_34 */
2911e2e7a7huah	brgez	%g3, 3b
2927c478bdstevel@tonic-gate	  nop
29319f938djfrank#endif	/* CHEETAHPLUS_ERRATUM_34 */
2957c478bdstevel@tonic-gate	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
2967c478bdstevel@tonic-gate	call	panic				! found no unlocked TTE so
2977c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_panic5), %o0	! give up.
2997c478bdstevel@tonic-gate	stxa	%o0,[%o5]ASI_DMMU		! Setup tag access
30025cf1a3jl#ifdef	OLYMPUS_SHARED_FTLB
30125cf1a3jl	stxa	%g1,[%g0]ASI_DTLB_IN
3037c478bdstevel@tonic-gate	stxa	%g1,[%g3]ASI_DTLB_ACCESS	! Displace entry at idx
3057c478bdstevel@tonic-gate	membar	#Sync
3067c478bdstevel@tonic-gate	retl
3077c478bdstevel@tonic-gate	  wrpr	%g0, %o3, %pstate		! enable interrupts
3081e2e7a7huah	SET_SIZE(sfmmu_dtlb_ld_kva)
3107c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_getctx_pri)
3117c478bdstevel@tonic-gate	set	MMU_PCONTEXT, %o0
3127c478bdstevel@tonic-gate	retl
3137c478bdstevel@tonic-gate	  ldxa	[%o0]ASI_MMU_CTX, %o0
3147c478bdstevel@tonic-gate	SET_SIZE(sfmmu_getctx_pri)
3167c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_getctx_sec)
3177c478bdstevel@tonic-gate	set	MMU_SCONTEXT, %o0
3187c478bdstevel@tonic-gate	set	CTXREG_CTX_MASK, %o1
3197c478bdstevel@tonic-gate	ldxa	[%o0]ASI_MMU_CTX, %o0
3207c478bdstevel@tonic-gate	retl
3211e2e7a7huah	  and	%o0, %o1, %o0
3227c478bdstevel@tonic-gate	SET_SIZE(sfmmu_getctx_sec)
3247c478bdstevel@tonic-gate	/*
3257c478bdstevel@tonic-gate	 * Set the secondary context register for this process.
3261e2e7a7huah	 * %o0 = page_size | context number for this process.
3277c478bdstevel@tonic-gate	 */
3287c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_setctx_sec)
3297c478bdstevel@tonic-gate	/*
3307c478bdstevel@tonic-gate	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
3317c478bdstevel@tonic-gate	 * But we can also get called from C with interrupts enabled. So,
3321e2e7a7huah	 * we need to check first.
3337c478bdstevel@tonic-gate	 */
3357c478bdstevel@tonic-gate	/* If interrupts are not disabled, then disable them */
3367c478bdstevel@tonic-gate	rdpr	%pstate, %g1
3377c478bdstevel@tonic-gate	btst	PSTATE_IE, %g1
3387c478bdstevel@tonic-gate	bnz,a,pt %icc, 1f
3391e2e7a7huah	  wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
3427c478bdstevel@tonic-gate	mov	MMU_SCONTEXT, %o1
3447c478bdstevel@tonic-gate	sethi	%hi(FLUSH_ADDR), %o4
3457c478bdstevel@tonic-gate	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
3467c478bdstevel@tonic-gate	flush	%o4
3471426d65sm        sethi   %hi(shctx_on), %g3
3481426d65sm        ld      [%g3 + %lo(shctx_on)], %g3
3491426d65sm	brz     %g3, 2f
3501426d65sm	  nop
3511426d65sm	set	CTXREG_CTX_MASK, %o4
3521426d65sm	and	%o0,%o4,%o1
3531426d65sm	cmp	%o1, INVALID_CONTEXT
3541426d65sm	bne,pn %icc, 2f
3551426d65sm   	  mov     MMU_SHARED_CONTEXT, %o1
3561426d65sm        sethi   %hi(FLUSH_ADDR), %o4
3571426d65sm        stxa    %g0, [%o1]ASI_MMU_CTX           /* set 2nd context reg. */
3581426d65sm        flush   %o4
3601e2e7a7huah	/*
3611e2e7a7huah	 * if the routine was entered with intr enabled, then enable intr now.
3621e2e7a7huah	 * otherwise, keep intr disabled, return without enabing intr.
3631e2e7a7huah	 * %g1 - old intr state
3641e2e7a7huah	 */
3651426d65sm2:	btst	PSTATE_IE, %g1
3661426d65sm	bnz,a,pt %icc, 3f
3671e2e7a7huah	  wrpr	%g0, %g1, %pstate		/* enable interrupts */
3681426d65sm3:	retl
3691e2e7a7huah	  nop
3707c478bdstevel@tonic-gate	SET_SIZE(sfmmu_setctx_sec)
3727c478bdstevel@tonic-gate	/*
3737c478bdstevel@tonic-gate	 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
3747c478bdstevel@tonic-gate	 * returns the detection value in %o0.
37525cf1a3jl	 *
37625cf1a3jl	 * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
37725cf1a3jl	 *  - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
37825cf1a3jl	 *  - FJ OPL Olympus-C and later  (less than SPITFIRE_IMPL)
37925cf1a3jl	 *
3807c478bdstevel@tonic-gate	 */
3817c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_setup_4lp)
3827c478bdstevel@tonic-gate	GET_CPU_IMPL(%o0);
3837c478bdstevel@tonic-gate	cmp	%o0, CHEETAH_PLUS_IMPL
38425cf1a3jl	bge,pt	%icc, 4f
38525cf1a3jl	  mov	1, %o1
38625cf1a3jl	cmp	%o0, SPITFIRE_IMPL
38725cf1a3jl	bge,a,pn %icc, 3f
3887c478bdstevel@tonic-gate	  clr	%o1
3907c478bdstevel@tonic-gate	set	ktsb_phys, %o2
3917c478bdstevel@tonic-gate	st	%o1, [%o2]
39225cf1a3jl3:	retl
3937c478bdstevel@tonic-gate	mov	%o1, %o0
3947c478bdstevel@tonic-gate	SET_SIZE(sfmmu_setup_4lp)
3977c478bdstevel@tonic-gate	/*
3987c478bdstevel@tonic-gate	 * Called to load MMU registers and tsbmiss area
3997c478bdstevel@tonic-gate	 * for the active process.  This function should
4007c478bdstevel@tonic-gate	 * only be called from TL=0.
4017c478bdstevel@tonic-gate	 *
4027c478bdstevel@tonic-gate	 * %o0 - hat pointer
4031426d65sm	 *
4047c478bdstevel@tonic-gate	 */
4057c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_load_mmustate)
4071e2e7a7huah#ifdef DEBUG
4081426d65sm        PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
4091e2e7a7huah#endif /* DEBUG */
4111426d65sm        sethi   %hi(ksfmmup), %o3
4121426d65sm        ldx     [%o3 + %lo(ksfmmup)], %o3
4131426d65sm        cmp     %o3, %o0
4141426d65sm        be,pn   %xcc, 8f			! if kernel as, do nothing
4151426d65sm          nop
4161426d65sm        /*
4171426d65sm         * We need to set up the TSB base register, tsbmiss
4181426d65sm         * area, and load locked TTE(s) for the TSB.
4191426d65sm         */
4201426d65sm        ldx     [%o0 + SFMMU_TSB], %o1          ! %o1 = first tsbinfo
4211426d65sm        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second tsbinfo
42325cf1a3jl#ifdef UTSB_PHYS
4241426d65sm        /*
4251426d65sm         * UTSB_PHYS accesses user TSBs via physical addresses.  The first
4261426d65sm         * TSB is in the MMU I/D TSB Base registers.  The 2nd, 3rd and
4271426d65sm	 * 4th TSBs use designated ASI_SCRATCHPAD regs as pseudo TSB base regs.
42825cf1a3jl	 */
4301426d65sm        /* create/set first UTSBREG actually loaded into MMU_TSB  */
4311426d65sm        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = first utsbreg
4321426d65sm 	LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register
4341426d65sm        brz,a,pt  %g2, 2f
4351426d65sm          mov   -1, %o2                         ! use -1 if no second TSB
4371426d65sm        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = second utsbreg
4391426d65sm        SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)
4411426d65sm	/* make 3rd and 4th TSB */
4421426d65sm	CPU_TSBMISS_AREA(%o4, %o3) 		! %o4 = tsbmiss area
4441426d65sm        ldx     [%o0 + SFMMU_SCDP], %g2         ! %g2 = sfmmu_scd
4451426d65sm        brz,pt  %g2, 3f
4461426d65sm          mov   -1, %o2                         ! use -1 if no third TSB
4481426d65sm        ldx     [%g2 + SCD_SFMMUP], %g3         ! %g3 = scdp->scd_sfmmup
4491426d65sm        ldx     [%g3 + SFMMU_TSB], %o1          ! %o1 = first scd tsbinfo
4501426d65sm        brz,pn %o1, 5f
4511426d65sm          nop                                   ! panic if no third TSB
4531426d65sm	/* make 3rd UTSBREG */
4541426d65sm        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = third utsbreg
4561426d65sm        SET_UTSBREG(SCRATCHPAD_UTSBREG3, %o2, %o3)
4571426d65sm	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR]
4591426d65sm        brz,pt  %g2, 4f
4601426d65sm          mov   -1, %o2                         ! use -1 if no 3rd or 4th TSB
4621426d65sm        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second scd tsbinfo
4631426d65sm        brz,pt  %g2, 4f
4641426d65sm          mov   -1, %o2                         ! use -1 if no 4th TSB
4661426d65sm	/* make 4th UTSBREG */
4671426d65sm        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = fourth utsbreg
4691426d65sm        SET_UTSBREG(SCRATCHPAD_UTSBREG4, %o2, %o3)
4701426d65sm	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR4M]
4711426d65sm	ba,pt	%icc, 6f
4721426d65sm	  mov	%o4, %o2			! %o2 = tsbmiss area
4741426d65sm        sethi   %hi(panicstr), %g1              ! panic if no 3rd TSB
4751426d65sm        ldx     [%g1 + %lo(panicstr)], %g1
4761426d65sm        tst     %g1
4781426d65sm        bnz,pn  %xcc, 8f
4791426d65sm          nop
4811426d65sm        sethi   %hi(sfmmu_panic10), %o0
4821426d65sm        call    panic
4831426d65sm          or     %o0, %lo(sfmmu_panic10), %o0
4851426d65sm#else /* UTSBREG_PHYS */
4871426d65sm        brz,pt  %g2, 4f
4881426d65sm          nop
4891426d65sm        /*
4901426d65sm         * We have a second TSB for this process, so we need to
4911426d65sm         * encode data for both the first and second TSB in our single
4921426d65sm         * TSB base register.  See hat_sfmmu.h for details on what bits
4931426d65sm         * correspond to which TSB.
4941426d65sm         * We also need to load a locked TTE into the TLB for the second TSB
4951426d65sm         * in this case.
4961426d65sm         */
4971426d65sm        MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
4981426d65sm        ! %o2 = tsbreg
4991426d65sm        sethi   %hi(utsb4m_dtlb_ttenum), %o3
5001426d65sm        sethi   %hi(utsb4m_vabase), %o4
5011426d65sm        ld      [%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
5021426d65sm        ldx     [%o4 + %lo(utsb4m_vabase)], %o4 ! %o4 = TLB tag for sec TSB
5031426d65sm        sll     %o3, DTACC_SHIFT, %o3           ! %o3 = sec TSB TLB index
5041426d65sm        RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)       ! or-in bits of TSB VA
5051426d65sm        LOAD_TSBTTE(%g2, %o3, %o4, %g3)         ! load sec TSB locked TTE
5061426d65sm        sethi   %hi(utsb_vabase), %g3
5071426d65sm        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
5081426d65sm        ba,pt   %xcc, 5f
5091426d65sm          nop
5111426d65sm4:      sethi   %hi(utsb_vabase), %g3
5121426d65sm        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
5131426d65sm        MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)     ! %o2 = tsbreg
5151426d65sm5:      LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register
5171426d65sm        /*
5181426d65sm         * Load the TTE for the first TSB at the appropriate location in
5191426d65sm         * the TLB
5201426d65sm         */
5211426d65sm        sethi   %hi(utsb_dtlb_ttenum), %o2
5221426d65sm        ld      [%o2 + %lo(utsb_dtlb_ttenum)], %o2
5231426d65sm        sll     %o2, DTACC_SHIFT, %o2           ! %o1 = first TSB TLB index
5241426d65sm        RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)       ! or-in bits of TSB VA
5251426d65sm        LOAD_TSBTTE(%o1, %o2, %g3, %o4)         ! load first TSB locked TTE
5261426d65sm	CPU_TSBMISS_AREA(%o2, %o3)
5271426d65sm#endif /* UTSB_PHYS */
5291426d65sm	ldx     [%o0 + SFMMU_ISMBLKPA], %o1     ! copy members of sfmmu
5301426d65sm	              				! we need to access from
5311426d65sm        stx     %o1, [%o2 + TSBMISS_ISMBLKPA]   ! sfmmu_tsb_miss into the
5321426d65sm        ldub    [%o0 + SFMMU_TTEFLAGS], %o3     ! per-CPU tsbmiss area.
5331426d65sm        stx     %o0, [%o2 + TSBMISS_UHATID]
5341426d65sm        stub    %o3, [%o2 + TSBMISS_UTTEFLAGS]
5351426d65sm#ifdef UTSB_PHYS
5361426d65sm        ldx     [%o0 + SFMMU_SRDP], %o1
5371426d65sm        ldub    [%o0 + SFMMU_RTTEFLAGS], %o4
5381426d65sm        stub    %o4,  [%o2 + TSBMISS_URTTEFLAGS]
5391426d65sm        stx     %o1, [%o2 +  TSBMISS_SHARED_UHATID]
5401426d65sm        brz,pn  %o1, 8f				! check for sfmmu_srdp
5411426d65sm          add   %o0, SFMMU_HMERMAP, %o1
5421426d65sm        add     %o2, TSBMISS_SHMERMAP, %o2
5431426d65sm        mov     SFMMU_HMERGNMAP_WORDS, %o3
5441426d65sm                                                ! set tsbmiss shmermap
5451426d65sm        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)
5471426d65sm	ldx     [%o0 + SFMMU_SCDP], %o4         ! %o4 = sfmmu_scd
5481426d65sm        CPU_TSBMISS_AREA(%o2, %o3)              ! %o2 = tsbmiss area
5491426d65sm        mov     SFMMU_HMERGNMAP_WORDS, %o3
5501426d65sm        brnz,pt %o4, 7f                       ! check for sfmmu_scdp else
5511426d65sm          add   %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
5521426d65sm        ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
5531426d65sm	ba 8f
5541e2e7a7huah	  nop
5561426d65sm        add     %o4, SCD_HMERMAP, %o1
5571426d65sm        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
5581426d65sm#endif /* UTSB_PHYS */
5611426d65sm	retl
5621426d65sm          nop
5631426d65sm        SET_SIZE(sfmmu_load_mmustate)
/*
 * Invalidate all of the entries within the TSB, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact that the TSBs are page aligned and a
 * multiple of PAGESIZE to use ASI_BLK_INIT_xxx ASI.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */

#define	VIS_BLOCKSIZE	64

	ENTRY(sfmmu_inv_tsb_fast)

	! Get space for aligned block of saved fp regs.
	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

	! kpreempt_disable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	inc	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]

	! See if fpu was in use.  If it was, we need to save off the
	! floating point registers to the stack.
	rd	%fprs, %l0			! %l0 = cached copy of fprs
	btst	FPRS_FEF, %l0
	bz,pt	%icc, 4f
	  nop

	! save in-use fpregs on stack
	membar	#Sync				! make sure tranx to fp regs
						! have completed
	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs

	! enable fp
4:	membar	#StoreStore|#StoreLoad|#LoadStore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_P, %asi

	! load up FP registers with invalid TSB tag.
	fone	%d0			! ones in tag
	fzero	%d2			! zeros in TTE
	fone	%d4			! ones in tag
	fzero	%d6			! zeros in TTE
	fone	%d8			! ones in tag
	fzero	%d10			! zeros in TTE
	fone	%d12			! ones in tag
	fzero	%d14			! zeros in TTE
	ba,pt	%xcc, .sfmmu_inv_doblock
	  mov	(4*VIS_BLOCKSIZE), %i4	! we do 4 stda's each loop below

.sfmmu_inv_blkstart:
      ! stda	%d0, [%i0+192]%asi  ! in dly slot of branch that got us here
	stda	%d0, [%i0+128]%asi
	stda	%d0, [%i0+64]%asi
	stda	%d0, [%i0]%asi

	add	%i0, %i4, %i0
	sub	%i1, %i4, %i1

.sfmmu_inv_doblock:
	cmp	%i1, (4*VIS_BLOCKSIZE)	! check for completion
	bgeu,a	%icc, .sfmmu_inv_blkstart
	  stda	%d0, [%i0+192]%asi

	membar	#Sync
	btst	FPRS_FEF, %l0		! saved from above
	bz,a	.sfmmu_inv_finished
	  wr	%l0, 0, %fprs		! restore fprs

	! restore fpregs from stack
	ldda	[%l1]ASI_BLK_P, %d0
	membar	#Sync
	wr	%l0, 0, %fprs		! restore fprs

.sfmmu_inv_finished:
	! kpreempt_enable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	dec	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]
	ret
	  restore
	SET_SIZE(sfmmu_inv_tsb_fast)
/*
 * Prefetch "struct tsbe" while walking TSBs.
 * prefetch 7 cache lines ahead of where we are at now.
 * #n_reads is being used since #one_read only applies to
 * floating point reads, and we are not doing floating point
 * reads.  However, this has the negative side effect of polluting
 * the ecache.
 * The 448 comes from (7 * 64) which is how far ahead of our current
 * address, we want to prefetch.
 */
	ENTRY(prefetch_tsbe_read)
	retl
	  prefetch	[%o0+448], #n_reads
	SET_SIZE(prefetch_tsbe_read)
/* Prefetch the tsbe that we are about to write */
	ENTRY(prefetch_tsbe_write)
	retl
	  prefetch	[%o0], #n_writes
	SET_SIZE(prefetch_tsbe_write)