/*
 * CDDL HEADER START
37c478bdstevel@tonic-gate *
47c478bdstevel@tonic-gate * The contents of this file are subject to the terms of the
525cf1a3jl * Common Development and Distribution License (the "License").
625cf1a3jl * You may not use this file except in compliance with the License.
77c478bdstevel@tonic-gate *
87c478bdstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bdstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bdstevel@tonic-gate * See the License for the specific language governing permissions
117c478bdstevel@tonic-gate * and limitations under the License.
127c478bdstevel@tonic-gate *
137c478bdstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bdstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bdstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bdstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bdstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bdstevel@tonic-gate *
197c478bdstevel@tonic-gate * CDDL HEADER END
207c478bdstevel@tonic-gate */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
237c478bdstevel@tonic-gate * Use is subject to license terms.
2448bbca8Daniel Hoffman * Copyright (c) 2016 by Delphix. All rights reserved.
257c478bdstevel@tonic-gate */
/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */
327c478bdstevel@tonic-gate#include "assym.h"
347c478bdstevel@tonic-gate#include <sys/asm_linkage.h>
357c478bdstevel@tonic-gate#include <sys/machtrap.h>
367c478bdstevel@tonic-gate#include <sys/machasi.h>
377c478bdstevel@tonic-gate#include <sys/sun4asi.h>
387c478bdstevel@tonic-gate#include <sys/pte.h>
397c478bdstevel@tonic-gate#include <sys/mmu.h>
407c478bdstevel@tonic-gate#include <vm/hat_sfmmu.h>
417c478bdstevel@tonic-gate#include <vm/seg_spt.h>
427c478bdstevel@tonic-gate#include <sys/machparam.h>
437c478bdstevel@tonic-gate#include <sys/privregs.h>
447c478bdstevel@tonic-gate#include <sys/scb.h>
457c478bdstevel@tonic-gate#include <sys/intreg.h>
467c478bdstevel@tonic-gate#include <sys/machthread.h>
477c478bdstevel@tonic-gate#include <sys/intr.h>
487c478bdstevel@tonic-gate#include <sys/clock.h>
497c478bdstevel@tonic-gate#include <sys/trapstat.h>
#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined:
 * a ba to the trace routine plus a rd %pc into %g7 so the trace
 * code knows the return point.  Expands to nothing otherwise.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */
/*
 * Shift reg left by TTE_SUSPEND_SHIFT to line the suspend bit up with
 * its internal TTE position; expands to nothing when the shift is 0.
 */
#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif /* TTE_SUSPEND_SHIFT */
/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif
/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc 				;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * On success the tag word has been swapped to TSBTAG_LOCKED via casa
 * and we fall through; any failure (already locked, or the cas lost a
 * race) branches to label with the entry untouched.
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */
/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 *
 * The tte data is stored first, then a #StoreStore membar, then the
 * tag; the tag store both publishes the entry and releases the lock
 * taken by TSB_LOCK_ENTRY.
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget,tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */
/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * If the TTE read from ttepa is no longer valid (brgez: valid bit,
 * bit 63, is clear) the entry is unlocked with TSBTAG_INVALID and we
 * branch to label without inserting anything.
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * I don't need to update the TSB then check for the valid tte.	;\
	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
	TT_TRACE(trace_tsbhit)

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * I don't need to update the TSB then check for the valid tte.	;\
	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
	TT_TRACE(trace_tsbhit)

#endif /* UTSB_PHYS */
/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * I don't need to update the TSB then check for the valid tte.	;\
	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 * Or in 4M pfn offset to TTE and set the exec_perm bit to 0	;\
	 * and exec_synth bit to 1.					;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1		/* save the 4M pfn offset */	;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label		/* tte no longer valid? */	;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte		/* fold in 4M pfn offset */	;\
	andn	tte, TTE_EXECPRM_INT, tte	/* exec_perm = 0 */	;\
	or	tte, TTE_E_SYNTH_INT, tte	/* exec_synth = 1 */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
	TT_TRACE(trace_tsbhit)
/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */
#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */
#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits. 					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte
/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * If the TTE loaded from tteva is invalid (brgez: valid bit clear) the
 * entry is unlocked with TSBTAG_INVALID and we branch to label.
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
	TT_TRACE(trace_tsbhit)

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
	TT_TRACE(trace_tsbhit)

#endif /* UTSB_PHYS */
/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this  offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * Spins while the entry is locked, then cas's TSBTAG_INVALID into the
 * tag only if the full 64-bit tag still matches; retries from the top
 * if the cas loses a race.
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */
/*
 * Compile-time check: the software TSB size field must be wide enough
 * to hold every value the hardware TSB size mask can take.
 */
#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif
/*
 * An implementation of setx which will be hot patched at run time.
 * since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */
/*
 * Panic message strings referenced by the sfmmu assembly routines.
 */
	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	.global	sfmmu_panic11
sfmmu_panic11:
	.asciz	"sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"
/*
 * Disable interrupts and return the previous %pstate in %o0 so the
 * caller can later restore it via sfmmu_enable_intrs().  PSTATE_IE
 * is cleared in the delay slot of the retl.  The DEBUG build panics
 * if interrupts are already disabled on entry.
 */
        ENTRY(sfmmu_disable_intrs)
        rdpr    %pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
        retl
          wrpr   %o0, PSTATE_IE, %pstate
        SET_SIZE(sfmmu_disable_intrs)
51348bbca8Daniel Hoffman
/*
 * Restore the interrupt state saved by sfmmu_disable_intrs().
 * %o0 = %pstate value to write back (as returned by
 * sfmmu_disable_intrs); written in the delay slot of the retl.
 */
	ENTRY(sfmmu_enable_intrs)
        retl
          wrpr    %g0, %o0, %pstate
        SET_SIZE(sfmmu_enable_intrs)
/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
5211e2e7a7huah * allocate a new context for the process on a MMU.
5221e2e7a7huah * if allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID .
5231e2e7a7huah * if allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
5241e2e7a7huah * is the case when sfmmu_alloc_ctx is called from resume().
5251e2e7a7huah *
5261e2e7a7huah * The caller must disable interrupts before entering this routine.
5271e2e7a7huah * To reduce ctx switch overhead, the code contains both 'fast path' and
5281e2e7a7huah * 'slow path' code. The fast path code covers the common case where only
5291e2e7a7huah * a quick check is needed and the real ctx allocation is not required.
5301e2e7a7huah * It can be done without holding the per-process (PP) lock.
5311e2e7a7huah * The 'slow path' code must be protected by the PP Lock and performs ctx
5321e2e7a7huah * allocation.
5331e2e7a7huah * Hardware context register and HAT mmu cnum are updated accordingly.
5341e2e7a7huah *
5351e2e7a7huah * %o0 - sfmmup
5361e2e7a7huah * %o1 - allocflag
5371e2e7a7huah * %o2 - CPU
53805d3dc4paulsan * %o3 - sfmmu private/shared flag
53905d3dc4paulsan *
54005d3dc4paulsan * ret - 0: no ctx is allocated
54105d3dc4paulsan *       1: a ctx is allocated
5421e2e7a7huah */
5431e2e7a7huah        ENTRY_NP(sfmmu_alloc_ctx)
5451e2e7a7huah#ifdef DEBUG
54605d3dc4paulsan	sethi   %hi(ksfmmup), %g1
54705d3dc4paulsan	ldx     [%g1 + %lo(ksfmmup)], %g1
54805d3dc4paulsan	cmp     %g1, %o0
5491e2e7a7huah	bne,pt   %xcc, 0f
5501e2e7a7huah	  nop
5521e2e7a7huah	sethi   %hi(panicstr), %g1		! if kernel as, panic
5531e2e7a7huah        ldx     [%g1 + %lo(panicstr)], %g1
5541e2e7a7huah        tst     %g1
5551e2e7a7huah        bnz,pn  %icc, 7f
5561e2e7a7huah          nop
5581e2e7a7huah	sethi	%hi(sfmmu_panic7), %o0
5591e2e7a7huah	call	panic
5601e2e7a7huah	  or	%o0, %lo(sfmmu_panic7), %o0
5631e2e7a7huah	retl
56405d3dc4paulsan	  mov	%g0, %o0			! %o0 = ret = 0
5671e2e7a7huah	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
56805d3dc4paulsan#endif /* DEBUG */
56948bbca8Daniel Hoffman
57005d3dc4paulsan	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1
57148bbca8Daniel Hoffman
5721e2e7a7huah	! load global mmu_ctxp info
5731e2e7a7huah	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
574d2365b0Pavel Tatashin
575d2365b0Pavel Tatashin#ifdef sun4v
576d2365b0Pavel Tatashin	/* During suspend on sun4v, context domains can be temporary removed */
577d2365b0Pavel Tatashin	brz,a,pn       %o3, 0f
578d2365b0Pavel Tatashin	  nop
579d2365b0Pavel Tatashin#endif
580d2365b0Pavel Tatashin
5811e2e7a7huah        lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index
5831e2e7a7huah	! load global mmu_ctxp gnum
5841e2e7a7huah	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum
5861e2e7a7huah#ifdef DEBUG
5871e2e7a7huah	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
5881e2e7a7huah	bne,pt	%xcc, 3f
5891e2e7a7huah	  nop
59048bbca8Daniel Hoffman
5911e2e7a7huah	sethi   %hi(panicstr), %g1	! test if panicstr is already set
5921e2e7a7huah        ldx     [%g1 + %lo(panicstr)], %g1
5931e2e7a7huah        tst     %g1
59405d3dc4paulsan        bnz,pn  %icc, 1f
5951e2e7a7huah          nop
59648bbca8Daniel Hoffman
5971e2e7a7huah	sethi	%hi(sfmmu_panic8), %o0
5981e2e7a7huah	call	panic
5991e2e7a7huah	  or	%o0, %lo(sfmmu_panic8), %o0
60048bbca8Daniel Hoffman1:
60105d3dc4paulsan	retl
60205d3dc4paulsan	  mov	%g0, %o0			! %o0 = ret = 0
60348bbca8Daniel Hoffman3:
6061e2e7a7huah	! load HAT sfmmu_ctxs[mmuid] gnum, cnum
6081e2e7a7huah	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
6091e2e7a7huah	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
6111e2e7a7huah	/*
6121e2e7a7huah	 * %g5 = sfmmu gnum returned
6131e2e7a7huah	 * %g6 = sfmmu cnum returned
6141e2e7a7huah	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
6151e2e7a7huah	 * %g4 = scratch
6161e2e7a7huah	 *
6171e2e7a7huah	 * Fast path code, do a quick check.
6181e2e7a7huah	 */
6191e2e7a7huah	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
62048bbca8Daniel Hoffman
6211e2e7a7huah	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
6221e2e7a7huah	bne,pt	%icc, 1f			! valid hat cnum, check gnum
6231e2e7a7huah	  nop
6251e2e7a7huah	! cnum == INVALID, check allocflag
62605d3dc4paulsan	mov	%g0, %g4	! %g4 = ret = 0
6271e2e7a7huah	brz,pt  %o1, 8f		! allocflag == 0, skip ctx allocation, bail
6281e2e7a7huah	  mov	%g6, %o1
6301e2e7a7huah	! (invalid HAT cnum) && (allocflag == 1)
6311e2e7a7huah	ba,pt	%icc, 2f
6321e2e7a7huah	  nop
633d2365b0Pavel Tatashin#ifdef sun4v
634d2365b0Pavel Tatashin0:
635d2365b0Pavel Tatashin	set	INVALID_CONTEXT, %o1
636d2365b0Pavel Tatashin	membar	#LoadStore|#StoreStore
637d2365b0Pavel Tatashin	ba,pt	%icc, 8f
638d2365b0Pavel Tatashin	  mov   %g0, %g4                ! %g4 = ret = 0
639d2365b0Pavel Tatashin#endif
6411e2e7a7huah	! valid HAT cnum, check gnum
6421e2e7a7huah	cmp	%g5, %o4
64305d3dc4paulsan	mov	1, %g4				!%g4 = ret = 1
6441e2e7a7huah	be,a,pt	%icc, 8f			! gnum unchanged, go to done
6451e2e7a7huah	  mov	%g6, %o1
64848bbca8Daniel Hoffman	/*
6491e2e7a7huah	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
6501e2e7a7huah	 * followed by the 'slow path' code.
6511e2e7a7huah	 */
6521e2e7a7huah	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
6541e2e7a7huah	brz	%g3, 5f
6551e2e7a7huah	  nop
6571e2e7a7huah	brnz,a,pt       %g3, 4b				! spin if lock is 1
6581e2e7a7huah	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
6591e2e7a7huah	ba	%xcc, 3b				! retry the lock
6601e2e7a7huah	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3    ! %g3 = PP lock
6631e2e7a7huah	membar  #LoadLoad
6641e2e7a7huah	/*
6651e2e7a7huah	 * %g5 = sfmmu gnum returned
6661e2e7a7huah	 * %g6 = sfmmu cnum returned
6671e2e7a7huah	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
6681e2e7a7huah	 * %g4 = scratch
6691e2e7a7huah	 */
6701e2e7a7huah	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
6721e2e7a7huah	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
6731e2e7a7huah	bne,pt	%icc, 1f			! valid hat cnum, check gnum
6741e2e7a7huah	  nop
6761e2e7a7huah	! cnum == INVALID, check allocflag
67705d3dc4paulsan	mov	%g0, %g4	! %g4 = ret = 0
6781e2e7a7huah	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
6791e2e7a7huah	  mov	%g6, %o1
6811e2e7a7huah	! (invalid HAT cnum) && (allocflag == 1)
6821e2e7a7huah	ba,pt	%icc, 6f
6831e2e7a7huah	  nop
6851e2e7a7huah	! valid HAT cnum, check gnum
6861e2e7a7huah	cmp	%g5, %o4
68705d3dc4paulsan	mov	1, %g4				! %g4 = ret  = 1
6881e2e7a7huah	be,a,pt	%icc, 2f			! gnum unchanged, go to done
6891e2e7a7huah	  mov	%g6, %o1
6911e2e7a7huah	ba,pt	%icc, 6f
6921e2e7a7huah	  nop
6941e2e7a7huah	membar  #LoadStore|#StoreStore
6951e2e7a7huah	ba,pt %icc, 8f
6961e2e7a7huah	  clrb  [%o0 + SFMMU_CTX_LOCK]
6981e2e7a7huah	/*
6991e2e7a7huah	 * We get here if we do not have a valid context, or
7001e2e7a7huah	 * the HAT gnum does not match global gnum. We hold
7011e2e7a7huah	 * sfmmu_ctx_lock spinlock. Allocate that context.
7021e2e7a7huah	 *
7031e2e7a7huah	 * %o3 = mmu_ctxp
7041e2e7a7huah	 */
7051e2e7a7huah	add	%o3, MMU_CTX_CNUM, %g3
7061e2e7a7huah	ld	[%o3 + MMU_CTX_NCTXS], %g4
7081e2e7a7huah	/*
7091e2e7a7huah         * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
7101e2e7a7huah         * %g3 = mmu cnum address
7111e2e7a7huah	 * %g4 = mmu nctxs
7121e2e7a7huah	 *
7131e2e7a7huah	 * %o0 = sfmmup
7141e2e7a7huah	 * %o1 = mmu current cnum value (used as new cnum)
7151e2e7a7huah	 * %o4 = mmu gnum
7161e2e7a7huah	 *
7171e2e7a7huah	 * %o5 = scratch
7181e2e7a7huah	 */
7191e2e7a7huah	ld	[%g3], %o1
7211e2e7a7huah	cmp	%o1, %g4
7221e2e7a7huah	bl,a,pt %icc, 1f
7231e2e7a7huah	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1
7251e2e7a7huah	/*
72605d3dc4paulsan	 * cnum reaches max, bail, so wrap around can be performed later.
7271e2e7a7huah	 */
7281e2e7a7huah	set	INVALID_CONTEXT, %o1
72905d3dc4paulsan	mov	%g0, %g4		! %g4 = ret = 0
7311e2e7a7huah	membar  #LoadStore|#StoreStore
7321e2e7a7huah	ba,pt	%icc, 8f
7331e2e7a7huah	  clrb	[%o0 + SFMMU_CTX_LOCK]
7351e2e7a7huah	! %g3 = addr of mmu_ctxp->cnum
7361e2e7a7huah	! %o5 = mmu_ctxp->cnum + 1
7371e2e7a7huah	cas	[%g3], %o1, %o5
7381e2e7a7huah	cmp	%o1, %o5
7391e2e7a7huah	bne,a,pn %xcc, 0b	! cas failed
7401e2e7a7huah	  ld	[%g3], %o1
7421e2e7a7huah#ifdef DEBUG
7431e2e7a7huah        set	MAX_SFMMU_CTX_VAL, %o5
7441e2e7a7huah	cmp	%o1, %o5
7451e2e7a7huah	ble,pt %icc, 2f
7461e2e7a7huah	  nop
74748bbca8Daniel Hoffman
7481e2e7a7huah	sethi	%hi(sfmmu_panic9), %o0
7491e2e7a7huah	call	panic
7501e2e7a7huah	  or	%o0, %lo(sfmmu_panic9), %o0
75148bbca8Daniel Hoffman2:
7531e2e7a7huah	! update hat gnum and cnum
7541e2e7a7huah	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
7551e2e7a7huah	or	%o4, %o1, %o4
7561e2e7a7huah	stx	%o4, [%g2 + SFMMU_CTXS]
7581e2e7a7huah	membar  #LoadStore|#StoreStore
7591e2e7a7huah	clrb	[%o0 + SFMMU_CTX_LOCK]
76048bbca8Daniel Hoffman
76105d3dc4paulsan	mov	1, %g4			! %g4 = ret = 1
7631e2e7a7huah	/*
7641e2e7a7huah	 * program the secondary context register
7651e2e7a7huah	 *
7661e2e7a7huah	 * %o1 = cnum
76705d3dc4paulsan	 * %g1 = sfmmu private/shared flag (0:private,  1:shared)
7681e2e7a7huah	 */
76948bbca8Daniel Hoffman
770f0856d0sm	/*
77148bbca8Daniel Hoffman	 * When we come here and context is invalid, we want to set both
772f0856d0sm	 * private and shared ctx regs to INVALID. In order to
773f0856d0sm	 * do so, we set the sfmmu priv/shared flag to 'private' regardless
774f0856d0sm	 * so that private ctx reg will be set to invalid.
775f0856d0sm	 * Note that on sun4v values written to private context register are
77648bbca8Daniel Hoffman	 * automatically written to corresponding shared context register as
777f0856d0sm	 * well. On sun4u SET_SECCTX() will invalidate shared context register
778f0856d0sm	 * when it sets a private secondary context register.
779f0856d0sm	 */
78048bbca8Daniel Hoffman
781f0856d0sm	cmp	%o1, INVALID_CONTEXT
782f0856d0sm	be,a,pn	%icc, 9f
783f0856d0sm	  clr	%g1
7861e2e7a7huah#ifdef	sun4u
7871e2e7a7huah	ldub	[%o0 + SFMMU_CEXT], %o2
7881e2e7a7huah	sll	%o2, CTXREG_EXT_SHIFT, %o2
7891e2e7a7huah	or	%o1, %o2, %o1
7901426d65sm#endif /* sun4u */
7921426d65sm	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)
7941426d65sm        retl
7951426d65sm          mov   %g4, %o0                        ! %o0 = ret
7971e2e7a7huah	SET_SIZE(sfmmu_alloc_ctx)
7997c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_modifytte)
8007c478bdstevel@tonic-gate	ldx	[%o2], %g3			/* current */
8017c478bdstevel@tonic-gate	ldx	[%o0], %g1			/* original */
8037c478bdstevel@tonic-gate	ldx	[%o1], %g2			/* modified */
8047c478bdstevel@tonic-gate	cmp	%g2, %g3			/* is modified = current? */
8057c478bdstevel@tonic-gate	be,a,pt	%xcc,1f				/* yes, don't write */
8067c478bdstevel@tonic-gate	stx	%g3, [%o0]			/* update new original */
8077c478bdstevel@tonic-gate	casx	[%o2], %g1, %g2
8087c478bdstevel@tonic-gate	cmp	%g1, %g2
8097c478bdstevel@tonic-gate	be,pt	%xcc, 1f			/* cas succeeded - return */
8107c478bdstevel@tonic-gate	  nop
8117c478bdstevel@tonic-gate	ldx	[%o2], %g3			/* new current */
8127c478bdstevel@tonic-gate	stx	%g3, [%o0]			/* save as new original */
8137c478bdstevel@tonic-gate	ba,pt	%xcc, 2b
8147c478bdstevel@tonic-gate	  mov	%g3, %g1
8157c478bdstevel@tonic-gate1:	retl
8167c478bdstevel@tonic-gate	membar	#StoreLoad
8177c478bdstevel@tonic-gate	SET_SIZE(sfmmu_modifytte)
8197c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_modifytte_try)
8207c478bdstevel@tonic-gate	ldx	[%o1], %g2			/* modified */
8217c478bdstevel@tonic-gate	ldx	[%o2], %g3			/* current */
8227c478bdstevel@tonic-gate	ldx	[%o0], %g1			/* original */
8237c478bdstevel@tonic-gate	cmp	%g3, %g2			/* is modified = current? */
8247c478bdstevel@tonic-gate	be,a,pn %xcc,1f				/* yes, don't write */
8257c478bdstevel@tonic-gate	mov	0, %o1				/* as if cas failed. */
82648bbca8Daniel Hoffman
8277c478bdstevel@tonic-gate	casx	[%o2], %g1, %g2
8287c478bdstevel@tonic-gate	membar	#StoreLoad
8297c478bdstevel@tonic-gate	cmp	%g1, %g2
8307c478bdstevel@tonic-gate	movne	%xcc, -1, %o1			/* cas failed. */
8317c478bdstevel@tonic-gate	move	%xcc, 1, %o1			/* cas succeeded. */
8337c478bdstevel@tonic-gate	stx	%g2, [%o0]			/* report "current" value */
8347c478bdstevel@tonic-gate	retl
8357c478bdstevel@tonic-gate	mov	%o1, %o0
8367c478bdstevel@tonic-gate	SET_SIZE(sfmmu_modifytte_try)
	/*
	 * sfmmu_copytte(srcttep, dstttep)
	 * Copy a 64-bit TTE with a single ldx/stx pair.
	 * %o0 = source TTE pointer, %o1 = destination TTE pointer.
	 * The stx executes in the retl delay slot.
	 */
8387c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_copytte)
8397c478bdstevel@tonic-gate	ldx	[%o0], %g1
8407c478bdstevel@tonic-gate	retl
8417c478bdstevel@tonic-gate	stx	%g1, [%o1]
8427c478bdstevel@tonic-gate	SET_SIZE(sfmmu_copytte)
8457c478bdstevel@tonic-gate	/*
8467c478bdstevel@tonic-gate	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
8477c478bdstevel@tonic-gate	 * %o0 = TSB base address (in), pointer to TSB entry (out)
8487c478bdstevel@tonic-gate	 * %o1 = vaddr (in)
8497c478bdstevel@tonic-gate	 * %o2 = vpshift (in)
8507c478bdstevel@tonic-gate	 * %o3 = tsb size code (in)
8517c478bdstevel@tonic-gate	 * %o4 = scratch register
8527c478bdstevel@tonic-gate	 */
8537c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_get_tsbe)
	/* all work is done by the macro; %o0 is rewritten in place */
8547c478bdstevel@tonic-gate	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
8557c478bdstevel@tonic-gate	retl
8567c478bdstevel@tonic-gate	nop
8577c478bdstevel@tonic-gate	SET_SIZE(sfmmu_get_tsbe)
8597c478bdstevel@tonic-gate	/*
8607c478bdstevel@tonic-gate	 * Return a TSB tag for the given va.
8617c478bdstevel@tonic-gate	 * %o0 = va (in/clobbered)
8627c478bdstevel@tonic-gate	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
8637c478bdstevel@tonic-gate	 */
8647c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_make_tsbtag)
8657c478bdstevel@tonic-gate	retl
	/* the shift executes in the retl delay slot */
8667c478bdstevel@tonic-gate	srln	%o0, TTARGET_VA_SHIFT, %o0
8677c478bdstevel@tonic-gate	SET_SIZE(sfmmu_make_tsbtag)
8707c478bdstevel@tonic-gate * Other sfmmu primitives
8717c478bdstevel@tonic-gate */
8747c478bdstevel@tonic-gate#define	I_SIZE		4
87548bbca8Daniel Hoffman
	/*
	 * Hot-patch a consecutive sll/srl instruction pair in a kernel
	 * TLB miss handler: subtract the TSB size code from each shift
	 * immediate, flushing each patched word so the I-stream stays
	 * coherent with the modified text.
	 */
8767c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_fix_ktlb_traptable)
8777c478bdstevel@tonic-gate	/*
8787c478bdstevel@tonic-gate	 * %o0 = start of patch area
8797c478bdstevel@tonic-gate	 * %o1 = size code of TSB to patch
8807c478bdstevel@tonic-gate	 * %o3 = scratch
8817c478bdstevel@tonic-gate	 */
8827c478bdstevel@tonic-gate	/* fix sll */
8837c478bdstevel@tonic-gate	ld	[%o0], %o3			/* get sll */
8847c478bdstevel@tonic-gate	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
8857c478bdstevel@tonic-gate	st	%o3, [%o0]			/* write sll */
8867c478bdstevel@tonic-gate	flush	%o0
8877c478bdstevel@tonic-gate	/* fix srl */
8887c478bdstevel@tonic-gate	add	%o0, I_SIZE, %o0		/* goto next instr. */
8897c478bdstevel@tonic-gate	ld	[%o0], %o3			/* get srl */
8907c478bdstevel@tonic-gate	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
8917c478bdstevel@tonic-gate	st	%o3, [%o0]			/* write srl */
8927c478bdstevel@tonic-gate	retl
8937c478bdstevel@tonic-gate	flush	%o0
8947c478bdstevel@tonic-gate	SET_SIZE(sfmmu_fix_ktlb_traptable)
	/*
	 * Hot-patch a sethi/load pair with the kernel TSB base address:
	 * address bits 32:10 are OR-ed into the sethi imm22 field and
	 * bits 9:0 into the following load's imm13 field (both fields
	 * must currently be zero, since the new bits are OR-ed in).
	 */
8967c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_ktsbbase)
8977c478bdstevel@tonic-gate	/*
8987c478bdstevel@tonic-gate	 * %o0 = start of patch area
8997c478bdstevel@tonic-gate	 * %o5 = kernel virtual or physical tsb base address
9007c478bdstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
9017c478bdstevel@tonic-gate	 */
9027c478bdstevel@tonic-gate	/* fixup sethi instruction */
9037c478bdstevel@tonic-gate	ld	[%o0], %o3
9047c478bdstevel@tonic-gate	srl	%o5, 10, %o2			! offset is bits 32:10
9057c478bdstevel@tonic-gate	or	%o3, %o2, %o3			! set imm22
9067c478bdstevel@tonic-gate	st	%o3, [%o0]
9077c478bdstevel@tonic-gate	/* fixup offset of lduw/ldx */
9087c478bdstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
9097c478bdstevel@tonic-gate	ld	[%o0], %o3
9107c478bdstevel@tonic-gate	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
9117c478bdstevel@tonic-gate	or	%o3, %o2, %o3
9127c478bdstevel@tonic-gate	st	%o3, [%o0]
9137c478bdstevel@tonic-gate	retl
9147c478bdstevel@tonic-gate	flush	%o0
9157c478bdstevel@tonic-gate	SET_SIZE(sfmmu_fixup_ktsbbase)
	/*
	 * Hot-patch the 7-instruction setx sequence shown below with the
	 * 64-bit value in %o4.  Each target imm field must currently
	 * encode RUNTIME_PATCH (0) because the new bits are OR-ed in.
	 */
9177c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_setx)
9187c478bdstevel@tonic-gate	/*
9197c478bdstevel@tonic-gate	 * %o0 = start of patch area
9207c478bdstevel@tonic-gate	 * %o4 = 64 bit value to patch
9217c478bdstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
9227c478bdstevel@tonic-gate	 *
9237c478bdstevel@tonic-gate	 * Note: Assuming that all parts of the instructions which need to be
9247c478bdstevel@tonic-gate	 *	 patched correspond to RUNTIME_PATCH (aka 0)
9257c478bdstevel@tonic-gate	 *
9267c478bdstevel@tonic-gate	 * Note the implementation of setx which is being patched is as follows:
9277c478bdstevel@tonic-gate	 *
9287c478bdstevel@tonic-gate	 * sethi   %hh(RUNTIME_PATCH), tmp
9297c478bdstevel@tonic-gate	 * sethi   %lm(RUNTIME_PATCH), dest
9307c478bdstevel@tonic-gate	 * or      tmp, %hm(RUNTIME_PATCH), tmp
9317c478bdstevel@tonic-gate	 * or      dest, %lo(RUNTIME_PATCH), dest
9327c478bdstevel@tonic-gate	 * sllx    tmp, 32, tmp
9337c478bdstevel@tonic-gate	 * nop
9347c478bdstevel@tonic-gate	 * or      tmp, dest, dest
9357c478bdstevel@tonic-gate	 *
93648bbca8Daniel Hoffman	 * which differs from the implementation in the
9377c478bdstevel@tonic-gate	 * "SPARC Architecture Manual"
9387c478bdstevel@tonic-gate	 */
9397c478bdstevel@tonic-gate	/* fixup sethi instruction */
9407c478bdstevel@tonic-gate	ld	[%o0], %o3
9417c478bdstevel@tonic-gate	srlx	%o4, 42, %o2			! bits [63:42]
9427c478bdstevel@tonic-gate	or	%o3, %o2, %o3			! set imm22
9437c478bdstevel@tonic-gate	st	%o3, [%o0]
9447c478bdstevel@tonic-gate	/* fixup sethi instruction */
9457c478bdstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
9467c478bdstevel@tonic-gate	ld	[%o0], %o3
9477c478bdstevel@tonic-gate	sllx	%o4, 32, %o2			! clear upper bits
9487c478bdstevel@tonic-gate	srlx	%o2, 42, %o2			! bits [31:10]
9497c478bdstevel@tonic-gate	or	%o3, %o2, %o3			! set imm22
9507c478bdstevel@tonic-gate	st	%o3, [%o0]
9517c478bdstevel@tonic-gate	/* fixup or instruction */
9527c478bdstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
9537c478bdstevel@tonic-gate	ld	[%o0], %o3
9547c478bdstevel@tonic-gate	srlx	%o4, 32, %o2			! bits [63:32]
9557c478bdstevel@tonic-gate	and	%o2, 0x3ff, %o2			! bits [41:32]
9567c478bdstevel@tonic-gate	or	%o3, %o2, %o3			! set imm
9577c478bdstevel@tonic-gate	st	%o3, [%o0]
9587c478bdstevel@tonic-gate	/* fixup or instruction */
9597c478bdstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
9607c478bdstevel@tonic-gate	ld	[%o0], %o3
9617c478bdstevel@tonic-gate	and	%o4, 0x3ff, %o2			! bits [9:0]
9627c478bdstevel@tonic-gate	or	%o3, %o2, %o3			! set imm
9637c478bdstevel@tonic-gate	st	%o3, [%o0]
9647c478bdstevel@tonic-gate	retl
9657c478bdstevel@tonic-gate	flush	%o0
9667c478bdstevel@tonic-gate	SET_SIZE(sfmmu_fixup_setx)
	/*
	 * OR the low 10 bits of %o4 into the imm field of the single
	 * instruction at %o0, then flush the patched word.
	 */
9687c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_or)
9697c478bdstevel@tonic-gate	/*
9707c478bdstevel@tonic-gate	 * %o0 = start of patch area
9717c478bdstevel@tonic-gate	 * %o4 = 32 bit value to patch
9727c478bdstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
9737c478bdstevel@tonic-gate	 * Note: Assuming that all parts of the instructions which need to be
9747c478bdstevel@tonic-gate	 *	 patched correspond to RUNTIME_PATCH (aka 0)
9757c478bdstevel@tonic-gate	 */
9767c478bdstevel@tonic-gate	ld	[%o0], %o3
9777c478bdstevel@tonic-gate	and	%o4, 0x3ff, %o2			! bits [9:0]
9787c478bdstevel@tonic-gate	or	%o3, %o2, %o3			! set imm
9797c478bdstevel@tonic-gate	st	%o3, [%o0]
9807c478bdstevel@tonic-gate	retl
9817c478bdstevel@tonic-gate	flush	%o0
9827c478bdstevel@tonic-gate	SET_SIZE(sfmmu_fixup_or)
	/*
	 * Adjust the 6-bit shift immediate of the sllx/srlx at %o0 by
	 * the signed delta in %o4, then flush the patched word.
	 */
9847c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_shiftx)
9857c478bdstevel@tonic-gate	/*
9867c478bdstevel@tonic-gate	 * %o0 = start of patch area
9877c478bdstevel@tonic-gate	 * %o4 = signed int immediate value to add to sllx/srlx imm field
9887c478bdstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
9897c478bdstevel@tonic-gate	 *
9907c478bdstevel@tonic-gate	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
9917c478bdstevel@tonic-gate	 * so we do a simple add.  The caller must be careful to prevent
9927c478bdstevel@tonic-gate	 * overflow, which could easily occur if the initial value is nonzero!
9937c478bdstevel@tonic-gate	 */
9947c478bdstevel@tonic-gate	ld	[%o0], %o3			! %o3 = instruction to patch
9957c478bdstevel@tonic-gate	and	%o3, 0x3f, %o2			! %o2 = existing imm value
9967c478bdstevel@tonic-gate	add	%o2, %o4, %o2			! %o2 = new imm value
9977c478bdstevel@tonic-gate	andn	%o3, 0x3f, %o3			! clear old imm value
9987c478bdstevel@tonic-gate	and	%o2, 0x3f, %o2			! truncate new imm value
9997c478bdstevel@tonic-gate	or	%o3, %o2, %o3			! set new imm value
10007c478bdstevel@tonic-gate	st	%o3, [%o0]			! store updated instruction
10017c478bdstevel@tonic-gate	retl
10027c478bdstevel@tonic-gate	flush	%o0
10037c478bdstevel@tonic-gate	SET_SIZE(sfmmu_fixup_shiftx)
10057c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_mmu_asi)
10067c478bdstevel@tonic-gate	/*
10077c478bdstevel@tonic-gate	 * Patch imm_asi of all ldda instructions in the MMU
10087c478bdstevel@tonic-gate	 * trap handlers.  We search MMU_PATCH_INSTR instructions
10097c478bdstevel@tonic-gate	 * starting from the itlb miss handler (trap 0x64).
10107c478bdstevel@tonic-gate	 * %o0 = address of tt[0,1]_itlbmiss
10117c478bdstevel@tonic-gate	 * %o1 = imm_asi to setup, shifted by appropriate offset.
10127c478bdstevel@tonic-gate	 * %o3 = number of instructions to search
10137c478bdstevel@tonic-gate	 * %o4 = reserved by caller: called from leaf routine
10147c478bdstevel@tonic-gate	 */
	/* candidate must have op = 0b11 (bits 31:30 set) and op3 = 0x13 (ldda) */
10157c478bdstevel@tonic-gate1:	ldsw	[%o0], %o2			! load instruction to %o2
10167c478bdstevel@tonic-gate	brgez,pt %o2, 2f
10177c478bdstevel@tonic-gate	  srl	%o2, 30, %o5
10187c478bdstevel@tonic-gate	btst	1, %o5				! test bit 30; skip if not set
10197c478bdstevel@tonic-gate	bz,pt	%icc, 2f
10207c478bdstevel@tonic-gate	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
10217c478bdstevel@tonic-gate	srlx	%o5, 58, %o5			! isolate op3 part of opcode
10227c478bdstevel@tonic-gate	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
10237c478bdstevel@tonic-gate	brnz,pt	%o5, 2f				! skip if not a match
10247c478bdstevel@tonic-gate	  or	%o2, %o1, %o2			! or in imm_asi
10257c478bdstevel@tonic-gate	st	%o2, [%o0]			! write patched instruction
10267c478bdstevel@tonic-gate2:	dec	%o3
10277c478bdstevel@tonic-gate	brnz,a,pt %o3, 1b			! loop until we're done
10287c478bdstevel@tonic-gate	  add	%o0, I_SIZE, %o0
10297c478bdstevel@tonic-gate	retl
10307c478bdstevel@tonic-gate	flush	%o0
10317c478bdstevel@tonic-gate	SET_SIZE(sfmmu_fixup_mmu_asi)
10337c478bdstevel@tonic-gate	/*
10347c478bdstevel@tonic-gate	 * Patch immediate ASI used to access the TSB in the
10357c478bdstevel@tonic-gate	 * trap table.
10367c478bdstevel@tonic-gate	 * inputs: %o0 = value of ktsb_phys
10377c478bdstevel@tonic-gate	 */
10387c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_patch_mmu_asi)
10397c478bdstevel@tonic-gate	mov	%o7, %o4			! save return pc in %o4
1040125be06Jason Beloro	mov	ASI_QUAD_LDD_PHYS, %o3		! set QUAD_LDD_PHYS by default
1041125be06Jason Beloro
1042125be06Jason Beloro#ifdef sun4v
1043125be06Jason Beloro
1044125be06Jason Beloro	/*
1045125be06Jason Beloro	 * Check ktsb_phys. It must be non-zero for sun4v, panic if not.
1046125be06Jason Beloro	 */
1047125be06Jason Beloro
1048125be06Jason Beloro	brnz,pt %o0, do_patch
1049125be06Jason Beloro	nop
1050125be06Jason Beloro
1051125be06Jason Beloro	sethi	%hi(sfmmu_panic11), %o0
1052125be06Jason Beloro	call	panic
1053125be06Jason Beloro	  or	%o0, %lo(sfmmu_panic11), %o0
1054125be06Jason Belorodo_patch:
1055125be06Jason Beloro
1056125be06Jason Beloro#else /* sun4v */
1057125be06Jason Beloro	/*
1058125be06Jason Beloro	 * Some non-sun4v platforms deploy virtual ktsb (ktsb_phys==0).
1059125be06Jason Beloro	 * Note that ASI_NQUAD_LD is not defined/used for sun4v
1060125be06Jason Beloro	 */
10617c478bdstevel@tonic-gate	movrz	%o0, ASI_NQUAD_LD, %o3
1062125be06Jason Beloro
1063125be06Jason Beloro#endif /* sun4v */
1064125be06Jason Beloro
	! position the ASI value at the ldda imm_asi field offset (<< 5)
10657c478bdstevel@tonic-gate	sll	%o3, 5, %o1			! imm_asi offset
10667c478bdstevel@tonic-gate	mov	6, %o3				! number of instructions
10677c478bdstevel@tonic-gate	sethi	%hi(dktsb), %o0			! to search
10687c478bdstevel@tonic-gate	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
10697c478bdstevel@tonic-gate	  or	%o0, %lo(dktsb), %o0
10707c478bdstevel@tonic-gate	mov	6, %o3				! number of instructions
10717c478bdstevel@tonic-gate	sethi	%hi(dktsb4m), %o0		! to search
10727c478bdstevel@tonic-gate	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
10737c478bdstevel@tonic-gate	  or	%o0, %lo(dktsb4m), %o0
10747c478bdstevel@tonic-gate	mov	6, %o3				! number of instructions
10757c478bdstevel@tonic-gate	sethi	%hi(iktsb), %o0			! to search
10767c478bdstevel@tonic-gate	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
10777c478bdstevel@tonic-gate	  or	%o0, %lo(iktsb), %o0
107805d3dc4paulsan	mov	6, %o3				! number of instructions
107905d3dc4paulsan	sethi	%hi(iktsb4m), %o0		! to search
108005d3dc4paulsan	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
108105d3dc4paulsan	  or	%o0, %lo(iktsb4m), %o0
10827c478bdstevel@tonic-gate	mov	%o4, %o7			! restore return pc -- leaf
10837c478bdstevel@tonic-gate	retl
10847c478bdstevel@tonic-gate	nop
10857c478bdstevel@tonic-gate	SET_SIZE(sfmmu_patch_mmu_asi)
1087125be06Jason Beloro
10887c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_patch_ktsb)
10897c478bdstevel@tonic-gate	/*
10907c478bdstevel@tonic-gate	 * We need to fix iktsb, dktsb, et. al.
10917c478bdstevel@tonic-gate	 */
10927c478bdstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp
10937c478bdstevel@tonic-gate	set	ktsb_phys, %o1
10947c478bdstevel@tonic-gate	ld	[%o1], %o4
10957c478bdstevel@tonic-gate	set	ktsb_base, %o5
10967c478bdstevel@tonic-gate	set	ktsb4m_base, %l1
10977c478bdstevel@tonic-gate	brz,pt	%o4, 1f
10987c478bdstevel@tonic-gate	  nop
10997c478bdstevel@tonic-gate	set	ktsb_pbase, %o5
11007c478bdstevel@tonic-gate	set	ktsb4m_pbase, %l1
11027c478bdstevel@tonic-gate	sethi	%hi(ktsb_szcode), %o1
11037c478bdstevel@tonic-gate	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */
11057c478bdstevel@tonic-gate	sethi	%hi(iktsb), %o0
11067c478bdstevel@tonic-gate	call	sfmmu_fix_ktlb_traptable
11077c478bdstevel@tonic-gate	  or	%o0, %lo(iktsb), %o0
11097c478bdstevel@tonic-gate	sethi	%hi(dktsb), %o0
11107c478bdstevel@tonic-gate	call	sfmmu_fix_ktlb_traptable
11117c478bdstevel@tonic-gate	  or	%o0, %lo(dktsb), %o0
11137c478bdstevel@tonic-gate	sethi	%hi(ktsb4m_szcode), %o1
11147c478bdstevel@tonic-gate	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */
111605d3dc4paulsan	sethi	%hi(iktsb4m), %o0
111705d3dc4paulsan	call	sfmmu_fix_ktlb_traptable
111805d3dc4paulsan	  or	%o0, %lo(iktsb4m), %o0
111948bbca8Daniel Hoffman
11207c478bdstevel@tonic-gate	sethi	%hi(dktsb4m), %o0
11217c478bdstevel@tonic-gate	call	sfmmu_fix_ktlb_traptable
11227c478bdstevel@tonic-gate	  or	%o0, %lo(dktsb4m), %o0
11247c478bdstevel@tonic-gate#ifndef sun4v
11257c478bdstevel@tonic-gate	mov	ASI_N, %o2
11267c478bdstevel@tonic-gate	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
11277c478bdstevel@tonic-gate	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
11287c478bdstevel@tonic-gate	sethi	%hi(tsb_kernel_patch_asi), %o0
11297c478bdstevel@tonic-gate	call	sfmmu_fixup_or
11307c478bdstevel@tonic-gate	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
11311426d65sm#endif /* !sun4v */
11337c478bdstevel@tonic-gate	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)
11357c478bdstevel@tonic-gate	sethi	%hi(dktsbbase), %o0
11367c478bdstevel@tonic-gate	call	sfmmu_fixup_setx	! patch value of ktsb base addr
11377c478bdstevel@tonic-gate	  or	%o0, %lo(dktsbbase), %o0
11397c478bdstevel@tonic-gate	sethi	%hi(iktsbbase), %o0
11407c478bdstevel@tonic-gate	call	sfmmu_fixup_setx	! patch value of ktsb base addr
11417c478bdstevel@tonic-gate	  or	%o0, %lo(iktsbbase), %o0
11437c478bdstevel@tonic-gate	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
11447c478bdstevel@tonic-gate	call	sfmmu_fixup_setx	! patch value of ktsb base addr
11457c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0
11477c478bdstevel@tonic-gate#ifdef sun4v
11487c478bdstevel@tonic-gate	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
11497c478bdstevel@tonic-gate	call	sfmmu_fixup_setx	! patch value of ktsb base addr
11507c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
11517c478bdstevel@tonic-gate#endif /* sun4v */
11537c478bdstevel@tonic-gate	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)
11557c478bdstevel@tonic-gate	sethi	%hi(dktsb4mbase), %o0
11567c478bdstevel@tonic-gate	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
11577c478bdstevel@tonic-gate	  or	%o0, %lo(dktsb4mbase), %o0
115905d3dc4paulsan	sethi	%hi(iktsb4mbase), %o0
116005d3dc4paulsan	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
116105d3dc4paulsan	  or	%o0, %lo(iktsb4mbase), %o0
116248bbca8Daniel Hoffman
11637c478bdstevel@tonic-gate	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
11647c478bdstevel@tonic-gate	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
11657c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0
11677c478bdstevel@tonic-gate#ifdef sun4v
11687c478bdstevel@tonic-gate	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
11697c478bdstevel@tonic-gate	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
11707c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
11717c478bdstevel@tonic-gate#endif /* sun4v */
11737c478bdstevel@tonic-gate	set	ktsb_szcode, %o4
11747c478bdstevel@tonic-gate	ld	[%o4], %o4
11757c478bdstevel@tonic-gate	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
11767c478bdstevel@tonic-gate	call	sfmmu_fixup_or		! patch value of ktsb_szcode
11777c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0
11797c478bdstevel@tonic-gate#ifdef sun4v
11807c478bdstevel@tonic-gate	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
11817c478bdstevel@tonic-gate	call	sfmmu_fixup_or		! patch value of ktsb_szcode
11827c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
11837c478bdstevel@tonic-gate#endif /* sun4v */
11857c478bdstevel@tonic-gate	set	ktsb4m_szcode, %o4
11867c478bdstevel@tonic-gate	ld	[%o4], %o4
11877c478bdstevel@tonic-gate	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
11887c478bdstevel@tonic-gate	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
11897c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0
11917c478bdstevel@tonic-gate#ifdef sun4v
11927c478bdstevel@tonic-gate	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
11937c478bdstevel@tonic-gate	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
11947c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
11957c478bdstevel@tonic-gate#endif /* sun4v */
11977c478bdstevel@tonic-gate	ret
11987c478bdstevel@tonic-gate	restore
11997c478bdstevel@tonic-gate	SET_SIZE(sfmmu_patch_ktsb)
12017c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_patch_tlbm)
12027c478bdstevel@tonic-gate	/*
12037c478bdstevel@tonic-gate	 * Fixup trap handlers in common segkpm case.  This is reserved
12047c478bdstevel@tonic-gate	 * for future use should kpm TSB be changed to be other than the
12057c478bdstevel@tonic-gate	 * kernel TSB.
12067c478bdstevel@tonic-gate	 */
	/* currently a no-op; nothing to patch (see comment above) */
12077c478bdstevel@tonic-gate	retl
12087c478bdstevel@tonic-gate	nop
12097c478bdstevel@tonic-gate	SET_SIZE(sfmmu_kpm_patch_tlbm)
12117c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_patch_tsbm)
12127c478bdstevel@tonic-gate	/*
121348bbca8Daniel Hoffman	 * nop the branch to sfmmu_kpm_dtsb_miss_small
12147c478bdstevel@tonic-gate	 * in the case where we are using large pages for
12157c478bdstevel@tonic-gate	 * seg_kpm (and hence must probe the second TSB for
12167c478bdstevel@tonic-gate	 * seg_kpm VAs)
12177c478bdstevel@tonic-gate	 */
	/* overwrite the branch instruction with a nop, then flush it */
12187c478bdstevel@tonic-gate	set	dktsb4m_kpmcheck_small, %o0
12197c478bdstevel@tonic-gate	MAKE_NOP_INSTR(%o1)
12207c478bdstevel@tonic-gate	st	%o1, [%o0]
12217c478bdstevel@tonic-gate	flush	%o0
12227c478bdstevel@tonic-gate	retl
12237c478bdstevel@tonic-gate	nop
12247c478bdstevel@tonic-gate	SET_SIZE(sfmmu_kpm_patch_tsbm)
12267c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_patch_utsb)
122725cf1a3jl#ifdef UTSB_PHYS
12287c478bdstevel@tonic-gate	retl
12297c478bdstevel@tonic-gate	nop
123025cf1a3jl#else /* UTSB_PHYS */
12317c478bdstevel@tonic-gate	/*
12327c478bdstevel@tonic-gate	 * We need to hot patch utsb_vabase and utsb4m_vabase
12337c478bdstevel@tonic-gate	 */
12347c478bdstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp
12367c478bdstevel@tonic-gate	/* patch value of utsb_vabase */
12377c478bdstevel@tonic-gate	set	utsb_vabase, %o1
12387c478bdstevel@tonic-gate	ldx	[%o1], %o4
12397c478bdstevel@tonic-gate	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
12407c478bdstevel@tonic-gate	call	sfmmu_fixup_setx
12417c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
12427c478bdstevel@tonic-gate	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
12437c478bdstevel@tonic-gate	call	sfmmu_fixup_setx
12447c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
12457c478bdstevel@tonic-gate	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
12467c478bdstevel@tonic-gate	call	sfmmu_fixup_setx
12477c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
12497c478bdstevel@tonic-gate	/* patch value of utsb4m_vabase */
12507c478bdstevel@tonic-gate	set	utsb4m_vabase, %o1
12517c478bdstevel@tonic-gate	ldx	[%o1], %o4
12527c478bdstevel@tonic-gate	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
12537c478bdstevel@tonic-gate	call	sfmmu_fixup_setx
12547c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
12557c478bdstevel@tonic-gate	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
12567c478bdstevel@tonic-gate	call	sfmmu_fixup_setx
12577c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
12587c478bdstevel@tonic-gate	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
12597c478bdstevel@tonic-gate	call	sfmmu_fixup_setx
12607c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0
12627c478bdstevel@tonic-gate	/*
12637c478bdstevel@tonic-gate	 * Patch TSB base register masks and shifts if needed.
12647c478bdstevel@tonic-gate	 * By default the TSB base register contents are set up for 4M slab.
12657c478bdstevel@tonic-gate	 * If we're using a smaller slab size and reserved VA range we need
12667c478bdstevel@tonic-gate	 * to patch up those values here.
12677c478bdstevel@tonic-gate	 */
12687c478bdstevel@tonic-gate	set	tsb_slab_shift, %o1
12697c478bdstevel@tonic-gate	set	MMU_PAGESHIFT4M, %o4
127005d3dc4paulsan	lduw	[%o1], %o3
12717c478bdstevel@tonic-gate	subcc	%o4, %o3, %o4
12727c478bdstevel@tonic-gate	bz,pt	%icc, 1f
12737c478bdstevel@tonic-gate	  /* delay slot safe */
12757c478bdstevel@tonic-gate	/* patch reserved VA range size if needed. */
12767c478bdstevel@tonic-gate	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
12777c478bdstevel@tonic-gate	call	sfmmu_fixup_shiftx
12787c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
12797c478bdstevel@tonic-gate	call	sfmmu_fixup_shiftx
12807c478bdstevel@tonic-gate	  add	%o0, I_SIZE, %o0
12817c478bdstevel@tonic-gate	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
12827c478bdstevel@tonic-gate	call	sfmmu_fixup_shiftx
12837c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
12847c478bdstevel@tonic-gate	call	sfmmu_fixup_shiftx
12857c478bdstevel@tonic-gate	  add	%o0, I_SIZE, %o0
12877c478bdstevel@tonic-gate	/* patch TSBREG_VAMASK used to set up TSB base register */
12887c478bdstevel@tonic-gate	set	tsb_slab_mask, %o1
128905d3dc4paulsan	ldx	[%o1], %o4
12907c478bdstevel@tonic-gate	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
12917c478bdstevel@tonic-gate	call	sfmmu_fixup_or
12927c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
12937c478bdstevel@tonic-gate	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
12947c478bdstevel@tonic-gate	call	sfmmu_fixup_or
12957c478bdstevel@tonic-gate	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0
12977c478bdstevel@tonic-gate	ret
12987c478bdstevel@tonic-gate	restore
129925cf1a3jl#endif /* UTSB_PHYS */
13007c478bdstevel@tonic-gate	SET_SIZE(sfmmu_patch_utsb)
	/*
	 * Enable shared-context trap handling by patching the marked
	 * instructions: install "jmp %g5" + nop at
	 * sfmmu_shctx_cpu_mondo_patch, and nop out six instructions at
	 * sfmmu_shctx_user_rtt_patch.  sun4u has no shared-context
	 * support, so it just returns.
	 */
130205d3dc4paulsan	ENTRY_NP(sfmmu_patch_shctx)
130305d3dc4paulsan#ifdef sun4u
130405d3dc4paulsan	retl
130505d3dc4paulsan	  nop
130605d3dc4paulsan#else /* sun4u */
130705d3dc4paulsan	set	sfmmu_shctx_cpu_mondo_patch, %o0
130805d3dc4paulsan	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp       %g5
130905d3dc4paulsan	st	%o1, [%o0]
131005d3dc4paulsan	flush	%o0
131105d3dc4paulsan	MAKE_NOP_INSTR(%o1)
131205d3dc4paulsan	add	%o0, I_SIZE, %o0	! next instr
131305d3dc4paulsan	st	%o1, [%o0]
131405d3dc4paulsan	flush	%o0
131605d3dc4paulsan	set	sfmmu_shctx_user_rtt_patch, %o0
131705d3dc4paulsan	st      %o1, [%o0]		! nop 1st instruction
131805d3dc4paulsan	flush	%o0
131905d3dc4paulsan	add     %o0, I_SIZE, %o0
132005d3dc4paulsan	st      %o1, [%o0]		! nop 2nd instruction
132105d3dc4paulsan	flush	%o0
132205d3dc4paulsan	add     %o0, I_SIZE, %o0
132305d3dc4paulsan	st      %o1, [%o0]		! nop 3rd instruction
132405d3dc4paulsan	flush	%o0
132505d3dc4paulsan	add     %o0, I_SIZE, %o0
132605d3dc4paulsan	st      %o1, [%o0]		! nop 4th instruction
1327a6a9116Jason Beloro	flush	%o0
1328a6a9116Jason Beloro	add     %o0, I_SIZE, %o0
1329a6a9116Jason Beloro	st      %o1, [%o0]		! nop 5th instruction
1330a6a9116Jason Beloro	flush	%o0
1331a6a9116Jason Beloro	add     %o0, I_SIZE, %o0
1332a6a9116Jason Beloro	st      %o1, [%o0]		! nop 6th instruction
133305d3dc4paulsan	retl
13342f0fcb9Jason Beloro	flush	%o0
133505d3dc4paulsan#endif /* sun4u */
133605d3dc4paulsan	SET_SIZE(sfmmu_patch_shctx)
13387c478bdstevel@tonic-gate	/*
13397c478bdstevel@tonic-gate	 * Routine that loads an entry into a tsb using virtual addresses.
13407c478bdstevel@tonic-gate	 * Locking is required since all cpus can use the same TSB.
13417c478bdstevel@tonic-gate	 * Note that it is no longer required to have a valid context
13427c478bdstevel@tonic-gate	 * when calling this function.
13437c478bdstevel@tonic-gate	 */
13447c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_load_tsbe)
13457c478bdstevel@tonic-gate	/*
13467c478bdstevel@tonic-gate	 * %o0 = pointer to tsbe to load
13477c478bdstevel@tonic-gate	 * %o1 = tsb tag
13487c478bdstevel@tonic-gate	 * %o2 = virtual pointer to TTE
13497c478bdstevel@tonic-gate	 * %o3 = 1 if physical address in %o0 else 0
13507c478bdstevel@tonic-gate	 */
13517c478bdstevel@tonic-gate	rdpr	%pstate, %o5
13527c478bdstevel@tonic-gate#ifdef DEBUG
13531e2e7a7huah	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
13547c478bdstevel@tonic-gate#endif /* DEBUG */
	/* interrupts stay off for the duration of the locked TSB update */
13567c478bdstevel@tonic-gate	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
	/* SETUP_TSB_ASI selects the TSB access ASI from %o3 (phys vs virt) */
13587c478bdstevel@tonic-gate	SETUP_TSB_ASI(%o3, %g3)
13590a90a7fAmritpal Sandhu	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)
13617c478bdstevel@tonic-gate	wrpr	%g0, %o5, %pstate		/* enable interrupts */
136248bbca8Daniel Hoffman
13637c478bdstevel@tonic-gate	retl
13647c478bdstevel@tonic-gate	membar	#StoreStore|#StoreLoad
13657c478bdstevel@tonic-gate	SET_SIZE(sfmmu_load_tsbe)
13677c478bdstevel@tonic-gate	/*
13687c478bdstevel@tonic-gate	 * Flush TSB of a given entry if the tag matches.
136948bbca8Daniel Hoffman	 */
13707c478bdstevel@tonic-gate	ENTRY(sfmmu_unload_tsbe)
13717c478bdstevel@tonic-gate	/*
13727c478bdstevel@tonic-gate	 * %o0 = pointer to tsbe to be flushed
13737c478bdstevel@tonic-gate	 * %o1 = tag to match
13747c478bdstevel@tonic-gate	 * %o2 = 1 if physical address in %o0 else 0
13757c478bdstevel@tonic-gate	 */
13767c478bdstevel@tonic-gate	SETUP_TSB_ASI(%o2, %g1)
13777c478bdstevel@tonic-gate	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
13787c478bdstevel@tonic-gate	retl
13797c478bdstevel@tonic-gate	membar	#StoreStore|#StoreLoad
13807c478bdstevel@tonic-gate	SET_SIZE(sfmmu_unload_tsbe)
13827c478bdstevel@tonic-gate	/*
13837c478bdstevel@tonic-gate	 * Routine that loads a TTE into the kpm TSB from C code.
13847c478bdstevel@tonic-gate	 * Locking is required since kpm TSB is shared among all CPUs.
13857c478bdstevel@tonic-gate	 */
13867c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_load_tsb)
13877c478bdstevel@tonic-gate	/*
13887c478bdstevel@tonic-gate	 * %o0 = vaddr
13897c478bdstevel@tonic-gate	 * %o1 = ttep
13907c478bdstevel@tonic-gate	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
13917c478bdstevel@tonic-gate	 */
13927c478bdstevel@tonic-gate	rdpr	%pstate, %o5			! %o5 = saved pstate
13937c478bdstevel@tonic-gate#ifdef DEBUG
13941e2e7a7huah	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
13957c478bdstevel@tonic-gate#endif /* DEBUG */
13967c478bdstevel@tonic-gate	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts
13987c478bdstevel@tonic-gate#ifndef sun4v
13997c478bdstevel@tonic-gate	sethi	%hi(ktsb_phys), %o4
14007c478bdstevel@tonic-gate	mov	ASI_N, %o3
14017c478bdstevel@tonic-gate	ld	[%o4 + %lo(ktsb_phys)], %o4
14027c478bdstevel@tonic-gate	movrnz	%o4, ASI_MEM, %o3
14037c478bdstevel@tonic-gate	mov	%o3, %asi
14041426d65sm#endif /* !sun4v */
14057c478bdstevel@tonic-gate	mov	%o0, %g1			! %g1 = vaddr
14077c478bdstevel@tonic-gate	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
14087c478bdstevel@tonic-gate	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
14097c478bdstevel@tonic-gate	/* %g2 = tsbep, %g1 clobbered */
14117c478bdstevel@tonic-gate	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
14127c478bdstevel@tonic-gate	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
14130a90a7fAmritpal Sandhu	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)
14157c478bdstevel@tonic-gate	wrpr	%g0, %o5, %pstate		! enable interrupts
14167c478bdstevel@tonic-gate	retl
14177c478bdstevel@tonic-gate	  membar #StoreStore|#StoreLoad
14187c478bdstevel@tonic-gate	SET_SIZE(sfmmu_kpm_load_tsb)
14207c478bdstevel@tonic-gate	/*
14217c478bdstevel@tonic-gate	 * Routine that shoots down a TTE in the kpm TSB or in the
14227c478bdstevel@tonic-gate	 * kernel TSB depending on virtpg. Locking is required since
14237c478bdstevel@tonic-gate	 * kpm/kernel TSB is shared among all CPUs.
14247c478bdstevel@tonic-gate	 */
14257c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_unload_tsb)
14267c478bdstevel@tonic-gate	/*
14277c478bdstevel@tonic-gate	 * %o0 = vaddr
14287c478bdstevel@tonic-gate	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
14297c478bdstevel@tonic-gate	 */
14307c478bdstevel@tonic-gate#ifndef sun4v
14317c478bdstevel@tonic-gate	sethi	%hi(ktsb_phys), %o4
14327c478bdstevel@tonic-gate	mov	ASI_N, %o3
14337c478bdstevel@tonic-gate	ld	[%o4 + %lo(ktsb_phys)], %o4
14347c478bdstevel@tonic-gate	movrnz	%o4, ASI_MEM, %o3
14357c478bdstevel@tonic-gate	mov	%o3, %asi
14361426d65sm#endif /* !sun4v */
14377c478bdstevel@tonic-gate	mov	%o0, %g1			! %g1 = vaddr
14397c478bdstevel@tonic-gate	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
14407c478bdstevel@tonic-gate	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
14417c478bdstevel@tonic-gate	/* %g2 = tsbep, %g1 clobbered */
14437c478bdstevel@tonic-gate	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
14447c478bdstevel@tonic-gate	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
14457c478bdstevel@tonic-gate	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
14477c478bdstevel@tonic-gate	retl
14487c478bdstevel@tonic-gate	  membar	#StoreStore|#StoreLoad
14497c478bdstevel@tonic-gate	SET_SIZE(sfmmu_kpm_unload_tsb)
14527c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_ttetopfn)
14537c478bdstevel@tonic-gate	ldx	[%o0], %g1			/* read tte */
14547c478bdstevel@tonic-gate	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
14557c478bdstevel@tonic-gate	/*
14567c478bdstevel@tonic-gate	 * g1 = pfn
14577c478bdstevel@tonic-gate	 */
14587c478bdstevel@tonic-gate	retl
14597c478bdstevel@tonic-gate	mov	%g1, %o0
14607c478bdstevel@tonic-gate	SET_SIZE(sfmmu_ttetopfn)
14637c478bdstevel@tonic-gate * These macros are used to update global sfmmu hme hash statistics
14647c478bdstevel@tonic-gate * in perf critical paths. It is only enabled in debug kernels or
14657c478bdstevel@tonic-gate * if SFMMU_STAT_GATHER is defined
14667c478bdstevel@tonic-gate */
14677c478bdstevel@tonic-gate#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
14687c478bdstevel@tonic-gate#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
14697c478bdstevel@tonic-gate	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
14707c478bdstevel@tonic-gate	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
14717c478bdstevel@tonic-gate	cmp	tmp1, hatid						;\
14727c478bdstevel@tonic-gate	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
14737c478bdstevel@tonic-gate	set	sfmmu_global_stat, tmp1					;\
14747c478bdstevel@tonic-gate	add	tmp1, tmp2, tmp1					;\
14757c478bdstevel@tonic-gate	ld	[tmp1], tmp2						;\
14767c478bdstevel@tonic-gate	inc	tmp2							;\
14777c478bdstevel@tonic-gate	st	tmp2, [tmp1]
14797c478bdstevel@tonic-gate#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
14807c478bdstevel@tonic-gate	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
14817c478bdstevel@tonic-gate	mov	HATSTAT_KHASH_LINKS, tmp2				;\
14827c478bdstevel@tonic-gate	cmp	tmp1, hatid						;\
14837c478bdstevel@tonic-gate	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
14847c478bdstevel@tonic-gate	set	sfmmu_global_stat, tmp1					;\
14857c478bdstevel@tonic-gate	add	tmp1, tmp2, tmp1					;\
14867c478bdstevel@tonic-gate	ld	[tmp1], tmp2						;\
14877c478bdstevel@tonic-gate	inc	tmp2							;\
14887c478bdstevel@tonic-gate	st	tmp2, [tmp1]
14917c478bdstevel@tonic-gate#else /* DEBUG || SFMMU_STAT_GATHER */
14937c478bdstevel@tonic-gate#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
14957c478bdstevel@tonic-gate#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
14977c478bdstevel@tonic-gate#endif  /* DEBUG || SFMMU_STAT_GATHER */
15007c478bdstevel@tonic-gate * This macro is used to update global sfmmu kstas in non
15017c478bdstevel@tonic-gate * perf critical areas so they are enabled all the time
15027c478bdstevel@tonic-gate */
15037c478bdstevel@tonic-gate#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
15047c478bdstevel@tonic-gate	sethi	%hi(sfmmu_global_stat), tmp1				;\
15057c478bdstevel@tonic-gate	add	tmp1, statname, tmp1					;\
15067c478bdstevel@tonic-gate	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
15077c478bdstevel@tonic-gate	inc	tmp2							;\
15087c478bdstevel@tonic-gate	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
15117c478bdstevel@tonic-gate * These macros are used to update per cpu stats in non perf
15127c478bdstevel@tonic-gate * critical areas so they are enabled all the time
15137c478bdstevel@tonic-gate */
15147c478bdstevel@tonic-gate#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
15157c478bdstevel@tonic-gate	ld	[tsbarea + stat], tmp1					;\
15167c478bdstevel@tonic-gate	inc	tmp1							;\
15177c478bdstevel@tonic-gate	st	tmp1, [tsbarea + stat]
15207c478bdstevel@tonic-gate * These macros are used to update per cpu stats in non perf
15217c478bdstevel@tonic-gate * critical areas so they are enabled all the time
15227c478bdstevel@tonic-gate */
15237c478bdstevel@tonic-gate#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
15247c478bdstevel@tonic-gate	lduh	[tsbarea + stat], tmp1					;\
15257c478bdstevel@tonic-gate	inc	tmp1							;\
15267c478bdstevel@tonic-gate	stuh	tmp1, [tsbarea + stat]
15287c478bdstevel@tonic-gate#if defined(KPM_TLBMISS_STATS_GATHER)
15297c478bdstevel@tonic-gate	/*
15307c478bdstevel@tonic-gate	 * Count kpm dtlb misses separately to allow a different
15317c478bdstevel@tonic-gate	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
15327c478bdstevel@tonic-gate	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
15337c478bdstevel@tonic-gate	 */
15347c478bdstevel@tonic-gate#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
15357c478bdstevel@tonic-gate	brgez	tagacc, label	/* KPM VA? */				;\
15367c478bdstevel@tonic-gate	nop								;\
15377c478bdstevel@tonic-gate	CPU_INDEX(tmp1, tsbma)						;\
15387c478bdstevel@tonic-gate	sethi	%hi(kpmtsbm_area), tsbma				;\
15397c478bdstevel@tonic-gate	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
15407c478bdstevel@tonic-gate	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
15417c478bdstevel@tonic-gate	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
15427c478bdstevel@tonic-gate	/* VA range check */						;\
15437c478bdstevel@tonic-gate	ldx	[tsbma + KPMTSBM_VBASE], val				;\
15447c478bdstevel@tonic-gate	cmp	tagacc, val						;\
15457c478bdstevel@tonic-gate	blu,pn	%xcc, label						;\
15467c478bdstevel@tonic-gate	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
15477c478bdstevel@tonic-gate	cmp	tagacc, tmp1						;\
15487c478bdstevel@tonic-gate	bgeu,pn	%xcc, label						;\
15497c478bdstevel@tonic-gate	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
15507c478bdstevel@tonic-gate	inc	val							;\
15517c478bdstevel@tonic-gate	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
15547c478bdstevel@tonic-gate#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
15557c478bdstevel@tonic-gate#endif	/* KPM_TLBMISS_STATS_GATHER */
15577c478bdstevel@tonic-gate#ifdef	PTL1_PANIC_DEBUG
15587c478bdstevel@tonic-gate	.seg	".data"
15597c478bdstevel@tonic-gate	.global	test_ptl1_panic
15617c478bdstevel@tonic-gate	.word	0
15627c478bdstevel@tonic-gate	.align	8
15647c478bdstevel@tonic-gate	.seg	".text"
15657c478bdstevel@tonic-gate	.align	4
15667c478bdstevel@tonic-gate#endif	/* PTL1_PANIC_DEBUG */
15689b0bb79John Levon	/*
15699b0bb79John Levon	 * The following routines are jumped to from the mmu trap handlers to do
15709b0bb79John Levon	 * the setting up to call systrap.  They are separate routines instead
15719b0bb79John Levon	 * of being part of the handlers because the handlers would exceed 32
15729b0bb79John Levon	 * instructions and since this is part of the slow path the jump cost is
15739b0bb79John Levon	 * irrelevant.
15749b0bb79John Levon	 */
15767c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_pagefault)
1577efaef81arao	SET_GL_REG(1)
15787c478bdstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
15797c478bdstevel@tonic-gate	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
15807c478bdstevel@tonic-gate	rdpr	%tt, %g6
15817c478bdstevel@tonic-gate	cmp	%g6, FAST_IMMU_MISS_TT
15827c478bdstevel@tonic-gate	be,a,pn	%icc, 1f
15837c478bdstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
15847c478bdstevel@tonic-gate	cmp	%g6, T_INSTR_MMU_MISS
15857c478bdstevel@tonic-gate	be,a,pn	%icc, 1f
15867c478bdstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
15877c478bdstevel@tonic-gate	mov	%g5, %g2
15887c478bdstevel@tonic-gate	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
15897c478bdstevel@tonic-gate	cmp	%g6, FAST_DMMU_MISS_TT
15907c478bdstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
15917c478bdstevel@tonic-gate	cmp	%g6, T_DATA_MMU_MISS
15927c478bdstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
15947c478bdstevel@tonic-gate#ifdef  PTL1_PANIC_DEBUG
15957c478bdstevel@tonic-gate	/* check if we want to test the tl1 panic */
15967c478bdstevel@tonic-gate	sethi	%hi(test_ptl1_panic), %g4
15977c478bdstevel@tonic-gate	ld	[%g4 + %lo(test_ptl1_panic)], %g1
15987c478bdstevel@tonic-gate	st	%g0, [%g4 + %lo(test_ptl1_panic)]
15997c478bdstevel@tonic-gate	cmp	%g1, %g0
16007c478bdstevel@tonic-gate	bne,a,pn %icc, ptl1_panic
16017c478bdstevel@tonic-gate	  or	%g0, PTL1_BAD_DEBUG, %g1
16027c478bdstevel@tonic-gate#endif	/* PTL1_PANIC_DEBUG */
16047c478bdstevel@tonic-gate	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
16057c478bdstevel@tonic-gate	/*
16067c478bdstevel@tonic-gate	 * g2 = tag access reg
16077c478bdstevel@tonic-gate	 * g3.l = type
16087c478bdstevel@tonic-gate	 * g3.h = 0
16097c478bdstevel@tonic-gate	 */
16107c478bdstevel@tonic-gate	sethi	%hi(trap), %g1
16117c478bdstevel@tonic-gate	or	%g1, %lo(trap), %g1
16137c478bdstevel@tonic-gate	ba,pt	%xcc, sys_trap
161448bbca8Daniel Hoffman	  mov	-1, %g4
16157c478bdstevel@tonic-gate	SET_SIZE(sfmmu_pagefault)
16177c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_mmu_trap)
1618efaef81arao	SET_GL_REG(1)
16197c478bdstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
16207c478bdstevel@tonic-gate	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
16217c478bdstevel@tonic-gate	rdpr	%tt, %g6
16227c478bdstevel@tonic-gate	cmp	%g6, FAST_IMMU_MISS_TT
16237c478bdstevel@tonic-gate	be,a,pn	%icc, 1f
16247c478bdstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
16257c478bdstevel@tonic-gate	cmp	%g6, T_INSTR_MMU_MISS
16267c478bdstevel@tonic-gate	be,a,pn	%icc, 1f
16277c478bdstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
16287c478bdstevel@tonic-gate	mov	%g5, %g2
16297c478bdstevel@tonic-gate	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
16307c478bdstevel@tonic-gate	cmp	%g6, FAST_DMMU_MISS_TT
16317c478bdstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
16327c478bdstevel@tonic-gate	cmp	%g6, T_DATA_MMU_MISS
16337c478bdstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
16357c478bdstevel@tonic-gate	/*
16367c478bdstevel@tonic-gate	 * g2 = tag access reg
16377c478bdstevel@tonic-gate	 * g3 = type
16387c478bdstevel@tonic-gate	 */
16397c478bdstevel@tonic-gate	sethi	%hi(sfmmu_tsbmiss_exception), %g1
16407c478bdstevel@tonic-gate	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
16417c478bdstevel@tonic-gate	ba,pt	%xcc, sys_trap
164248bbca8Daniel Hoffman	  mov	-1, %g4
16437c478bdstevel@tonic-gate	/*NOTREACHED*/
16447c478bdstevel@tonic-gate	SET_SIZE(sfmmu_mmu_trap)
16467c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_suspend_tl)
1647efaef81arao	SET_GL_REG(1)
16487c478bdstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
16497c478bdstevel@tonic-gate	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
16507c478bdstevel@tonic-gate	rdpr	%tt, %g6
16517c478bdstevel@tonic-gate	cmp	%g6, FAST_IMMU_MISS_TT
16527c478bdstevel@tonic-gate	be,a,pn	%icc, 1f
16537c478bdstevel@tonic-gate	  mov	T_INSTR_MMU_MISS, %g3
16547c478bdstevel@tonic-gate	mov	%g5, %g2
16557c478bdstevel@tonic-gate	cmp	%g6, FAST_DMMU_MISS_TT
16567c478bdstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g3
16577c478bdstevel@tonic-gate	movne	%icc, T_DATA_PROT, %g3
16597c478bdstevel@tonic-gate	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
16607c478bdstevel@tonic-gate	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
16617c478bdstevel@tonic-gate	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
16627c478bdstevel@tonic-gate	ba,pt	%xcc, sys_trap
16637c478bdstevel@tonic-gate	  mov	PIL_15, %g4
16647c478bdstevel@tonic-gate	/*NOTREACHED*/
16657c478bdstevel@tonic-gate	SET_SIZE(sfmmu_suspend_tl)
16677c478bdstevel@tonic-gate	/*
16687c478bdstevel@tonic-gate	 * No %g registers in use at this point.
16697c478bdstevel@tonic-gate	 */
16707c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_window_trap)
16717c478bdstevel@tonic-gate	rdpr	%tpc, %g1
16727c478bdstevel@tonic-gate#ifdef sun4v
16737c478bdstevel@tonic-gate#ifdef DEBUG
16747c478bdstevel@tonic-gate	/* We assume previous %gl was 1 */
16757c478bdstevel@tonic-gate	rdpr	%tstate, %g4
16767c478bdstevel@tonic-gate	srlx	%g4, TSTATE_GL_SHIFT, %g4
16777c478bdstevel@tonic-gate	and	%g4, TSTATE_GL_MASK, %g4
16787c478bdstevel@tonic-gate	cmp	%g4, 1
16797c478bdstevel@tonic-gate	bne,a,pn %icc, ptl1_panic
16807c478bdstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
16817c478bdstevel@tonic-gate#endif /* DEBUG */
16827c478bdstevel@tonic-gate	/* user miss at tl>1. better be the window handler or user_rtt */
16837c478bdstevel@tonic-gate	/* in user_rtt? */
16847c478bdstevel@tonic-gate	set	rtt_fill_start, %g4
16857c478bdstevel@tonic-gate	cmp	%g1, %g4
16867c478bdstevel@tonic-gate	blu,pn %xcc, 6f
16877c478bdstevel@tonic-gate	 .empty
16887c478bdstevel@tonic-gate	set	rtt_fill_end, %g4
16897c478bdstevel@tonic-gate	cmp	%g1, %g4
16907c478bdstevel@tonic-gate	bgeu,pn %xcc, 6f
16917c478bdstevel@tonic-gate	 nop
16927c478bdstevel@tonic-gate	set	fault_rtt_fn1, %g1
169348bbca8Daniel Hoffman	wrpr	%g0, %g1, %tnpc
16947c478bdstevel@tonic-gate	ba,a	7f
16967c478bdstevel@tonic-gate	! must save this trap level before descending trap stack
16977c478bdstevel@tonic-gate	! no need to save %tnpc, either overwritten or discarded
16987c478bdstevel@tonic-gate	! already got it: rdpr	%tpc, %g1
16997c478bdstevel@tonic-gate	rdpr	%tstate, %g6
17007c478bdstevel@tonic-gate	rdpr	%tt, %g7
17017c478bdstevel@tonic-gate	! trap level saved, go get underlying trap type
17027c478bdstevel@tonic-gate	rdpr	%tl, %g5
17037c478bdstevel@tonic-gate	sub	%g5, 1, %g3
17047c478bdstevel@tonic-gate	wrpr	%g3, %tl
17057c478bdstevel@tonic-gate	rdpr	%tt, %g2
17067c478bdstevel@tonic-gate	wrpr	%g5, %tl
17077c478bdstevel@tonic-gate	! restore saved trap level
17087c478bdstevel@tonic-gate	wrpr	%g1, %tpc
17097c478bdstevel@tonic-gate	wrpr	%g6, %tstate
17107c478bdstevel@tonic-gate	wrpr	%g7, %tt
17117c478bdstevel@tonic-gate#else /* sun4v */
17127c478bdstevel@tonic-gate	/* user miss at tl>1. better be the window handler */
17137c478bdstevel@tonic-gate	rdpr	%tl, %g5
17147c478bdstevel@tonic-gate	sub	%g5, 1, %g3
17157c478bdstevel@tonic-gate	wrpr	%g3, %tl
17167c478bdstevel@tonic-gate	rdpr	%tt, %g2
17177c478bdstevel@tonic-gate	wrpr	%g5, %tl
17187c478bdstevel@tonic-gate#endif /* sun4v */
17197c478bdstevel@tonic-gate	and	%g2, WTRAP_TTMASK, %g4
172048bbca8Daniel Hoffman	cmp	%g4, WTRAP_TYPE
17217c478bdstevel@tonic-gate	bne,pn	%xcc, 1f
17227c478bdstevel@tonic-gate	 nop
17237c478bdstevel@tonic-gate	/* tpc should be in the trap table */
17247c478bdstevel@tonic-gate	set	trap_table, %g4
17257c478bdstevel@tonic-gate	cmp	%g1, %g4
17267c478bdstevel@tonic-gate	blt,pn %xcc, 1f
17277c478bdstevel@tonic-gate	 .empty
17287c478bdstevel@tonic-gate	set	etrap_table, %g4
17297c478bdstevel@tonic-gate	cmp	%g1, %g4
17307c478bdstevel@tonic-gate	bge,pn %xcc, 1f
17317c478bdstevel@tonic-gate	 .empty
17327c478bdstevel@tonic-gate	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
17337c478bdstevel@tonic-gate	add	%g1, WTRAP_FAULTOFF, %g1
173448bbca8Daniel Hoffman	wrpr	%g0, %g1, %tnpc
17367c478bdstevel@tonic-gate	/*
17377c478bdstevel@tonic-gate	 * some wbuf handlers will call systrap to resolve the fault
17387c478bdstevel@tonic-gate	 * we pass the trap type so they figure out the correct parameters.
17397c478bdstevel@tonic-gate	 * g5 = trap type, g6 = tag access reg
17407c478bdstevel@tonic-gate	 */
17427c478bdstevel@tonic-gate	/*
17437c478bdstevel@tonic-gate	 * only use g5, g6, g7 registers after we have switched to alternate
17447c478bdstevel@tonic-gate	 * globals.
17457c478bdstevel@tonic-gate	 */
17467c478bdstevel@tonic-gate	SET_GL_REG(1)
17477c478bdstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
17487c478bdstevel@tonic-gate	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
17497c478bdstevel@tonic-gate	rdpr	%tt, %g7
17507c478bdstevel@tonic-gate	cmp	%g7, FAST_IMMU_MISS_TT
17517c478bdstevel@tonic-gate	be,a,pn	%icc, ptl1_panic
17527c478bdstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
17537c478bdstevel@tonic-gate	cmp	%g7, T_INSTR_MMU_MISS
17547c478bdstevel@tonic-gate	be,a,pn	%icc, ptl1_panic
17557c478bdstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
17567c478bdstevel@tonic-gate	mov	T_DATA_PROT, %g5
17577c478bdstevel@tonic-gate	cmp	%g7, FAST_DMMU_MISS_TT
17587c478bdstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g5
17597c478bdstevel@tonic-gate	cmp	%g7, T_DATA_MMU_MISS
17607c478bdstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g5
17617c478bdstevel@tonic-gate	! XXXQ AGS re-check out this one
17627c478bdstevel@tonic-gate	done
176401bd518wh	CPU_PADDR(%g1, %g4)
176501bd518wh	add	%g1, CPU_TL1_HDLR, %g1
176601bd518wh	lda	[%g1]ASI_MEM, %g4
17677c478bdstevel@tonic-gate	brnz,a,pt %g4, sfmmu_mmu_trap
176801bd518wh	  sta	%g0, [%g1]ASI_MEM
17697c478bdstevel@tonic-gate	ba,pt	%icc, ptl1_panic
17707c478bdstevel@tonic-gate	  mov	PTL1_BAD_TRAP, %g1
17717c478bdstevel@tonic-gate	SET_SIZE(sfmmu_window_trap)
17737c478bdstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_exception)
17747c478bdstevel@tonic-gate	/*
17757c478bdstevel@tonic-gate	 * We have accessed an unmapped segkpm address or a legal segkpm
17767c478bdstevel@tonic-gate	 * address which is involved in a VAC alias conflict prevention.
17777c478bdstevel@tonic-gate	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
17787c478bdstevel@tonic-gate	 * set. If it is, we will instead note that a fault has occurred
17797c478bdstevel@tonic-gate	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
17807c478bdstevel@tonic-gate	 * a "retry"). This will step over the faulting instruction.
17817c478bdstevel@tonic-gate	 * Note that this means that a legal segkpm address involved in
17827c478bdstevel@tonic-gate	 * a VAC alias conflict prevention (a rare case to begin with)
17837c478bdstevel@tonic-gate	 * cannot be used in DTrace.
17847c478bdstevel@tonic-gate	 */
17857c478bdstevel@tonic-gate	CPU_INDEX(%g1, %g2)
17867c478bdstevel@tonic-gate	set	cpu_core, %g2
17877c478bdstevel@tonic-gate	sllx	%g1, CPU_CORE_SHIFT, %g1
17887c478bdstevel@tonic-gate	add	%g1, %g2, %g1
17897c478bdstevel@tonic-gate	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
17907c478bdstevel@tonic-gate	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
17917c478bdstevel@tonic-gate	bz	0f
17927c478bdstevel@tonic-gate	or	%g2, CPU_DTRACE_BADADDR, %g2
17937c478bdstevel@tonic-gate	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
17947c478bdstevel@tonic-gate	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
17957c478bdstevel@tonic-gate	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
17967c478bdstevel@tonic-gate	done
17987c478bdstevel@tonic-gate	TSTAT_CHECK_TL1(1f, %g1, %g2)
1800efaef81arao	SET_GL_REG(1)
18017c478bdstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
18027c478bdstevel@tonic-gate	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
18037c478bdstevel@tonic-gate	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
18047c478bdstevel@tonic-gate	/*
18057c478bdstevel@tonic-gate	 * g2=tagacc g3.l=type g3.h=0
18067c478bdstevel@tonic-gate	 */
18077c478bdstevel@tonic-gate	sethi	%hi(trap), %g1
18087c478bdstevel@tonic-gate	or	%g1, %lo(trap), %g1
18097c478bdstevel@tonic-gate	ba,pt	%xcc, sys_trap
18107c478bdstevel@tonic-gate	mov	-1, %g4
18117c478bdstevel@tonic-gate	SET_SIZE(sfmmu_kpm_exception)
18137c478bdstevel@tonic-gate#if (IMAP_SEG != 0)
18147c478bdstevel@tonic-gate#error - ism_map->ism_seg offset is not zero
181848bbca8Daniel Hoffman * Copies ism mapping for this ctx in param "ism" if this is a ISM
181948bbca8Daniel Hoffman * tlb miss and branches to label "ismhit". If this is not an ISM
18207c478bdstevel@tonic-gate * process or an ISM tlb miss it falls thru.
18217c478bdstevel@tonic-gate *
18227c478bdstevel@tonic-gate * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
18237c478bdstevel@tonic-gate * this process.
18247c478bdstevel@tonic-gate * If so, it will branch to label "ismhit".  If not, it will fall through.
18257c478bdstevel@tonic-gate *
18267c478bdstevel@tonic-gate * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
18277c478bdstevel@tonic-gate * so that any other threads of this process will not try and walk the ism
182848bbca8Daniel Hoffman * maps while they are being changed.
18297c478bdstevel@tonic-gate *
18307c478bdstevel@tonic-gate * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
18317c478bdstevel@tonic-gate *       will make sure of that. This means we can terminate our search on
18327c478bdstevel@tonic-gate *       the first zero mapping we find.
18337c478bdstevel@tonic-gate *
18347c478bdstevel@tonic-gate * Parameters:
183560972f3jb * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
18367c478bdstevel@tonic-gate * tsbmiss	= address of tsb miss area (in)
18377c478bdstevel@tonic-gate * ismseg	= contents of ism_seg for this ism map (out)
18387c478bdstevel@tonic-gate * ismhat	= physical address of imap_ismhat for this ism map (out)
18397c478bdstevel@tonic-gate * tmp1		= scratch reg (CLOBBERED)
18407c478bdstevel@tonic-gate * tmp2		= scratch reg (CLOBBERED)
18417c478bdstevel@tonic-gate * tmp3		= scratch reg (CLOBBERED)
18427c478bdstevel@tonic-gate * label:    temporary labels
18437c478bdstevel@tonic-gate * ismhit:   label where to jump to if an ism dtlb miss
18447c478bdstevel@tonic-gate * exitlabel:label where to jump if hat is busy due to hat_unshare.
18457c478bdstevel@tonic-gate */
18467c478bdstevel@tonic-gate#define <