sfmmu_asm.s revision a6a911618075176ed839dbe7f7c90604d0954b46
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 * SFMMU primitives.  These primitives should only be used by sfmmu
28 * routines.
29 */
30
31#if defined(lint)
32#include <sys/types.h>
33#else	/* lint */
34#include "assym.h"
35#endif	/* lint */
36
37#include <sys/asm_linkage.h>
38#include <sys/machtrap.h>
39#include <sys/machasi.h>
40#include <sys/sun4asi.h>
41#include <sys/pte.h>
42#include <sys/mmu.h>
43#include <vm/hat_sfmmu.h>
44#include <vm/seg_spt.h>
45#include <sys/machparam.h>
46#include <sys/privregs.h>
47#include <sys/scb.h>
48#include <sys/intreg.h>
49#include <sys/machthread.h>
50#include <sys/intr.h>
51#include <sys/clock.h>
52#include <sys/trapstat.h>
53
54#ifdef TRAPTRACE
55#include <sys/traptrace.h>
56
57/*
58 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
59 */
60#define	TT_TRACE(label)		\
61	ba	label		;\
62	rd	%pc, %g7
63#else
64
65#define	TT_TRACE(label)
66
67#endif /* TRAPTRACE */
68
69#ifndef	lint
70
71#if (TTE_SUSPEND_SHIFT > 0)
72#define	TTE_SUSPEND_INT_SHIFT(reg)				\
73	sllx	reg, TTE_SUSPEND_SHIFT, reg
74#else
75#define	TTE_SUSPEND_INT_SHIFT(reg)
76#endif
77
78#endif /* lint */
79
80#ifndef	lint
81
82/*
83 * Assumes TSBE_TAG is 0
84 * Assumes TSBE_INTHI is 0
85 * Assumes TSBREG.split is 0
86 */
87
88#if TSBE_TAG != 0
89#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
90#endif
91
92#if TSBTAG_INTHI != 0
93#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
94#endif
95
96/*
97 * The following code assumes the tsb is not split.
98 *
99 * With TSBs no longer shared between processes, it's no longer
100 * necessary to hash the context bits into the tsb index to get
101 * tsb coloring; the new implementation treats the TSB as a
102 * direct-mapped, virtually-addressed cache.
103 *
104 * In:
105 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
106 *    tsbbase = base address of TSB (clobbered)
107 *    tagacc = tag access register (clobbered)
108 *    szc = size code of TSB (ro)
109 *    tmp = scratch reg
110 * Out:
111 *    tsbbase = pointer to entry in TSB
112 */
113#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
114	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
115	srlx	tagacc, vpshift, tagacc 				;\
116	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
117	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
118	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
119	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
120	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
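/*
 * Illustrative C sketch of the computation above (not compiled; it just
 * restates the macro using the same assym constants):
 *
 *	struct tsbe *
 *	get_tsbe_pointer(uintptr_t tsbbase, uint64_t tagacc,
 *	    int vpshift, int szc)
 *	{
 *		uint64_t nentries = (uint64_t)TSB_ENTRIES(0) << szc;
 *		uint64_t idx = (tagacc >> vpshift) & (nentries - 1);
 *
 *		return ((struct tsbe *)(tsbbase + (idx << TSB_ENTRY_SHIFT)));
 *	}
 */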
121
122/*
123 * When the kpm TSB is used it is assumed that it is direct mapped
124 * using (vaddr>>vpshift)%tsbsz as the index.
125 *
126 * Note that, for now, the kpm TSB and kernel TSB are the same for
127 * each mapping size.  However that need not always be the case.  If
128 * the trap handlers are updated to search a different TSB for kpm
129 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
130 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
131 *
132 * In:
133 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
134 *    vaddr = virtual address (clobbered)
135 *    tsbp, szc, tmp = scratch
136 * Out:
137 *    tsbp = pointer to entry in TSB
138 */
139#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
140	cmp	vpshift, MMU_PAGESHIFT					;\
141	bne,pn	%icc, 1f		/* branch if large case */	;\
142	  sethi	%hi(kpmsm_tsbsz), szc					;\
143	sethi	%hi(kpmsm_tsbbase), tsbp				;\
144	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
145	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
146	ba,pt	%icc, 2f						;\
147	  nop								;\
1481:	sethi	%hi(kpm_tsbsz), szc					;\
149	sethi	%hi(kpm_tsbbase), tsbp					;\
150	ld	[szc + %lo(kpm_tsbsz)], szc				;\
151	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
1522:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
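/*
 * In C terms (illustrative sketch), the macro above selects the
 * small-page kpm TSB when vpshift is MMU_PAGESHIFT and the large-page
 * one otherwise, then indexes it exactly as GET_TSBE_POINTER does:
 *
 *	if (vpshift == MMU_PAGESHIFT) {
 *		tsbbase = kpmsm_tsbbase;
 *		szc = kpmsm_tsbsz;
 *	} else {
 *		tsbbase = kpm_tsbbase;
 *		szc = kpm_tsbsz;
 *	}
 *	tsbep = get_tsbe_pointer(tsbbase, vaddr, vpshift, szc);
 */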
153
154/*
155 * Lock the TSBE at virtual address tsbep.
156 *
157 * tsbep = TSBE va (ro)
158 * tmp1, tmp2 = scratch registers (clobbered)
159 * label = label to jump to if we fail to lock the tsb entry
160 * %asi = ASI to use for TSB access
161 *
162 * NOTE that we flush the TSB using fast VIS instructions that
163 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
164 * not be treated as a locked entry or we'll get stuck spinning on
165 * an entry that isn't locked but really invalid.
166 */
167
168#if defined(UTSB_PHYS)
169
170#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
171	lda	[tsbep]ASI_MEM, tmp1					;\
172	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
173	cmp	tmp1, tmp2 						;\
174	be,a,pn	%icc, label		/* if locked ignore */		;\
175	  nop								;\
176	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
177	cmp	tmp1, tmp2 						;\
178	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
179	  nop								;\
180	/* tsbe lock acquired */					;\
181	membar #StoreStore
182
183#else /* UTSB_PHYS */
184
185#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
186	lda	[tsbep]%asi, tmp1					;\
187	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
188	cmp	tmp1, tmp2 						;\
189	be,a,pn	%icc, label		/* if locked ignore */		;\
190	  nop								;\
191	casa	[tsbep]%asi, tmp1, tmp2					;\
192	cmp	tmp1, tmp2 						;\
193	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
194	  nop								;\
195	/* tsbe lock acquired */					;\
196	membar #StoreStore
197
198#endif /* UTSB_PHYS */
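/*
 * The locking protocol above, as an illustrative C sketch (cas32 here
 * is a hypothetical 32-bit compare-and-swap that returns the old value,
 * standing in for casa; tagp points at the 32-bit tag word):
 *
 *	uint32_t tag = *tagp;
 *	if (tag == TSBTAG_LOCKED)
 *		goto label;
 *	if (cas32(tagp, tag, TSBTAG_LOCKED) != tag)
 *		goto label;
 *	membar_producer();
 *
 * i.e. give up if the entry is already locked or if another CPU locks
 * it between the load and the cas; otherwise the StoreStore barrier
 * orders the locking store ahead of the TTE data store that follows.
 */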
199
200/*
201 * Atomically write TSBE at virtual address tsbep.
202 *
203 * tsbep = TSBE va (ro)
204 * tte = TSBE TTE (ro)
205 * tagtarget = TSBE tag (ro)
206 * %asi = ASI to use for TSB access
207 */
208
209#if defined(UTSB_PHYS)
210
211#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
212	add	tsbep, TSBE_TTE, tmp1					;\
213	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
214	membar #StoreStore						;\
215	add	tsbep, TSBE_TAG, tmp1					;\
216	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */
217
218#else /* UTSB_PHYS */
219
220#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget,tmp1)		\
221	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
222	membar #StoreStore						;\
223	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */
224
225#endif /* UTSB_PHYS */
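/*
 * The ordering matters: write the TTE data first, then a StoreStore
 * barrier, then the tag.  The tag store both publishes the entry and
 * drops the lock taken by TSB_LOCK_ENTRY.  Roughly, in C (field names
 * illustrative; the offsets are the assym constants used above):
 *
 *	tsbep->tte_data = tte;		at offset TSBE_TTE
 *	membar_producer();
 *	tsbep->tte_tag = tagtarget;	at offset TSBE_TAG
 */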
226
227/*
228 * Load an entry into the TSB at TL > 0.
229 *
230 * tsbep = pointer to the TSBE to load as va (ro)
231 * tte = value of the TTE retrieved and loaded (wo)
232 * tagtarget = tag target register.  To get TSBE tag to load,
233 *   we need to mask off the context and leave only the va (clobbered)
234 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
235 * tmp1, tmp2 = scratch registers
236 * label = label to jump to if we fail to lock the tsb entry
237 * %asi = ASI to use for TSB access
238 */
239
240#if defined(UTSB_PHYS)
241
242#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
243	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
244	/*								;\
245	 * We don't need to update the TSB and then check for a valid tte.	;\
246	 * TSB invalidate will spin until the entry is unlocked. Note,	;\
247	 * we always invalidate the hash table before we unload the TSB.;\
248	 */								;\
249	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
250	ldxa	[ttepa]ASI_MEM, tte					;\
251	TTE_CLR_SOFTEXEC_ML(tte)					;\
252	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
253	sethi	%hi(TSBTAG_INVALID), tmp2				;\
254	add	tsbep, TSBE_TAG, tmp1					;\
255	brgez,a,pn tte, label						;\
256	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
257	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
258label:
259
260#else /* UTSB_PHYS */
261
262#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
263	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
264	/*								;\
265	 * We don't need to update the TSB and then check for a valid tte.	;\
266	 * TSB invalidate will spin until the entry is unlocked. Note,	;\
267	 * we always invalidate the hash table before we unload the TSB.;\
268	 */								;\
269	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
270	ldxa	[ttepa]ASI_MEM, tte					;\
271	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
272	sethi	%hi(TSBTAG_INVALID), tmp2				;\
273	brgez,a,pn tte, label						;\
274	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
275	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
276label:
277
278#endif /* UTSB_PHYS */
279
280/*
281 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
282 *   for ITLB synthesis.
283 *
284 * tsbep = pointer to the TSBE to load as va (ro)
285 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
286 *   with exec_perm turned off and exec_synth turned on
287 * tagtarget = tag target register.  To get TSBE tag to load,
288 *   we need to mask off the context and leave only the va (clobbered)
289 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
290 * tmp1, tmp2 = scratch registers
291 * label = label to use for branch (text)
292 * %asi = ASI to use for TSB access
293 */
294
295#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
296	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
297	/*								;\
298	 * We don't need to update the TSB and then check for a valid tte.	;\
299	 * TSB invalidate will spin until the entry is unlocked. Note,	;\
300	 * we always invalidate the hash table before we unload the TSB.;\
301	 * OR in the 4M pfn offset to the TTE, set the exec_perm bit to 0,	;\
302	 * and set the exec_synth bit to 1.				;\
303	 */								;\
304	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
305	mov	tte, tmp1						;\
306	ldxa	[ttepa]ASI_MEM, tte					;\
307	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
308	sethi	%hi(TSBTAG_INVALID), tmp2				;\
309	brgez,a,pn tte, label						;\
310	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
311	or	tte, tmp1, tte						;\
312	andn	tte, TTE_EXECPRM_INT, tte				;\
313	or	tte, TTE_E_SYNTH_INT, tte				;\
314	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
315label:
316
317/*
318 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
319 *
320 * tte = value of the TTE, used to get tte_size bits (ro)
321 * tagaccess = tag access register, used to get 4M pfn bits (ro)
322 * pfn = 4M pfn bits shifted to offset for tte (out)
323 * tmp1 = scratch register
324 * label = label to use for branch (text)
325 */
326
327#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
328	/*								;\
329	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
330	 * Return them, shifted, in pfn.				;\
331	 */								;\
332	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
333	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
334	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
335	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
336	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
337	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
338label:									;\
339	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
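/*
 * Equivalent C sketch (illustrative): keep 3 bits of 4M-page index for
 * a 32M page (tte_size bits == 0 here) and 6 bits for a 256M page:
 *
 *	uint64_t idx = tagaccess >> MMU_PAGESHIFT4M;
 *	uint64_t szbits = (tte >> TTE_SZ_SHFT) & TTE_SZ_BITS;
 *	idx &= (szbits == 0) ? 0x7 : 0x3f;
 *	pfn = idx << MMU_PAGESHIFT4M;
 */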
340
341/*
342 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
343 * for ITLB synthesis.
344 *
345 * tte = value of the TTE, used to get tte_size bits (rw)
346 * tmp1 = scratch register
347 */
348
349#define	SET_TTE4M_PN(tte, tmp)						\
350	/*								;\
351	 * Set 4M pagesize tte bits. 					;\
352	 */								;\
353	set	TTE4M, tmp						;\
354	sllx	tmp, TTE_SZ_SHFT, tmp					;\
355	or	tte, tmp, tte
356
357/*
358 * Load an entry into the TSB at TL=0.
359 *
360 * tsbep = pointer to the TSBE to load as va (ro)
361 * tteva = pointer to the TTE to load as va (ro)
362 * tagtarget = TSBE tag to load (which contains no context), synthesized
363 * to match va of MMU tag target register only (ro)
364 * tmp1, tmp2 = scratch registers (clobbered)
365 * label = label to use for branches (text)
366 * %asi = ASI to use for TSB access
367 */
368
369#if defined(UTSB_PHYS)
370
371#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
372	/* can't rd tteva after locking tsb because it can tlb miss */	;\
373	ldx	[tteva], tteva			/* load tte */		;\
374	TTE_CLR_SOFTEXEC_ML(tteva)					;\
375	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
376	sethi	%hi(TSBTAG_INVALID), tmp2				;\
377	add	tsbep, TSBE_TAG, tmp1					;\
378	brgez,a,pn tteva, label						;\
379	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
380	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
381label:
382
383#else /* UTSB_PHYS */
384
385#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
386	/* can't rd tteva after locking tsb because it can tlb miss */	;\
387	ldx	[tteva], tteva			/* load tte */		;\
388	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
389	sethi	%hi(TSBTAG_INVALID), tmp2				;\
390	brgez,a,pn tteva, label						;\
391	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
392	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
393label:
394
395#endif /* UTSB_PHYS */
396
397/*
398 * Invalidate a TSB entry in the TSB.
399 *
400 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
401 *	 about this earlier to ensure this is true.  Thus when we are
402 *	 directly referencing tsbep below, we are referencing the tte_tag
403 *	 field of the TSBE.  If this  offset ever changes, the code below
404 *	 will need to be modified.
405 *
406 * tsbep = pointer to TSBE as va (ro)
407 * tag = invalidation is done if this matches the TSBE tag (ro)
408 * tmp1 - tmp3 = scratch registers (clobbered)
409 * label = label name to use for branches (text)
410 * %asi = ASI to use for TSB access
411 */
412
413#if defined(UTSB_PHYS)
414
415#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
416	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
417	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
418label/**/1:								;\
419	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
420	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
421	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
422	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
423	cmp	tag, tmp3		/* compare tags */		;\
424	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
425	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
426	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
427	cmp	tmp1, tmp3		/* if not successful */		;\
428	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
429	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
430label/**/2:
431
432#else /* UTSB_PHYS */
433
434#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
435	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
436	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
437label/**/1:								;\
438	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
439	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
440	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
441	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
442	cmp	tag, tmp3		/* compare tags */		;\
443	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
444	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
445	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
446	cmp	tmp1, tmp3		/* if not successful */		;\
447	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
448	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
449label/**/2:
450
451#endif /* UTSB_PHYS */
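/*
 * The loop above, as an illustrative C sketch (cas32 as in the
 * TSB_LOCK_ENTRY sketch; tagp32 addresses the 32-bit high word of the
 * tag, tagp64 the full 64-bit tag being matched):
 *
 *	for (;;) {
 *		uint32_t taghi = *tagp32;
 *		if (taghi == TSBTAG_LOCKED)
 *			continue;
 *		if (*tagp64 != tag)
 *			break;
 *		if (cas32(tagp32, taghi, TSBTAG_INVALID) == taghi)
 *			break;
 *	}
 *
 * i.e. spin while the entry is locked, do nothing if the tag does not
 * match, and retry from the top if the cas loses a race.
 */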
452
453#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
454#error	- TSB_SOFTSZ_MASK too small
455#endif
456
457
458/*
459 * An implementation of setx which will be hot patched at run time.
460 * Since it is being hot patched, there is no value passed in.
461 * Thus, essentially we are implementing
462 *	setx value, tmp, dest
463 * where value is RUNTIME_PATCH (aka 0) in this case.
464 */
465#define	RUNTIME_PATCH_SETX(dest, tmp)					\
466	sethi	%hh(RUNTIME_PATCH), tmp					;\
467	sethi	%lm(RUNTIME_PATCH), dest				;\
468	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
469	or	dest, %lo(RUNTIME_PATCH), dest				;\
470	sllx	tmp, 32, tmp						;\
471	nop				/* for perf reasons */		;\
472	or	tmp, dest, dest		/* contents of patched value */
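/*
 * The four immediates reassemble the 64-bit constant like this
 * (C sketch; hh22, hm10, lm22 and lo10 are the fields later filled in
 * by sfmmu_fixup_setx):
 *
 *	value = ((uint64_t)hh22 << 42) |	bits 63..42, sethi %hh
 *	    ((uint64_t)hm10 << 32) |		bits 41..32, or %hm
 *	    ((uint64_t)lm22 << 10) |		bits 31..10, sethi %lm
 *	    lo10;				bits 9..0,   or %lo
 */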
473
474#endif /* lint */
475
476
477#if defined (lint)
478
479/*
480 * sfmmu related subroutines
481 */
482uint_t
483sfmmu_disable_intrs()
484{ return(0); }
485
486/* ARGSUSED */
487void
488sfmmu_enable_intrs(uint_t pstate_save)
489{}
490
491/* ARGSUSED */
492int
493sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
494{ return(0); }
495
496/*
497 * Use cas, if tte has changed underneath us then reread and try again.
498 * In the case of a retry, it will update sttep with the new original.
499 */
500/* ARGSUSED */
501int
502sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
503{ return(0); }
504
505/*
506 * Use cas; returns 1 if the cas succeeds, 0 if no write was needed,
507 * and -1 if the tte changed underneath us.
507 */
508/* ARGSUSED */
509int
510sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
511{ return(0); }
512
513/* ARGSUSED */
514void
515sfmmu_copytte(tte_t *sttep, tte_t *dttep)
516{}
517
518/*ARGSUSED*/
519struct tsbe *
520sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
521{ return(0); }
522
523/*ARGSUSED*/
524uint64_t
525sfmmu_make_tsbtag(caddr_t va)
526{ return(0); }
527
528#else	/* lint */
529
530	.seg	".data"
531	.global	sfmmu_panic1
532sfmmu_panic1:
533	.asciz	"sfmmu_asm: interrupts already disabled"
534
535	.global	sfmmu_panic3
536sfmmu_panic3:
537	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"
538
539	.global	sfmmu_panic4
540sfmmu_panic4:
541	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"
542
543	.global	sfmmu_panic5
544sfmmu_panic5:
545	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"
546
547	.global	sfmmu_panic6
548sfmmu_panic6:
549	.asciz	"sfmmu_asm: interrupts not disabled"
550
551	.global	sfmmu_panic7
552sfmmu_panic7:
553	.asciz	"sfmmu_asm: kernel as"
554
555	.global	sfmmu_panic8
556sfmmu_panic8:
557	.asciz	"sfmmu_asm: gnum is zero"
558
559	.global	sfmmu_panic9
560sfmmu_panic9:
561	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"
562
563	.global	sfmmu_panic10
564sfmmu_panic10:
565	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"
566
567        ENTRY(sfmmu_disable_intrs)
568        rdpr    %pstate, %o0
569#ifdef DEBUG
570	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
571#endif /* DEBUG */
572        retl
573          wrpr   %o0, PSTATE_IE, %pstate
574        SET_SIZE(sfmmu_disable_intrs)
575
576	ENTRY(sfmmu_enable_intrs)
577        retl
578          wrpr    %g0, %o0, %pstate
579        SET_SIZE(sfmmu_enable_intrs)
580
581/*
582 * This routine is called both by resume() and sfmmu_get_ctx() to
583 * allocate a new context for the process on an MMU.
584 * if allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID.
585 * if allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
586 * is the case when sfmmu_alloc_ctx is called from resume().
587 *
588 * The caller must disable interrupts before entering this routine.
589 * To reduce ctx switch overhead, the code contains both 'fast path' and
590 * 'slow path' code. The fast path code covers the common case where only
591 * a quick check is needed and the real ctx allocation is not required.
592 * It can be done without holding the per-process (PP) lock.
593 * The 'slow path' code must be protected by the PP Lock and performs ctx
594 * allocation.
595 * Hardware context register and HAT mmu cnum are updated accordingly.
596 *
597 * %o0 - sfmmup
598 * %o1 - allocflag
599 * %o2 - CPU
600 * %o3 - sfmmu private/shared flag
601 *
602 * ret - 0: no ctx is allocated
603 *       1: a ctx is allocated
604 */
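/*
 * The fast path below amounts to the following C sketch (illustrative;
 * hat_gnum/hat_cnum stand for the fields unpacked by
 * SFMMU_MMUID_GNUM_CNUM):
 *
 *	if (hat_cnum != INVALID_CONTEXT && hat_gnum == mmu_ctxp->gnum)
 *		return (1);
 *
 * Anything else drops into the slow path, which re-runs the same check
 * under sfmmu_ctx_lock and, if it still fails, allocates a new cnum by
 * atomically incrementing mmu_ctxp->cnum (bailing out with
 * INVALID_CONTEXT once cnum reaches mmu_ctxp->nctxs, so a wraparound
 * can be performed later).
 */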
605        ENTRY_NP(sfmmu_alloc_ctx)
606
607#ifdef DEBUG
608	sethi   %hi(ksfmmup), %g1
609	ldx     [%g1 + %lo(ksfmmup)], %g1
610	cmp     %g1, %o0
611	bne,pt   %xcc, 0f
612	  nop
613
614	sethi   %hi(panicstr), %g1		! if kernel as, panic
615        ldx     [%g1 + %lo(panicstr)], %g1
616        tst     %g1
617        bnz,pn  %icc, 7f
618          nop
619
620	sethi	%hi(sfmmu_panic7), %o0
621	call	panic
622	  or	%o0, %lo(sfmmu_panic7), %o0
623
6247:
625	retl
626	  mov	%g0, %o0			! %o0 = ret = 0
627
6280:
629	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
630#endif /* DEBUG */
631
632	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1
633
634	! load global mmu_ctxp info
635	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
636        lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index
637
638	! load global mmu_ctxp gnum
639	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum
640
641#ifdef DEBUG
642	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
643	bne,pt	%xcc, 3f
644	  nop
645
646	sethi   %hi(panicstr), %g1	! test if panicstr is already set
647        ldx     [%g1 + %lo(panicstr)], %g1
648        tst     %g1
649        bnz,pn  %icc, 1f
650          nop
651
652	sethi	%hi(sfmmu_panic8), %o0
653	call	panic
654	  or	%o0, %lo(sfmmu_panic8), %o0
6551:
656	retl
657	  mov	%g0, %o0			! %o0 = ret = 0
6583:
659#endif
660
661	! load HAT sfmmu_ctxs[mmuid] gnum, cnum
662
663	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
664	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
665
666	/*
667	 * %g5 = sfmmu gnum returned
668	 * %g6 = sfmmu cnum returned
669	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
670	 * %g4 = scratch
671	 *
672	 * Fast path code, do a quick check.
673	 */
674	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
675
676	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
677	bne,pt	%icc, 1f			! valid hat cnum, check gnum
678	  nop
679
680	! cnum == INVALID, check allocflag
681	mov	%g0, %g4	! %g4 = ret = 0
682	brz,pt  %o1, 8f		! allocflag == 0, skip ctx allocation, bail
683	  mov	%g6, %o1
684
685	! (invalid HAT cnum) && (allocflag == 1)
686	ba,pt	%icc, 2f
687	  nop
6881:
689	! valid HAT cnum, check gnum
690	cmp	%g5, %o4
691	mov	1, %g4				!%g4 = ret = 1
692	be,a,pt	%icc, 8f			! gnum unchanged, go to done
693	  mov	%g6, %o1
694
6952:
696	/*
697	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
698	 * followed by the 'slow path' code.
699	 */
700	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
7013:
702	brz	%g3, 5f
703	  nop
7044:
705	brnz,a,pt       %g3, 4b				! spin if lock is 1
706	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
707	ba	%xcc, 3b				! retry the lock
708	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3    ! %g3 = PP lock
709
7105:
711	membar  #LoadLoad
712	/*
713	 * %g5 = sfmmu gnum returned
714	 * %g6 = sfmmu cnum returned
715	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
716	 * %g4 = scratch
717	 */
718	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
719
720	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
721	bne,pt	%icc, 1f			! valid hat cnum, check gnum
722	  nop
723
724	! cnum == INVALID, check allocflag
725	mov	%g0, %g4	! %g4 = ret = 0
726	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
727	  mov	%g6, %o1
728
729	! (invalid HAT cnum) && (allocflag == 1)
730	ba,pt	%icc, 6f
731	  nop
7321:
733	! valid HAT cnum, check gnum
734	cmp	%g5, %o4
735	mov	1, %g4				! %g4 = ret  = 1
736	be,a,pt	%icc, 2f			! gnum unchanged, go to done
737	  mov	%g6, %o1
738
739	ba,pt	%icc, 6f
740	  nop
7412:
742	membar  #LoadStore|#StoreStore
743	ba,pt %icc, 8f
744	  clrb  [%o0 + SFMMU_CTX_LOCK]
7456:
746	/*
747	 * We get here if we do not have a valid context, or
748	 * the HAT gnum does not match global gnum. We hold
749	 * sfmmu_ctx_lock spinlock. Allocate that context.
750	 *
751	 * %o3 = mmu_ctxp
752	 */
753	add	%o3, MMU_CTX_CNUM, %g3
754	ld	[%o3 + MMU_CTX_NCTXS], %g4
755
756	/*
757         * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
758         * %g3 = mmu cnum address
759	 * %g4 = mmu nctxs
760	 *
761	 * %o0 = sfmmup
762	 * %o1 = mmu current cnum value (used as new cnum)
763	 * %o4 = mmu gnum
764	 *
765	 * %o5 = scratch
766	 */
767	ld	[%g3], %o1
7680:
769	cmp	%o1, %g4
770	bl,a,pt %icc, 1f
771	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1
772
773	/*
774	 * cnum reaches max; bail so that wraparound can be performed later.
775	 */
776	set	INVALID_CONTEXT, %o1
777	mov	%g0, %g4		! %g4 = ret = 0
778
779	membar  #LoadStore|#StoreStore
780	ba,pt	%icc, 8f
781	  clrb	[%o0 + SFMMU_CTX_LOCK]
7821:
783	! %g3 = addr of mmu_ctxp->cnum
784	! %o5 = mmu_ctxp->cnum + 1
785	cas	[%g3], %o1, %o5
786	cmp	%o1, %o5
787	bne,a,pn %xcc, 0b	! cas failed
788	  ld	[%g3], %o1
789
790#ifdef DEBUG
791        set	MAX_SFMMU_CTX_VAL, %o5
792	cmp	%o1, %o5
793	ble,pt %icc, 2f
794	  nop
795
796	sethi	%hi(sfmmu_panic9), %o0
797	call	panic
798	  or	%o0, %lo(sfmmu_panic9), %o0
7992:
800#endif
801	! update hat gnum and cnum
802	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
803	or	%o4, %o1, %o4
804	stx	%o4, [%g2 + SFMMU_CTXS]
805
806	membar  #LoadStore|#StoreStore
807	clrb	[%o0 + SFMMU_CTX_LOCK]
808
809	mov	1, %g4			! %g4 = ret = 1
8108:
811	/*
812	 * program the secondary context register
813	 *
814	 * %o1 = cnum
815	 * %g1 = sfmmu private/shared flag (0:private,  1:shared)
816	 */
817
818	/*
819	 * When we come here and context is invalid, we want to set both
820	 * private and shared ctx regs to INVALID. In order to
821	 * do so, we set the sfmmu priv/shared flag to 'private' regardless
822	 * so that private ctx reg will be set to invalid.
823	 * Note that on sun4v values written to private context register are
824	 * automatically written to corresponding shared context register as
825	 * well. On sun4u SET_SECCTX() will invalidate shared context register
826	 * when it sets a private secondary context register.
827	 */
828
829	cmp	%o1, INVALID_CONTEXT
830	be,a,pn	%icc, 9f
831	  clr	%g1
8329:
833
834#ifdef	sun4u
835	ldub	[%o0 + SFMMU_CEXT], %o2
836	sll	%o2, CTXREG_EXT_SHIFT, %o2
837	or	%o1, %o2, %o1
838#endif /* sun4u */
839
840	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)
841
842        retl
843          mov   %g4, %o0                        ! %o0 = ret
844
845	SET_SIZE(sfmmu_alloc_ctx)
846
847	ENTRY_NP(sfmmu_modifytte)
848	ldx	[%o2], %g3			/* current */
849	ldx	[%o0], %g1			/* original */
8502:
851	ldx	[%o1], %g2			/* modified */
852	cmp	%g2, %g3			/* is modified = current? */
853	be,a,pt	%xcc,1f				/* yes, don't write */
854	stx	%g3, [%o0]			/* update new original */
855	casx	[%o2], %g1, %g2
856	cmp	%g1, %g2
857	be,pt	%xcc, 1f			/* cas succeeded - return */
858	  nop
859	ldx	[%o2], %g3			/* new current */
860	stx	%g3, [%o0]			/* save as new original */
861	ba,pt	%xcc, 2b
862	  mov	%g3, %g1
8631:	retl
864	membar	#StoreLoad
865	SET_SIZE(sfmmu_modifytte)
866
867	ENTRY_NP(sfmmu_modifytte_try)
868	ldx	[%o1], %g2			/* modified */
869	ldx	[%o2], %g3			/* current */
870	ldx	[%o0], %g1			/* original */
871	cmp	%g3, %g2			/* is modified = current? */
872	be,a,pn %xcc,1f				/* yes, don't write */
873	mov	0, %o1				/* as if cas failed. */
874
875	casx	[%o2], %g1, %g2
876	membar	#StoreLoad
877	cmp	%g1, %g2
878	movne	%xcc, -1, %o1			/* cas failed. */
879	move	%xcc, 1, %o1			/* cas succeeded. */
8801:
881	stx	%g2, [%o0]			/* report "current" value */
882	retl
883	mov	%o1, %o0
884	SET_SIZE(sfmmu_modifytte_try)
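/*
 * sfmmu_modifytte_try, as an illustrative C sketch (assumes the tte_t
 * union exposes the raw 64-bit word as ->ll and uses the illumos
 * atomic_cas_64(); the membar is omitted for brevity):
 *
 *	int
 *	sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
 *	{
 *		uint64_t mod = stmodttep->ll;
 *		uint64_t cur = dttep->ll;
 *		uint64_t orig = sttep->ll;
 *		int ret;
 *
 *		if (cur == mod) {
 *			sttep->ll = cur;
 *			return (0);
 *		}
 *		cur = atomic_cas_64(&dttep->ll, orig, mod);
 *		ret = (cur == orig) ? 1 : -1;
 *		sttep->ll = cur;
 *		return (ret);
 *	}
 */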
885
886	ENTRY_NP(sfmmu_copytte)
887	ldx	[%o0], %g1
888	retl
889	stx	%g1, [%o1]
890	SET_SIZE(sfmmu_copytte)
891
892
893	/*
894	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
895	 * %o0 = TSB base address (in), pointer to TSB entry (out)
896	 * %o1 = vaddr (in)
897	 * %o2 = vpshift (in)
898	 * %o3 = tsb size code (in)
899	 * %o4 = scratch register
900	 */
901	ENTRY_NP(sfmmu_get_tsbe)
902	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
903	retl
904	nop
905	SET_SIZE(sfmmu_get_tsbe)
906
907	/*
908	 * Return a TSB tag for the given va.
909	 * %o0 = va (in/clobbered)
910	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
911	 */
912	ENTRY_NP(sfmmu_make_tsbtag)
913	retl
914	srln	%o0, TTARGET_VA_SHIFT, %o0
915	SET_SIZE(sfmmu_make_tsbtag)
916
917#endif /* lint */
918
919/*
920 * Other sfmmu primitives
921 */
922
923
924#if defined (lint)
925void
926sfmmu_patch_ktsb(void)
927{
928}
929
930void
931sfmmu_kpm_patch_tlbm(void)
932{
933}
934
935void
936sfmmu_kpm_patch_tsbm(void)
937{
938}
939
940void
941sfmmu_patch_shctx(void)
942{
943}
944
945/* ARGSUSED */
946void
947sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
948{
949}
950
951/* ARGSUSED */
952void
953sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
954{
955}
956
957/* ARGSUSED */
958void
959sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
960{
961}
962
963/* ARGSUSED */
964void
965sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
966{
967}
968
969#else /* lint */
970
971#define	I_SIZE		4
972
973	ENTRY_NP(sfmmu_fix_ktlb_traptable)
974	/*
975	 * %o0 = start of patch area
976	 * %o1 = size code of TSB to patch
977	 * %o3 = scratch
978	 */
979	/* fix sll */
980	ld	[%o0], %o3			/* get sll */
981	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
982	st	%o3, [%o0]			/* write sll */
983	flush	%o0
984	/* fix srl */
985	add	%o0, I_SIZE, %o0		/* goto next instr. */
986	ld	[%o0], %o3			/* get srl */
987	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
988	st	%o3, [%o0]			/* write srl */
989	retl
990	flush	%o0
991	SET_SIZE(sfmmu_fix_ktlb_traptable)
992
993	ENTRY_NP(sfmmu_fixup_ktsbbase)
994	/*
995	 * %o0 = start of patch area
996	 * %o5 = kernel virtual or physical tsb base address
997	 * %o2, %o3 are used as scratch registers.
998	 */
999	/* fixup sethi instruction */
1000	ld	[%o0], %o3
1001	srl	%o5, 10, %o2			! offset is bits 32:10
1002	or	%o3, %o2, %o3			! set imm22
1003	st	%o3, [%o0]
1004	/* fixup offset of lduw/ldx */
1005	add	%o0, I_SIZE, %o0		! next instr
1006	ld	[%o0], %o3
1007	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
1008	or	%o3, %o2, %o3
1009	st	%o3, [%o0]
1010	retl
1011	flush	%o0
1012	SET_SIZE(sfmmu_fixup_ktsbbase)
1013
1014	ENTRY_NP(sfmmu_fixup_setx)
1015	/*
1016	 * %o0 = start of patch area
1017	 * %o4 = 64 bit value to patch
1018	 * %o2, %o3 are used as scratch registers.
1019	 *
1020	 * Note: Assuming that all parts of the instructions which need to be
1021	 *	 patched correspond to RUNTIME_PATCH (aka 0)
1022	 *
1023	 * Note the implementation of setx which is being patched is as follows:
1024	 *
1025	 * sethi   %hh(RUNTIME_PATCH), tmp
1026	 * sethi   %lm(RUNTIME_PATCH), dest
1027	 * or      tmp, %hm(RUNTIME_PATCH), tmp
1028	 * or      dest, %lo(RUNTIME_PATCH), dest
1029	 * sllx    tmp, 32, tmp
1030	 * nop
1031	 * or      tmp, dest, dest
1032	 *
1033	 * which differs from the implementation in the
1034	 * "SPARC Architecture Manual"
1035	 */
1036	/* fixup sethi instruction */
1037	ld	[%o0], %o3
1038	srlx	%o4, 42, %o2			! bits [63:42]
1039	or	%o3, %o2, %o3			! set imm22
1040	st	%o3, [%o0]
1041	/* fixup sethi instruction */
1042	add	%o0, I_SIZE, %o0		! next instr
1043	ld	[%o0], %o3
1044	sllx	%o4, 32, %o2			! clear upper bits
1045	srlx	%o2, 42, %o2			! bits [31:10]
1046	or	%o3, %o2, %o3			! set imm22
1047	st	%o3, [%o0]
1048	/* fixup or instruction */
1049	add	%o0, I_SIZE, %o0		! next instr
1050	ld	[%o0], %o3
1051	srlx	%o4, 32, %o2			! bits [63:32]
1052	and	%o2, 0x3ff, %o2			! bits [41:32]
1053	or	%o3, %o2, %o3			! set imm
1054	st	%o3, [%o0]
1055	/* fixup or instruction */
1056	add	%o0, I_SIZE, %o0		! next instr
1057	ld	[%o0], %o3
1058	and	%o4, 0x3ff, %o2			! bits [9:0]
1059	or	%o3, %o2, %o3			! set imm
1060	st	%o3, [%o0]
1061	retl
1062	flush	%o0
1063	SET_SIZE(sfmmu_fixup_setx)
1064
1065	ENTRY_NP(sfmmu_fixup_or)
1066	/*
1067	 * %o0 = start of patch area
1068	 * %o4 = 32 bit value to patch
1069	 * %o2, %o3 are used as scratch registers.
1070	 * Note: Assuming that all parts of the instructions which need to be
1071	 *	 patched correspond to RUNTIME_PATCH (aka 0)
1072	 */
1073	ld	[%o0], %o3
1074	and	%o4, 0x3ff, %o2			! bits [9:0]
1075	or	%o3, %o2, %o3			! set imm
1076	st	%o3, [%o0]
1077	retl
1078	flush	%o0
1079	SET_SIZE(sfmmu_fixup_or)
1080
1081	ENTRY_NP(sfmmu_fixup_shiftx)
1082	/*
1083	 * %o0 = start of patch area
1084	 * %o4 = signed int immediate value to add to sllx/srlx imm field
1085	 * %o2, %o3 are used as scratch registers.
1086	 *
1087	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
1088	 * so we do a simple add.  The caller must be careful to prevent
1089	 * overflow, which could easily occur if the initial value is nonzero!
1090	 */
1091	ld	[%o0], %o3			! %o3 = instruction to patch
1092	and	%o3, 0x3f, %o2			! %o2 = existing imm value
1093	add	%o2, %o4, %o2			! %o2 = new imm value
1094	andn	%o3, 0x3f, %o3			! clear old imm value
1095	and	%o2, 0x3f, %o2			! truncate new imm value
1096	or	%o3, %o2, %o3			! set new imm value
1097	st	%o3, [%o0]			! store updated instruction
1098	retl
1099	flush	%o0
1100	SET_SIZE(sfmmu_fixup_shiftx)
1101
1102	ENTRY_NP(sfmmu_fixup_mmu_asi)
1103	/*
1104	 * Patch imm_asi of all ldda instructions in the MMU
1105	 * trap handlers.  We search MMU_PATCH_INSTR instructions
1106	 * starting from the itlb miss handler (trap 0x64).
1107	 * %o0 = address of tt[0,1]_itlbmiss
1108	 * %o1 = imm_asi to setup, shifted by appropriate offset.
1109	 * %o3 = number of instructions to search
1110	 * %o4 = reserved by caller: called from leaf routine
1111	 */
11121:	ldsw	[%o0], %o2			! load instruction to %o2
1113	brgez,pt %o2, 2f
1114	  srl	%o2, 30, %o5
1115	btst	1, %o5				! test bit 30; skip if not set
1116	bz,pt	%icc, 2f
1117	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
1118	srlx	%o5, 58, %o5			! isolate op3 part of opcode
1119	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
1120	brnz,pt	%o5, 2f				! skip if not a match
1121	  or	%o2, %o1, %o2			! or in imm_asi
1122	st	%o2, [%o0]			! write patched instruction
11232:	dec	%o3
1124	brnz,a,pt %o3, 1b			! loop until we're done
1125	  add	%o0, I_SIZE, %o0
1126	retl
1127	flush	%o0
1128	SET_SIZE(sfmmu_fixup_mmu_asi)
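/*
 * The ldda test above decodes the instruction word as in this C sketch
 * (op must be 3 and op3 must be 0x13 for ldda):
 *
 *	int
 *	is_ldda(uint32_t instr)
 *	{
 *		return ((instr >> 30) == 0x3 &&
 *		    ((instr >> 19) & 0x3f) == 0x13);
 *	}
 *
 * The imm_asi field sits at bits 12..5 of the instruction, which is why
 * sfmmu_patch_mmu_asi passes the ASI shifted left by 5.
 */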
1129
1130	/*
1131	 * Patch immediate ASI used to access the TSB in the
1132	 * trap table.
1133	 * inputs: %o0 = value of ktsb_phys
1134	 */
1135	ENTRY_NP(sfmmu_patch_mmu_asi)
1136	mov	%o7, %o4			! save return pc in %o4
1137	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
1138	movrz	%o0, ASI_NQUAD_LD, %o3
1139	sll	%o3, 5, %o1			! imm_asi offset
1140	mov	6, %o3				! number of instructions
1141	sethi	%hi(dktsb), %o0			! to search
1142	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
1143	  or	%o0, %lo(dktsb), %o0
1144	mov	6, %o3				! number of instructions
1145	sethi	%hi(dktsb4m), %o0		! to search
1146	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
1147	  or	%o0, %lo(dktsb4m), %o0
1148	mov	6, %o3				! number of instructions
1149	sethi	%hi(iktsb), %o0			! to search
1150	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
1151	  or	%o0, %lo(iktsb), %o0
1152	mov	6, %o3				! number of instructions
1153	sethi	%hi(iktsb4m), %o0		! to search
1154	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
1155	  or	%o0, %lo(iktsb4m), %o0
1156	mov	%o4, %o7			! restore return pc -- leaf
1157	retl
1158	nop
1159	SET_SIZE(sfmmu_patch_mmu_asi)
1160
1161	ENTRY_NP(sfmmu_patch_ktsb)
1162	/*
1163	 * We need to fix iktsb, dktsb, et al.
1164	 */
1165	save	%sp, -SA(MINFRAME), %sp
1166	set	ktsb_phys, %o1
1167	ld	[%o1], %o4
1168	set	ktsb_base, %o5
1169	set	ktsb4m_base, %l1
1170	brz,pt	%o4, 1f
1171	  nop
1172	set	ktsb_pbase, %o5
1173	set	ktsb4m_pbase, %l1
11741:
1175	sethi	%hi(ktsb_szcode), %o1
1176	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */
1177
1178	sethi	%hi(iktsb), %o0
1179	call	sfmmu_fix_ktlb_traptable
1180	  or	%o0, %lo(iktsb), %o0
1181
1182	sethi	%hi(dktsb), %o0
1183	call	sfmmu_fix_ktlb_traptable
1184	  or	%o0, %lo(dktsb), %o0
1185
1186	sethi	%hi(ktsb4m_szcode), %o1
1187	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */
1188
1189	sethi	%hi(iktsb4m), %o0
1190	call	sfmmu_fix_ktlb_traptable
1191	  or	%o0, %lo(iktsb4m), %o0
1192
1193	sethi	%hi(dktsb4m), %o0
1194	call	sfmmu_fix_ktlb_traptable
1195	  or	%o0, %lo(dktsb4m), %o0
1196
1197#ifndef sun4v
1198	mov	ASI_N, %o2
1199	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
1200	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
1201	sethi	%hi(tsb_kernel_patch_asi), %o0
1202	call	sfmmu_fixup_or
1203	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
1204#endif /* !sun4v */
1205
1206	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)
1207
1208	sethi	%hi(dktsbbase), %o0
1209	call	sfmmu_fixup_setx	! patch value of ktsb base addr
1210	  or	%o0, %lo(dktsbbase), %o0
1211
1212	sethi	%hi(iktsbbase), %o0
1213	call	sfmmu_fixup_setx	! patch value of ktsb base addr
1214	  or	%o0, %lo(iktsbbase), %o0
1215
1216	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
1217	call	sfmmu_fixup_setx	! patch value of ktsb base addr
1218	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0
1219
1220#ifdef sun4v
1221	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
1222	call	sfmmu_fixup_setx	! patch value of ktsb base addr
1223	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
1224#endif /* sun4v */
1225
1226	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)
1227
1228	sethi	%hi(dktsb4mbase), %o0
1229	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
1230	  or	%o0, %lo(dktsb4mbase), %o0
1231
1232	sethi	%hi(iktsb4mbase), %o0
1233	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
1234	  or	%o0, %lo(iktsb4mbase), %o0
1235
1236	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
1237	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
1238	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0
1239
1240#ifdef sun4v
1241	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
1242	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
1243	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
1244#endif /* sun4v */
1245
1246	set	ktsb_szcode, %o4
1247	ld	[%o4], %o4
1248	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
1249	call	sfmmu_fixup_or		! patch value of ktsb_szcode
1250	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0
1251
1252#ifdef sun4v
1253	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
1254	call	sfmmu_fixup_or		! patch value of ktsb_szcode
1255	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
1256#endif /* sun4v */
1257
1258	set	ktsb4m_szcode, %o4
1259	ld	[%o4], %o4
1260	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
1261	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
1262	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0
1263
1264#ifdef sun4v
1265	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
1266	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
1267	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
1268#endif /* sun4v */
1269
1270	ret
1271	restore
1272	SET_SIZE(sfmmu_patch_ktsb)
1273
1274	ENTRY_NP(sfmmu_kpm_patch_tlbm)
1275	/*
1276	 * Fixup trap handlers in common segkpm case.  This is reserved
1277	 * for future use should kpm TSB be changed to be other than the
1278	 * kernel TSB.
1279	 */
1280	retl
1281	nop
1282	SET_SIZE(sfmmu_kpm_patch_tlbm)
1283
1284	ENTRY_NP(sfmmu_kpm_patch_tsbm)
1285	/*
1286	 * nop the branch to sfmmu_kpm_dtsb_miss_small
1287	 * in the case where we are using large pages for
1288	 * seg_kpm (and hence must probe the second TSB for
1289	 * seg_kpm VAs)
1290	 */
1291	set	dktsb4m_kpmcheck_small, %o0
1292	MAKE_NOP_INSTR(%o1)
1293	st	%o1, [%o0]
1294	flush	%o0
1295	retl
1296	nop
1297	SET_SIZE(sfmmu_kpm_patch_tsbm)
1298
1299	ENTRY_NP(sfmmu_patch_utsb)
1300#ifdef UTSB_PHYS
1301	retl
1302	nop
1303#else /* UTSB_PHYS */
1304	/*
1305	 * We need to hot patch utsb_vabase and utsb4m_vabase
1306	 */
1307	save	%sp, -SA(MINFRAME), %sp
1308
1309	/* patch value of utsb_vabase */
1310	set	utsb_vabase, %o1
1311	ldx	[%o1], %o4
1312	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
1313	call	sfmmu_fixup_setx
1314	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
1315	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
1316	call	sfmmu_fixup_setx
1317	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
1318	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
1319	call	sfmmu_fixup_setx
1320	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
1321
1322	/* patch value of utsb4m_vabase */
1323	set	utsb4m_vabase, %o1
1324	ldx	[%o1], %o4
1325	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
1326	call	sfmmu_fixup_setx
1327	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
1328	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
1329	call	sfmmu_fixup_setx
1330	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
1331	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
1332	call	sfmmu_fixup_setx
1333	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0
1334
1335	/*
1336	 * Patch TSB base register masks and shifts if needed.
1337	 * By default the TSB base register contents are set up for 4M slab.
1338	 * If we're using a smaller slab size and reserved VA range we need
1339	 * to patch up those values here.
1340	 */
1341	set	tsb_slab_shift, %o1
1342	set	MMU_PAGESHIFT4M, %o4
1343	lduw	[%o1], %o3
1344	subcc	%o4, %o3, %o4
1345	bz,pt	%icc, 1f
1346	  /* delay slot safe */
1347
1348	/* patch reserved VA range size if needed. */
1349	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
1350	call	sfmmu_fixup_shiftx
1351	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
1352	call	sfmmu_fixup_shiftx
1353	  add	%o0, I_SIZE, %o0
1354	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
1355	call	sfmmu_fixup_shiftx
1356	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
1357	call	sfmmu_fixup_shiftx
1358	  add	%o0, I_SIZE, %o0
13591:
1360	/* patch TSBREG_VAMASK used to set up TSB base register */
1361	set	tsb_slab_mask, %o1
1362	ldx	[%o1], %o4
1363	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
1364	call	sfmmu_fixup_or
1365	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
1366	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
1367	call	sfmmu_fixup_or
1368	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0
1369
1370	ret
1371	restore
1372#endif /* UTSB_PHYS */
1373	SET_SIZE(sfmmu_patch_utsb)
1374
1375	ENTRY_NP(sfmmu_patch_shctx)
1376#ifdef sun4u
1377	retl
1378	  nop
1379#else /* sun4u */
1380	set	sfmmu_shctx_cpu_mondo_patch, %o0
1381	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp       %g5
1382	st	%o1, [%o0]
1383	flush	%o0
1384	MAKE_NOP_INSTR(%o1)
1385	add	%o0, I_SIZE, %o0	! next instr
1386	st	%o1, [%o0]
1387	flush	%o0
1388
1389	set	sfmmu_shctx_user_rtt_patch, %o0
1390	st      %o1, [%o0]		! nop 1st instruction
1391	flush	%o0
1392	add     %o0, I_SIZE, %o0
1393	st      %o1, [%o0]		! nop 2nd instruction
1394	flush	%o0
1395	add     %o0, I_SIZE, %o0
1396	st      %o1, [%o0]		! nop 3rd instruction
1397	flush	%o0
1398	add     %o0, I_SIZE, %o0
1399	st      %o1, [%o0]		! nop 4th instruction
1400	flush	%o0
1401	add     %o0, I_SIZE, %o0
1402	st      %o1, [%o0]		! nop 5th instruction
1403	flush	%o0
1404	add     %o0, I_SIZE, %o0
1405	st      %o1, [%o0]		! nop 6th instruction
1406	retl
1407	flush	%o0
1408#endif /* sun4u */
1409	SET_SIZE(sfmmu_patch_shctx)
1410
1411	/*
1412	 * Routine that loads an entry into a tsb using virtual addresses.
1413	 * Locking is required since all cpus can use the same TSB.
1414	 * Note that it is no longer required to have a valid context
1415	 * when calling this function.
1416	 */
1417	ENTRY_NP(sfmmu_load_tsbe)
1418	/*
1419	 * %o0 = pointer to tsbe to load
1420	 * %o1 = tsb tag
1421	 * %o2 = virtual pointer to TTE
1422	 * %o3 = 1 if physical address in %o0 else 0
1423	 */
1424	rdpr	%pstate, %o5
1425#ifdef DEBUG
1426	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
1427#endif /* DEBUG */
1428
1429	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
1430
1431	SETUP_TSB_ASI(%o3, %g3)
1432	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)
1433
1434	wrpr	%g0, %o5, %pstate		/* enable interrupts */
1435
1436	retl
1437	membar	#StoreStore|#StoreLoad
1438	SET_SIZE(sfmmu_load_tsbe)
1439
1440	/*
1441	 * Flush TSB of a given entry if the tag matches.
1442	 */
1443	ENTRY(sfmmu_unload_tsbe)
1444	/*
1445	 * %o0 = pointer to tsbe to be flushed
1446	 * %o1 = tag to match
1447	 * %o2 = 1 if physical address in %o0 else 0
1448	 */
1449	SETUP_TSB_ASI(%o2, %g1)
1450	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
1451	retl
1452	membar	#StoreStore|#StoreLoad
1453	SET_SIZE(sfmmu_unload_tsbe)
1454
1455	/*
1456	 * Routine that loads a TTE into the kpm TSB from C code.
1457	 * Locking is required since kpm TSB is shared among all CPUs.
1458	 */
1459	ENTRY_NP(sfmmu_kpm_load_tsb)
1460	/*
1461	 * %o0 = vaddr
1462	 * %o1 = ttep
1463	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
1464	 */
1465	rdpr	%pstate, %o5			! %o5 = saved pstate
1466#ifdef DEBUG
1467	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
1468#endif /* DEBUG */
1469	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts
1470
1471#ifndef sun4v
1472	sethi	%hi(ktsb_phys), %o4
1473	mov	ASI_N, %o3
1474	ld	[%o4 + %lo(ktsb_phys)], %o4
1475	movrnz	%o4, ASI_MEM, %o3
1476	mov	%o3, %asi
1477#endif /* !sun4v */
1478	mov	%o0, %g1			! %g1 = vaddr
1479
1480	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1481	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
1482	/* %g2 = tsbep, %g1 clobbered */
1483
1484	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
1485	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
1486	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)
1487
1488	wrpr	%g0, %o5, %pstate		! enable interrupts
1489	retl
1490	  membar #StoreStore|#StoreLoad
1491	SET_SIZE(sfmmu_kpm_load_tsb)
1492
1493	/*
1494	 * Routine that shoots down a TTE in the kpm TSB or in the
1495	 * kernel TSB depending on virtpg. Locking is required since
1496	 * kpm/kernel TSB is shared among all CPUs.
1497	 */
1498	ENTRY_NP(sfmmu_kpm_unload_tsb)
1499	/*
1500	 * %o0 = vaddr
1501	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
1502	 */
1503#ifndef sun4v
1504	sethi	%hi(ktsb_phys), %o4
1505	mov	ASI_N, %o3
1506	ld	[%o4 + %lo(ktsb_phys)], %o4
1507	movrnz	%o4, ASI_MEM, %o3
1508	mov	%o3, %asi
1509#endif /* !sun4v */
1510	mov	%o0, %g1			! %g1 = vaddr
1511
1512	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1513	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
1514	/* %g2 = tsbep, %g1 clobbered */
1515
1516	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
1517	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
1518	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
1519
1520	retl
1521	  membar	#StoreStore|#StoreLoad
1522	SET_SIZE(sfmmu_kpm_unload_tsb)
1523
1524#endif /* lint */
1525
1526
1527#if defined (lint)
1528
1529/*ARGSUSED*/
1530pfn_t
1531sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
1532{ return(0); }
1533
1534#else /* lint */
1535
1536	ENTRY_NP(sfmmu_ttetopfn)
1537	ldx	[%o0], %g1			/* read tte */
1538	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
1539	/*
1540	 * g1 = pfn
1541	 */
1542	retl
1543	mov	%g1, %o0
1544	SET_SIZE(sfmmu_ttetopfn)
1545
1546#endif /* !lint */
1547
1548/*
1549 * These macros are used to update global sfmmu hme hash statistics
1550 * in performance-critical paths. They are only enabled in debug
1551 * kernels or if SFMMU_STAT_GATHER is defined.
1552 */
1553#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1554#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1555	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1556	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
1557	cmp	tmp1, hatid						;\
1558	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
1559	set	sfmmu_global_stat, tmp1					;\
1560	add	tmp1, tmp2, tmp1					;\
1561	ld	[tmp1], tmp2						;\
1562	inc	tmp2							;\
1563	st	tmp2, [tmp1]
1564
1565#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1566	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1567	mov	HATSTAT_KHASH_LINKS, tmp2				;\
1568	cmp	tmp1, hatid						;\
1569	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
1570	set	sfmmu_global_stat, tmp1					;\
1571	add	tmp1, tmp2, tmp1					;\
1572	ld	[tmp1], tmp2						;\
1573	inc	tmp2							;\
1574	st	tmp2, [tmp1]
1575
1576
1577#else /* DEBUG || SFMMU_STAT_GATHER */
1578
1579#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1580
1581#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1582
1583#endif  /* DEBUG || SFMMU_STAT_GATHER */
1584
1585/*
1586 * This macro is used to update global sfmmu kstats in
1587 * non-performance-critical areas, so it is enabled all the time.
1588 */
1589#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
1590	sethi	%hi(sfmmu_global_stat), tmp1				;\
1591	add	tmp1, statname, tmp1					;\
1592	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
1593	inc	tmp2							;\
1594	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
1595
1596/*
1597 * These macros are used to update per-CPU stats in
1598 * non-performance-critical areas, so they are enabled all the time.
1599 */
1600#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
1601	ld	[tsbarea + stat], tmp1					;\
1602	inc	tmp1							;\
1603	st	tmp1, [tsbarea + stat]
1604
1605/*
1606 * These macros are used to update per-CPU stats in
1607 * non-performance-critical areas, so they are enabled all the time.
1608 */
1609#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
1610	lduh	[tsbarea + stat], tmp1					;\
1611	inc	tmp1							;\
1612	stuh	tmp1, [tsbarea + stat]
1613
1614#if defined(KPM_TLBMISS_STATS_GATHER)
1615	/*
1616	 * Count kpm dtlb misses separately to allow a different
1617	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
1618	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
1619	 */
1620#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
1621	brgez	tagacc, label	/* KPM VA? */				;\
1622	nop								;\
1623	CPU_INDEX(tmp1, tsbma)						;\
1624	sethi	%hi(kpmtsbm_area), tsbma				;\
1625	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
1626	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
1627	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
1628	/* VA range check */						;\
1629	ldx	[tsbma + KPMTSBM_VBASE], val				;\
1630	cmp	tagacc, val						;\
1631	blu,pn	%xcc, label						;\
1632	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
1633	cmp	tagacc, tmp1						;\
1634	bgeu,pn	%xcc, label						;\
1635	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
1636	inc	val							;\
1637	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
1638label:
1639#else
1640#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1641#endif	/* KPM_TLBMISS_STATS_GATHER */
1642
1643#if defined (lint)
1644/*
1645 * The following routines are jumped to from the mmu trap handlers to do
1646 * the setting up to call systrap.  They are separate routines instead of
1647 * being part of the handlers because the handlers would exceed 32
1648 * instructions, and since this is part of the slow path, the jump
1649 * cost is irrelevant.
1650 */
1651void
1652sfmmu_pagefault(void)
1653{
1654}
1655
1656void
1657sfmmu_mmu_trap(void)
1658{
1659}
1660
1661void
1662sfmmu_window_trap(void)
1663{
1664}
1665
1666void
1667sfmmu_kpm_exception(void)
1668{
1669}
1670
1671#else /* lint */
1672
1673#ifdef	PTL1_PANIC_DEBUG
1674	.seg	".data"
1675	.global	test_ptl1_panic
1676test_ptl1_panic:
1677	.word	0
1678	.align	8
1679
1680	.seg	".text"
1681	.align	4
1682#endif	/* PTL1_PANIC_DEBUG */
1683
1684
1685	ENTRY_NP(sfmmu_pagefault)
1686	SET_GL_REG(1)
1687	USE_ALTERNATE_GLOBALS(%g5)
1688	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1689	rdpr	%tt, %g6
1690	cmp	%g6, FAST_IMMU_MISS_TT
1691	be,a,pn	%icc, 1f
1692	  mov	T_INSTR_MMU_MISS, %g3
1693	cmp	%g6, T_INSTR_MMU_MISS
1694	be,a,pn	%icc, 1f
1695	  mov	T_INSTR_MMU_MISS, %g3
1696	mov	%g5, %g2
1697	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1698	cmp	%g6, FAST_DMMU_MISS_TT
1699	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1700	cmp	%g6, T_DATA_MMU_MISS
1701	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1702
1703#ifdef  PTL1_PANIC_DEBUG
1704	/* check if we want to test the tl1 panic */
1705	sethi	%hi(test_ptl1_panic), %g4
1706	ld	[%g4 + %lo(test_ptl1_panic)], %g1
1707	st	%g0, [%g4 + %lo(test_ptl1_panic)]
1708	cmp	%g1, %g0
1709	bne,a,pn %icc, ptl1_panic
1710	  or	%g0, PTL1_BAD_DEBUG, %g1
1711#endif	/* PTL1_PANIC_DEBUG */
17121:
1713	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
1714	/*
1715	 * g2 = tag access reg
1716	 * g3.l = type
1717	 * g3.h = 0
1718	 */
1719	sethi	%hi(trap), %g1
1720	or	%g1, %lo(trap), %g1
17212:
1722	ba,pt	%xcc, sys_trap
1723	  mov	-1, %g4
1724	SET_SIZE(sfmmu_pagefault)
1725
1726	ENTRY_NP(sfmmu_mmu_trap)
1727	SET_GL_REG(1)
1728	USE_ALTERNATE_GLOBALS(%g5)
1729	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
1730	rdpr	%tt, %g6
1731	cmp	%g6, FAST_IMMU_MISS_TT
1732	be,a,pn	%icc, 1f
1733	  mov	T_INSTR_MMU_MISS, %g3
1734	cmp	%g6, T_INSTR_MMU_MISS
1735	be,a,pn	%icc, 1f
1736	  mov	T_INSTR_MMU_MISS, %g3
1737	mov	%g5, %g2
1738	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1739	cmp	%g6, FAST_DMMU_MISS_TT
1740	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1741	cmp	%g6, T_DATA_MMU_MISS
1742	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
17431:
1744	/*
1745	 * g2 = tag access reg
1746	 * g3 = type
1747	 */
1748	sethi	%hi(sfmmu_tsbmiss_exception), %g1
1749	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
1750	ba,pt	%xcc, sys_trap
1751	  mov	-1, %g4
1752	/*NOTREACHED*/
1753	SET_SIZE(sfmmu_mmu_trap)
1754
1755	ENTRY_NP(sfmmu_suspend_tl)
1756	SET_GL_REG(1)
1757	USE_ALTERNATE_GLOBALS(%g5)
1758	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
1759	rdpr	%tt, %g6
1760	cmp	%g6, FAST_IMMU_MISS_TT
1761	be,a,pn	%icc, 1f
1762	  mov	T_INSTR_MMU_MISS, %g3
1763	mov	%g5, %g2
1764	cmp	%g6, FAST_DMMU_MISS_TT
1765	move	%icc, T_DATA_MMU_MISS, %g3
1766	movne	%icc, T_DATA_PROT, %g3
17671:
1768	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
1769	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
1770	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
1771	ba,pt	%xcc, sys_trap
1772	  mov	PIL_15, %g4
1773	/*NOTREACHED*/
1774	SET_SIZE(sfmmu_suspend_tl)
1775
1776	/*
1777	 * No %g registers in use at this point.
1778	 */
1779	ENTRY_NP(sfmmu_window_trap)
1780	rdpr	%tpc, %g1
1781#ifdef sun4v
1782#ifdef DEBUG
1783	/* We assume previous %gl was 1 */
1784	rdpr	%tstate, %g4
1785	srlx	%g4, TSTATE_GL_SHIFT, %g4
1786	and	%g4, TSTATE_GL_MASK, %g4
1787	cmp	%g4, 1
1788	bne,a,pn %icc, ptl1_panic
1789	  mov	PTL1_BAD_WTRAP, %g1
1790#endif /* DEBUG */
1791	/* user miss at tl>1. better be the window handler or user_rtt */
1792	/* in user_rtt? */
1793	set	rtt_fill_start, %g4
1794	cmp	%g1, %g4
1795	blu,pn %xcc, 6f
1796	 .empty
1797	set	rtt_fill_end, %g4
1798	cmp	%g1, %g4
1799	bgeu,pn %xcc, 6f
1800	 nop
1801	set	fault_rtt_fn1, %g1
1802	wrpr	%g0, %g1, %tnpc
1803	ba,a	7f
18046:
1805	! must save this trap level before descending trap stack
1806	! no need to save %tnpc, either overwritten or discarded
1807	! already got it: rdpr	%tpc, %g1
1808	rdpr	%tstate, %g6
1809	rdpr	%tt, %g7
1810	! trap level saved, go get underlying trap type
1811	rdpr	%tl, %g5
1812	sub	%g5, 1, %g3
1813	wrpr	%g3, %tl
1814	rdpr	%tt, %g2
1815	wrpr	%g5, %tl
1816	! restore saved trap level
1817	wrpr	%g1, %tpc
1818	wrpr	%g6, %tstate
1819	wrpr	%g7, %tt
1820#else /* sun4v */
1821	/* user miss at tl>1. better be the window handler */
1822	rdpr	%tl, %g5
1823	sub	%g5, 1, %g3
1824	wrpr	%g3, %tl
1825	rdpr	%tt, %g2
1826	wrpr	%g5, %tl
1827#endif /* sun4v */
1828	and	%g2, WTRAP_TTMASK, %g4
1829	cmp	%g4, WTRAP_TYPE
1830	bne,pn	%xcc, 1f
1831	 nop
1832	/* tpc should be in the trap table */
1833	set	trap_table, %g4
1834	cmp	%g1, %g4
1835	blt,pn %xcc, 1f
1836	 .empty
1837	set	etrap_table, %g4
1838	cmp	%g1, %g4
1839	bge,pn %xcc, 1f
1840	 .empty
1841	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
1842	add	%g1, WTRAP_FAULTOFF, %g1
1843	wrpr	%g0, %g1, %tnpc
18447:
1845	/*
1846	 * some wbuf handlers will call systrap to resolve the fault;
1847	 * we pass the trap type so they can figure out the correct parameters.
1848	 * g5 = trap type, g6 = tag access reg
1849	 */
1850
1851	/*
1852	 * only use g5, g6, g7 registers after we have switched to alternate
1853	 * globals.
1854	 */
1855	SET_GL_REG(1)
1856	USE_ALTERNATE_GLOBALS(%g5)
1857	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
1858	rdpr	%tt, %g7
1859	cmp	%g7, FAST_IMMU_MISS_TT
1860	be,a,pn	%icc, ptl1_panic
1861	  mov	PTL1_BAD_WTRAP, %g1
1862	cmp	%g7, T_INSTR_MMU_MISS
1863	be,a,pn	%icc, ptl1_panic
1864	  mov	PTL1_BAD_WTRAP, %g1
1865	mov	T_DATA_PROT, %g5
1866	cmp	%g7, FAST_DMMU_MISS_TT
1867	move	%icc, T_DATA_MMU_MISS, %g5
1868	cmp	%g7, T_DATA_MMU_MISS
1869	move	%icc, T_DATA_MMU_MISS, %g5
1870	! XXXQ AGS re-check out this one
1871	done
18721:
1873	CPU_PADDR(%g1, %g4)
1874	add	%g1, CPU_TL1_HDLR, %g1
1875	lda	[%g1]ASI_MEM, %g4
1876	brnz,a,pt %g4, sfmmu_mmu_trap
1877	  sta	%g0, [%g1]ASI_MEM
1878	ba,pt	%icc, ptl1_panic
1879	  mov	PTL1_BAD_TRAP, %g1
1880	SET_SIZE(sfmmu_window_trap)
1881
1882	ENTRY_NP(sfmmu_kpm_exception)
1883	/*
1884	 * We have accessed an unmapped segkpm address or a legal segkpm
1885	 * address which is involved in a VAC alias conflict prevention.
1886	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
1887	 * set. If it is, we will instead note that a fault has occurred
1888	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
1889	 * a "retry"). This will step over the faulting instruction.
1890	 * Note that this means that a legal segkpm address involved in
1891	 * VAC alias conflict prevention (a rare case to begin with)
1892	 * cannot be used in DTrace.
1893	 */
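	/*
	 * In outline, the check below is roughly this C sketch (the
	 * cpu_core/cpuc_dtrace_* names match the code below; the rest is
	 * shorthand):
	 *
	 *	if (cpu_core[cpuid].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) {
	 *		cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *		cpuc_dtrace_illval = fault_addr;
	 *		done;				// skip the instruction
	 *	} else {
	 *		sys_trap(trap, tagacc, T_DATA_MMU_MISS, -1);
	 *	}
	 */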
1894	CPU_INDEX(%g1, %g2)
1895	set	cpu_core, %g2
1896	sllx	%g1, CPU_CORE_SHIFT, %g1
1897	add	%g1, %g2, %g1
1898	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
1899	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
1900	bz	0f
1901	or	%g2, CPU_DTRACE_BADADDR, %g2
1902	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
1903	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1904	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
1905	done
19060:
1907	TSTAT_CHECK_TL1(1f, %g1, %g2)
19081:
1909	SET_GL_REG(1)
1910	USE_ALTERNATE_GLOBALS(%g5)
1911	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
1912	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1913	/*
1914	 * g2=tagacc g3.l=type g3.h=0
1915	 */
1916	sethi	%hi(trap), %g1
1917	or	%g1, %lo(trap), %g1
1918	ba,pt	%xcc, sys_trap
1919	mov	-1, %g4
1920	SET_SIZE(sfmmu_kpm_exception)
1921
1922#endif /* lint */
1923
1924#if defined (lint)
1925
1926void
1927sfmmu_tsb_miss(void)
1928{
1929}
1930
1931void
1932sfmmu_kpm_dtsb_miss(void)
1933{
1934}
1935
1936void
1937sfmmu_kpm_dtsb_miss_small(void)
1938{
1939}
1940
1941#else /* lint */
1942
1943#if (IMAP_SEG != 0)
1944#error - ism_map->ism_seg offset is not zero
1945#endif
1946
1947/*
1948 * Checks to see if the vaddr passed in via tagacc is in an ISM segment
1949 * for this process.  If so, it copies the ism mapping into "ismseg" and
1950 * branches to label "ismhit".  If this is not an ISM process or not an
1951 * ISM tlb miss, it falls through.
1955 *
1956 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1957 * so that any other threads of this process will not try and walk the ism
1958 * maps while they are being changed.
1959 *
1960 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
1961 *       will make sure of that. This means we can terminate our search on
1962 *       the first zero mapping we find.
1963 *
1964 * Parameters:
1965 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
1966 * tsbmiss	= address of tsb miss area (in)
1967 * ismseg	= contents of ism_seg for this ism map (out)
1968 * ismhat	= physical address of imap_ismhat for this ism map (out)
1969 * tmp1		= scratch reg (CLOBBERED)
1970 * tmp2		= scratch reg (CLOBBERED)
1971 * tmp3		= scratch reg (CLOBBERED)
 * label	= temporary labels
 * ismhit	= label to jump to on an ism tlb miss
1975 */
#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
	label, ismhit)							\
1978	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
1979	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
1980	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
1981label/**/1:								;\
1982	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
1983	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
1984label/**/2:								;\
1985	brz,pt  ismseg, label/**/3		/* no mapping */	;\
1986	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
1987	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
1988	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
1989	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
1990	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
1991	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
1992	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
1993	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
	cmp	tmp2, tmp1		 	/* check offset < size */ ;\
1995	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
1996	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
1997									;\
1998	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
1999	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
2000	cmp	ismhat, tmp1						;\
2001	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
2002	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
2003									;\
2004	add	tmp3, IBLK_NEXTPA, tmp1					;\
2005	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
2006	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
2007	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
2008label/**/3:
2009
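/*
 * The walk above, roughly in C (a sketch; iblk_nextpa()/iblk_map() are
 * shorthand for the IBLK_NEXTPA/IBLK_MAPS offsets used above):
 *
 *	for (blkpa = tsbmiss->ismblkpa; blkpa != -1;
 *	    blkpa = iblk_nextpa(blkpa)) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			seg = iblk_map(blkpa, i)->ism_seg;
 *			if (seg == 0)
 *				return;			// no holes: all done
 *			if ((va >> vb_shift) - (seg >> vb_shift) <
 *			    (seg & sz_mask))
 *				goto ismhit;		// va is in this seg
 *		}
 *	}
 */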
2010/*
 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
 * It also returns the virtual pg for vaddr (ie. vaddr >> hmeshift)
2013 * Parameters:
2014 * tagacc = reg containing virtual address
2015 * hatid = reg containing sfmmu pointer
2016 * hmeshift = constant/register to shift vaddr to obtain vapg
2017 * hmebp = register where bucket pointer will be stored
2018 * vapg = register where virtual page will be stored
2019 * tmp1, tmp2 = tmp registers
2020 */
2021
2022
2023#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
2024	vapg, label, tmp1, tmp2)					\
2025	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
2026	brnz,a,pt tmp1, label/**/1					;\
2027	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
2028	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
2029	ba,pt	%xcc, label/**/2					;\
2030	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
2031label/**/1:								;\
2032	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
2033label/**/2:								;\
2034	srlx	tagacc, hmeshift, vapg					;\
2035	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
2036	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
2037	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
2038	add	hmebp, tmp1, hmebp
2039
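/*
 * In C the bucket computation above is roughly (a sketch; khashsz and
 * uhashsz act as masks, i.e. nentries - 1):
 *
 *	if (ctx_of(tagacc) == 0) {			// kernel context
 *		base = khashstart;  mask = khashsz;
 *	} else {
 *		base = uhashstart;  mask = uhashsz;
 *	}
 *	vapg = vaddr >> hmeshift;
 *	hmebp = base + ((vapg ^ (uintptr_t)hatid) & mask) * HMEBUCK_SIZE;
 */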
2040/*
2041 * hashtag includes bspage + hashno (64 bits).
2042 */
2043
2044#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
2045	sllx	vapg, hmeshift, vapg					;\
2046	mov	hashno, hblktag						;\
2047	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
2048	or	vapg, hblktag, hblktag
2049
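/*
 * i.e., roughly (a sketch), with vapg = vaddr >> hmeshift from above:
 *
 *	hblktag = (vapg << hmeshift) | (hashno << HTAG_REHASH_SHIFT);
 *
 * so the base page address and the rehash number pack into one 64-bit tag.
 */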
2050/*
2051 * Function to traverse hmeblk hash link list and find corresponding match.
2052 * The search is done using physical pointers. It returns the physical address
 * pointer to the hmeblk that matches the tag provided.
2054 * Parameters:
2055 * hmebp	= register that points to hme hash bucket, also used as
2056 *		  tmp reg (clobbered)
2057 * hmeblktag	= register with hmeblk tag match
2058 * hatid	= register with hatid
2059 * hmeblkpa	= register where physical ptr will be stored
2060 * tmp1		= tmp reg
2061 * label: temporary label
2062 */
2063
2064#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, tsbarea, 	\
2065	tmp1, label)							\
2066	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
2067	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2068	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2069label/**/1:								;\
2070	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2071	be,pn   %xcc, label/**/2					;\
2072	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2073	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
2074	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2075	add	hmebp, CLONGSIZE, hmebp					;\
2076	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
2077	xor	tmp1, hmeblktag, tmp1					;\
2078	xor	hmebp, hatid, hmebp					;\
2079	or	hmebp, tmp1, hmebp					;\
2080	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
2081	  add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
2082	ba,pt	%xcc, label/**/1					;\
2083	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
2084label/**/2:
2085
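/*
 * The search above, roughly in C (a sketch; tag_word0()/tag_word1() are
 * shorthand for the two 64-bit words at HMEBLK_TAG, read through ASI_MEM
 * since hmeblkpa is physical; the xor/xor/or sequence is just this
 * two-word compare):
 *
 *	for (pa = bucket->nextpa; pa != HMEBLK_ENDPA; pa = nextpa(pa)) {
 *		if (tag_word0(pa) == hmeblktag && tag_word1(pa) == hatid)
 *			break;				// hit
 *	}
 */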
2086/*
2087 * Function to traverse hmeblk hash link list and find corresponding match.
2088 * The search is done using physical pointers. It returns the physical address
 * pointer to the hmeblk that matches the tag provided.
2091 * Parameters:
2092 * hmeblktag	= register with hmeblk tag match (rid field is 0)
2093 * hatid	= register with hatid (pointer to SRD)
2094 * hmeblkpa	= register where physical ptr will be stored
2095 * tmp1		= tmp reg
2096 * tmp2		= tmp reg
2097 * label: temporary label
2098 */
2099
2100#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, tsbarea,	\
2101	tmp1, tmp2, label)			 			\
2102label/**/1:								;\
2103	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2104	be,pn   %xcc, label/**/4					;\
2105	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
2106	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
2107	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2108	add	tmp2, CLONGSIZE, tmp2					;\
2109	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
2110	xor	tmp1, hmeblktag, tmp1					;\
2111	xor	tmp2, hatid, tmp2					;\
2112	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
2113	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2114label/**/2:								;\
2115	ba,pt	%xcc, label/**/1					;\
2116	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
2117label/**/3:								;\
2118	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
2119	bgeu,pt	%xcc, label/**/2					;\
2120	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2121	and	tmp1, BT_ULMASK, tmp2					;\
2122	srlx	tmp1, BT_ULSHIFT, tmp1					;\
2123	sllx	tmp1, CLONGSHIFT, tmp1					;\
2124	add	tsbarea, tmp1, tmp1					;\
2125	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
2126	srlx	tmp1, tmp2, tmp1					;\
2127	btst	0x1, tmp1						;\
2128	bz,pn	%xcc, label/**/2					;\
2129	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2130label/**/4:
2131
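/*
 * The extra exit path above (label 3) is the shared-hmeblk twist: the
 * probe tag carries rid 0, so when everything else matches the first
 * tag-word xor leaves exactly the candidate's rid in tmp1.  Roughly
 * (a sketch):
 *
 *	if (rid >= SFMMU_MAX_HME_REGIONS)
 *		continue;			// real tag mismatch
 *	if ((tsbmiss->shmermap[rid >> BT_ULSHIFT] >>
 *	    (rid & BT_ULMASK)) & 1)
 *		break;				// region is live: hit
 *	continue;				// region not attached
 */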
2132#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2133#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2134#endif
2135
2136/*
2137 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
 * the offset for the corresponding hment.
2139 * Parameters:
2140 * In:
2141 *	vaddr = register with virtual address
2142 *	hmeblkpa = physical pointer to hme_blk
2143 * Out:
2144 *	hmentoff = register where hment offset will be stored
2145 *	hmemisc = hblk_misc
2146 * Scratch:
2147 *	tmp1
2148 */
2149#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2150	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2151	lda	[hmentoff]ASI_MEM, hmemisc 				;\
2152	andcc	hmemisc, HBLK_SZMASK, %g0				;\
2153	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2154	  or	%g0, HMEBLK_HME1, hmentoff				;\
2155	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2156	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2157	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2158	add	tmp1, HMEBLK_HME1, hmentoff				;\
2159label1:
2160
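/*
 * i.e., roughly (a sketch):
 *
 *	if (hblk_misc & HBLK_SZMASK)		// large page hmeblk
 *		hmentoff = HMEBLK_HME1;		// single hment
 *	else					// 8K hmeblk: NHMENTS hments
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) <<
 *		    SFHME_SHIFT);
 */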
2161/*
2162 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2163 *
2164 * tagacc	= (pseudo-)tag access register (in)
2165 * hatid	= sfmmu pointer for TSB miss (in)
2166 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2167 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2168 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2169 * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
2170 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2171 *		  for this page size.
2172 * hashno	= constant/register hash number
2173 * tmp		= temp value - clobbered
2174 * label	= temporary label for branching within macro.
2175 * foundlabel	= label to jump to when tte is found.
2176 * suspendlabel= label to jump to when tte is suspended.
2177 * exitlabel	= label to jump to when tte is not found.
2178 *
2179 */
2180#define GET_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, hmeshift, \
2181		 hashno, tmp, label, foundlabel, suspendlabel, exitlabel) \
2182									;\
2183	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2184	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2185	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2186		hmeblkpa, label/**/5, hmemisc, tmp)			;\
2187									;\
2188	/*								;\
2189	 * tagacc = tagacc						;\
2190	 * hatid = hatid						;\
2191	 * tsbarea = tsbarea						;\
2192	 * tte   = hmebp (hme bucket pointer)				;\
2193	 * hmeblkpa  = vapg  (virtual page)				;\
2194	 * hmemisc, tmp = scratch					;\
2195	 */								;\
2196	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2197	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
2198									;\
2199	/*								;\
2200	 * tagacc = tagacc						;\
2201	 * hatid = hatid						;\
2202	 * tte   = hmebp						;\
2203	 * hmeblkpa  = CLOBBERED					;\
2204	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
2205	 * tmp  = scratch						;\
2206	 */								;\
2207	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2208	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, 	 		\
2209		tsbarea, tagacc, label/**/1)				;\
2210	/*								;\
2211	 * tagacc = CLOBBERED						;\
2212	 * tte = CLOBBERED						;\
2213	 * hmeblkpa = hmeblkpa						;\
2214	 * tmp = scratch						;\
2215	 */								;\
2216	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2217	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
2218	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2219	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2220	  nop								;\
2221label/**/4:								;\
2222	/*								;\
2223	 * We have found the hmeblk containing the hment.		;\
2224	 * Now we calculate the corresponding tte.			;\
2225	 *								;\
2226	 * tagacc = tagacc						;\
2227	 * hatid = hatid						;\
2228	 * tte   = clobbered						;\
2229	 * hmeblkpa  = hmeblkpa						;\
2230	 * hmemisc  = hblktag						;\
2231	 * tmp = scratch						;\
2232	 */								;\
2233	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2234		label/**/2)						;\
2235									;\
2236	/*								;\
2237	 * tagacc = tagacc						;\
2238	 * hatid = hmentoff						;\
2239	 * tte   = clobbered						;\
2240	 * hmeblkpa  = hmeblkpa						;\
2241	 * hmemisc  = hblk_misc						;\
2242	 * tmp = scratch						;\
2243	 */								;\
2244									;\
2245	add	hatid, SFHME_TTE, hatid					;\
2246	add	hmeblkpa, hatid, hmeblkpa				;\
2247	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2248	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2249	set	TTE_SUSPEND, hatid					;\
2250	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2251	btst	tte, hatid						;\
2252	bz,pt	%xcc, foundlabel					;\
2253	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2254									;\
2255	/*								;\
2256	 * Mapping is suspended, so goto suspend label.			;\
2257	 */								;\
2258	ba,pt	%xcc, suspendlabel					;\
2259	  nop
2260
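/*
 * GET_TTE in outline, roughly (a sketch; hment() is shorthand for the
 * HMEBLK_TO_HMENT offset calculation):
 *
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, hmeshift);
 *	tag = MAKE_HASHTAG(...) | SFMMU_INVALID_SHMERID;
 *	hmeblkpa = HMEHASH_SEARCH(hmebp, tag, hatid);
 *	if (hmeblkpa == HMEBLK_ENDPA)
 *		goto exitlabel;			// not found at this hash
 *	tte = hment(hmeblkpa, tagacc)->hme_tte;
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;
 *	goto foundlabel;
 */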
2261/*
2262 * GET_SHME_TTE is similar to GET_TTE() except it searches
2263 * shared hmeblks via HMEHASH_SEARCH_SHME() macro.
 * If a valid tte is found, hmemisc = shctx flag, i.e., shme is
2265 * either 0 (not part of scd) or 1 (part of scd).
2266 */
2267#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, 	\
2268		hmeshift, hashno, tmp, label, foundlabel,		\
2269		suspendlabel, exitlabel)				\
2270									;\
2271	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2272	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2273	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2274		hmeblkpa, label/**/5, hmemisc, tmp)			;\
2275									;\
2276	/*								;\
2277	 * tagacc = tagacc						;\
2278	 * hatid = hatid						;\
2279	 * tsbarea = tsbarea						;\
2280	 * tte   = hmebp (hme bucket pointer)				;\
2281	 * hmeblkpa  = vapg  (virtual page)				;\
2282	 * hmemisc, tmp = scratch					;\
2283	 */								;\
2284	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2285									;\
2286	/*								;\
2287	 * tagacc = tagacc						;\
2288	 * hatid = hatid						;\
2289	 * tsbarea = tsbarea						;\
2290	 * tte   = hmebp						;\
2291	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
2292	 * hmeblkpa  = CLOBBERED					;\
2293	 * tmp = scratch						;\
2294	 */								;\
2295	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2296									;\
2297	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
2298	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2299	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
2300									;\
2301label/**/8:								;\
2302	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa,			\
2303		tsbarea, tagacc, tte, label/**/1)			;\
2304	/*								;\
2305	 * tagacc = CLOBBERED						;\
2306	 * tte = CLOBBERED						;\
2307	 * hmeblkpa = hmeblkpa						;\
2308	 * tmp = scratch						;\
2309	 */								;\
2310	cmp	hmeblkpa, HMEBLK_ENDPA					;\
2311	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
2312	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2313	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2314	  nop								;\
2315label/**/4:								;\
2316	/*								;\
2317	 * We have found the hmeblk containing the hment.		;\
2318	 * Now we calculate the corresponding tte.			;\
2319	 *								;\
2320	 * tagacc = tagacc						;\
2321	 * hatid = hatid						;\
2322	 * tte   = clobbered						;\
2323	 * hmeblkpa  = hmeblkpa						;\
2324	 * hmemisc  = hblktag						;\
2325	 * tsbarea = tsbmiss area					;\
2326	 * tmp = scratch						;\
2327	 */								;\
2328	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2329		label/**/2)						;\
2330									;\
2331	/*								;\
2332	 * tagacc = tagacc						;\
2333	 * hatid = hmentoff						;\
2334	 * tte = clobbered						;\
2335	 * hmeblkpa  = hmeblkpa						;\
2336	 * hmemisc  = hblk_misc						;\
2337	 * tsbarea = tsbmiss area					;\
2338	 * tmp = scratch						;\
2339	 */								;\
2340									;\
2341	add	hatid, SFHME_TTE, hatid					;\
2342	add	hmeblkpa, hatid, hmeblkpa				;\
2343	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2344	brlz,pt tte, label/**/6						;\
2345	  nop								;\
2346	btst	HBLK_SZMASK, hmemisc					;\
2347	bnz,a,pt %icc, label/**/7					;\
2348	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2349									;\
2350	/*								;\
	 * We found an invalid 8K tte in shme.  It may not belong	;\
	 * to shme's region since region size/alignment granularity	;\
	 * is 8K but different regions don't share hmeblks.		;\
	 * Continue the search.						;\
2355	 */								;\
2356	sub	hmeblkpa, hatid, hmeblkpa				;\
2357	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2358	srlx	tagacc, hmeshift, tte					;\
2359	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
2360	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
2361	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
2362	ba,a,pt	%xcc, label/**/8					;\
2363label/**/6:								;\
2364	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
2365	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2366label/**/7:								;\
2367	set	TTE_SUSPEND, hatid					;\
2368	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2369	btst	tte, hatid						;\
2370	bz,pt	%xcc, foundlabel					;\
2371	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2372									;\
2373	/*								;\
2374	 * Mapping is suspended, so goto suspend label.			;\
2375	 */								;\
2376	ba,pt	%xcc, suspendlabel					;\
2377	  nop
2378
2379	/*
2380	 * KERNEL PROTECTION HANDLER
2381	 *
2382	 * g1 = tsb8k pointer register (clobbered)
2383	 * g2 = tag access register (ro)
2384	 * g3 - g7 = scratch registers
2385	 *
2386	 * Note: This function is patched at runtime for performance reasons.
2387	 * 	 Any changes here require sfmmu_patch_ktsb fixed.
2388	 */
2389	ENTRY_NP(sfmmu_kprot_trap)
2390	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2391sfmmu_kprot_patch_ktsb_base:
2392	RUNTIME_PATCH_SETX(%g1, %g6)
2393	/* %g1 = contents of ktsb_base or ktsb_pbase */
2394sfmmu_kprot_patch_ktsb_szcode:
2395	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2396
2397	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2398	! %g1 = First TSB entry pointer, as TSB miss handler expects
2399
2400	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2401sfmmu_kprot_patch_ktsb4m_base:
2402	RUNTIME_PATCH_SETX(%g3, %g6)
2403	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2404sfmmu_kprot_patch_ktsb4m_szcode:
2405	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2406
2407	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2408	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2409
	CPU_TSBMISS_AREA(%g6, %g7)
	HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2412	ba,pt	%xcc, sfmmu_tsb_miss_tt
2413	  nop
2414
2415	/*
2416	 * USER PROTECTION HANDLER
2417	 *
2418	 * g1 = tsb8k pointer register (ro)
2419	 * g2 = tag access register (ro)
2420	 * g3 = faulting context (clobbered, currently not used)
2421	 * g4 - g7 = scratch registers
2422	 */
2423	ALTENTRY(sfmmu_uprot_trap)
2424#ifdef sun4v
2425	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2426	/* %g1 = first TSB entry ptr now, %g2 preserved */
2427
2428	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2429	brlz,pt %g3, 9f				/* check for 2nd TSB */
2430	  nop
2431
2432	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2433	/* %g3 = second TSB entry ptr now, %g2 preserved */
2434
2435#else /* sun4v */
2436#ifdef UTSB_PHYS
2437	/* g1 = first TSB entry ptr */
2438	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2439	brlz,pt %g3, 9f			/* check for 2nd TSB */
2440	  nop
2441
2442	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2443	/* %g3 = second TSB entry ptr now, %g2 preserved */
2444#else /* UTSB_PHYS */
2445	brgez,pt %g1, 9f		/* check for 2nd TSB */
2446	  mov	-1, %g3			/* set second tsbe ptr to -1 */
2447
2448	mov	%g2, %g7
2449	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2450	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2451	mov	%g1, %g7
2452	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2453#endif /* UTSB_PHYS */
2454#endif /* sun4v */
24559:
2456	CPU_TSBMISS_AREA(%g6, %g7)
2457	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2458	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2459	  nop
2460
2461	/*
2462	 * Kernel 8K page iTLB miss.  We also get here if we took a
2463	 * fast instruction access mmu miss trap while running in
2464	 * invalid context.
2465	 *
2466	 * %g1 = 8K TSB pointer register (not used, clobbered)
2467	 * %g2 = tag access register (used)
2468	 * %g3 = faulting context id (used)
2469	 * %g7 = TSB tag to match (used)
2470	 */
2471	.align	64
2472	ALTENTRY(sfmmu_kitlb_miss)
2473	brnz,pn %g3, tsb_tl0_noctxt
2474	  nop
2475
2476	/* kernel miss */
2477	/* get kernel tsb pointer */
2478	/* we patch the next set of instructions at run time */
	/* NOTE: any changes here require sfmmu_patch_ktsb to be fixed up too */
2480iktsbbase:
2481	RUNTIME_PATCH_SETX(%g4, %g5)
2482	/* %g4 = contents of ktsb_base or ktsb_pbase */
2483
2484iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2485	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2486	or	%g4, %g1, %g1			! form tsb ptr
2487	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2488	cmp	%g4, %g7
2489	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
2490	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
2491
2492	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2493	bz,pn	%icc, exec_fault
2494	  nop
2495	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2496	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2497	retry
2498
2499iktsb4mbase:
	RUNTIME_PATCH_SETX(%g4, %g6)
	/* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
iktsb4m:
	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2505	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
2506	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2507	cmp	%g4, %g7
2508	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2509	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2510	bz,pn	%icc, exec_fault
2511	  nop
2512	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2513	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2514	retry
2515
2516	/*
2517	 * Kernel dTLB miss.  We also get here if we took a fast data
2518	 * access mmu miss trap while running in invalid context.
2519	 *
2520	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2521	 *	We select the TSB miss handler to branch to depending on
2522	 *	the virtual address of the access.  In the future it may
2523	 *	be desirable to separate kpm TTEs into their own TSB,
2524	 *	in which case all that needs to be done is to set
2525	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2526	 *	early in the miss if we detect a kpm VA to a new handler.
2527	 *
2528	 * %g1 = 8K TSB pointer register (not used, clobbered)
2529	 * %g2 = tag access register (used)
2530	 * %g3 = faulting context id (used)
2531	 */
2532	.align	64
2533	ALTENTRY(sfmmu_kdtlb_miss)
2534	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2535	  nop
2536
2537	/* Gather some stats for kpm misses in the TLB. */
2538	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2539	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2540
2541	/*
2542	 * Get first TSB offset and look for 8K/64K/512K mapping
2543	 * using the 8K virtual page as the index.
2544	 *
2545	 * We patch the next set of instructions at run time;
2546	 * any changes here require sfmmu_patch_ktsb changes too.
2547	 */
2548dktsbbase:
2549	RUNTIME_PATCH_SETX(%g7, %g6)
2550	/* %g7 = contents of ktsb_base or ktsb_pbase */
2551
2552dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2553	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2554
2555	/*
2556	 * At this point %g1 is our index into the TSB.
2557	 * We just masked off enough bits of the VA depending
2558	 * on our TSB size code.
2559	 */
2560	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2561	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2562	cmp	%g6, %g4			! compare tag
2563	bne,pn	%xcc, dktsb4m_kpmcheck_small
2564	  add	%g7, %g1, %g1			/* form tsb ptr */
2565	TT_TRACE(trace_tsbhit)
2566	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2567	/* trapstat expects tte in %g5 */
2568	retry
2569
2570	/*
2571	 * If kpm is using large pages, the following instruction needs
2572	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
	 * so that we will probe the 4M TSB regardless of the VA.  In
	 * the case where kpm is using small pages, we know no large kernel
2575	 * mappings are located above 0x80000000.00000000 so we skip the
2576	 * probe as an optimization.
2577	 */
2578dktsb4m_kpmcheck_small:
2579	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2580	  /* delay slot safe, below */
2581
2582	/*
2583	 * Get second TSB offset and look for 4M mapping
2584	 * using 4M virtual page as the TSB index.
2585	 *
2586	 * Here:
2587	 * %g1 = 8K TSB pointer.  Don't squash it.
2588	 * %g2 = tag access register (we still need it)
2589	 */
2590	srlx	%g2, MMU_PAGESHIFT4M, %g3
2591
2592	/*
2593	 * We patch the next set of instructions at run time;
2594	 * any changes here require sfmmu_patch_ktsb changes too.
2595	 */
2596dktsb4mbase:
2597	RUNTIME_PATCH_SETX(%g7, %g6)
2598	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2599dktsb4m:
2600	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2601	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2602
2603	/*
2604	 * At this point %g3 is our index into the TSB.
2605	 * We just masked off enough bits of the VA depending
2606	 * on our TSB size code.
2607	 */
2608	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2609	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2610	cmp	%g6, %g4			! compare tag
2611
2612dktsb4m_tsbmiss:
2613	bne,pn	%xcc, dktsb4m_kpmcheck
2614	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2615	TT_TRACE(trace_tsbhit)
2616	/* we don't check TTE size here since we assume 4M TSB is separate */
2617	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2618	/* trapstat expects tte in %g5 */
2619	retry
2620
2621	/*
2622	 * So, we failed to find a valid TTE to match the faulting
2623	 * address in either TSB.  There are a few cases that could land
2624	 * us here:
2625	 *
2626	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2627	 *    to sfmmu_tsb_miss_tt to handle the miss.
2628	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2629	 *    4M TSB.  Let segkpm handle it.
2630	 *
2631	 * Note that we shouldn't land here in the case of a kpm VA when
2632	 * kpm_smallpages is active -- we handled that case earlier at
2633	 * dktsb4m_kpmcheck_small.
2634	 *
2635	 * At this point:
2636	 *  g1 = 8K-indexed primary TSB pointer
2637	 *  g2 = tag access register
2638	 *  g3 = 4M-indexed secondary TSB pointer
2639	 */
2640dktsb4m_kpmcheck:
2641	cmp	%g2, %g0
2642	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2643	  nop
2644	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2645	  nop
2646
2647#ifdef sun4v
2648	/*
2649	 * User instruction miss w/ single TSB.
2650	 * The first probe covers 8K, 64K, and 512K page sizes,
2651	 * because 64K and 512K mappings are replicated off 8K
2652	 * pointer.
2653	 *
2654	 * g1 = tsb8k pointer register
2655	 * g2 = tag access register
2656	 * g3 - g6 = scratch registers
2657	 * g7 = TSB tag to match
2658	 */
2659	.align	64
2660	ALTENTRY(sfmmu_uitlb_fastpath)
2661
2662	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2663	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2664	ba,pn	%xcc, sfmmu_tsb_miss_tt
2665	  mov	-1, %g3
2666
2667	/*
2668	 * User data miss w/ single TSB.
2669	 * The first probe covers 8K, 64K, and 512K page sizes,
2670	 * because 64K and 512K mappings are replicated off 8K
2671	 * pointer.
2672	 *
2673	 * g1 = tsb8k pointer register
2674	 * g2 = tag access register
2675	 * g3 - g6 = scratch registers
2676	 * g7 = TSB tag to match
2677	 */
2678	.align 64
2679	ALTENTRY(sfmmu_udtlb_fastpath)
2680
2681	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2682	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2683	ba,pn	%xcc, sfmmu_tsb_miss_tt
2684	  mov	-1, %g3
2685
2686	/*
2687	 * User instruction miss w/ multiple TSBs (sun4v).
2688	 * The first probe covers 8K, 64K, and 512K page sizes,
2689	 * because 64K and 512K mappings are replicated off 8K
2690	 * pointer.  Second probe covers 4M page size only.
2691	 *
2692	 * Just like sfmmu_udtlb_slowpath, except:
2693	 *   o Uses ASI_ITLB_IN
2694	 *   o checks for execute permission
2695	 *   o No ISM prediction.
2696	 *
2697	 * g1 = tsb8k pointer register
2698	 * g2 = tag access register
2699	 * g3 - g6 = scratch registers
2700	 * g7 = TSB tag to match
2701	 */
2702	.align	64
2703	ALTENTRY(sfmmu_uitlb_slowpath)
2704
2705	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2706	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2707	/* g4 - g5 = clobbered here */
2708
2709	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2710	/* g1 = first TSB pointer, g3 = second TSB pointer */
2711	srlx	%g2, TAG_VALO_SHIFT, %g7
2712	PROBE_2ND_ITSB(%g3, %g7)
2713	/* NOT REACHED */
2714
2715#else /* sun4v */
2716
2717	/*
2718	 * User instruction miss w/ multiple TSBs (sun4u).
2719	 * The first probe covers 8K, 64K, and 512K page sizes,
2720	 * because 64K and 512K mappings are replicated off 8K
2721	 * pointer.  Probe of 1st TSB has already been done prior to entry
	 * into this routine.  For the UTSB_PHYS case we probe up to 3
	 * other valid TSBs in the following order:
2724	 * 1) shared TSB for 4M-256M pages
2725	 * 2) private TSB for 4M-256M pages
2726	 * 3) shared TSB for 8K-512K pages
2727	 *
2728	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
2729	 * 4M-256M pages.
2730	 *
2731	 * Just like sfmmu_udtlb_slowpath, except:
2732	 *   o Uses ASI_ITLB_IN
2733	 *   o checks for execute permission
2734	 *   o No ISM prediction.
2735	 *
2736	 * g1 = tsb8k pointer register
2737	 * g2 = tag access register
2738	 * g4 - g6 = scratch registers
2739	 * g7 = TSB tag to match
2740	 */
2741	.align	64
2742	ALTENTRY(sfmmu_uitlb_slowpath)
2743
2744#ifdef UTSB_PHYS
2745
	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
	brlz,pt %g6, 1f
	  nop
	GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
	PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
1:
	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
	brlz,pt %g3, 2f
	  nop
	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
	PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
2:
	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
	brlz,pt %g6, sfmmu_tsb_miss_tt
	  nop
	GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
	PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
	ba,pn	%xcc, sfmmu_tsb_miss_tt
	  nop
2765
2766#else /* UTSB_PHYS */
2767	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2768	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2769	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2770	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2771	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2772	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
	/* g1 = first TSB pointer, g3 = second TSB pointer */
	srlx	%g2, TAG_VALO_SHIFT, %g7
	PROBE_2ND_ITSB(%g3, %g7, isynth)
2776	ba,pn	%xcc, sfmmu_tsb_miss_tt
2777	  nop
2778
2779#endif /* UTSB_PHYS */
2780#endif /* sun4v */
2781
2782#if defined(sun4u) && defined(UTSB_PHYS)
2783
2784        /*
2785	 * We come here for ism predict DTLB_MISS case or if
2786	 * if probe in first TSB failed.
2787         */
2788
2789        .align 64
2790        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
2791
2792	/*
	 * g1 = tsb8k pointer register
	 * g2 = tag access register
	 * g4 - g6 = scratch registers
	 * g7 = TSB tag to match
2797	 */
2798
2799	/*
2800	 * ISM non-predict probe order
	 * probe 1ST_TSB (8K index)
	 * probe 2ND_TSB (4M index)
	 * probe 4TH_TSB (4M index)
	 * probe 3RD_TSB (8K index)
	 *
	 * We already probed the first TSB in the DTLB_MISS handler.
2807	 */
2808
2809        /*
2810         * Private 2ND TSB 4M-256 pages
2811         */
2812	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2813	brlz,pt %g3, 1f
2814	  nop
2815        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2816        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2817
2818	/*
	 * Shared Context 4TH TSB 4M-256M pages
	 */
1:
	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
	brlz,pt %g6, 2f
	  nop
	GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
	PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
2827
2828        /*
2829         * Shared Context 3RD TSB 8K-512K pages
2830         */
28312:
2832	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2833	brlz,pt %g6, sfmmu_tsb_miss_tt
2834	  nop
2835        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2836        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
2837	ba,pn	%xcc, sfmmu_tsb_miss_tt
2838	  nop
2839
2840	.align 64
	ALTENTRY(sfmmu_udtlb_slowpath_ismpred)

	/*
	 * g1 = tsb8k pointer register
	 * g2 = tag access register
	 * g4 - g6 = scratch registers
	 * g7 = TSB tag to match
	 */

	/*
	 * ISM predict probe order
	 * probe 4TH_TSB (4M index)
	 * probe 2ND_TSB (4M index)
	 * probe 1ST_TSB (8K index)
	 * probe 3RD_TSB (8K index)
	 */
2857	/*
	 * Shared Context 4TH TSB 4M-256M pages
	 */
	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
	brlz,pt %g6, 4f
	  nop
	GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
	PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)

	/*
	 * Private 2ND TSB 4M-256M pages
	 */
28694:
2870	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2871	brlz,pt %g3, 5f
2872	  nop
	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)

5:
	PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)

	/*
	 * Shared Context 3RD TSB 8K-512K pages
	 */
	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
	brlz,pt %g6, 6f
	  nop
	GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
	PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
28876:
2888	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
2889	  nop
2890
2891#else /* sun4u && UTSB_PHYS */
2892
	.align 64
	ALTENTRY(sfmmu_udtlb_slowpath)
2895
2896	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2897	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2898	  mov	%g1, %g3
2899
2900udtlb_miss_probefirst:
2901	/*
2902	 * g1 = 8K TSB pointer register
2903	 * g2 = tag access register
2904	 * g3 = (potentially) second TSB entry ptr
2905	 * g6 = ism pred.
2906	 * g7 = vpg_4m
2907	 */
2908#ifdef sun4v
2909	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2910	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2911
2912	/*
2913	 * Here:
2914	 *   g1 = first TSB pointer
2915	 *   g2 = tag access reg
2916	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2917	 */
2918	brgz,pn	%g6, sfmmu_tsb_miss_tt
2919	  nop
2920#else /* sun4v */
2921	mov	%g1, %g4
2922	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2923	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2924
2925	/*
2926	 * Here:
2927	 *   g1 = first TSB pointer
2928	 *   g2 = tag access reg
2929	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2930	 */
2931	brgz,pn	%g6, sfmmu_tsb_miss_tt
2932	  nop
2933	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
2934	/* fall through in 8K->4M probe order */
2935#endif /* sun4v */
2936
2937udtlb_miss_probesecond:
2938	/*
2939	 * Look in the second TSB for the TTE
2940	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2941	 * g2 = tag access reg
2942	 * g3 = 8K TSB pointer register
2943	 * g6 = ism pred.
2944	 * g7 = vpg_4m
2945	 */
2946#ifdef sun4v
2947	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2948	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2949	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2950#else /* sun4v */
2951	mov	%g3, %g7
2952	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2953	/* %g2 clobbered, %g3 =second tsbe ptr */
2954	mov	MMU_TAG_ACCESS, %g2
2955	ldxa	[%g2]ASI_DMMU, %g2
2956#endif /* sun4v */
2957
2958	srlx	%g2, TAG_VALO_SHIFT, %g7
2959	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2960	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
2961	brgz,pn	%g6, udtlb_miss_probefirst
2962	  nop
2963
2964	/* fall through to sfmmu_tsb_miss_tt */
2965#endif /* sun4u && UTSB_PHYS */
2966
2967
2968	ALTENTRY(sfmmu_tsb_miss_tt)
2969	TT_TRACE(trace_tsbmiss)
2970	/*
2971	 * We get here if there is a TSB miss OR a write protect trap.
2972	 *
2973	 * g1 = First TSB entry pointer
2974	 * g2 = tag access register
2975	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
2976	 * g4 - g7 = scratch registers
2977	 */
2978
2979	ALTENTRY(sfmmu_tsb_miss)
2980
2981	/*
2982	 * If trapstat is running, we need to shift the %tpc and %tnpc to
2983	 * point to trapstat's TSB miss return code (note that trapstat
2984	 * itself will patch the correct offset to add).
2985	 */
2986	rdpr	%tl, %g7
2987	cmp	%g7, 1
2988	ble,pt	%xcc, 0f
2989	  sethi	%hi(KERNELBASE), %g6
2990	rdpr	%tpc, %g7
2991	or	%g6, %lo(KERNELBASE), %g6
2992	cmp	%g7, %g6
2993	bgeu,pt	%xcc, 0f
2994	/* delay slot safe */
2995
2996	ALTENTRY(tsbmiss_trapstat_patch_point)
2997	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
2998	wrpr	%g7, %tpc
2999	add	%g7, 4, %g7
3000	wrpr	%g7, %tnpc
30010:
3002	CPU_TSBMISS_AREA(%g6, %g7)
3003	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
3004	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
3005
3006	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
3007	brz,a,pn %g3, 1f			/* skip ahead if kernel */
3008	  ldn	[%g6 + TSBMISS_KHATID], %g7
3009	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
3010	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
3011
3012	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
3013
3014	cmp	%g3, INVALID_CONTEXT
3015	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
3016	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
3017
3018#if defined(sun4v) || defined(UTSB_PHYS)
	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
	andn	%g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
	stub	%g7, [%g6 + TSBMISS_URTTEFLAGS]
3022#endif /* sun4v || UTSB_PHYS */
3023
3024	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
3025	/*
3026	 * The miss wasn't in an ISM segment.
3027	 *
	 * %g1, %g3, %g4, %g5, %g7 all clobbered
3029	 * %g2 = (pseudo) tag access
3030	 */
3031
3032	ba,pt	%icc, 2f
3033	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
3034
30351:
3036	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
3037	/*
3038	 * 8K and 64K hash.
3039	 */
30402:
3041
3042	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3043		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_l8K, tsb_checktte,
3044		sfmmu_suspend_tl, tsb_512K)
3045	/* NOT REACHED */
3046
3047tsb_512K:
3048	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3049	brz,pn	%g5, 3f
3050	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3051	and	%g4, HAT_512K_FLAG, %g5
3052
3053	/*
3054	 * Note that there is a small window here where we may have
	 * a 512k page in the hash list but have not set HAT_512K_FLAG
	 * yet, so we will skip searching the 512k hash list.
3057	 * In this case we will end up in pagefault which will find
3058	 * the mapping and return.  So, in this instance we will end up
3059	 * spending a bit more time resolving this TSB miss, but it can
3060	 * only happen once per process and even then, the chances of that
3061	 * are very small, so it's not worth the extra overhead it would
3062	 * take to close this window.
3063	 */
3064	brz,pn	%g5, tsb_4M
3065	  nop
30663:
3067	/*
3068	 * 512K hash
3069	 */
3070
3071	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3072		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_l512K, tsb_checktte,
3073		sfmmu_suspend_tl, tsb_4M)
3074	/* NOT REACHED */
3075
3076tsb_4M:
3077	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3078	brz,pn	%g5, 4f
3079	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3080	and	%g4, HAT_4M_FLAG, %g5
3081	brz,pn	%g5, tsb_32M
3082	  nop
30834:
3084	/*
3085	 * 4M hash
3086	 */
3087
3088	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3089		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_l4M, tsb_checktte,
3090		sfmmu_suspend_tl, tsb_32M)
3091	/* NOT REACHED */
3092
3093tsb_32M:
3094	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3095#ifdef	sun4v
	brz,pn	%g5, 6f
3097#else
3098	brz,pn  %g5, tsb_pagefault
3099#endif
3100	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3101	and	%g4, HAT_32M_FLAG, %g5
3102	brz,pn	%g5, tsb_256M
3103	  nop
31045:
3105	/*
3106	 * 32M hash
3107	 */
3108
3109	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3110		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_l32M, tsb_checktte,
3111		sfmmu_suspend_tl, tsb_256M)
3112	/* NOT REACHED */
3113
3114#if defined(sun4u) && !defined(UTSB_PHYS)
3115#define tsb_shme        tsb_pagefault
3116#endif
3117tsb_256M:
3118	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3119	and	%g4, HAT_256M_FLAG, %g5
3120	brz,pn	%g5, tsb_shme
3121	  nop
31226:
3123	/*
3124	 * 256M hash
3125	 */
3126
3127	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3128	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_l256M, tsb_checktte,
3129	    sfmmu_suspend_tl, tsb_shme)
3130	/* NOT REACHED */
3131
3132tsb_checktte:
3133	/*
3134	 * g1 = hblk_misc
3135	 * g2 = tagacc
3136	 * g3 = tte
3137	 * g4 = tte pa
3138	 * g6 = tsbmiss area
3139	 * g7 = hatid
3140	 */
3141	brlz,a,pt %g3, tsb_validtte
3142	  rdpr	%tt, %g7
3143
3144#if defined(sun4u) && !defined(UTSB_PHYS)
3145#undef tsb_shme
3146	ba      tsb_pagefault
3147	  nop
3148#else /* sun4u && !UTSB_PHYS */
3149
3150tsb_shme:
3151	/*
3152	 * g2 = tagacc
3153	 * g6 = tsbmiss area
3154	 */
3155	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3156	brz,pn	%g5, tsb_pagefault
3157	  nop
3158	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
3159	brz,pn	%g7, tsb_pagefault
3160	  nop
3161
3162	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3163		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_shme_l8K, tsb_shme_checktte,
3164		sfmmu_suspend_tl, tsb_shme_512K)
3165	/* NOT REACHED */
3166
3167tsb_shme_512K:
3168	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3169	and	%g4, HAT_512K_FLAG, %g5
3170	brz,pn	%g5, tsb_shme_4M
3171	  nop
3172
3173	/*
3174	 * 512K hash
3175	 */
3176
3177	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3178		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_shme_l512K, tsb_shme_checktte,
3179		sfmmu_suspend_tl, tsb_shme_4M)
3180	/* NOT REACHED */
3181
3182tsb_shme_4M:
3183	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3184	and	%g4, HAT_4M_FLAG, %g5
3185	brz,pn	%g5, tsb_shme_32M
3186	  nop
31874:
3188	/*
3189	 * 4M hash
3190	 */
3191	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3192		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_shme_l4M, tsb_shme_checktte,
3193		sfmmu_suspend_tl, tsb_shme_32M)
3194	/* NOT REACHED */
3195
3196tsb_shme_32M:
3197	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3198	and	%g4, HAT_32M_FLAG, %g5
3199	brz,pn	%g5, tsb_shme_256M
3200	  nop
3201
3202	/*
3203	 * 32M hash
3204	 */
3205
3206	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3207		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_shme_l32M, tsb_shme_checktte,
3208		sfmmu_suspend_tl, tsb_shme_256M)
3209	/* NOT REACHED */
3210
3211tsb_shme_256M:
3212	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3213	and	%g4, HAT_256M_FLAG, %g5
3214	brz,pn	%g5, tsb_pagefault
3215	  nop
3216
3217	/*
3218	 * 256M hash
3219	 */
3220
3221	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3222	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_shme_l256M, tsb_shme_checktte,
3223	    sfmmu_suspend_tl, tsb_pagefault)
3224	/* NOT REACHED */
3225
3226tsb_shme_checktte:
3227
3228	brgez,pn %g3, tsb_pagefault
3229	  rdpr	%tt, %g7
3230	/*
3231	 * g1 = ctx1 flag
3232	 * g3 = tte
3233	 * g4 = tte pa
3234	 * g6 = tsbmiss area
3235	 * g7 = tt
3236	 */
3237
3238	brz,pt  %g1, tsb_validtte
3239	  nop
3240	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
	or	%g1, HAT_CHKCTX1_FLAG, %g1
3242	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3243
3244	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3245#endif /* sun4u && !UTSB_PHYS */
3246
3247tsb_validtte:
3248	/*
3249	 * g3 = tte
3250	 * g4 = tte pa
3251	 * g6 = tsbmiss area
3252	 * g7 = tt
3253	 */
3254
3255	/*
3256	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3257	 */
3258	cmp	%g7, FAST_PROT_TT
3259	bne,pt	%icc, 4f
3260	  nop
3261
3262	TTE_SET_REFMOD_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_refmod,
3263	    tsb_protfault)
3264
3265	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3266#ifdef sun4v
3267	MMU_FAULT_STATUS_AREA(%g7)
3268	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
3269#else /* sun4v */
3270	mov     MMU_TAG_ACCESS, %g5
3271	ldxa    [%g5]ASI_DMMU, %g5
3272#endif /* sun4v */
3273	ba,pt	%xcc, tsb_update_tl1
3274	  nop
32754:
3276	/*
3277	 * ITLB translation was found but execute permission is
3278	 * disabled. If we have software execute permission (soft exec
3279	 * bit is set), then enable hardware execute permission.
3280	 * Otherwise continue with a protection violation.
3281	 */
3282	cmp     %g7, T_INSTR_MMU_MISS
3283	be,pn	%icc, 5f
3284	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3285	cmp     %g7, FAST_IMMU_MISS_TT
3286	bne,pt %icc, 3f
3287	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
32885:
3289	bnz,pn %icc, 3f
3290	  TTE_CHK_SOFTEXEC_ML(%g3)		/* check soft execute */
3291	bz,pn %icc, tsb_protfault
3292	  nop
3293	TTE_SET_EXEC_ML(%g3, %g4, %g7, tsb_lset_exec)
32943:
3295	/*
3296	 * Set reference bit if not already set
3297	 */
3298	TTE_SET_REF_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_ref)
3299
3300	/*
3301	 * Now, load into TSB/TLB.  At this point:
3302	 * g3 = tte
3303	 * g4 = patte
3304	 * g6 = tsbmiss area
3305	 */
3306	rdpr	%tt, %g7
3307#ifdef sun4v
3308	MMU_FAULT_STATUS_AREA(%g2)
3309	cmp	%g7, T_INSTR_MMU_MISS
3310	be,a,pt	%icc, 9f
3311	  nop
3312	cmp	%g7, FAST_IMMU_MISS_TT
3313	be,a,pt	%icc, 9f
3314	  nop
3315	add	%g2, MMFSA_D_, %g2
33169:
3317	ldx	[%g2 + MMFSA_CTX_], %g7
3318	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3319	ldx	[%g2 + MMFSA_ADDR_], %g2
3320	mov	%g2, %g5		! load the fault addr for later use
3321	srlx	%g2, TTARGET_VA_SHIFT, %g2
3322	or	%g2, %g7, %g2
3323#else /* sun4v */
3324	mov     MMU_TAG_ACCESS, %g5
3325	cmp     %g7, FAST_IMMU_MISS_TT
3326	be,a,pt %icc, 9f
3327	   ldxa  [%g0]ASI_IMMU, %g2
3328	ldxa    [%g0]ASI_DMMU, %g2
3329	ba,pt   %icc, tsb_update_tl1
3330	   ldxa  [%g5]ASI_DMMU, %g5
33319:
3332	ldxa    [%g5]ASI_IMMU, %g5
3333#endif /* sun4v */
3334
3335tsb_update_tl1:
3336	TTE_CLR_SOFTEXEC_ML(%g3)
3337	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3338	brz,pn	%g7, tsb_kernel
3339#ifdef sun4v
3340	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3341#else  /* sun4v */
3342	  srlx	%g3, TTE_SZ_SHFT, %g7
3343#endif /* sun4v */
3344
3345tsb_user:
3346#ifdef sun4v
3347	cmp	%g7, TTE4M
3348	bge,pn	%icc, tsb_user4m
3349	  nop
3350#else /* sun4v */
3351	cmp	%g7, TTESZ_VALID | TTE4M
3352	be,pn	%icc, tsb_user4m
3353	  srlx	%g3, TTE_SZ2_SHFT, %g7
3354	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3355#ifdef ITLB_32M_256M_SUPPORT
3356	bnz,pn	%icc, tsb_user4m
3357	  nop
3358#else /* ITLB_32M_256M_SUPPORT */
3359	bnz,a,pn %icc, tsb_user_pn_synth
3360	 nop
3361#endif /* ITLB_32M_256M_SUPPORT */
3362#endif /* sun4v */
3363
3364tsb_user8k:
3365#if defined(sun4v) || defined(UTSB_PHYS)
3366	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3367	and	%g7, HAT_CHKCTX1_FLAG, %g1
3368	brz,a,pn %g1, 1f
3369	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
3370	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3371	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
3372	  mov PTL1_NO_SCDTSB8K, %g1			! panic
	GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
33741:
3375#else /* defined(sun4v) || defined(UTSB_PHYS) */
3376	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
3377#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3378
3379#ifndef UTSB_PHYS
3380	mov	ASI_N, %g7	! user TSBs accessed by VA
3381	mov	%g7, %asi
3382#endif /* !UTSB_PHYS */
3383
3384	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l3)
3385
3386	rdpr    %tt, %g5
3387#ifdef sun4v
3388	cmp	%g5, T_INSTR_MMU_MISS
3389	be,a,pn	%xcc, 9f
3390	  mov	%g3, %g5
3391#endif /* sun4v */
3392	cmp	%g5, FAST_IMMU_MISS_TT
3393	be,pn	%xcc, 9f
3394	  mov	%g3, %g5
3395
3396	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3397	! trapstat wants TTE in %g5
3398	retry
33999:
3400	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3401	! trapstat wants TTE in %g5
3402	retry
3403
3404tsb_user4m:
3405#if defined(sun4v) || defined(UTSB_PHYS)
3406	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3407	and	%g7, HAT_CHKCTX1_FLAG, %g1
3408	brz,a,pn %g1, 4f
3409	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
3410	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
3411	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
3412	  nop
	GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3414
3415#else /* defined(sun4v) || defined(UTSB_PHYS) */
3416	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
3417#endif /* defined(sun4v) || defined(UTSB_PHYS) */
34184:
3419	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
3420	  nop
3421
3422#ifndef UTSB_PHYS
3423	mov	ASI_N, %g7	! user TSBs accessed by VA
3424	mov	%g7, %asi
3425#endif /* UTSB_PHYS */
3426
	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l4)
3428
34295:
3430	rdpr    %tt, %g5
3431#ifdef sun4v
	cmp	%g5, T_INSTR_MMU_MISS
	be,a,pn	%xcc, 9f
	  mov	%g3, %g5
#endif /* sun4v */
	cmp	%g5, FAST_IMMU_MISS_TT
	be,pn	%xcc, 9f
	  mov	%g3, %g5

	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
	! trapstat wants TTE in %g5
	retry
9:
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
	! trapstat wants TTE in %g5
	retry
3447
3448#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3449	/*
3450	 * Panther ITLB synthesis.
3451	 * The Panther 32M and 256M ITLB code simulates these two large page
	 * sizes with 4M pages, to support programs (for example, Java)
	 * that may copy instructions into a 32M or 256M data page and
3454	 * then execute them. The code below generates the 4M pfn bits and
3455	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3456	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3457	 * are ignored by the hardware.
3458	 *
3459	 * Now, load into TSB/TLB.  At this point:
3460	 * g2 = tagtarget
3461	 * g3 = tte
3462	 * g4 = patte
3463	 * g5 = tt
3464	 * g6 = tsbmiss area
3465	 */
3466tsb_user_pn_synth:
3467	rdpr %tt, %g5
3468	cmp    %g5, FAST_IMMU_MISS_TT
3469	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3470	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3471	bz,pn %icc, 4b				/* if not, been here before */
3472	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3473	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3474	  mov	%g3, %g5
3475
3476	mov	MMU_TAG_ACCESS, %g7
3477	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3478	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3479
3480	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3481	mov	%g7, %asi
3482	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l5) /* update TSB */
34835:
	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
	retry
3486
3487tsb_user_itlb_synth:
3488	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
3489
3490	mov	MMU_TAG_ACCESS, %g7
3491	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3492	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3493	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3494	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3495
3496	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3497	mov	%g7, %asi
3498	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l6) /* update TSB */
34997:
3500	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
	retry
#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3504
3505tsb_kernel:
3506	rdpr	%tt, %g5
3507#ifdef sun4v
3508	cmp	%g7, TTE4M
3509	bge,pn	%icc, 5f
3510#else
3511	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3512	be,pn	%icc, 5f
3513#endif /* sun4v */
3514	  nop
3515	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
3516	ba,pt	%xcc, 6f
3517	  nop
35185:
3519	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
3520	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
3521	  nop
35226:
3523#ifndef sun4v
3524tsb_kernel_patch_asi:
3525	or	%g0, RUNTIME_PATCH, %g6
3526	mov	%g6, %asi	! XXX avoid writing to %asi !!
3527#endif
3528	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l7)
35293:
3530#ifdef sun4v
3531	cmp	%g5, T_INSTR_MMU_MISS
3532	be,a,pn	%icc, 1f
3533	  mov	%g3, %g5			! trapstat wants TTE in %g5
3534#endif /* sun4v */
3535	cmp	%g5, FAST_IMMU_MISS_TT
3536	be,pn	%icc, 1f
3537	  mov	%g3, %g5			! trapstat wants TTE in %g5
3538	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3539	! trapstat wants TTE in %g5
3540	retry
35411:
3542	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3543	! trapstat wants TTE in %g5
3544	retry
3545
3546tsb_ism:
3547	/*
3548	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3549	 * page size down to smallest.
3550	 *
3551	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3552	 *	register
3553	 * g3 = ismmap->ism_seg
3554	 * g4 = physical address of ismmap->ism_sfmmu
3555	 * g6 = tsbmiss area
3556	 */
3557	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
	brz,a,pn %g7, ptl1_panic		/* panic if hatid is zero */
3559	  mov	PTL1_BAD_ISM, %g1
3560						/* g5 = pa of imap_vb_shift */
3561	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3562	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3563	srlx	%g3, %g4, %g3			/* clr size field */
3564	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3565	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
3566	and     %g2, %g1, %g4                   /* g4 = ctx number */
3567	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
3568	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
3569	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
3570	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
	lduha   [%g5]ASI_MEM, %g4               /* g4 = imap_hatflags */
3572#if defined(sun4v) || defined(UTSB_PHYS)
	and     %g4, HAT_CTX1_FLAG, %g5         /* g5 = ctx1 flag */
3574	brz,pt %g5, tsb_chk4M_ism
3575	  nop
3576	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
3577	or      %g5, HAT_CHKCTX1_FLAG, %g5
3578	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
3579	rdpr    %tt, %g5
3580	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
3581#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3582
3583	/*
3584	 * ISM pages are always locked down.
3585	 * If we can't find the tte then pagefault
3586	 * and let the spt segment driver resolve it.
3587	 *
3588	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3589	 * g4 = imap_hatflags
3590	 * g6 = tsb miss area
3591	 * g7 = ISM hatid
3592	 */
3593
3594tsb_chk4M_ism:
3595	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3596	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3597	  nop
3598
3599tsb_ism_32M:
3600	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3601	brz,pn	%g5, tsb_ism_256M
3602	  nop
3603
3604	/*
3605	 * 32M hash.
3606	 */
3607
3608	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT32M,
3609	    TTE32M, %g5, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3610	    tsb_ism_4M)
3611	/* NOT REACHED */
3612
3613tsb_ism_32M_found:
3614	brlz,a,pt %g3, tsb_validtte
3615	  rdpr	%tt, %g7
3616	ba,pt	%xcc, tsb_ism_4M
3617	  nop
3618
3619tsb_ism_256M:
3620	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3621	brz,a,pn %g5, ptl1_panic
3622	  mov	PTL1_BAD_ISM, %g1
3623
3624	/*
3625	 * 256M hash.
3626	 */
3627	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT256M,
3628	    TTE256M, %g5, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3629	    tsb_ism_4M)
3630
3631tsb_ism_256M_found:
3632	brlz,a,pt %g3, tsb_validtte
3633	  rdpr	%tt, %g7
3634
3635tsb_ism_4M:
3636	/*
3637	 * 4M hash.
3638	 */
3639	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT4M,
3640	    TTE4M, %g5, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3641	    tsb_ism_8K)
3642	/* NOT REACHED */
3643
3644tsb_ism_4M_found:
3645	brlz,a,pt %g3, tsb_validtte
3646	  rdpr	%tt, %g7
3647
3648tsb_ism_8K:
3649	/*
3650	 * 8K and 64K hash.
3651	 */
3652
3653	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT64K,
3654	    TTE64K, %g5, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3655	    tsb_pagefault)
3656	/* NOT REACHED */
3657
3658tsb_ism_8K_found:
3659	brlz,a,pt %g3, tsb_validtte
3660	  rdpr	%tt, %g7
3661
3662tsb_pagefault:
3663	rdpr	%tt, %g7
3664	cmp	%g7, FAST_PROT_TT
3665	be,a,pn	%icc, tsb_protfault
3666	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3667
3668tsb_protfault:
3669	/*
3670	 * We get here if we couldn't find a valid tte in the hash.
3671	 *
3672	 * For a user fault at tl>1 we go to the window handling code.
3673	 *
3674	 * If kernel and the fault is on the same page as our stack
3675	 * pointer, then we know the stack is bad and the trap handler
3676	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3677	 *
3678	 * If this is a kernel trap and tl>1, panic.
3679	 *
3680	 * Otherwise we call pagefault.
3681	 */
3682	cmp	%g7, FAST_IMMU_MISS_TT
3683#ifdef sun4v
3684	MMU_FAULT_STATUS_AREA(%g4)
3685	ldx	[%g4 + MMFSA_I_CTX], %g5
3686	ldx	[%g4 + MMFSA_D_CTX], %g4
3687	move	%icc, %g5, %g4
3688	cmp	%g7, T_INSTR_MMU_MISS
3689	move	%icc, %g5, %g4
3690#else
3691	mov	MMU_TAG_ACCESS, %g4
3692	ldxa	[%g4]ASI_DMMU, %g2
3693	ldxa	[%g4]ASI_IMMU, %g5
3694	move	%icc, %g5, %g2
3695	cmp	%g7, T_INSTR_MMU_MISS
3696	move	%icc, %g5, %g2
3697	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3698#endif /* sun4v */
3699	brnz,pn	%g4, 3f				/* skip if not kernel */
3700	  rdpr	%tl, %g5
3701
3702	add	%sp, STACK_BIAS, %g3
3703	srlx	%g3, MMU_PAGESHIFT, %g3
3704	srlx	%g2, MMU_PAGESHIFT, %g4
3705	cmp	%g3, %g4
3706	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3707	  mov	PTL1_BAD_STACK, %g1
3708
3709	cmp	%g5, 1
3710	ble,pt	%icc, 2f
3711	  nop
3712	TSTAT_CHECK_TL1(2f, %g1, %g2)
3713	rdpr	%tt, %g2
3714	cmp	%g2, FAST_PROT_TT
3715	mov	PTL1_BAD_KPROT_FAULT, %g1
3716	movne	%icc, PTL1_BAD_KMISS, %g1
3717	ba,pt	%icc, ptl1_panic
3718	  nop
3719
37202:
3721	/*
3722	 * We are taking a pagefault in the kernel on a kernel address.  If
3723	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3724	 * want to call sfmmu_pagefault -- we will instead note that a fault
3725	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3726	 * (instead of a "retry").  This will step over the faulting
3727	 * instruction.
3728	 */
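	/*
	 * A sketch of the check below in C (cpu_core and the
	 * cpuc_dtrace_* fields are the real structures; "addr" stands
	 * for the value GET_MMU_D_ADDR produces):
	 *
	 *	cpu_core_t *cp = &cpu_core[CPU->cpu_id];
	 *	if (!(cp->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT))
	 *		sfmmu_pagefault();
	 *	cp->cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *	cp->cpuc_dtrace_illval = addr;
	 *	done();
	 */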
3729	CPU_INDEX(%g1, %g2)
3730	set	cpu_core, %g2
3731	sllx	%g1, CPU_CORE_SHIFT, %g1
3732	add	%g1, %g2, %g1
3733	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3734	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3735	bz	sfmmu_pagefault
3736	or	%g2, CPU_DTRACE_BADADDR, %g2
3737	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3738	GET_MMU_D_ADDR(%g3, %g4)
3739	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3740	done
3741
37423:
3743	cmp	%g5, 1
3744	ble,pt	%icc, 4f
3745	  nop
3746	TSTAT_CHECK_TL1(4f, %g1, %g2)
3747	ba,pt	%icc, sfmmu_window_trap
3748	  nop
3749
37504:
3751	/*
3752	 * We are taking a pagefault on a non-kernel address.  If we are in
3753	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3754	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3755	 */
3756	CPU_INDEX(%g1, %g2)
3757	set	cpu_core, %g2
3758	sllx	%g1, CPU_CORE_SHIFT, %g1
3759	add	%g1, %g2, %g1
3760	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3761	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3762	bz	sfmmu_mmu_trap
3763	or	%g2, CPU_DTRACE_BADADDR, %g2
3764	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3765	GET_MMU_D_ADDR(%g3, %g4)
3766	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3767
3768	/*
3769	 * Be sure that we're actually taking this miss from the kernel --
3770	 * otherwise we have managed to return to user-level with
3771	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3772	 */
3773	rdpr	%tstate, %g2
3774	btst	TSTATE_PRIV, %g2
3775	bz,a	ptl1_panic
3776	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3777	done
3778
3779	ALTENTRY(tsb_tl0_noctxt)
3780	/*
3781	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3782	 * if it is, indicated that we have faulted and issue a done.
3783	 */
3784	CPU_INDEX(%g5, %g6)
3785	set	cpu_core, %g6
3786	sllx	%g5, CPU_CORE_SHIFT, %g5
3787	add	%g5, %g6, %g5
3788	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3789	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3790	bz	1f
3791	or	%g6, CPU_DTRACE_BADADDR, %g6
3792	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3793	GET_MMU_D_ADDR(%g3, %g4)
3794	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3795
3796	/*
3797	 * Be sure that we're actually taking this miss from the kernel --
3798	 * otherwise we have managed to return to user-level with
3799	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3800	 */
3801	rdpr	%tstate, %g5
3802	btst	TSTATE_PRIV, %g5
3803	bz,a	ptl1_panic
3804	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3805	TSTAT_CHECK_TL1(2f, %g1, %g2)
38062:
3807	done
3808
38091:
3810	rdpr	%tt, %g5
3811	cmp	%g5, FAST_IMMU_MISS_TT
3812#ifdef sun4v
3813	MMU_FAULT_STATUS_AREA(%g2)
3814	be,a,pt	%icc, 2f
3815	  ldx	[%g2 + MMFSA_I_CTX], %g3
3816	cmp	%g5, T_INSTR_MMU_MISS
3817	be,a,pt	%icc, 2f
3818	  ldx	[%g2 + MMFSA_I_CTX], %g3
3819	ldx	[%g2 + MMFSA_D_CTX], %g3
38202:
3821#else
3822	mov	MMU_TAG_ACCESS, %g2
3823	be,a,pt	%icc, 2f
3824	  ldxa	[%g2]ASI_IMMU, %g3
3825	ldxa	[%g2]ASI_DMMU, %g3
38262:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3827#endif /* sun4v */
3828	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3829	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3830	rdpr	%tl, %g5
3831	cmp	%g5, 1
3832	ble,pt	%icc, sfmmu_mmu_trap
3833	  nop
3834	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3835	ba,pt	%icc, sfmmu_window_trap
3836	  nop
3837	SET_SIZE(sfmmu_tsb_miss)
3838#endif  /* lint */
3839
3840#if defined (lint)
3841/*
3842 * This routine will look for a user or kernel vaddr in the hash
3843 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3844 * grab any locks.  It should only be used by other sfmmu routines.
3845 */
3846/* ARGSUSED */
3847pfn_t
3848sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3849{
3850	return(0);
3851}
3852
3853/* ARGSUSED */
3854pfn_t
3855sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3856{
3857	return(0);
3858}
3859
3860#else /* lint */
3861
3862	ENTRY_NP(sfmmu_vatopfn)
3863 	/*
3864 	 * disable interrupts
3865 	 */
3866 	rdpr	%pstate, %o3
3867#ifdef DEBUG
3868	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3869#endif
3870	/*
3871	 * disable interrupts to protect the TSBMISS area
3872	 */
3873	andn    %o3, PSTATE_IE, %o5
3874	wrpr    %o5, 0, %pstate
3875
3876	/*
3877	 * o0 = vaddr
3878	 * o1 = sfmmup
3879	 * o2 = ttep
3880	 */
3881	CPU_TSBMISS_AREA(%g1, %o5)
3882	ldn	[%g1 + TSBMISS_KHATID], %o4
3883	cmp	%o4, %o1
3884	bne,pn	%ncc, vatopfn_nokernel
3885	  mov	TTE64K, %g5			/* g5 = rehash # */
3886	mov	%g1, %o5			/* o5 = tsbmiss area */
3887	/*
3888	 * o0 = vaddr
3889	 * o1 & o4 = hatid
3890	 * o2 = ttep
3891	 * o5 = tsbmiss area
3892	 */
3893	mov	HBLK_RANGE_SHIFT, %g6
38941:
3895
3896	/*
3897	 * o0 = vaddr
3898	 * o1 = sfmmup
3899	 * o2 = ttep
3900	 * o3 = old %pstate
3901	 * o4 = hatid
3902	 * o5 = tsbmiss
3903	 * g5 = rehash #
3904	 * g6 = hmeshift
3905	 *
3906	 * The first arg to GET_TTE is actually tagaccess register
3907	 * not just vaddr. Since this call is for kernel we need to clear
3908	 * any lower vaddr bits that would be interpreted as ctx bits.
3909	 */
3910	set     TAGACC_CTX_MASK, %g1
3911	andn    %o0, %g1, %o0
3912	GET_TTE(%o0, %o4, %g1, %g2, %o5, %g4, %g6, %g5, %g3,
3913		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3914
3915kvtop_hblk_found:
3916	/*
3917	 * o0 = vaddr
3918	 * o1 = sfmmup
3919	 * o2 = ttep
3920	 * g1 = tte
3921	 * g2 = tte pa
3922	 * g3 = scratch
3923	 * o3 = old %pstate
3924	 * o5 = tsbmiss area
3925	 */
3926	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
3927	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3928	stx	%g1, [%o2]			/* put tte into *ttep */
3929	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3930	/*
3931	 * o0 = vaddr
3932	 * o1 = sfmmup
3933	 * o2 = ttep
3934	 * g1 = pfn
3935	 */
3936	ba,pt	%xcc, 6f
3937	  mov	%g1, %o0
3938
3939kvtop_nohblk:
3940	/*
3941	 * we get here if we couldn't find valid hblk in hash.  We rehash
3942	 * if neccesary.
3943	 */
3944	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3945#ifdef sun4v
3946	cmp	%g5, MAX_HASHCNT
3947#else
3948	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
3949#endif /* sun4v */
3950	be,a,pn	%icc, 6f
3951	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3952	mov	%o1, %o4			/* restore hatid */
3953#ifdef sun4v
3954        add	%g5, 2, %g5
3955	cmp	%g5, 3
3956	move	%icc, MMU_PAGESHIFT4M, %g6
3957	ba,pt	%icc, 1b
3958	movne	%icc, MMU_PAGESHIFT256M, %g6
3959#else
3960        inc	%g5
3961	cmp	%g5, 2
3962	move	%icc, MMU_PAGESHIFT512K, %g6
3963	ba,pt	%icc, 1b
3964	movne	%icc, MMU_PAGESHIFT4M, %g6
3965#endif /* sun4v */
39666:
3967	retl
3968 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3969
3970tsb_suspend:
3971	/*
3972	 * o0 = vaddr
3973	 * o1 = sfmmup
3974	 * o2 = ttep
3975	 * g1 = tte
3976	 * g2 = tte pa
3977	 * g3 = tte va
3978	 * o5 = tsbmiss area (o2 remains the ttep)
3979	 */
3980	stx	%g1, [%o2]			/* put tte into *ttep */
3981	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
3982	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
3983	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
39848:
3985	retl
3986	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
3987
3988vatopfn_nokernel:
3989	/*
3990	 * This routine does NOT support user addresses;
3991	 * there is a C routine that does.  The only reason
3992	 * the C routine doesn't support kernel addresses
3993	 * as well is that we do va_to_pa while holding
3994	 * the hashlock.
3995	 */
3996 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3997	save	%sp, -SA(MINFRAME), %sp
3998	sethi	%hi(sfmmu_panic3), %o0
3999	call	panic
4000	 or	%o0, %lo(sfmmu_panic3), %o0
4001
4002	SET_SIZE(sfmmu_vatopfn)
4003
4004	/*
4005	 * %o0 = vaddr
4006	 * %o1 = hashno (aka szc)
4007	 *
4008	 *
4009	 * This routine is similar to sfmmu_vatopfn() but will only look for
4010	 * a kernel vaddr in the hash structure for the specified rehash value.
4011	 * It's just an optimization for the case when pagesize for a given
4012	 * va range is already known (e.g. large page heap) and we don't want
4013	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4014	 *
4015	 * Returns valid pfn or PFN_INVALID if
4016	 * tte for specified rehash # is not found, invalid or suspended.
4017	 */
4018	ENTRY_NP(sfmmu_kvaszc2pfn)
4019 	/*
4020 	 * disable interrupts
4021 	 */
4022 	rdpr	%pstate, %o3
4023#ifdef DEBUG
4024	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4025#endif
4026	/*
4027	 * disable interrupts to protect the TSBMISS area
4028	 */
4029	andn    %o3, PSTATE_IE, %o5
4030	wrpr    %o5, 0, %pstate
4031
4032	CPU_TSBMISS_AREA(%g1, %o5)
4033	ldn	[%g1 + TSBMISS_KHATID], %o4
4034	sll	%o1, 1, %g6
4035	add	%g6, %o1, %g6
4036	add	%g6, MMU_PAGESHIFT, %g6
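	/* %g6 = hmeshift = MMU_PAGESHIFT + 3 * hashno (16/19/22/25/28) */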
4037	/*
4038	 * %o0 = vaddr
4039	 * %o1 = hashno
4040	 * %o3 = old %pstate
4041	 * %o4 = ksfmmup
4042	 * %g1 = tsbmiss area
4043	 * %g6 = hmeshift
4044	 */
4045
4046	/*
4047	 * The first arg to GET_TTE is actually tagaccess register
4048	 * not just vaddr. Since this call is for kernel we need to clear
4049	 * any lower vaddr bits that would be interpreted as ctx bits.
4050	 */
4051	srlx	%o0, MMU_PAGESHIFT, %o0
4052	sllx	%o0, MMU_PAGESHIFT, %o0
4053	GET_TTE(%o0, %o4, %g3, %g4, %g1, %o5, %g6, %o1, %g5,
4054		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4055		kvaszc2pfn_nohblk)
4056
4057kvaszc2pfn_hblk_found:
4058	/*
4059	 * %g3 = tte
4060	 * %o0 = vaddr
4061	 */
4062	brgez,a,pn %g3, 1f			/* check if tte is invalid */
4063	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4064	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4065	/*
4066	 * g3 = pfn
4067	 */
4068	ba,pt	%xcc, 1f
4069	  mov	%g3, %o0
4070
4071kvaszc2pfn_nohblk:
4072	mov	-1, %o0
4073
40741:
4075	retl
4076 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4077
4078	SET_SIZE(sfmmu_kvaszc2pfn)
4079
4080#endif /* lint */
4081
4082
4083
4084#if !defined(lint)
4085
4086/*
4087 * kpm lock used between trap level tsbmiss handler and kpm C level.
4088 */
4089#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
4090	mov     0xff, tmp1						;\
4091label1:									;\
4092	casa    [kpmlckp]asi, %g0, tmp1					;\
4093	brnz,pn tmp1, label1						;\
4094	mov     0xff, tmp1						;\
4095	membar  #LoadLoad
4096
4097#define KPMLOCK_EXIT(kpmlckp, asi)					\
4098	membar  #LoadStore|#StoreStore					;\
4099	sta     %g0, [kpmlckp]asi
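
/*
 * In C terms these two macros form a byte spin lock; a sketch, assuming
 * a uchar_t lock word and the atomic_cas_8() primitive:
 *
 *	while (atomic_cas_8(kpmlckp, 0, 0xff) != 0)	// KPMLOCK_ENTER
 *		continue;
 *	membar(LoadLoad);
 *	// ... critical section ...
 *	membar(LoadStore | StoreStore);			// KPMLOCK_EXIT
 *	*kpmlckp = 0;
 */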
4100
4101/*
4102 * Lookup a memseg for a given pfn and if found, return the physical
4103 * address of the corresponding struct memseg in mseg, otherwise
4104 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
4105 * tsbmp, %asi is assumed to be ASI_MEM.
4106 * This lookup is done by strictly traversing only the physical memseg
4107 * linkage. The more generic approach, to check the virtual linkage
4108 * before using the physical (used e.g. with hmehash buckets), cannot
4109 * be used here. Memory DR operations can run in parallel to this
4110 * lookup w/o any locks and updates of the physical and virtual linkage
4111 * cannot be done atomically wrt. to each other. Because physical
4112 * address zero can be valid physical address, MSEG_NULLPTR_PA acts
4113 * as "physical NULL" pointer.
4114 */
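/*
 * Roughly, in C (a sketch; the pa-based probe in the macro below is
 * authoritative, field names follow the MEMSEG_* offsets used there):
 *
 *	mseg = mseg_hash[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg == MSEG_NULLPTR_PA || pfn < mseg->pages_base ||
 *	    pfn >= mseg->pages_end ||
 *	    mseg->pages[pfn - mseg->pages_base].p_pagenum != pfn) {
 *		// brute force: walk the physical memseg linkage
 *		for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA;
 *		    mseg = mseg->nextpa)
 *			if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *				break;
 *	}
 */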
4115#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
4116	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
4117	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
4118	udivx	pfn, mseg, mseg						;\
4119	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
4120	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
4121	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
4122	add	tmp1, mseg, tmp1					;\
4123	ldxa	[tmp1]%asi, mseg					;\
4124	cmp	mseg, MSEG_NULLPTR_PA					;\
4125	be,pn	%xcc, label/**/1		/* if not found */	;\
4126	  nop								;\
4127	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4128	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4129	blu,pn	%xcc, label/**/1					;\
4130	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4131	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4132	bgeu,pn	%xcc, label/**/1					;\
4133	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
4134	mulx	tmp1, PAGE_SIZE, tmp1					;\
4135	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
4136	add	tmp2, tmp1, tmp1			/* pp */	;\
4137	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
4138	cmp	tmp2, pfn						;\
4139	be,pt	%xcc, label/**/_ok			/* found */	;\
4140label/**/1:								;\
4141	/* brute force lookup */					;\
4142	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
4143	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
4144label/**/2:								;\
4145	cmp	mseg, MSEG_NULLPTR_PA					;\
4146	be,pn	%xcc, label/**/_ok		/* if not found */	;\
4147	  nop								;\
4148	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4149	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4150	blu,a,pt %xcc, label/**/2					;\
4151	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4152	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4153	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4154	bgeu,a,pt %xcc, label/**/2					;\
4155	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4156label/**/_ok:
4157
4158	/*
4159	 * kpm tsb miss handler large pages
4160	 * g1 = 8K kpm TSB entry pointer
4161	 * g2 = tag access register
4162	 * g3 = 4M kpm TSB entry pointer
4163	 */
4164	ALTENTRY(sfmmu_kpm_dtsb_miss)
4165	TT_TRACE(trace_tsbmiss)
4166
4167	CPU_INDEX(%g7, %g6)
4168	sethi	%hi(kpmtsbm_area), %g6
4169	sllx	%g7, KPMTSBM_SHIFT, %g7
4170	or	%g6, %lo(kpmtsbm_area), %g6
4171	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4172
4173	/* check enable flag */
4174	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4175	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4176	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4177	  nop
4178
4179	/* VA range check */
4180	ldx	[%g6 + KPMTSBM_VBASE], %g7
4181	cmp	%g2, %g7
4182	blu,pn	%xcc, sfmmu_tsb_miss
4183	  ldx	[%g6 + KPMTSBM_VEND], %g5
4184	cmp	%g2, %g5
4185	bgeu,pn	%xcc, sfmmu_tsb_miss
4186	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
4187
4188	/*
4189	 * check TL tsbmiss handling flag
4190	 * bump tsbmiss counter
4191	 */
4192	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4193#ifdef	DEBUG
4194	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
4195	inc	%g5
4196	brz,pn	%g3, sfmmu_kpm_exception
4197	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4198#else
4199	inc	%g5
4200	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4201#endif
4202	/*
4203	 * At this point:
4204	 *  g1 = 8K kpm TSB pointer (not used)
4205	 *  g2 = tag access register
4206	 *  g3 = clobbered
4207	 *  g6 = per-CPU kpm tsbmiss area
4208	 *  g7 = kpm_vbase
4209	 */
4210
4211	/* vaddr2pfn */
4212	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4213	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4214	srax    %g4, %g3, %g2			/* which alias range (r) */
4215	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4216	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4217
4218	/*
4219	 * Setup %asi
4220	 * mseg_pa = page_numtomemseg_nolock(pfn)
4221	 * if (mseg_pa == NULL) sfmmu_kpm_exception
4222	 * g2=pfn
4223	 */
4224	mov	ASI_MEM, %asi
4225	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4226	cmp	%g3, MSEG_NULLPTR_PA
4227	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4228	  nop
4229
4230	/*
4231	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4232	 * g2=pfn g3=mseg_pa
4233	 */
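	/*
	 * With shft = kpmp2pshft, the next four instructions compute
	 *	inx = (((pfn >> shft) << shft) - kpm_pbase) >> shft
	 */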
4234	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
4235	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4236	srlx	%g2, %g5, %g4
4237	sllx	%g4, %g5, %g4
4238	sub	%g4, %g7, %g4
4239	srlx	%g4, %g5, %g4
4240
4241	/*
4242	 * Validate inx value
4243	 * g2=pfn g3=mseg_pa g4=inx
4244	 */
4245#ifdef	DEBUG
4246	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4247	cmp	%g4, %g5			/* inx - nkpmpgs */
4248	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4249	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4250#else
4251	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4252#endif
4253	/*
4254	 * kp = &mseg_pa->kpm_pages[inx]
4255	 */
4256	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
4257	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4258	add	%g5, %g4, %g5			/* kp */
4259
4260	/*
4261	 * KPMP_HASH(kp)
4262	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4263	 */
4264	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4265	sub	%g7, 1, %g7			/* mask */
4266	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
4267	add	%g5, %g1, %g5			/* y = kp + x */
4268	and 	%g5, %g7, %g5			/* hashinx = y & mask */
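	/* i.e. hashinx = (kp + (kp >> kpmp_shift)) & (kpmp_table_sz - 1) */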
4269
4270	/*
4271	 * Calculate physical kpm_page pointer
4272	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4273	 */
4274	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4275	add	%g1, %g4, %g1			/* kp_pa */
4276
4277	/*
4278	 * Calculate physical hash lock address
4279	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
4280	 */
4281	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4282	sllx	%g5, KPMHLK_SHIFT, %g5
4283	add	%g4, %g5, %g3
4284	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
4285
4286	/*
4287	 * Assemble tte
4288	 * g1=kp_pa g2=pfn g3=hlck_pa
4289	 */
4290#ifdef sun4v
4291	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4292	sllx	%g5, 32, %g5
4293	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4294	or	%g4, TTE4M, %g4
4295	or	%g5, %g4, %g5
4296#else
4297	sethi	%hi(TTE_VALID_INT), %g4
4298	mov	TTE4M, %g5
4299	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4300	or	%g5, %g4, %g5			/* upper part */
4301	sllx	%g5, 32, %g5
4302	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4303	or	%g5, %g4, %g5
4304#endif
4305	sllx	%g2, MMU_PAGESHIFT, %g4
4306	or	%g5, %g4, %g5			/* tte */
4307	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4308	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4309
4310	/*
4311	 * tsb dropin
4312	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4313	 */
4314
4315	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4316	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4317
4318	/* use C-handler if there's no go for dropin */
4319	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4320	cmp	%g7, -1
4321	bne,pn	%xcc, 5f	/* use C-handler if there's no go for dropin */
4322	  nop
4323
4324#ifdef	DEBUG
4325	/* double check refcnt */
4326	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4327	brz,pn	%g7, 5f			/* let C-handler deal with this */
4328	  nop
4329#endif
4330
4331#ifndef sun4v
4332	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4333	mov	ASI_N, %g1
4334	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4335	movnz	%icc, ASI_MEM, %g1
4336	mov	%g1, %asi
4337#endif
4338
4339	/*
4340	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4341	 * If we fail to lock the TSB entry then just load the tte into the
4342	 * TLB.
4343	 */
4344	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l1)
4345
4346	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4347	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4348locked_tsb_l1:
4349	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4350
4351	/* KPMLOCK_EXIT(kpmlckp, asi) */
4352	KPMLOCK_EXIT(%g3, ASI_MEM)
4353
4354	/*
4355	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4356	 * point to trapstat's TSB miss return code (note that trapstat
4357	 * itself will patch the correct offset to add).
4358	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4359	 */
4360	rdpr	%tl, %g7
4361	cmp	%g7, 1
4362	ble	%icc, 0f
4363	sethi	%hi(KERNELBASE), %g6
4364	rdpr	%tpc, %g7
4365	or	%g6, %lo(KERNELBASE), %g6
4366	cmp	%g7, %g6
4367	bgeu	%xcc, 0f
4368	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4369	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4370	wrpr	%g7, %tpc
4371	add	%g7, 4, %g7
4372	wrpr	%g7, %tnpc
43730:
4374	retry
43755:
4376	/* g3=hlck_pa */
4377	KPMLOCK_EXIT(%g3, ASI_MEM)
4378	ba,pt	%icc, sfmmu_kpm_exception
4379	  nop
4380	SET_SIZE(sfmmu_kpm_dtsb_miss)
4381
4382	/*
4383	 * kpm tsbmiss handler for smallpages
4384	 * g1 = 8K kpm TSB pointer
4385	 * g2 = tag access register
4386	 * g3 = 4M kpm TSB pointer
4387	 */
4388	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4389	TT_TRACE(trace_tsbmiss)
4390	CPU_INDEX(%g7, %g6)
4391	sethi	%hi(kpmtsbm_area), %g6
4392	sllx	%g7, KPMTSBM_SHIFT, %g7
4393	or	%g6, %lo(kpmtsbm_area), %g6
4394	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4395
4396	/* check enable flag */
4397	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4398	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4399	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4400	  nop
4401
4402	/*
4403	 * VA range check
4404	 * On fail: goto sfmmu_tsb_miss
4405	 */
4406	ldx	[%g6 + KPMTSBM_VBASE], %g7
4407	cmp	%g2, %g7
4408	blu,pn	%xcc, sfmmu_tsb_miss
4409	  ldx	[%g6 + KPMTSBM_VEND], %g5
4410	cmp	%g2, %g5
4411	bgeu,pn	%xcc, sfmmu_tsb_miss
4412	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4413
4414	/*
4415	 * check TL tsbmiss handling flag
4416	 * bump tsbmiss counter
4417	 */
4418	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4419#ifdef	DEBUG
4420	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4421	inc	%g5
4422	brz,pn	%g1, sfmmu_kpm_exception
4423	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4424#else
4425	inc	%g5
4426	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4427#endif
4428	/*
4429	 * At this point:
4430	 *  g1 = clobbered
4431	 *  g2 = tag access register
4432	 *  g3 = 4M kpm TSB pointer (not used)
4433	 *  g6 = per-CPU kpm tsbmiss area
4434	 *  g7 = kpm_vbase
4435	 */
4436
4437	/*
4438	 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
4439	 * which is defined in mach_kpm.h. Any changes in that macro
4440	 * should also be ported back to this assembly code.
4441	 */
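	/*
	 * A C sketch of that macro (mach_kpm.h is authoritative):
	 *
	 *	paddr = vaddr - kpm_vbase;
	 *	r = paddr >> kpm_size_shift;		// alias range
	 *	if (r != 0) {
	 *		v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
	 *		paddr -= r << kpm_size_shift;
	 *		if (r > v)
	 *			paddr += (r - v) << MMU_PAGESHIFT;
	 *		else
	 *			paddr -= r << MMU_PAGESHIFT;
	 *	}
	 */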
4442	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3	/* g3 = kpm_size_shift */
4443	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4444	srax    %g4, %g3, %g7			/* which alias range (r) */
4445	brz,pt	%g7, 2f
4446	  sethi   %hi(vac_colors_mask), %g5
4447	ld	[%g5 + %lo(vac_colors_mask)], %g5
4448
4449	srlx	%g2, MMU_PAGESHIFT, %g1		/* vaddr >> MMU_PAGESHIFT */
4450	and	%g1, %g5, %g1			/* g1 = v */
4451	sllx	%g7, %g3, %g5			/* g5 = r << kpm_size_shift */
4452	cmp	%g7, %g1			/* if (r > v) */
4453	bleu,pn %xcc, 1f
4454	  sub   %g4, %g5, %g4			/* paddr -= r << kpm_size_shift */
4455	sub	%g7, %g1, %g5			/* g5 = r - v */
4456	sllx	%g5, MMU_PAGESHIFT, %g7		/* (r-v) << MMU_PAGESHIFT */
4457	add	%g4, %g7, %g4			/* paddr += (r-v)<<MMU_PAGESHIFT */
4458	ba	2f
4459	  nop
44601:
4461	sllx	%g7, MMU_PAGESHIFT, %g5		/* else */
4462	sub	%g4, %g5, %g4			/* paddr -= r << MMU_PAGESHIFT */
4463
4464	/*
4465	 * paddr2pfn
4466	 *  g1 = vcolor (not used)
4467	 *  g2 = tag access register
4468	 *  g3 = clobbered
4469	 *  g4 = paddr
4470	 *  g5 = clobbered
4471	 *  g6 = per-CPU kpm tsbmiss area
4472	 *  g7 = clobbered
4473	 */
44742:
4475	srlx	%g4, MMU_PAGESHIFT, %g2		/* g2 = pfn */
4476
4477	/*
4478	 * Setup %asi
4479	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4480	 * if (mseg not found) sfmmu_kpm_exception
4481	 * g2=pfn g6=per-CPU kpm tsbmiss area
4482	 * g4 g5 g7 for scratch use.
4483	 */
4484	mov	ASI_MEM, %asi
4485	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4486	cmp	%g3, MSEG_NULLPTR_PA
4487	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4488	  nop
4489
4490	/*
4491	 * inx = pfn - mseg_pa->kpm_pbase
4492	 * g2=pfn  g3=mseg_pa  g6=per-CPU kpm tsbmiss area
4493	 */
4494	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4495	sub	%g2, %g7, %g4
4496
4497#ifdef	DEBUG
4498	/*
4499	 * Validate inx value
4500	 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
4501	 */
4502	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4503	cmp	%g4, %g5			/* inx - nkpmpgs */
4504	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4505	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4506#else
4507	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4508#endif
4509	/* ksp = &mseg_pa->kpm_spages[inx] */
4510	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4511	add	%g5, %g4, %g5			/* ksp */
4512
4513	/*
4514	 * KPMP_SHASH(kp)
4515	 * g2=pfn g3=mseg_pa g4=inx g5=ksp
4516	 * g6=per-CPU kpm tsbmiss area  g7=kpmp_stable_sz
4517	 */
4518	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4519	sub	%g7, 1, %g7			/* mask */
4520	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4521	add	%g5, %g1, %g5			/* y = ksp + x */
4522	and 	%g5, %g7, %g5			/* hashinx = y & mask */
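	/* i.e. hashinx = (ksp + (ksp << kpmp_shift)) & (kpmp_stable_sz - 1) */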
4523
4524	/*
4525	 * Calculate physical kpm_spage pointer
4526	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4527	 * g6=per-CPU kpm tsbmiss area
4528	 */
4529	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4530	add	%g1, %g4, %g1			/* ksp_pa */
4531
4532	/*
4533	 * Calculate physical hash lock address.
4534	 * Note: Changes in kpm_shlk_t must be reflected here.
4535	 * g1=ksp_pa g2=pfn g5=hashinx
4536	 * g6=per-CPU kpm tsbmiss area
4537	 */
4538	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4539	sllx	%g5, KPMSHLK_SHIFT, %g5
4540	add	%g4, %g5, %g3			/* hlck_pa */
4541
4542	/*
4543	 * Assemble non-cacheable tte initially
4544	 * g1=ksp_pa g2=pfn g3=hlck_pa
4545	 * g6=per-CPU kpm tsbmiss area
4546	 */
4547	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4548	sllx	%g5, 32, %g5
4549	mov	(TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4550	or	%g5, %g4, %g5
4551	sllx	%g2, MMU_PAGESHIFT, %g4
4552	or	%g5, %g4, %g5			/* tte */
4553	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4554	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4555
4556	/*
4557	 * tsb dropin
4558	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
4559	 * g6=per-CPU kpm tsbmiss area  g7=scratch register
4560	 */
4561
4562	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4563	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4564
4565	/* use C-handler if there's no go for dropin */
4566	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
4567	andcc	%g7, KPM_MAPPED_GO, %g0			/* go or no go ? */
4568	bz,pt	%icc, 5f				/* no go */
4569	  nop
4570	and	%g7, KPM_MAPPED_MASK, %g7		/* go */
4571	cmp	%g7, KPM_MAPPEDS			/* cacheable ? */
4572	be,a,pn	%xcc, 3f
4573	  or	%g5, TTE_CV_INT, %g5			/* cacheable */
45743:
4575#ifndef sun4v
4576	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4577	mov	ASI_N, %g1
4578	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4579	movnz	%icc, ASI_MEM, %g1
4580	mov	%g1, %asi
4581#endif
4582
4583	/*
4584	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4585	 * If we fail to lock the TSB entry then just load the tte into the
4586	 * TLB.
4587	 */
4588	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l2)
4589
4590	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4591	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4592locked_tsb_l2:
4593	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4594
4595	/* KPMLOCK_EXIT(kpmlckp, asi) */
4596	KPMLOCK_EXIT(%g3, ASI_MEM)
4597
4598	/*
4599	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4600	 * point to trapstat's TSB miss return code (note that trapstat
4601	 * itself will patch the correct offset to add).
4602	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4603	 */
4604	rdpr	%tl, %g7
4605	cmp	%g7, 1
4606	ble	%icc, 0f
4607	sethi	%hi(KERNELBASE), %g6
4608	rdpr	%tpc, %g7
4609	or	%g6, %lo(KERNELBASE), %g6
4610	cmp	%g7, %g6
4611	bgeu	%xcc, 0f
4612	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4613	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4614	wrpr	%g7, %tpc
4615	add	%g7, 4, %g7
4616	wrpr	%g7, %tnpc
46170:
4618	retry
46195:
4620	/* g3=hlck_pa */
4621	KPMLOCK_EXIT(%g3, ASI_MEM)
4622	ba,pt	%icc, sfmmu_kpm_exception
4623	  nop
4624	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4625
4626#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4627#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4628#endif
4629
4630#endif /* lint */
4631
4632#ifdef	lint
4633/*
4634 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4635 * Called from C-level, sets/clears "go" indication for trap level handler.
4636 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4637 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4638 * Assumes khl_mutex is held when called from C-level.
4639 */
4640/* ARGSUSED */
4641void
4642sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4643{
4644}
4645
4646/*
4647 * kpm_smallpages: stores val to the byte at address mapped, within
4648 * low-level lock brackets; the old value is returned.
4649 * Called from C-level.
4650 */
4651/* ARGSUSED */
4652int
4653sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
4654{
4655	return (0);
4656}
4657
4658#else /* lint */
4659
4660	.seg	".data"
4661sfmmu_kpm_tsbmtl_panic:
4662	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4663	.byte	0
4664sfmmu_kpm_stsbmtl_panic:
4665	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4666	.byte	0
4667	.align	4
4668	.seg	".text"
4669
4670	ENTRY_NP(sfmmu_kpm_tsbmtl)
4671	rdpr	%pstate, %o3
4672	/*
4673	 * %o0 = &kp_refcntc
4674	 * %o1 = &khl_lock
4675	 * %o2 = 0/1 (off/on)
4676	 * %o3 = pstate save
4677	 */
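	/*
	 * A C sketch of the routine: with interrupts disabled,
	 *
	 *	KPMLOCK_ENTER(khl_lock);
	 *	*kp_refcntc = cmd ? -1 : 0;	// -1 == "go" for TL dropin
	 *	KPMLOCK_EXIT(khl_lock);
	 */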
4678#ifdef DEBUG
4679	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4680	bnz,pt %icc, 1f				/* disabled, panic	 */
4681	  nop
4682	save	%sp, -SA(MINFRAME), %sp
4683	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4684	call	panic
4685	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4686	ret
4687	restore
46881:
4689#endif /* DEBUG */
4690	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4691
4692	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4693	mov	-1, %o5
4694	brz,a	%o2, 2f
4695	  mov	0, %o5
46962:
4697	sth	%o5, [%o0]
4698	KPMLOCK_EXIT(%o1, ASI_N)
4699
4700	retl
4701	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4702	SET_SIZE(sfmmu_kpm_tsbmtl)
4703
4704	ENTRY_NP(sfmmu_kpm_stsbmtl)
4705	rdpr	%pstate, %o3
4706	/*
4707	 * %o0 = &mapped
4708	 * %o1 = &kshl_lock
4709	 * %o2 = val
4710	 * %o3 = pstate save
4711	 */
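	/*
	 * A C sketch of the routine: with interrupts disabled,
	 *
	 *	KPMLOCK_ENTER(kshl_lock);
	 *	old = *mapped;
	 *	*mapped = val;
	 *	KPMLOCK_EXIT(kshl_lock);
	 *	return (old & KPM_MAPPED_MASK);
	 */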
4712#ifdef DEBUG
4713	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4714	bnz,pt %icc, 1f				/* disabled, panic	 */
4715	  nop
4716	save	%sp, -SA(MINFRAME), %sp
4717	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4718	call	panic
4719	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4720	ret
4721	restore
47221:
4723#endif /* DEBUG */
4724	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4725
4726	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4727	ldsb	[%o0], %o5
4728	stb	%o2, [%o0]
4729	KPMLOCK_EXIT(%o1, ASI_N)
4730
4731	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
4732	retl
4733	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4734	SET_SIZE(sfmmu_kpm_stsbmtl)
4735
4736#endif /* lint */
4737
4738#ifndef lint
4739#ifdef sun4v
4740	/*
4741	 * User/kernel data miss w/ multiple TSBs
4742	 * The first probe covers 8K, 64K, and 512K page sizes,
4743	 * because 64K and 512K mappings are replicated off 8K
4744	 * pointer.  Second probe covers 4M page size only.
4745	 *
4746	 * MMU fault area contains miss address and context.
4747	 */
4748	ALTENTRY(sfmmu_slow_dmmu_miss)
4749	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
4750
4751slow_miss_common:
4752	/*
4753	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4754	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4755	 */
4756	brnz,pt	%g3, 8f			! check for user context
4757	  nop
4758
4759	/*
4760	 * Kernel miss
4761	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4762	 * branch to sfmmu_tsb_miss_tt to handle it.
4763	 */
4764	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4765sfmmu_dslow_patch_ktsb_base:
4766	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4767sfmmu_dslow_patch_ktsb_szcode:
4768	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4769
4770	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4771	! %g1 = First TSB entry pointer, as TSB miss handler expects
4772
4773	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4774sfmmu_dslow_patch_ktsb4m_base:
4775	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4776sfmmu_dslow_patch_ktsb4m_szcode:
4777	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4778
4779	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4780	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4781	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4782	.empty
4783
47848:
4785	/*
4786	 * User miss
4787	 * Get first TSB pointer in %g1
4788	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4789	 * Branch to sfmmu_tsb_miss_tt to handle it
4790	 */
4791	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4792	/* %g1 = first TSB entry ptr now, %g2 preserved */
4793
4794	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4795	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
4796	  nop
4797
4798	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4799	/* %g3 = second TSB entry ptr now, %g2 preserved */
48009:
4801	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4802	.empty
4803	SET_SIZE(sfmmu_slow_dmmu_miss)
4804
4805
4806	/*
4807	 * User/kernel instruction miss w/ multiple TSBs
4808	 * The first probe covers 8K, 64K, and 512K page sizes,
4809	 * because 64K and 512K mappings are replicated off 8K
4810	 * pointer.  Second probe covers 4M page size only.
4811	 *
4812	 * MMU fault area contains miss address and context.
4813	 */
4814	ALTENTRY(sfmmu_slow_immu_miss)
4815	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4816	ba,a,pt	%xcc, slow_miss_common
4817	SET_SIZE(sfmmu_slow_immu_miss)
4818
4819#endif /* sun4v */
4820#endif	/* lint */
4821
4822#ifndef lint
4823
4824/*
4825 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4826 */
4827	.seg	".data"
4828	.align	64
4829	.global tsbmiss_area
4830tsbmiss_area:
4831	.skip	(TSBMISS_SIZE * NCPU)
4832
4833	.align	64
4834	.global kpmtsbm_area
4835kpmtsbm_area:
4836	.skip	(KPMTSBM_SIZE * NCPU)
4837#endif	/* lint */
4838