/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBTAG_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc				;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
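
/*
 * For reference, the index computation above corresponds to this C
 * sketch (illustrative only; TSB_ENTRIES, TSB_ENTRY_SHIFT and struct
 * tsbe are as defined in the sfmmu headers):
 *
 *	struct tsbe *
 *	get_tsbe_pointer(int vpshift, caddr_t tsbbase, uint64_t tagacc,
 *	    int szc)
 *	{
 *		uint64_t nentries = TSB_ENTRIES(0) << szc;
 *		uint64_t entry = (tagacc >> vpshift) & (nentries - 1);
 *
 *		return ((struct tsbe *)(tsbbase + (entry << TSB_ENTRY_SHIFT)));
 *	}
 */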

/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	/* tsbe lock acquired */					;\
	membar	#StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]%asi, tmp1					;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]%asi, tmp1					;\
	/* tsbe lock acquired */					;\
	membar	#StoreStore

#endif /* UTSB_PHYS */
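
/*
 * A rough C analogue of the locking protocol above (a sketch only;
 * atomic_cas_32() stands in for the casa instruction and tagp for the
 * address of the 32-bit tag word of the TSBE):
 *
 *	for (;;) {
 *		uint32_t old = *tagp;				// lda
 *		if (old == TSBTAG_LOCKED)			// already
 *			continue;				// locked: spin
 *		if (atomic_cas_32(tagp, old, TSBTAG_LOCKED) == old)
 *			break;					// locked it
 *	}
 *	membar_producer();				// membar #StoreStore
 */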

/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar	#StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar	#StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for the	;\
	 * valid tte.  TSB invalidate will spin till the entry is	;\
	 * unlocked.  Note, we always invalidate the hash table	;\
	 * before we unload the TSB.					;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for the	;\
	 * valid tte.  TSB invalidate will spin till the entry is	;\
	 * unlocked.  Note, we always invalidate the hash table	;\
	 * before we unload the TSB.					;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We don't need to update the TSB and then check for the	;\
	 * valid tte.  TSB invalidate will spin till the entry is	;\
	 * unlocked.  Note, we always invalidate the hash table	;\
	 * before we unload the TSB.					;\
	 * Or in 4M pfn offset to TTE and set the exec_perm bit to 0	;\
	 * and exec_synth bit to 1.					;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
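
/*
 * As a C sketch, the macro above computes (illustrative only; the 0x7
 * and 0x3f masks select the 4M-page index within a 32M or 256M page):
 *
 *	uint64_t
 *	get_4m_pfn_off(uint64_t tte, uint64_t tagaccess)
 *	{
 *		uint64_t idx = tagaccess >> MMU_PAGESHIFT4M;
 *
 *		if (((tte >> TTE_SZ_SHFT) & TTE_SZ_BITS) == 0)
 *			idx &= 0x7;		// 32M page size
 *		else
 *			idx &= 0x3f;		// 256M page size
 *		return (idx << MMU_PAGESHIFT4M);
 *	}
 */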

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits.					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */
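
/*
 * The invalidate protocol above, sketched in C (illustrative only;
 * atomic_cas_32() stands in for casa and tagp for the address of the
 * 32-bit tag word of the TSBE):
 *
 *	for (;;) {
 *		uint32_t old = *tagp;
 *		if (old == TSBTAG_LOCKED)
 *			continue;		// locked: wait for unlock
 *		if (tsbep->tte_tag != tag)
 *			break;			// tag mismatch: do nothing
 *		if (atomic_cas_32(tagp, old, TSBTAG_INVALID) == old)
 *			break;			// entry invalidated
 *	}
 */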

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */
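
/*
 * For reference, the four patchable fields carve up a 64-bit value v
 * as follows (this is what sfmmu_fixup_setx() below patches in):
 *
 *	%hh(v) = v[63:42]	(sethi imm22)
 *	%hm(v) = v[41:32]	(or imm)
 *	%lm(v) = v[31:10]	(sethi imm22)
 *	%lo(v) = v[9:0]		(or imm)
 *
 * so dest = ((hh << 10 | hm) << 32) | (lm << 10 | lo) == v.
 */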


#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */
uint_t
sfmmu_disable_intrs()
{ return(0); }

/* ARGSUSED */
void
sfmmu_enable_intrs(uint_t pstate_save)
{}

/* ARGSUSED */
int
sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
{ return(0); }

/*
 * Use cas, if tte has changed underneath us then reread and try again.
 * In the case of a retry, it will update sttep with the new original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas, if tte has changed underneath us then return 1, else return 0
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	ENTRY(sfmmu_disable_intrs)
	rdpr	%pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
	retl
	  wrpr	%o0, PSTATE_IE, %pstate
	SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
	retl
	  wrpr	%g0, %o0, %pstate
	SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on an MMU.
 * If allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID.
 * If allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
 * is the case when sfmmu_alloc_ctx is called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP Lock and performs ctx
 * allocation.
 * Hardware context register and HAT mmu cnum are updated accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 * %o3 - sfmmu private/shared flag
 *
 * ret - 0: no ctx is allocated
 *       1: a ctx is allocated
 */
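/*
 * In outline, the logic below corresponds to this C sketch (illustrative
 * pseudo-code only; the names follow the register comments below rather
 * than the real struct definitions):
 *
 *	gnum/cnum = sfmmup->sfmmu_ctxs[mmuid];		// fast path check
 *	if (cnum != INVALID_CONTEXT && gnum == mmu_ctxp->gnum)
 *		goto done;				// ctx still good
 *	if (cnum == INVALID_CONTEXT && allocflag == 0)
 *		goto done;				// resume(): no alloc
 *	lock(&sfmmup->sfmmu_ctx_lock);			// slow path
 *	// recheck the two conditions above; if still stale:
 *	cnum = ++mmu_ctxp->cnum;			// via cas loop
 *	if (cnum >= mmu_ctxp->nctxs)
 *		cnum = INVALID_CONTEXT;			// wrap around later
 *	else
 *		sfmmup->sfmmu_ctxs[mmuid] = (gnum, cnum);
 *	unlock(&sfmmup->sfmmu_ctx_lock);
 * done:
 *	program the secondary ctx reg with cnum;
 */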
	ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi	%hi(ksfmmup), %g1
	ldx	[%g1 + %lo(ksfmmup)], %g1
	cmp	%g1, %o0
	bne,pt	%xcc, 0f
	  nop

	sethi	%hi(panicstr), %g1		! if kernel as, panic
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 7f
	  nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
	lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi	%hi(panicstr), %g1	! test if panicstr is already set
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
1:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt	%g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = PP lock

5:
	membar	#LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match global gnum. We hold
	 * sfmmu_ctx_lock spinlock. Allocate that context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum reaches max, bail, so wrap around can be performed later.
	 */
	set	INVALID_CONTEXT, %o1
	/*
	 * When the routine is called by shared ctx, we want to set
	 * both private and shared ctx regs to INVALID. In order to
	 * do so, we set the sfmmu priv/shared flag to 'private' regardless,
	 * so that the private ctx reg will be set to invalid.
	 * Note that values written to private context register are
	 * automatically written to shared context register as well.
	 */
	mov	%g0, %g1		! %g1 = sfmmu private/shared flag
	mov	%g0, %g4		! %g4 = ret = 0

	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
	set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt	%icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar	#LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

	mov	1, %g4			! %g4 = ret = 1
8:
	/*
	 * program the secondary context register
	 *
	 * %o1 = cnum
	 * %g1 = sfmmu private/shared flag (0:private,  1:shared)
	 */

#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif /* sun4u */

	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)

	retl
	  mov	%g4, %o0			! %o0 = ret

	SET_SIZE(sfmmu_alloc_ctx)

	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc, 1f			/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)
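
	/*
	 * sfmmu_modifytte() above implements roughly this C sketch
	 * (illustrative only; atomic_cas_64() stands in for casx, and
	 * tte_t's ll field is the raw 64-bit TTE image):
	 *
	 *	uint64_t cur  = dttep->ll;		// current
	 *	uint64_t orig = sttep->ll;		// original
	 *	for (;;) {
	 *		if (stmodttep->ll == cur) {	// no change needed:
	 *			sttep->ll = cur;	// just update original
	 *			break;
	 *		}
	 *		if (atomic_cas_64(&dttep->ll, orig,
	 *		    stmodttep->ll) == orig)
	 *			break;			// cas succeeded
	 *		cur = dttep->ll;		// tte changed under us:
	 *		sttep->ll = orig = cur;		// reread and try again
	 *	}
	 */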

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc, 1f			/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

void
sfmmu_patch_shctx(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)
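
	/*
	 * Each fixup routine here rewrites the immediate field of an
	 * instruction already emitted in the trap table.  A C sketch of
	 * patching a sethi imm22 with bits [63:42] of a value v
	 * (illustrative only):
	 *
	 *	uint32_t *ip = (uint32_t *)patch_area;
	 *	*ip |= (v >> 42) & 0x3fffff;	// field pre-set to
	 *					// RUNTIME_PATCH (aka 0)
	 *	// followed by an I$ flush of ip, as "flush %o0" does above
	 */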

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
	movrz	%o0, ASI_NQUAD_LD, %o3
	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
	  or	%o0, %lo(iktsb4m), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)

	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif /* !sun4v */

	ldx	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	lduw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)

	ENTRY_NP(sfmmu_patch_shctx)
#ifdef sun4u
	retl
	  nop
#else /* sun4u */
	set	sfmmu_shctx_cpu_mondo_patch, %o0
	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp %g5
	st	%o1, [%o0]
	flush	%o0
	MAKE_NOP_INSTR(%o1)
	add	%o0, I_SIZE, %o0	! next instr
	st	%o1, [%o0]
	flush	%o0

	set	sfmmu_shctx_user_rtt_patch, %o0
	st	%o1, [%o0]		! nop 1st instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 2nd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 3rd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 4th instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 5th instruction
	retl
	  flush	%o0
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_shctx)

	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */


#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */


#if defined (lint)
/*
 * The sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to
 * the hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
}

/*
 * The sfmmu_hblk_hash_rm is the assembly primitive to remove hmeblks from the
 * hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa, struct hme_blk *prev_hblkp)
{
}
#else /* lint */

/*
 * Functions to grab/release hme bucket list lock.  I only use a byte
 * instead of the whole int because eventually we might want to
 * put some counters on the other bytes (of course, these routines would
 * have to change).  The code that grabs this lock should execute
 * with interrupts disabled and hold the lock for the least amount of time
 * possible.
 */

/*
 * Even though hmeh_listlock is updated using pa there's no need to flush
 * dcache since hmeh_listlock will be restored to the original value (0)
 * before interrupts are reenabled.
 */

/*
 * For sparcv9 hme hash buckets may not be in the nucleus.  hme hash update
 * routines still use virtual addresses to update the bucket fields. But they
 * must not cause a TLB miss after grabbing the low level bucket lock. To
 * achieve this we must make sure the bucket structure is completely within an
 * 8K page.
 */

#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
#error - the size of hmehash_bucket structure is not power of 2
#endif

/*
 * Enable backoff to significantly reduce locking overhead and reduce a chance
 * of xcall timeout. This is only enabled for sun4v as a Makefile compile-
 * time option.
 * The rd %ccr is better for performance compared to a non pipeline releasing
 * tight spin on N2/VF.
 * Backoff based fix is a temporary solution and doesn't allow scaling above
 * lock saturation point. The final fix is to eliminate HMELOCK_ENTER()
 * to avoid xcall timeouts and improve GET_TTE() performance.
 */

#ifdef HMELOCK_BACKOFF_ENABLE

#define	HMELOCK_BACKOFF(reg, val)				\
	set	val, reg					;\
	rd	%ccr, %g0					;\
	brnz	reg, .-4					;\
	dec	reg

#define	CAS_HME(tmp1, tmp2, exitlabel, asi)			\
	mov	0xff, tmp2					;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brz,a,pt tmp2, exitlabel				;\
	membar	#LoadLoad

#define	HMELOCK_ENTER(hmebp, tmp1, tmp2, label, asi)		\
	mov	0xff, tmp2					;\
	add	hmebp, HMEBUCK_LOCK, tmp1			;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brz,a,pt tmp2, label/**/2				;\
	membar	#LoadLoad					;\
	HMELOCK_BACKOFF(tmp2,0x8)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x10)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x20)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x40)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x80)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
label/**/1:							;\
	HMELOCK_BACKOFF(tmp2,0x100)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x200)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x400)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x800)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x1000)				;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)			;\
	HMELOCK_BACKOFF(tmp2,0x2000)				;\
	mov	0xff, tmp2					;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brnz,pn	tmp2, label/**/1	/* reset backoff */	;\
	membar	#LoadLoad					;\
label/**/2:

#else /* HMELOCK_BACKOFF_ENABLE */

#define	HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi)		\
	mov	0xff, tmp2					;\
	add	hmebp, HMEBUCK_LOCK, tmp1			;\
label1:								;\
	casa	[tmp1]asi, %g0, tmp2				;\
	brnz,pn	tmp2, label1					;\
	mov	0xff, tmp2					;\
	membar	#LoadLoad

#endif /* HMELOCK_BACKOFF_ENABLE */

#define	HMELOCK_EXIT(hmebp, tmp1, asi)				\
	membar	#LoadStore|#StoreStore				;\
	add	hmebp, HMEBUCK_LOCK, tmp1			;\
	sta	%g0, [tmp1]asi
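
/*
 * The enter/backoff/exit protocol above, sketched in C (illustrative
 * only; atomic_cas_8() stands in for casa on the lock byte and the
 * delay loop for the non-pipelined rd %ccr spin):
 *
 *	int backoff = 0x8;
 *	while (atomic_cas_8(&hmebp->hmeh_listlock, 0, 0xff) != 0) {
 *		for (volatile int i = backoff; i > 0; i--)
 *			;				// HMELOCK_BACKOFF
 *		backoff = (backoff < 0x2000) ? (backoff << 1) : 0x100;
 *	}
 *	membar_consumer();			// #LoadLoad
 *	// ... update the bucket ...
 *	membar_exit();				// #LoadStore|#StoreStore
 *	hmebp->hmeh_listlock = 0;		// HMELOCK_EXIT
 */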
1669
1670	.seg	".data"
1671hblk_add_panic1:
1672	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
1673	.byte	0
1674hblk_add_panic2:
1675	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
1676	.byte	0
1677	.align	4
1678	.seg	".text"
1679
1680	ENTRY_NP(sfmmu_hblk_hash_add)
1681	/*
1682	 * %o0 = hmebp
1683	 * %o1 = hmeblkp
1684	 * %o2 = hblkpa
1685	 */
1686	rdpr	%pstate, %o5
1687#ifdef DEBUG
1688	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
1689	bnz,pt %icc, 3f				/* disabled, panic	 */
1690	  nop
1691	save	%sp, -SA(MINFRAME), %sp
1692	sethi	%hi(hblk_add_panic1), %o0
1693	call	panic
1694	 or	%o0, %lo(hblk_add_panic1), %o0
1695	ret
1696	restore
1697
16983:
1699#endif /* DEBUG */
1700	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
1701	mov	%o2, %g1
1702
1703	/*
1704	 * g1 = hblkpa
1705	 */
1706	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
1707	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
1708#ifdef	DEBUG
1709	cmp	%o4, %g0
1710	bne,pt %xcc, 1f
1711	 nop
1712	brz,pt %g2, 1f
1713	 nop
1714	wrpr	%g0, %o5, %pstate		/* enable interrupts */
1715	save	%sp, -SA(MINFRAME), %sp
1716	sethi	%hi(hblk_add_panic2), %o0
1717	call	panic
1718	  or	%o0, %lo(hblk_add_panic2), %o0
1719	ret
1720	restore
17211:
1722#endif /* DEBUG */
1723	/*
1724	 * We update hmeblks entries before grabbing lock because the stores
1725	 * could take a tlb miss and require the hash lock.  The buckets
1726	 * are part of the nucleus so we are cool with those stores.
1727	 *
1728	 * if buckets are not part of the nucleus our game is to
1729	 * not touch any other page via va until we drop the lock.
1730	 * This guarantees we won't get a tlb miss before the lock release
1731	 * since interrupts are disabled.
1732	 */
1733	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
1734	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
1735	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
1736	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
1737	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
1738	HMELOCK_EXIT(%o0, %g2, ASI_N)
1739	retl
1740	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1741	SET_SIZE(sfmmu_hblk_hash_add)
1742
1743	ENTRY_NP(sfmmu_hblk_hash_rm)
1744	/*
1745	 * This function removes an hmeblk from the hash chain.
1746	 * It is written to guarantee we don't take a tlb miss
1747	 * by using physical addresses to update the list.
1748	 *
1749	 * %o0 = hmebp
1750	 * %o1 = hmeblkp
1751	 * %o2 = hmeblkp previous pa
1752	 * %o3 = hmeblkp previous
1753	 */
1754
1755	mov	%o3, %o4			/* o4 = hmeblkp previous */
1756
1757	rdpr	%pstate, %o5
1758#ifdef DEBUG
1759	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l4, %g1)
1760#endif /* DEBUG */
1761	/*
1762	 * disable interrupts, clear Address Mask to access 64 bit physaddr
1763	 */
1764	andn    %o5, PSTATE_IE, %g1
1765	wrpr    %g1, 0, %pstate
1766
1767#ifndef sun4v
1768	sethi   %hi(dcache_line_mask), %g4
1769	ld      [%g4 + %lo(dcache_line_mask)], %g4
1770#endif /* sun4v */
1771
1772	/*
1773	 * if buckets are not part of the nucleus our game is to
1774	 * not touch any other page via va until we drop the lock.
1775	 * This guarantees we won't get a tlb miss before the lock release
1776	 * since interrupts are disabled.
1777	 */
1778	HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N)
1779	ldn	[%o0 + HMEBUCK_HBLK], %g2	/* first hmeblk in list */
1780	cmp	%g2, %o1
1781	bne,pt	%ncc,1f
1782	 mov	ASI_MEM, %asi
1783	/*
1784	 * hmeblk is first on list
1785	 */
1786	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = hmeblk pa */
1787	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1788	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1789	stn	%o3, [%o0 + HMEBUCK_HBLK]	/* write va */
1790	ba,pt	%xcc, 2f
1791	  stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* write pa */
17921:
1793	/* hmeblk is not first on list */
1794
1795	mov	%o2, %g3
1796#ifndef sun4v
1797	GET_CPU_IMPL(%g2)
1798	cmp 	%g2, CHEETAH_IMPL
1799	bge,a,pt %icc, hblk_hash_rm_1
1800	  and	%o4, %g4, %g2
1801	cmp	%g2, SPITFIRE_IMPL
1802	blt	%icc, hblk_hash_rm_2		/* no flushing needed for OPL */
1803	  and	%o4, %g4, %g2
1804	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev pa from dcache */
1805	add	%o4, HMEBLK_NEXT, %o4
1806	and	%o4, %g4, %g2
1807	ba	hblk_hash_rm_2
1808	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev va from dcache */
1809hblk_hash_rm_1:
1810
1811	stxa	%g0, [%g3]ASI_DC_INVAL		/* flush prev pa from dcache */
1812	membar	#Sync
1813	add     %g3, HMEBLK_NEXT, %g2
1814	stxa	%g0, [%g2]ASI_DC_INVAL		/* flush prev va from dcache */
1815hblk_hash_rm_2:
1816	membar	#Sync
1817#endif /* sun4v */
1818	ldxa	[%g3 + HMEBLK_NEXTPA] %asi, %g2	/* g2 = hmeblk pa */
1819	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
1820	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
1821	stna	%o3, [%g3 + HMEBLK_NEXT] %asi	/* write va */
1822	stxa	%g1, [%g3 + HMEBLK_NEXTPA] %asi	/* write pa */
18232:
1824	HMELOCK_EXIT(%o0, %g2, ASI_N)
1825	retl
1826	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1827	SET_SIZE(sfmmu_hblk_hash_rm)
1828
1829#endif /* lint */
1830
1831/*
1832 * These macros are used to update global sfmmu hme hash statistics
1833 * in perf critical paths.  They are only enabled in debug kernels or
1834 * if SFMMU_STAT_GATHER is defined.
1835 */
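/*
 * As a rough C sketch of HAT_HSEARCH_DBSTAT (the counter is selected by
 * byte offset into sfmmu_global_stat; the khatid field name below is
 * shorthand for the TSBMISS_KHATID slot of the tsbmiss area):
 *
 *	off = (tsbarea->khatid == hatid) ?
 *	    HATSTAT_KHASH_SEARCH : HATSTAT_UHASH_SEARCH;
 *	*(uint32_t *)((char *)&sfmmu_global_stat + off) += 1;
 *
 * HAT_HLINK_DBSTAT is identical, using the *_LINKS offsets.
 */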
1836#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1837#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1838	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1839	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
1840	cmp	tmp1, hatid						;\
1841	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
1842	set	sfmmu_global_stat, tmp1					;\
1843	add	tmp1, tmp2, tmp1					;\
1844	ld	[tmp1], tmp2						;\
1845	inc	tmp2							;\
1846	st	tmp2, [tmp1]
1847
1848#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1849	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1850	mov	HATSTAT_KHASH_LINKS, tmp2				;\
1851	cmp	tmp1, hatid						;\
1852	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
1853	set	sfmmu_global_stat, tmp1					;\
1854	add	tmp1, tmp2, tmp1					;\
1855	ld	[tmp1], tmp2						;\
1856	inc	tmp2							;\
1857	st	tmp2, [tmp1]
1858
1859
1860#else /* DEBUG || SFMMU_STAT_GATHER */
1861
1862#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1863
1864#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1865
1866#endif  /* DEBUG || SFMMU_STAT_GATHER */
1867
1868/*
1869 * This macro is used to update global sfmmu kstats in non
1870 * perf critical areas, so it is enabled all the time.
1871 */
1872#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
1873	sethi	%hi(sfmmu_global_stat), tmp1				;\
1874	add	tmp1, statname, tmp1					;\
1875	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
1876	inc	tmp2							;\
1877	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
1878
1879/*
1880 * This macro is used to update a 32-bit per cpu stat in non perf
1881 * critical areas, so it is enabled all the time.
1882 */
1883#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
1884	ld	[tsbarea + stat], tmp1					;\
1885	inc	tmp1							;\
1886	st	tmp1, [tsbarea + stat]
1887
1888/*
1889 * This macro is used to update a 16-bit per cpu stat in non perf
1890 * critical areas, so it is enabled all the time.
1891 */
1892#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
1893	lduh	[tsbarea + stat], tmp1					;\
1894	inc	tmp1							;\
1895	stuh	tmp1, [tsbarea + stat]
1896
1897#if defined(KPM_TLBMISS_STATS_GATHER)
1898	/*
1899	 * Count kpm dtlb misses separately to allow a different
1900	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
1901	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
1902	 */
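	/*
	 * A rough C equivalent of the macro below (illustrative only;
	 * kpm VAs have the sign bit set, hence the initial brgez test,
	 * and the vbase/vend/dtlbmiss names are shorthand for the
	 * KPMTSBM_* offsets):
	 *
	 *	if ((int64_t)tagacc < 0 &&
	 *	    tagacc >= kpmtsbm_area[cpu].vbase &&
	 *	    tagacc < kpmtsbm_area[cpu].vend)
	 *		kpmtsbm_area[cpu].dtlbmiss++;
	 */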
1903#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
1904	brgez	tagacc, label	/* KPM VA? */				;\
1905	nop								;\
1906	CPU_INDEX(tmp1, tsbma)						;\
1907	sethi	%hi(kpmtsbm_area), tsbma				;\
1908	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
1909	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
1910	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
1911	/* VA range check */						;\
1912	ldx	[tsbma + KPMTSBM_VBASE], val				;\
1913	cmp	tagacc, val						;\
1914	blu,pn	%xcc, label						;\
1915	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
1916	cmp	tagacc, tmp1						;\
1917	bgeu,pn	%xcc, label						;\
1918	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
1919	inc	val							;\
1920	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
1921label:
1922#else
1923#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1924#endif	/* KPM_TLBMISS_STATS_GATHER */
1925
1926#if defined (lint)
1927/*
1928 * The following routines are jumped to from the mmu trap handlers to do
1929 * the setting up to call systrap.  They are separate routines instead of
1930 * being part of the handlers because the handlers would exceed 32
1931 * instructions; since this is part of the slow path, the jump
1932 * cost is irrelevant.
1933 */
1934void
1935sfmmu_pagefault(void)
1936{
1937}
1938
1939void
1940sfmmu_mmu_trap(void)
1941{
1942}
1943
1944void
1945sfmmu_window_trap(void)
1946{
1947}
1948
1949void
1950sfmmu_kpm_exception(void)
1951{
1952}
1953
1954#else /* lint */
1955
1956#ifdef	PTL1_PANIC_DEBUG
1957	.seg	".data"
1958	.global	test_ptl1_panic
1959test_ptl1_panic:
1960	.word	0
1961	.align	8
1962
1963	.seg	".text"
1964	.align	4
1965#endif	/* PTL1_PANIC_DEBUG */
1966
1967
1968	ENTRY_NP(sfmmu_pagefault)
1969	SET_GL_REG(1)
1970	USE_ALTERNATE_GLOBALS(%g5)
1971	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1972	rdpr	%tt, %g6
1973	cmp	%g6, FAST_IMMU_MISS_TT
1974	be,a,pn	%icc, 1f
1975	  mov	T_INSTR_MMU_MISS, %g3
1976	cmp	%g6, T_INSTR_MMU_MISS
1977	be,a,pn	%icc, 1f
1978	  mov	T_INSTR_MMU_MISS, %g3
1979	mov	%g5, %g2
1980	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1981	cmp	%g6, FAST_DMMU_MISS_TT
1982	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1983	cmp	%g6, T_DATA_MMU_MISS
1984	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1985
1986#ifdef  PTL1_PANIC_DEBUG
1987	/* check if we want to test the tl1 panic */
1988	sethi	%hi(test_ptl1_panic), %g4
1989	ld	[%g4 + %lo(test_ptl1_panic)], %g1
1990	st	%g0, [%g4 + %lo(test_ptl1_panic)]
1991	cmp	%g1, %g0
1992	bne,a,pn %icc, ptl1_panic
1993	  or	%g0, PTL1_BAD_DEBUG, %g1
1994#endif	/* PTL1_PANIC_DEBUG */
19951:
1996	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
1997	/*
1998	 * g2 = tag access reg
1999	 * g3.l = type
2000	 * g3.h = 0
2001	 */
2002	sethi	%hi(trap), %g1
2003	or	%g1, %lo(trap), %g1
20042:
2005	ba,pt	%xcc, sys_trap
2006	  mov	-1, %g4
2007	SET_SIZE(sfmmu_pagefault)
2008
2009	ENTRY_NP(sfmmu_mmu_trap)
2010	SET_GL_REG(1)
2011	USE_ALTERNATE_GLOBALS(%g5)
2012	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
2013	rdpr	%tt, %g6
2014	cmp	%g6, FAST_IMMU_MISS_TT
2015	be,a,pn	%icc, 1f
2016	  mov	T_INSTR_MMU_MISS, %g3
2017	cmp	%g6, T_INSTR_MMU_MISS
2018	be,a,pn	%icc, 1f
2019	  mov	T_INSTR_MMU_MISS, %g3
2020	mov	%g5, %g2
2021	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
2022	cmp	%g6, FAST_DMMU_MISS_TT
2023	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
2024	cmp	%g6, T_DATA_MMU_MISS
2025	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
20261:
2027	/*
2028	 * g2 = tag access reg
2029	 * g3 = type
2030	 */
2031	sethi	%hi(sfmmu_tsbmiss_exception), %g1
2032	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
2033	ba,pt	%xcc, sys_trap
2034	  mov	-1, %g4
2035	/*NOTREACHED*/
2036	SET_SIZE(sfmmu_mmu_trap)
2037
2038	ENTRY_NP(sfmmu_suspend_tl)
2039	SET_GL_REG(1)
2040	USE_ALTERNATE_GLOBALS(%g5)
2041	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
2042	rdpr	%tt, %g6
2043	cmp	%g6, FAST_IMMU_MISS_TT
2044	be,a,pn	%icc, 1f
2045	  mov	T_INSTR_MMU_MISS, %g3
2046	mov	%g5, %g2
2047	cmp	%g6, FAST_DMMU_MISS_TT
2048	move	%icc, T_DATA_MMU_MISS, %g3
2049	movne	%icc, T_DATA_PROT, %g3
20501:
2051	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
2052	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
2053	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
2054	ba,pt	%xcc, sys_trap
2055	  mov	PIL_15, %g4
2056	/*NOTREACHED*/
2057	SET_SIZE(sfmmu_suspend_tl)
2058
2059	/*
2060	 * No %g registers in use at this point.
2061	 */
2062	ENTRY_NP(sfmmu_window_trap)
2063	rdpr	%tpc, %g1
2064#ifdef sun4v
2065#ifdef DEBUG
2066	/* We assume previous %gl was 1 */
2067	rdpr	%tstate, %g4
2068	srlx	%g4, TSTATE_GL_SHIFT, %g4
2069	and	%g4, TSTATE_GL_MASK, %g4
2070	cmp	%g4, 1
2071	bne,a,pn %icc, ptl1_panic
2072	  mov	PTL1_BAD_WTRAP, %g1
2073#endif /* DEBUG */
2074	/* user miss at tl>1. better be the window handler or user_rtt */
2075	/* in user_rtt? */
2076	set	rtt_fill_start, %g4
2077	cmp	%g1, %g4
2078	blu,pn %xcc, 6f
2079	 .empty
2080	set	rtt_fill_end, %g4
2081	cmp	%g1, %g4
2082	bgeu,pn %xcc, 6f
2083	 nop
2084	set	fault_rtt_fn1, %g1
2085	wrpr	%g0, %g1, %tnpc
2086	ba,a	7f
20876:
2088	! must save this trap level before descending trap stack
2089	! no need to save %tnpc, either overwritten or discarded
2090	! already got it: rdpr	%tpc, %g1
2091	rdpr	%tstate, %g6
2092	rdpr	%tt, %g7
2093	! trap level saved, go get underlying trap type
2094	rdpr	%tl, %g5
2095	sub	%g5, 1, %g3
2096	wrpr	%g3, %tl
2097	rdpr	%tt, %g2
2098	wrpr	%g5, %tl
2099	! restore saved trap level
2100	wrpr	%g1, %tpc
2101	wrpr	%g6, %tstate
2102	wrpr	%g7, %tt
2103#else /* sun4v */
2104	/* user miss at tl>1. better be the window handler */
2105	rdpr	%tl, %g5
2106	sub	%g5, 1, %g3
2107	wrpr	%g3, %tl
2108	rdpr	%tt, %g2
2109	wrpr	%g5, %tl
2110#endif /* sun4v */
2111	and	%g2, WTRAP_TTMASK, %g4
2112	cmp	%g4, WTRAP_TYPE
2113	bne,pn	%xcc, 1f
2114	 nop
2115	/* tpc should be in the trap table */
2116	set	trap_table, %g4
2117	cmp	%g1, %g4
2118	blt,pn %xcc, 1f
2119	 .empty
2120	set	etrap_table, %g4
2121	cmp	%g1, %g4
2122	bge,pn %xcc, 1f
2123	 .empty
2124	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
2125	add	%g1, WTRAP_FAULTOFF, %g1
2126	wrpr	%g0, %g1, %tnpc
21277:
2128	/*
2129	 * Some wbuf handlers will call systrap to resolve the fault.
2130	 * We pass the trap type so they can figure out the correct parameters.
2131	 * g5 = trap type, g6 = tag access reg
2132	 */
2133
2134	/*
2135	 * only use g5, g6, g7 registers after we have switched to alternate
2136	 * globals.
2137	 */
2138	SET_GL_REG(1)
2139	USE_ALTERNATE_GLOBALS(%g5)
2140	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
2141	rdpr	%tt, %g7
2142	cmp	%g7, FAST_IMMU_MISS_TT
2143	be,a,pn	%icc, ptl1_panic
2144	  mov	PTL1_BAD_WTRAP, %g1
2145	cmp	%g7, T_INSTR_MMU_MISS
2146	be,a,pn	%icc, ptl1_panic
2147	  mov	PTL1_BAD_WTRAP, %g1
2148	mov	T_DATA_PROT, %g5
2149	cmp	%g7, FAST_DMMU_MISS_TT
2150	move	%icc, T_DATA_MMU_MISS, %g5
2151	cmp	%g7, T_DATA_MMU_MISS
2152	move	%icc, T_DATA_MMU_MISS, %g5
2153	! XXXQ AGS re-check out this one
2154	done
21551:
2156	CPU_PADDR(%g1, %g4)
2157	add	%g1, CPU_TL1_HDLR, %g1
2158	lda	[%g1]ASI_MEM, %g4
2159	brnz,a,pt %g4, sfmmu_mmu_trap
2160	  sta	%g0, [%g1]ASI_MEM
2161	ba,pt	%icc, ptl1_panic
2162	  mov	PTL1_BAD_TRAP, %g1
2163	SET_SIZE(sfmmu_window_trap)
2164
2165	ENTRY_NP(sfmmu_kpm_exception)
2166	/*
2167	 * We have accessed an unmapped segkpm address or a legal segkpm
2168	 * address which is involved in VAC alias conflict prevention.
2169	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
2170	 * set. If it is, we will instead note that a fault has occurred
2171	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
2172	 * a "retry"). This will step over the faulting instruction.
2173	 * Note that this means that a legal segkpm address involved in
2174	 * VAC alias conflict prevention (a rare case to begin with)
2175	 * cannot be used in DTrace.
2176	 */
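	/*
	 * In approximate C, the check below is (a sketch; the cpuc_*
	 * names follow the CPUC_DTRACE_FLAGS/CPUC_DTRACE_ILLVAL offsets):
	 *
	 *	if (cpu_core[cpuid].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) {
	 *		cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *		cpuc_dtrace_illval = fault_addr;
	 *		done;			(skip the instruction)
	 *	}
	 */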
2177	CPU_INDEX(%g1, %g2)
2178	set	cpu_core, %g2
2179	sllx	%g1, CPU_CORE_SHIFT, %g1
2180	add	%g1, %g2, %g1
2181	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
2182	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
2183	bz	0f
2184	or	%g2, CPU_DTRACE_BADADDR, %g2
2185	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
2186	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
2187	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
2188	done
21890:
2190	TSTAT_CHECK_TL1(1f, %g1, %g2)
21911:
2192	SET_GL_REG(1)
2193	USE_ALTERNATE_GLOBALS(%g5)
2194	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
2195	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
2196	/*
2197	 * g2=tagacc g3.l=type g3.h=0
2198	 */
2199	sethi	%hi(trap), %g1
2200	or	%g1, %lo(trap), %g1
2201	ba,pt	%xcc, sys_trap
2202	mov	-1, %g4
2203	SET_SIZE(sfmmu_kpm_exception)
2204
2205#endif /* lint */
2206
2207#if defined (lint)
2208
2209void
2210sfmmu_tsb_miss(void)
2211{
2212}
2213
2214void
2215sfmmu_kpm_dtsb_miss(void)
2216{
2217}
2218
2219void
2220sfmmu_kpm_dtsb_miss_small(void)
2221{
2222}
2223
2224#else /* lint */
2225
2226#if (IMAP_SEG != 0)
2227#error - ism_map->ism_seg offset is not zero
2228#endif
2229
2230/*
2231 * Copies the ism mapping for this ctx into param "ismseg" if this is an
2232 * ISM tlb miss, and branches to label "ismhit".  If this is not an ISM
2233 * process or not an ISM tlb miss, it falls through.
2234 *
2235 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
2236 * this process.
2237 * If so, it will branch to label "ismhit".  If not, it will fall through.
2238 *
2239 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
2240 * so that any other threads of this process will not try and walk the ism
2241 * maps while they are being changed.
2242 *
2243 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
2244 *       will make sure of that. This means we can terminate our search on
2245 *       the first zero mapping we find.
2246 *
2247 * Parameters:
2248 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
2249 * tsbmiss	= address of tsb miss area (in)
2250 * ismseg	= contents of ism_seg for this ism map (out)
2251 * ismhat	= physical address of imap_ismhat for this ism map (out)
2252 * tmp1		= scratch reg (CLOBBERED)
2253 * tmp2		= scratch reg (CLOBBERED)
2254 * tmp3		= scratch reg (CLOBBERED)
2255 * label:    temporary labels
2256 * ismhit:   label to jump to on an ism dtlb miss
2257 * exitlabel: label to jump to if the hat is busy due to hat_unshare.
2258 */
2259#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
2260	label, ismhit)							\
2261	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
2262	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
2263	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
2264label/**/1:								;\
2265	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
2266	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
2267label/**/2:								;\
2268	brz,pt  ismseg, label/**/3		/* no mapping */	;\
2269	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
2270	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
2271	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
2272	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
2273	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
2274	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
2275	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
2276	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
2277	cmp	tmp2, tmp1		/* check va-vbase < size */	;\
2278	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
2279	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
2280									;\
2281	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
2282	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
2283	cmp	ismhat, tmp1						;\
2284	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
2285	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
2286									;\
2287	add	tmp3, IBLK_NEXTPA, tmp1					;\
2288	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
2289	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
2290	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
2291label/**/3:
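/*
 * A loose C rendering of ISM_CHECK (illustrative only; the ASI_MEM
 * loads above are physical-address loads, and seg/vbase/size are
 * decoded from each map's ism_seg word):
 *
 *	for (blk = tsbmiss->ismblkpa; blk != -1; blk = blk->nextpa) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			seg = blk->map[i].ism_seg;
 *			if (seg == 0)
 *				return;		(no holes, so no more maps)
 *			vbase = seg >> vb_shift;
 *			size = seg & sz_mask;
 *			if ((va >> vb_shift) - vbase < size)
 *				goto ismhit;	(ismhat = &map[i].ism_sfmmu)
 *		}
 *	}
 */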
2292
2293/*
2294 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
2295 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift)
2296 * Parameters:
2297 * tagacc = reg containing virtual address
2298 * hatid = reg containing sfmmu pointer
2299 * hmeshift = constant/register to shift vaddr to obtain vapg
2300 * hmebp = register where bucket pointer will be stored
2301 * vapg = register where virtual page will be stored
2302 * tmp1, tmp2 = tmp registers
2303 */
2304
2305
2306#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
2307	vapg, label, tmp1, tmp2)					\
2308	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
2309	brnz,a,pt tmp1, label/**/1					;\
2310	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
2311	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
2312	ba,pt	%xcc, label/**/2					;\
2313	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
2314label/**/1:								;\
2315	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
2316label/**/2:								;\
2317	srlx	tagacc, hmeshift, vapg					;\
2318	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
2319	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
2320	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
2321	add	hmebp, tmp1, hmebp
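/*
 * In short, the macro above computes (illustrative C; ctx == 0 selects
 * the kernel hash, and the *HASHSZ values act as the index mask):
 *
 *	vapg = tagacc >> hmeshift;
 *	hmebp = hash_base + ((vapg ^ (uintptr_t)hatid) & hash_mask)
 *	    * HMEBUCK_SIZE;
 */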
2322
2323/*
2324 * hashtag includes bspage + hashno (64 bits).
2325 */
2326
2327#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
2328	sllx	vapg, hmeshift, vapg					;\
2329	mov	hashno, hblktag						;\
2330	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
2331	or	vapg, hblktag, hblktag
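/*
 * i.e., roughly: hblktag = (vapg << hmeshift) | (hashno << HTAG_REHASH_SHIFT)
 */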
2332
2333/*
2334 * Function to traverse the hmeblk hash link list and find the matching
2335 * hmeblk.  The search is done using physical pointers.  It returns the
2336 * physical and virtual address pointers to the hmeblk that matches the
2337 * tag provided.
2338 * Parameters:
2339 * hmebp	= register that points to hme hash bucket, also used as
2340 *		  tmp reg (clobbered)
2341 * hmeblktag	= register with hmeblk tag match
2342 * hatid	= register with hatid
2343 * hmeblkpa	= register where physical ptr will be stored
2344 * hmeblkva	= register where virtual ptr will be stored
2345 * tmp1		= tmp reg
2346 * label: temporary label
2347 */
2348
2349#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva,	\
2350	tsbarea, tmp1, label)					 	\
2351	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
2352	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2353	add     hmebp, HMEBUCK_HBLK, hmeblkva				;\
2354	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
2355	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2356label/**/1:								;\
2357	brz,pn	hmeblkva, label/**/2					;\
2358	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2359	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
2360	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2361	add	hmebp, CLONGSIZE, hmebp					;\
2362	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
2363	xor	tmp1, hmeblktag, tmp1					;\
2364	xor	hmebp, hatid, hmebp					;\
2365	or	hmebp, tmp1, hmebp					;\
2366	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
2367	  add	hmeblkpa, HMEBLK_NEXT, hmebp				;\
2368	ldna	[hmebp]ASI_MEM, hmeblkva	/* hmeblk ptr va */	;\
2369	add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
2370	ba,pt	%xcc, label/**/1					;\
2371	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
2372label/**/2:
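/*
 * A rough C sketch of the walk above (the xor/or sequence is a
 * branchless compare of the two-doubleword tag against
 * <hmeblktag, hatid>):
 *
 *	for (pa = bucket->nextpa, va = bucket->hblk; va != NULL;
 *	    va = next_va(pa), pa = next_pa(pa)) {
 *		if (tag_word0(pa) == hmeblktag && tag_word1(pa) == hatid)
 *			break;		(hit: pa/va name the hmeblk)
 *	}
 */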
2373
2374/*
2375 * Function to traverse the hmeblk hash link list and find the matching
2376 * hmeblk.  The search is done using physical pointers.  It returns the
2377 * physical and virtual address pointers to the hmeblk that matches the
2378 * tag provided.
2379 * Parameters:
2380 * hmeblktag	= register with hmeblk tag match (rid field is 0)
2381 * hatid	= register with hatid (pointer to SRD)
2382 * hmeblkpa	= register where physical ptr will be stored
2383 * hmeblkva	= register where virtual ptr will be stored
2384 * tmp1		= tmp reg
2385 * tmp2		= tmp reg
2386 * label: temporary label
2387 */
2388
2389#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, hmeblkva,	\
2390	tsbarea, tmp1, tmp2, label)			 		\
2391label/**/1:								;\
2392	brz,pn	hmeblkva, label/**/4					;\
2393	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
2394	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
2395	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2396	add	tmp2, CLONGSIZE, tmp2					;\
2397	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
2398	xor	tmp1, hmeblktag, tmp1					;\
2399	xor	tmp2, hatid, tmp2					;\
2400	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
2401	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2402label/**/2:								;\
2403	ldna	[tmp2]ASI_MEM, hmeblkva	/* hmeblk ptr va */		;\
2404	add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2405	ba,pt	%xcc, label/**/1					;\
2406	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
2407label/**/3:								;\
2408	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
2409	bgeu,pt	%xcc, label/**/2					;\
2410	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2411	and	tmp1, BT_ULMASK, tmp2					;\
2412	srlx	tmp1, BT_ULSHIFT, tmp1					;\
2413	sllx	tmp1, CLONGSHIFT, tmp1					;\
2414	add	tsbarea, tmp1, tmp1					;\
2415	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
2416	srlx	tmp1, tmp2, tmp1					;\
2417	btst	0x1, tmp1						;\
2418	bz,pn	%xcc, label/**/2					;\
2419	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2420label/**/4:
2421
2422#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2423#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2424#endif
2425
2426/*
2427 * HMEBLK_TO_HMENT is a macro that, given an hmeblk and a vaddr, returns
2428 * the offset of the corresponding hment.
2429 * Parameters:
2430 * In:
2431 *	vaddr = register with virtual address
2432 *	hmeblkpa = physical pointer to hme_blk
2433 * Out:
2434 *	hmentoff = register where hment offset will be stored
2435 *	hmemisc = hblk_misc
2436 * Scratch:
2437 *	tmp1
2438 */
2439#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2440	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2441	lda	[hmentoff]ASI_MEM, hmemisc 				;\
2442	andcc	hmemisc, HBLK_SZMASK, %g0				;\
2443	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2444	  or	%g0, HMEBLK_HME1, hmentoff				;\
2445	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2446	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2447	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2448	add	tmp1, HMEBLK_HME1, hmentoff				;\
2449label1:
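/*
 * Equivalently, in illustrative C:
 *
 *	if (hblk_misc & HBLK_SZMASK)		(large page: one hment)
 *		hmentoff = HMEBLK_HME1;
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) << SFHME_SHIFT);
 */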
2450
2451/*
2452 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2453 *
2454 * tagacc	= (pseudo-)tag access register (in)
2455 * hatid	= sfmmu pointer for TSB miss (in)
2456 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2457 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2458 * hmeblkva	= VA of hment if found, otherwise clobbered (out)
2459 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2460 * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
2461 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2462 *		  for this page size.
2463 * hashno	= constant/register hash number
2464 * label	= temporary label for branching within macro.
2465 * foundlabel	= label to jump to when tte is found.
2466 * suspendlabel= label to jump to when tte is suspended.
2467 * exitlabel	= label to jump to when tte is not found.
2468 *
2469 */
2470#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmemisc, \
2471		hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \
2472									;\
2473	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2474	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2475	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2476		hmeblkpa, label/**/5, hmemisc, hmeblkva)		;\
2477									;\
2478	/*								;\
2479	 * tagacc = tagacc						;\
2480	 * hatid = hatid						;\
2481	 * tsbarea = tsbarea						;\
2482	 * tte   = hmebp (hme bucket pointer)				;\
2483	 * hmeblkpa  = vapg  (virtual page)				;\
2484	 * hmemisc, hmeblkva = scratch					;\
2485	 */								;\
2486	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2487	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
2488									;\
2489	/*								;\
2490	 * tagacc = tagacc						;\
2491	 * hatid = hatid						;\
2492	 * tte   = hmebp						;\
2493	 * hmeblkpa  = CLOBBERED					;\
2494	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
2495	 * hmeblkva  = scratch						;\
2496	 */								;\
2497	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2498	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2499	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, hmeblkva, 	\
2500		tsbarea, tagacc, label/**/1)				;\
2501	/*								;\
2502	 * tagacc = CLOBBERED						;\
2503	 * tte = CLOBBERED						;\
2504	 * hmeblkpa = hmeblkpa						;\
2505	 * hmeblkva = hmeblkva						;\
2506	 */								;\
2507	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2508	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2509	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2510	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2511	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2512	  nop								;\
2513label/**/4:								;\
2514	/*								;\
2515	 * We have found the hmeblk containing the hment.		;\
2516	 * Now we calculate the corresponding tte.			;\
2517	 *								;\
2518	 * tagacc = tagacc						;\
2519	 * hatid = hatid						;\
2520	 * tte   = clobbered						;\
2521	 * hmeblkpa  = hmeblkpa						;\
2522	 * hmemisc  = hblktag						;\
2523	 * hmeblkva  = hmeblkva 					;\
2524	 */								;\
2525	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2526		label/**/2)						;\
2527									;\
2528	/*								;\
2529	 * tagacc = tagacc						;\
2530	 * hatid = hmentoff						;\
2531	 * tte   = clobbered						;\
2532	 * hmeblkpa  = hmeblkpa						;\
2533	 * hmemisc  = hblk_misc						;\
2534	 * hmeblkva  = hmeblkva 					;\
2535	 */								;\
2536									;\
2537	add	hatid, SFHME_TTE, hatid					;\
2538	add	hmeblkpa, hatid, hmeblkpa				;\
2539	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2540	add	hmeblkva, hatid, hmeblkva				;\
2541	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2542	HMELOCK_EXIT(hatid, hatid, ASI_MEM)	/* drop lock */		;\
2543	set	TTE_SUSPEND, hatid					;\
2544	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2545	btst	tte, hatid						;\
2546	bz,pt	%xcc, foundlabel					;\
2547	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2548									;\
2549	/*								;\
2550	 * Mapping is suspended, so goto suspend label.			;\
2551	 */								;\
2552	ba,pt	%xcc, suspendlabel					;\
2553	  nop
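/*
 * The overall flow of GET_TTE, in outline:
 *
 *	1. hash <tagacc, hatid, hashno> to a bucket and lock it;
 *	2. walk the chain with physical pointers (HMEHASH_SEARCH);
 *	3. on a miss, drop the lock and branch to exitlabel;
 *	4. on a hit, locate the hment within the hmeblk, read the tte
 *	   through its physical address, drop the lock, and branch to
 *	   foundlabel, or to suspendlabel if TTE_SUSPEND is set.
 */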
2554
2555/*
2556 * GET_SHME_TTE is similar to GET_TTE() except it searches
2557 * shared hmeblks via the HMEHASH_SEARCH_SHME() macro.
2558 * If a valid tte is found, hmemisc holds the shctx flag, which is
2559 * either 0 (shme not part of an scd) or 1 (part of an scd).
2560 */
2561#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea,	\
2562		hmemisc, hmeshift, hashno, label, foundlabel,		\
2563		suspendlabel, exitlabel)				\
2564									;\
2565	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2566	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2567	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2568		hmeblkpa, label/**/5, hmemisc, hmeblkva)		;\
2569									;\
2570	/*								;\
2571	 * tagacc = tagacc						;\
2572	 * hatid = hatid						;\
2573	 * tsbarea = tsbarea						;\
2574	 * tte   = hmebp (hme bucket pointer)				;\
2575	 * hmeblkpa  = vapg  (virtual page)				;\
2576	 * hmemisc, hmeblkva = scratch					;\
2577	 */								;\
2578	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2579									;\
2580	/*								;\
2581	 * tagacc = tagacc						;\
2582	 * hatid = hatid						;\
2583	 * tsbarea = tsbarea						;\
2584	 * tte   = hmebp						;\
2585	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
2586	 * hmeblkpa  = CLOBBERED					;\
2587	 * hmeblkva  = scratch						;\
2588	 */								;\
2589	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2590	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2591									;\
2592	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
2593	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2594	add     tte, HMEBUCK_HBLK, hmeblkva				;\
2595	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
2596	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
2597									;\
2598label/**/8:								;\
2599	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa, hmeblkva, 	\
2600		tsbarea, tagacc, tte, label/**/1)			;\
2601	/*								;\
2602	 * tagacc = CLOBBERED						;\
2603	 * tte = CLOBBERED						;\
2604	 * hmeblkpa = hmeblkpa						;\
2605	 * hmeblkva = hmeblkva						;\
2606	 */								;\
2607	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2608	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2609	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2610	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2611	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2612	  nop								;\
2613label/**/4:								;\
2614	/*								;\
2615	 * We have found the hmeblk containing the hment.		;\
2616	 * Now we calculate the corresponding tte.			;\
2617	 *								;\
2618	 * tagacc = tagacc						;\
2619	 * hatid = hatid						;\
2620	 * tte   = clobbered						;\
2621	 * hmeblkpa  = hmeblkpa						;\
2622	 * hmemisc  = hblktag						;\
2623	 * hmeblkva  = hmeblkva 					;\
2624	 * tsbarea = tsbmiss area					;\
2625	 */								;\
2626	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2627		label/**/2)						;\
2628									;\
2629	/*								;\
2630	 * tagacc = tagacc						;\
2631	 * hatid = hmentoff						;\
2632	 * tte = clobbered						;\
2633	 * hmeblkpa  = hmeblkpa						;\
2634	 * hmemisc  = hblk_misc						;\
2635	 * hmeblkva  = hmeblkva						;\
2636	 * tsbarea = tsbmiss area					;\
2637	 */								;\
2638									;\
2639	add	hatid, SFHME_TTE, hatid					;\
2640	add	hmeblkpa, hatid, hmeblkpa				;\
2641	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2642	brlz,pt tte, label/**/6						;\
2643	  add	hmeblkva, hatid, hmeblkva				;\
2644	btst	HBLK_SZMASK, hmemisc					;\
2645	bnz,a,pt %icc, label/**/7					;\
2646	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2647									;\
2648	/*								;\
2649	 * We found an invalid 8K tte in the shme.			;\
2650	 * It may not belong to this shme's region since		;\
2651	 * region size/alignment granularity is 8K but different	;\
2652	 * regions don't share hmeblks.  Continue the search.		;\
2653	 */								;\
2654	sub	hmeblkpa, hatid, hmeblkpa				;\
2655	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2656	srlx	tagacc, hmeshift, tte					;\
2657	add	hmeblkpa, HMEBLK_NEXT, hmeblkva				;\
2658	ldxa	[hmeblkva]ASI_MEM, hmeblkva				;\
2659	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
2660	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
2661	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
2662	ba,a,pt	%xcc, label/**/8					;\
2663label/**/6:								;\
2664	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
2665	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2666label/**/7:								;\
2667	HMELOCK_EXIT(hatid, hatid, ASI_MEM)	/* drop lock */		;\
2668	set	TTE_SUSPEND, hatid					;\
2669	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2670	btst	tte, hatid						;\
2671	bz,pt	%xcc, foundlabel					;\
2672	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2673									;\
2674	/*								;\
2675	 * Mapping is suspended, so goto suspend label.			;\
2676	 */								;\
2677	ba,pt	%xcc, suspendlabel					;\
2678	  nop
2679
2680	/*
2681	 * KERNEL PROTECTION HANDLER
2682	 *
2683	 * g1 = tsb8k pointer register (clobbered)
2684	 * g2 = tag access register (ro)
2685	 * g3 - g7 = scratch registers
2686	 *
2687	 * Note: This function is patched at runtime for performance reasons.
2688	 * 	 Any changes here require corresponding changes to sfmmu_patch_ktsb.
2689	 */
2690	ENTRY_NP(sfmmu_kprot_trap)
2691	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2692sfmmu_kprot_patch_ktsb_base:
2693	RUNTIME_PATCH_SETX(%g1, %g6)
2694	/* %g1 = contents of ktsb_base or ktsb_pbase */
2695sfmmu_kprot_patch_ktsb_szcode:
2696	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2697
2698	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2699	! %g1 = First TSB entry pointer, as TSB miss handler expects
2700
2701	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2702sfmmu_kprot_patch_ktsb4m_base:
2703	RUNTIME_PATCH_SETX(%g3, %g6)
2704	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2705sfmmu_kprot_patch_ktsb4m_szcode:
2706	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2707
2708	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2709	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2710
2711        CPU_TSBMISS_AREA(%g6, %g7)
2712        HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2713	ba,pt	%xcc, sfmmu_tsb_miss_tt
2714	  nop
2715
2716	/*
2717	 * USER PROTECTION HANDLER
2718	 *
2719	 * g1 = tsb8k pointer register (ro)
2720	 * g2 = tag access register (ro)
2721	 * g3 = faulting context (clobbered, currently not used)
2722	 * g4 - g7 = scratch registers
2723	 */
2724	ALTENTRY(sfmmu_uprot_trap)
2725#ifdef sun4v
2726	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2727	/* %g1 = first TSB entry ptr now, %g2 preserved */
2728
2729	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2730	brlz,pt %g3, 9f				/* check for 2nd TSB */
2731	  nop
2732
2733	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2734	/* %g3 = second TSB entry ptr now, %g2 preserved */
2735
2736#else /* sun4v */
2737#ifdef UTSB_PHYS
2738	/* g1 = first TSB entry ptr */
2739	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2740	brlz,pt %g3, 9f			/* check for 2nd TSB */
2741	  nop
2742
2743	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2744	/* %g3 = second TSB entry ptr now, %g2 preserved */
2745#else /* UTSB_PHYS */
2746	brgez,pt %g1, 9f		/* check for 2nd TSB */
2747	  mov	-1, %g3			/* set second tsbe ptr to -1 */
2748
2749	mov	%g2, %g7
2750	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2751	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2752	mov	%g1, %g7
2753	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2754#endif /* UTSB_PHYS */
2755#endif /* sun4v */
27569:
2757	CPU_TSBMISS_AREA(%g6, %g7)
2758	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2759	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2760	  nop
2761
2762	/*
2763	 * Kernel 8K page iTLB miss.  We also get here if we took a
2764	 * fast instruction access mmu miss trap while running in
2765	 * invalid context.
2766	 *
2767	 * %g1 = 8K TSB pointer register (not used, clobbered)
2768	 * %g2 = tag access register (used)
2769	 * %g3 = faulting context id (used)
2770	 * %g7 = TSB tag to match (used)
2771	 */
2772	.align	64
2773	ALTENTRY(sfmmu_kitlb_miss)
2774	brnz,pn %g3, tsb_tl0_noctxt
2775	  nop
2776
2777	/* kernel miss */
2778	/* get kernel tsb pointer */
2779	/* we patch the next set of instructions at run time */
2780	/* NOTE: any changes here require corresponding sfmmu_patch_ktsb changes */
2781iktsbbase:
2782	RUNTIME_PATCH_SETX(%g4, %g5)
2783	/* %g4 = contents of ktsb_base or ktsb_pbase */
2784
2785iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2786	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2787	or	%g4, %g1, %g1			! form tsb ptr
2788	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2789	cmp	%g4, %g7
2790	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
2791	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
2792
2793	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2794	bz,pn	%icc, exec_fault
2795	  nop
2796	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2797	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2798	retry
2799
2800iktsb4mbase:
2801        RUNTIME_PATCH_SETX(%g4, %g6)
2802        /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
2803iktsb4m:
2804	sllx    %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2805        srlx    %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2806	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
2807	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2808	cmp	%g4, %g7
2809	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2810	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2811	bz,pn	%icc, exec_fault
2812	  nop
2813	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2814	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2815	retry
2816
2817	/*
2818	 * Kernel dTLB miss.  We also get here if we took a fast data
2819	 * access mmu miss trap while running in invalid context.
2820	 *
2821	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2822	 *	We select the TSB miss handler to branch to depending on
2823	 *	the virtual address of the access.  In the future it may
2824	 *	be desirable to separate kpm TTEs into their own TSB,
2825	 *	in which case all that needs to be done is to set
2826	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2827	 *	early in the miss if we detect a kpm VA to a new handler.
2828	 *
2829	 * %g1 = 8K TSB pointer register (not used, clobbered)
2830	 * %g2 = tag access register (used)
2831	 * %g3 = faulting context id (used)
2832	 */
2833	.align	64
2834	ALTENTRY(sfmmu_kdtlb_miss)
2835	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2836	  nop
2837
2838	/* Gather some stats for kpm misses in the TLB. */
2839	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2840	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2841
2842	/*
2843	 * Get first TSB offset and look for 8K/64K/512K mapping
2844	 * using the 8K virtual page as the index.
2845	 *
2846	 * We patch the next set of instructions at run time;
2847	 * any changes here require sfmmu_patch_ktsb changes too.
2848	 */
2849dktsbbase:
2850	RUNTIME_PATCH_SETX(%g7, %g6)
2851	/* %g7 = contents of ktsb_base or ktsb_pbase */
2852
2853dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2854	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2855
2856	/*
2857	 * At this point %g1 is our index into the TSB.
2858	 * We just masked off enough bits of the VA depending
2859	 * on our TSB size code.
2860	 */
2861	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2862	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2863	cmp	%g6, %g4			! compare tag
2864	bne,pn	%xcc, dktsb4m_kpmcheck_small
2865	  add	%g7, %g1, %g1			/* form tsb ptr */
2866	TT_TRACE(trace_tsbhit)
2867	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2868	/* trapstat expects tte in %g5 */
2869	retry
2870
2871	/*
2872	 * If kpm is using large pages, the following instruction needs
2873	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2874	 * so that we will probe the 4M TSB regardless of the VA.  In
2875	 * the case kpm is using small pages, we know no large kernel
2876	 * mappings are located above 0x80000000.00000000 so we skip the
2877	 * probe as an optimization.
2878	 */
2879dktsb4m_kpmcheck_small:
2880	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2881	  /* delay slot safe, below */
2882
2883	/*
2884	 * Get second TSB offset and look for 4M mapping
2885	 * using 4M virtual page as the TSB index.
2886	 *
2887	 * Here:
2888	 * %g1 = 8K TSB pointer.  Don't squash it.
2889	 * %g2 = tag access register (we still need it)
2890	 */
2891	srlx	%g2, MMU_PAGESHIFT4M, %g3
2892
2893	/*
2894	 * We patch the next set of instructions at run time;
2895	 * any changes here require sfmmu_patch_ktsb changes too.
2896	 */
2897dktsb4mbase:
2898	RUNTIME_PATCH_SETX(%g7, %g6)
2899	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2900dktsb4m:
2901	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2902	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2903
2904	/*
2905	 * At this point %g3 is our index into the TSB.
2906	 * We just masked off enough bits of the VA depending
2907	 * on our TSB size code.
2908	 */
2909	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2910	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2911	cmp	%g6, %g4			! compare tag
2912
2913dktsb4m_tsbmiss:
2914	bne,pn	%xcc, dktsb4m_kpmcheck
2915	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2916	TT_TRACE(trace_tsbhit)
2917	/* we don't check TTE size here since we assume 4M TSB is separate */
2918	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2919	/* trapstat expects tte in %g5 */
2920	retry
2921
2922	/*
2923	 * So, we failed to find a valid TTE to match the faulting
2924	 * address in either TSB.  There are a few cases that could land
2925	 * us here:
2926	 *
2927	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2928	 *    to sfmmu_tsb_miss_tt to handle the miss.
2929	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2930	 *    4M TSB.  Let segkpm handle it.
2931	 *
2932	 * Note that we shouldn't land here in the case of a kpm VA when
2933	 * kpm_smallpages is active -- we handled that case earlier at
2934	 * dktsb4m_kpmcheck_small.
2935	 *
2936	 * At this point:
2937	 *  g1 = 8K-indexed primary TSB pointer
2938	 *  g2 = tag access register
2939	 *  g3 = 4M-indexed secondary TSB pointer
2940	 */
2941dktsb4m_kpmcheck:
2942	cmp	%g2, %g0
2943	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2944	  nop
2945	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2946	  nop
2947
2948#ifdef sun4v
2949	/*
2950	 * User instruction miss w/ single TSB.
2951	 * The first probe covers 8K, 64K, and 512K page sizes,
2952	 * because 64K and 512K mappings are replicated off 8K
2953	 * pointer.
2954	 *
2955	 * g1 = tsb8k pointer register
2956	 * g2 = tag access register
2957	 * g3 - g6 = scratch registers
2958	 * g7 = TSB tag to match
2959	 */
2960	.align	64
2961	ALTENTRY(sfmmu_uitlb_fastpath)
2962
2963	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2964	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2965	ba,pn	%xcc, sfmmu_tsb_miss_tt
2966	  mov	-1, %g3
2967
2968	/*
2969	 * User data miss w/ single TSB.
2970	 * The first probe covers 8K, 64K, and 512K page sizes,
2971	 * because 64K and 512K mappings are replicated off 8K
2972	 * pointer.
2973	 *
2974	 * g1 = tsb8k pointer register
2975	 * g2 = tag access register
2976	 * g3 - g6 = scratch registers
2977	 * g7 = TSB tag to match
2978	 */
2979	.align 64
2980	ALTENTRY(sfmmu_udtlb_fastpath)
2981
2982	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2983	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2984	ba,pn	%xcc, sfmmu_tsb_miss_tt
2985	  mov	-1, %g3
2986
2987	/*
2988	 * User instruction miss w/ multiple TSBs (sun4v).
2989	 * The first probe covers 8K, 64K, and 512K page sizes,
2990	 * because 64K and 512K mappings are replicated off 8K
2991	 * pointer.  Second probe covers 4M page size only.
2992	 *
2993	 * Just like sfmmu_udtlb_slowpath, except:
2994	 *   o Uses ASI_ITLB_IN
2995	 *   o checks for execute permission
2996	 *   o No ISM prediction.
2997	 *
2998	 * g1 = tsb8k pointer register
2999	 * g2 = tag access register
3000	 * g3 - g6 = scratch registers
3001	 * g7 = TSB tag to match
3002	 */
3003	.align	64
3004	ALTENTRY(sfmmu_uitlb_slowpath)
3005
3006	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
3007	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
3008	/* g4 - g5 = clobbered here */
3009
3010	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3011	/* g1 = first TSB pointer, g3 = second TSB pointer */
3012	srlx	%g2, TAG_VALO_SHIFT, %g7
3013	PROBE_2ND_ITSB(%g3, %g7)
3014	/* NOT REACHED */
3015
3016#else /* sun4v */
3017
3018	/*
3019	 * User instruction miss w/ multiple TSBs (sun4u).
3020	 * The first probe covers 8K, 64K, and 512K page sizes,
3021	 * because 64K and 512K mappings are replicated off 8K
3022	 * pointer.  The probe of the 1st TSB has already been done prior
3023	 * to entry into this routine.  For the UTSB_PHYS case we probe up
3024	 * to 3 other valid TSBs in the following order:
3025	 * 1) shared TSB for 4M-256M pages
3026	 * 2) private TSB for 4M-256M pages
3027	 * 3) shared TSB for 8K-512K pages
3028	 *
3029	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
3030	 * 4M-256M pages.
3031	 *
3032	 * Just like sfmmu_udtlb_slowpath, except:
3033	 *   o Uses ASI_ITLB_IN
3034	 *   o checks for execute permission
3035	 *   o No ISM prediction.
3036	 *
3037	 * g1 = tsb8k pointer register
3038	 * g2 = tag access register
3039	 * g4 - g6 = scratch registers
3040	 * g7 = TSB tag to match
3041	 */
3042	.align	64
3043	ALTENTRY(sfmmu_uitlb_slowpath)
3044
3045#ifdef UTSB_PHYS
3046
3047       GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
3048        brlz,pt %g6, 1f
3049          nop
3050        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
3051        PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
30521:
3053        GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
3054        brlz,pt %g3, 2f
3055          nop
3056        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3057        PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
30582:
3059        GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
3060        brlz,pt %g6, sfmmu_tsb_miss_tt
3061          nop
3062        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
3063        PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
3064        ba,pn   %xcc, sfmmu_tsb_miss_tt
3065          nop
3066
3067#else /* UTSB_PHYS */
3068	mov	%g1, %g3	/* save tsb8k reg in %g3 */
3069	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
3070	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
3071	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
3072	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
3073	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
3074       /* g1 = first TSB pointer, g3 = second TSB pointer */
3075        srlx    %g2, TAG_VALO_SHIFT, %g7
3076        PROBE_2ND_ITSB(%g3, %g7, isynth)
3077	ba,pn	%xcc, sfmmu_tsb_miss_tt
3078	  nop
3079
3080#endif /* UTSB_PHYS */
3081#endif /* sun4v */
3082
3083#if defined(sun4u) && defined(UTSB_PHYS)
3084
3085        /*
3086	 * We come here for the ism predict DTLB_MISS case or if the
3087	 * probe in the first TSB failed.
3088         */
3089
3090        .align 64
3091        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
3092
3093	/*
3094         * g1 = tsb8k pointer register
3095         * g2 = tag access register
3096         * g4 - %g6 = scratch registers
3097         * g7 = TSB tag to match
3098	 */
3099
3100	/*
3101	 * ISM non-predict probe order
3102         * probe 1ST_TSB (8K index)
3103         * probe 2ND_TSB (4M index)
3104         * probe 4TH_TSB (4M index)
3105         * probe 3RD_TSB (8K index)
3106	 *
3107	 * We already probed first TSB in DTLB_MISS handler.
3108	 */
3109
3110        /*
3111         * Private 2ND TSB 4M-256M pages
3112         */
3113	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
3114	brlz,pt %g3, 1f
3115	  nop
3116        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3117        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3118
3119	/*
3120	 * Shared Context 4TH TSB 4M-256M pages
3121	 */
31221:
3123	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
3124	brlz,pt %g6, 2f
3125	  nop
3126        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
3127        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
3128
3129        /*
3130         * Shared Context 3RD TSB 8K-512K pages
3131         */
31322:
3133	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
3134	brlz,pt %g6, sfmmu_tsb_miss_tt
3135	  nop
3136        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
3137        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
3138	ba,pn	%xcc, sfmmu_tsb_miss_tt
3139	  nop
3140
3141	.align 64
3142        ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
3143
3144	/*
3145         * g1 = tsb8k pointer register
3146         * g2 = tag access register
3147         * g4 - g6 = scratch registers
3148         * g7 = TSB tag to match
3149	 */
3150
3151	/*
3152	 * ISM predict probe order
3153	 * probe 4TH_TSB (4M index)
3154	 * probe 2ND_TSB (4M index)
3155	 * probe 1ST_TSB (8K index)
3156	 * probe 3RD_TSB (8K index)
3157	 */
3158	/*
3159	 * Shared Context 4TH TSB 4M-256M pages
3160	 */
3161	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
3162	brlz,pt %g6, 4f
3163	  nop
3164        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
3165        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
3166
3167        /*
3168         * Private 2ND TSB 4M-256M pages
3169         */
31704:
3171	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
3172	brlz,pt %g3, 5f
3173	  nop
3174        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3175        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
3176
31775:
3178        PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
3179
3180        /*
3181         * Shared Context 3RD TSB 8K-512K pages
3182         */
3183	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
3184	brlz,pt %g6, 6f
3185	  nop
3186        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
3187        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
31886:
3189	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
3190	  nop
3191
3192#else /* sun4u && UTSB_PHYS */
3193
3194       .align 64
3195        ALTENTRY(sfmmu_udtlb_slowpath)
3196
3197	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
3198	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
3199	  mov	%g1, %g3
3200
3201udtlb_miss_probefirst:
3202	/*
3203	 * g1 = 8K TSB pointer register
3204	 * g2 = tag access register
3205	 * g3 = (potentially) second TSB entry ptr
3206	 * g6 = ism pred.
3207	 * g7 = vpg_4m
3208	 */
3209#ifdef sun4v
3210	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
3211	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
3212
3213	/*
3214	 * Here:
3215	 *   g1 = first TSB pointer
3216	 *   g2 = tag access reg
3217	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
3218	 */
3219	brgz,pn	%g6, sfmmu_tsb_miss_tt
3220	  nop
3221#else /* sun4v */
3222	mov	%g1, %g4
3223	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
3224	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
3225
3226	/*
3227	 * Here:
3228	 *   g1 = first TSB pointer
3229	 *   g2 = tag access reg
3230	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
3231	 */
3232	brgz,pn	%g6, sfmmu_tsb_miss_tt
3233	  nop
3234	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
3235	/* fall through in 8K->4M probe order */
3236#endif /* sun4v */
3237
3238udtlb_miss_probesecond:
3239	/*
3240	 * Look in the second TSB for the TTE
3241	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
3242	 * g2 = tag access reg
3243	 * g3 = 8K TSB pointer register
3244	 * g6 = ism pred.
3245	 * g7 = vpg_4m
3246	 */
3247#ifdef sun4v
3248	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
3249	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3250	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
3251#else /* sun4v */
3252	mov	%g3, %g7
3253	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
3254	/* %g2 clobbered, %g3 =second tsbe ptr */
3255	mov	MMU_TAG_ACCESS, %g2
3256	ldxa	[%g2]ASI_DMMU, %g2
3257#endif /* sun4v */
3258
3259	srlx	%g2, TAG_VALO_SHIFT, %g7
3260	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3261	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
3262	brgz,pn	%g6, udtlb_miss_probefirst
3263	  nop
3264
3265	/* fall through to sfmmu_tsb_miss_tt */
3266#endif /* sun4u && UTSB_PHYS */
3267
3268
3269	ALTENTRY(sfmmu_tsb_miss_tt)
3270	TT_TRACE(trace_tsbmiss)
3271	/*
3272	 * We get here if there is a TSB miss OR a write protect trap.
3273	 *
3274	 * g1 = First TSB entry pointer
3275	 * g2 = tag access register
3276	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
3277	 * g4 - g7 = scratch registers
3278	 */
3279
3280	ALTENTRY(sfmmu_tsb_miss)
3281
3282	/*
3283	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3284	 * point to trapstat's TSB miss return code (note that trapstat
3285	 * itself will patch the correct offset to add).
3286	 */
3287	rdpr	%tl, %g7
3288	cmp	%g7, 1
3289	ble,pt	%xcc, 0f
3290	  sethi	%hi(KERNELBASE), %g6
3291	rdpr	%tpc, %g7
3292	or	%g6, %lo(KERNELBASE), %g6
3293	cmp	%g7, %g6
3294	bgeu,pt	%xcc, 0f
3295	/* delay slot safe */
3296
3297	ALTENTRY(tsbmiss_trapstat_patch_point)
3298	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3299	wrpr	%g7, %tpc
3300	add	%g7, 4, %g7
3301	wrpr	%g7, %tnpc
33020:
3303	CPU_TSBMISS_AREA(%g6, %g7)
3304	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
3305	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
3306
3307	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
3308	brz,a,pn %g3, 1f			/* skip ahead if kernel */
3309	  ldn	[%g6 + TSBMISS_KHATID], %g7
3310	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
3311	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
3312
3313	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
3314
3315	cmp	%g3, INVALID_CONTEXT
3316	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
3317	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
3318
3319#if defined(sun4v) || defined(UTSB_PHYS)
3320        ldub    [%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
3321        andn    %g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
3322        stub    %g7, [%g6 + TSBMISS_URTTEFLAGS]
3323#endif /* sun4v || UTSB_PHYS */
3324
3325	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
3326	/*
3327	 * The miss wasn't in an ISM segment.
3328	 *
3329	 * %g1, %g3, %g4, %g5, %g7 all clobbered
3330	 * %g2 = (pseudo) tag access
3331	 */
3332
3333	ba,pt	%icc, 2f
3334	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
3335
33361:
3337	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
3338	/*
3339	 * 8K and 64K hash.
3340	 */
33412:
3342
3343	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3344		MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte,
3345		sfmmu_suspend_tl, tsb_512K)
3346	/* NOT REACHED */
3347
3348tsb_512K:
3349	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3350	brz,pn	%g5, 3f
3351	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3352	and	%g4, HAT_512K_FLAG, %g5
3353
3354	/*
3355	 * Note that there is a small window here where we may have
3356	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
3357	 * flag yet, so we will skip searching the 512k hash list.
3358	 * In this case we will end up in pagefault which will find
3359	 * the mapping and return.  So, in this instance we will end up
3360	 * spending a bit more time resolving this TSB miss, but it can
3361	 * only happen once per process and even then, the chances of that
3362	 * are very small, so it's not worth the extra overhead it would
3363	 * take to close this window.
3364	 */
3365	brz,pn	%g5, tsb_4M
3366	  nop
33673:
3368	/*
3369	 * 512K hash
3370	 */
3371
3372	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3373		MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte,
3374		sfmmu_suspend_tl, tsb_4M)
3375	/* NOT REACHED */
3376
3377tsb_4M:
3378	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3379	brz,pn	%g5, 4f
3380	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3381	and	%g4, HAT_4M_FLAG, %g5
3382	brz,pn	%g5, tsb_32M
3383	  nop
33844:
3385	/*
3386	 * 4M hash
3387	 */
3388
3389	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3390		MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte,
3391		sfmmu_suspend_tl, tsb_32M)
3392	/* NOT REACHED */
3393
3394tsb_32M:
3395	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3396#ifdef	sun4v
3397        brz,pn	%g5, 6f
3398#else
3399	brz,pn  %g5, tsb_pagefault
3400#endif
3401	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3402	and	%g4, HAT_32M_FLAG, %g5
3403	brz,pn	%g5, tsb_256M
3404	  nop
34055:
3406	/*
3407	 * 32M hash
3408	 */
3409
3410	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3411		MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte,
3412		sfmmu_suspend_tl, tsb_256M)
3413	/* NOT REACHED */
3414
3415#if defined(sun4u) && !defined(UTSB_PHYS)
3416#define tsb_shme        tsb_pagefault
3417#endif
3418tsb_256M:
3419	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3420	and	%g4, HAT_256M_FLAG, %g5
3421	brz,pn	%g5, tsb_shme
3422	  nop
34236:
3424	/*
3425	 * 256M hash
3426	 */
3427
3428	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3429	    MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte,
3430	    sfmmu_suspend_tl, tsb_shme)
3431	/* NOT REACHED */
3432
3433tsb_checktte:
3434	/*
3435	 * g1 = hblk_misc
3436	 * g2 = tagacc
3437	 * g3 = tte
3438	 * g4 = tte pa
3439	 * g5 = tte va
3440	 * g6 = tsbmiss area
3441	 * g7 = hatid
3442	 */
3443	brlz,a,pt %g3, tsb_validtte
3444	  rdpr	%tt, %g7
3445
3446#if defined(sun4u) && !defined(UTSB_PHYS)
3447#undef tsb_shme
3448	ba      tsb_pagefault
3449	  nop
3450#else /* sun4u && !UTSB_PHYS */
3451
3452tsb_shme:
3453	/*
3454	 * g2 = tagacc
3455	 * g6 = tsbmiss area
3456	 */
3457	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3458	brz,pn	%g5, tsb_pagefault
3459	  nop
3460	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
3461	brz,pn	%g7, tsb_pagefault
3462	  nop
3463
3464	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3465		MMU_PAGESHIFT64K, TTE64K, tsb_shme_l8K, tsb_shme_checktte,
3466		sfmmu_suspend_tl, tsb_shme_512K)
3467	/* NOT REACHED */
3468
3469tsb_shme_512K:
3470	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3471	and	%g4, HAT_512K_FLAG, %g5
3472	brz,pn	%g5, tsb_shme_4M
3473	  nop
3474
3475	/*
3476	 * 512K hash
3477	 */
3478
3479	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3480		MMU_PAGESHIFT512K, TTE512K, tsb_shme_l512K, tsb_shme_checktte,
3481		sfmmu_suspend_tl, tsb_shme_4M)
3482	/* NOT REACHED */
3483
3484tsb_shme_4M:
3485	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3486	and	%g4, HAT_4M_FLAG, %g5
3487	brz,pn	%g5, tsb_shme_32M
3488	  nop
34894:
3490	/*
3491	 * 4M hash
3492	 */
3493	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3494		MMU_PAGESHIFT4M, TTE4M, tsb_shme_l4M, tsb_shme_checktte,
3495		sfmmu_suspend_tl, tsb_shme_32M)
3496	/* NOT REACHED */
3497
3498tsb_shme_32M:
3499	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3500	and	%g4, HAT_32M_FLAG, %g5
3501	brz,pn	%g5, tsb_shme_256M
3502	  nop
3503
3504	/*
3505	 * 32M hash
3506	 */
3507
3508	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3509		MMU_PAGESHIFT32M, TTE32M, tsb_shme_l32M, tsb_shme_checktte,
3510		sfmmu_suspend_tl, tsb_shme_256M)
3511	/* NOT REACHED */
3512
3513tsb_shme_256M:
3514	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3515	and	%g4, HAT_256M_FLAG, %g5
3516	brz,pn	%g5, tsb_pagefault
3517	  nop
3518
3519	/*
3520	 * 256M hash
3521	 */
3522
3523	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3524	    MMU_PAGESHIFT256M, TTE256M, tsb_shme_l256M, tsb_shme_checktte,
3525	    sfmmu_suspend_tl, tsb_pagefault)
3526	/* NOT REACHED */
3527
3528tsb_shme_checktte:
3529
3530	brgez,pn %g3, tsb_pagefault
3531	  rdpr	%tt, %g7
3532	/*
3533	 * g1 = ctx1 flag
3534	 * g3 = tte
3535	 * g4 = tte pa
3536	 * g5 = tte va
3537	 * g6 = tsbmiss area
3538	 * g7 = tt
3539	 */
3540
3541	brz,pt  %g1, tsb_validtte
3542	  nop
3543	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
	or	%g1, HAT_CHKCTX1_FLAG, %g1
3545	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3546
3547	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3548#endif /* sun4u && !UTSB_PHYS */
3549
3550tsb_validtte:
3551	/*
3552	 * g3 = tte
3553	 * g4 = tte pa
3554	 * g5 = tte va
3555	 * g6 = tsbmiss area
3556	 * g7 = tt
3557	 */
3558
3559	/*
3560	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3561	 */
3562	cmp	%g7, FAST_PROT_TT
3563	bne,pt	%icc, 4f
3564	  nop
3565
3566	TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod,
3567	    tsb_protfault)
3568
3569	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3570#ifdef sun4v
3571	MMU_FAULT_STATUS_AREA(%g7)
3572	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
3573#else /* sun4v */
3574	mov     MMU_TAG_ACCESS, %g5
3575	ldxa    [%g5]ASI_DMMU, %g5
3576#endif /* sun4v */
3577	ba,pt	%xcc, tsb_update_tl1
3578	  nop
35794:
3580	/*
	 * On an ITLB miss, check the exec bit.
	 * If it is not set, treat the TTE as invalid.
3583	 */
3584	cmp     %g7, T_INSTR_MMU_MISS
3585	be,pn	%icc, 5f
3586	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3587	cmp     %g7, FAST_IMMU_MISS_TT
3588	bne,pt %icc, 3f
3589	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
35905:
3591	bz,pn %icc, tsb_protfault
3592	  nop
3593
35943:
3595	/*
3596	 * Set reference bit if not already set
3597	 */
3598	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref)
3599
3600	/*
3601	 * Now, load into TSB/TLB.  At this point:
3602	 * g3 = tte
3603	 * g4 = patte
3604	 * g6 = tsbmiss area
3605	 */
3606	rdpr	%tt, %g7
3607#ifdef sun4v
3608	MMU_FAULT_STATUS_AREA(%g2)
3609	cmp	%g7, T_INSTR_MMU_MISS
3610	be,a,pt	%icc, 9f
3611	  nop
3612	cmp	%g7, FAST_IMMU_MISS_TT
3613	be,a,pt	%icc, 9f
3614	  nop
3615	add	%g2, MMFSA_D_, %g2
36169:
3617	ldx	[%g2 + MMFSA_CTX_], %g7
3618	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3619	ldx	[%g2 + MMFSA_ADDR_], %g2
3620	mov	%g2, %g5		! load the fault addr for later use
3621	srlx	%g2, TTARGET_VA_SHIFT, %g2
3622	or	%g2, %g7, %g2
3623#else /* sun4v */
3624	mov     MMU_TAG_ACCESS, %g5
3625	cmp     %g7, FAST_IMMU_MISS_TT
3626	be,a,pt %icc, 9f
3627	   ldxa  [%g0]ASI_IMMU, %g2
3628	ldxa    [%g0]ASI_DMMU, %g2
3629	ba,pt   %icc, tsb_update_tl1
3630	   ldxa  [%g5]ASI_DMMU, %g5
36319:
3632	ldxa    [%g5]ASI_IMMU, %g5
3633#endif /* sun4v */
3634
3635tsb_update_tl1:
3636	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3637	brz,pn	%g7, tsb_kernel
3638#ifdef sun4v
3639	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3640#else  /* sun4v */
3641	  srlx	%g3, TTE_SZ_SHFT, %g7
3642#endif /* sun4v */
3643
3644tsb_user:
3645#ifdef sun4v
3646	cmp	%g7, TTE4M
3647	bge,pn	%icc, tsb_user4m
3648	  nop
3649#else /* sun4v */
3650	cmp	%g7, TTESZ_VALID | TTE4M
3651	be,pn	%icc, tsb_user4m
3652	  srlx	%g3, TTE_SZ2_SHFT, %g7
3653	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3654#ifdef ITLB_32M_256M_SUPPORT
3655	bnz,pn	%icc, tsb_user4m
3656	  nop
3657#else /* ITLB_32M_256M_SUPPORT */
3658	bnz,a,pn %icc, tsb_user_pn_synth
3659	 nop
3660#endif /* ITLB_32M_256M_SUPPORT */
3661#endif /* sun4v */
3662
3663tsb_user8k:
3664#if defined(sun4v) || defined(UTSB_PHYS)
3665	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3666	and	%g7, HAT_CHKCTX1_FLAG, %g1
3667	brz,a,pn %g1, 1f
3668	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
3669	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3670	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
3671	  mov PTL1_NO_SCDTSB8K, %g1			! panic
3672        GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
36731:
3674#else /* defined(sun4v) || defined(UTSB_PHYS) */
3675	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
3676#endif /* defined(sun4v) || defined(UTSB_PHYS) */
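	/*
	 * Sketch of the TSB selection above (illustration only):
	 *
	 *	if (urtteflags & HAT_CHKCTX1_FLAG)
	 *		tsbp = shared 3RD TSB (panic if not programmed);
	 *	else
	 *		tsbp = private 1ST TSB;
	 */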
3677
3678#ifndef UTSB_PHYS
3679	mov	ASI_N, %g7	! user TSBs accessed by VA
3680	mov	%g7, %asi
3681#endif /* !UTSB_PHYS */
3682
3683	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
3684
3685	rdpr    %tt, %g5
3686#ifdef sun4v
3687	cmp	%g5, T_INSTR_MMU_MISS
3688	be,a,pn	%xcc, 9f
3689	  mov	%g3, %g5
3690#endif /* sun4v */
3691	cmp	%g5, FAST_IMMU_MISS_TT
3692	be,pn	%xcc, 9f
3693	  mov	%g3, %g5
3694
3695	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3696	! trapstat wants TTE in %g5
3697	retry
36989:
3699	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3700	! trapstat wants TTE in %g5
3701	retry
3702
3703tsb_user4m:
3704#if defined(sun4v) || defined(UTSB_PHYS)
3705	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3706	and	%g7, HAT_CHKCTX1_FLAG, %g1
3707	brz,a,pn %g1, 4f
3708	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
3709	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
3710	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
3711	  nop
3712        GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3713
3714#else /* defined(sun4v) || defined(UTSB_PHYS) */
3715	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
3716#endif /* defined(sun4v) || defined(UTSB_PHYS) */
37174:
3718	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
3719	  nop
3720
3721#ifndef UTSB_PHYS
3722	mov	ASI_N, %g7	! user TSBs accessed by VA
3723	mov	%g7, %asi
3724#endif /* UTSB_PHYS */
3725
3726        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
3727
37285:
3729	rdpr    %tt, %g5
3730#ifdef sun4v
3731        cmp     %g5, T_INSTR_MMU_MISS
3732        be,a,pn %xcc, 9f
3733          mov   %g3, %g5
3734#endif /* sun4v */
        cmp     %g5, FAST_IMMU_MISS_TT
        be,pn   %xcc, 9f
          mov   %g3, %g5
3738
3739        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3740        ! trapstat wants TTE in %g5
3741        retry
37429:
3743        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3744        ! trapstat wants TTE in %g5
3745        retry
3746
3747#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3748	/*
3749	 * Panther ITLB synthesis.
3750	 * The Panther 32M and 256M ITLB code simulates these two large page
3751	 * sizes with 4M pages, to provide support for programs, for example
3752	 * Java, that may copy instructions into a 32M or 256M data page and
3753	 * then execute them. The code below generates the 4M pfn bits and
3754	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3755	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3756	 * are ignored by the hardware.
3757	 *
3758	 * Now, load into TSB/TLB.  At this point:
3759	 * g2 = tagtarget
3760	 * g3 = tte
3761	 * g4 = patte
3762	 * g5 = tt
3763	 * g6 = tsbmiss area
3764	 */
3765tsb_user_pn_synth:
3766	rdpr %tt, %g5
3767	cmp    %g5, FAST_IMMU_MISS_TT
3768	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3769	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3770	bz,pn %icc, 4b				/* if not, been here before */
3771	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3772	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3773	  mov	%g3, %g5
3774
3775	mov	MMU_TAG_ACCESS, %g7
3776	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3777	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3778
3779	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3780	mov	%g7, %asi
3781	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 4) /* update TSB */
37825:
3783        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3784        retry
3785
3786tsb_user_itlb_synth:
3787	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
3788
3789	mov	MMU_TAG_ACCESS, %g7
3790	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3791	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3792	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3793	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3794
3795	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3796	mov	%g7, %asi
3797	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 6) /* update TSB */
37987:
3799	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3800        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3801        retry
#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3803
3804tsb_kernel:
3805	rdpr	%tt, %g5
3806#ifdef sun4v
3807	cmp	%g7, TTE4M
3808	bge,pn	%icc, 5f
3809#else
3810	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3811	be,pn	%icc, 5f
3812#endif /* sun4v */
3813	  nop
3814	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
3815	ba,pt	%xcc, 6f
3816	  nop
38175:
3818	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
3819	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
3820	  nop
38216:
3822#ifndef sun4v
3823tsb_kernel_patch_asi:
3824	or	%g0, RUNTIME_PATCH, %g6
3825	mov	%g6, %asi	! XXX avoid writing to %asi !!
3826#endif
3827	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7)
38283:
3829#ifdef sun4v
3830	cmp	%g5, T_INSTR_MMU_MISS
3831	be,a,pn	%icc, 1f
3832	  mov	%g3, %g5			! trapstat wants TTE in %g5
3833#endif /* sun4v */
3834	cmp	%g5, FAST_IMMU_MISS_TT
3835	be,pn	%icc, 1f
3836	  mov	%g3, %g5			! trapstat wants TTE in %g5
3837	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3838	! trapstat wants TTE in %g5
3839	retry
38401:
3841	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3842	! trapstat wants TTE in %g5
3843	retry
3844
3845tsb_ism:
3846	/*
3847	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3848	 * page size down to smallest.
3849	 *
	 * g2 = vaddr + ctx (or ctxtype on sun4v), i.e. the (pseudo-)tag
	 *	access register
3852	 * g3 = ismmap->ism_seg
3853	 * g4 = physical address of ismmap->ism_sfmmu
3854	 * g6 = tsbmiss area
3855	 */
3856	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3857	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
3858	  mov	PTL1_BAD_ISM, %g1
3859						/* g5 = pa of imap_vb_shift */
3860	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3861	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3862	srlx	%g3, %g4, %g3			/* clr size field */
3863	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3864	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
3865	and     %g2, %g1, %g4                   /* g4 = ctx number */
3866	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
3867	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
3868	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
3869	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
3870	lduha   [%g5]ASI_MEM, %g4               /* g5 = pa of imap_hatflags */
3871#if defined(sun4v) || defined(UTSB_PHYS)
	and     %g4, HAT_CTX1_FLAG, %g5         /* g4 = imap_hatflags */
3873	brz,pt %g5, tsb_chk4M_ism
3874	  nop
3875	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
3876	or      %g5, HAT_CHKCTX1_FLAG, %g5
3877	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
3878	rdpr    %tt, %g5
3879	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
3880#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3881
3882	/*
3883	 * ISM pages are always locked down.
3884	 * If we can't find the tte then pagefault
3885	 * and let the spt segment driver resolve it.
3886	 *
3887	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3888	 * g4 = imap_hatflags
3889	 * g6 = tsb miss area
3890	 * g7 = ISM hatid
3891	 */
3892
3893tsb_chk4M_ism:
3894	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3895	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3896	  nop
3897
3898tsb_ism_32M:
3899	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3900	brz,pn	%g5, tsb_ism_256M
3901	  nop
3902
3903	/*
3904	 * 32M hash.
3905	 */
3906
3907	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M,
3908	    TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3909	    tsb_ism_4M)
3910	/* NOT REACHED */
3911
3912tsb_ism_32M_found:
3913	brlz,a,pt %g3, tsb_validtte
3914	  rdpr	%tt, %g7
3915	ba,pt	%xcc, tsb_ism_4M
3916	  nop
3917
3918tsb_ism_256M:
3919	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3920	brz,a,pn %g5, ptl1_panic
3921	  mov	PTL1_BAD_ISM, %g1
3922
3923	/*
3924	 * 256M hash.
3925	 */
3926	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M,
3927	    TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3928	    tsb_ism_4M)
3929
3930tsb_ism_256M_found:
3931	brlz,a,pt %g3, tsb_validtte
3932	  rdpr	%tt, %g7
3933
3934tsb_ism_4M:
3935	/*
3936	 * 4M hash.
3937	 */
3938	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M,
3939	    TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3940	    tsb_ism_8K)
3941	/* NOT REACHED */
3942
3943tsb_ism_4M_found:
3944	brlz,a,pt %g3, tsb_validtte
3945	  rdpr	%tt, %g7
3946
3947tsb_ism_8K:
3948	/*
3949	 * 8K and 64K hash.
3950	 */
3951
3952	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K,
3953	    TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3954	    tsb_pagefault)
3955	/* NOT REACHED */
3956
3957tsb_ism_8K_found:
3958	brlz,a,pt %g3, tsb_validtte
3959	  rdpr	%tt, %g7
3960
3961tsb_pagefault:
3962	rdpr	%tt, %g7
3963	cmp	%g7, FAST_PROT_TT
3964	be,a,pn	%icc, tsb_protfault
3965	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3966
3967tsb_protfault:
3968	/*
	 * We get here if we couldn't find a valid tte in the hash.
	 *
	 * If this is a user fault and we are at tl>1, we go to the window
	 * handling code.
3972	 *
3973	 * If kernel and the fault is on the same page as our stack
3974	 * pointer, then we know the stack is bad and the trap handler
3975	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3976	 *
3977	 * If this is a kernel trap and tl>1, panic.
3978	 *
3979	 * Otherwise we call pagefault.
3980	 */
3981	cmp	%g7, FAST_IMMU_MISS_TT
3982#ifdef sun4v
3983	MMU_FAULT_STATUS_AREA(%g4)
3984	ldx	[%g4 + MMFSA_I_CTX], %g5
3985	ldx	[%g4 + MMFSA_D_CTX], %g4
3986	move	%icc, %g5, %g4
3987	cmp	%g7, T_INSTR_MMU_MISS
3988	move	%icc, %g5, %g4
3989#else
3990	mov	MMU_TAG_ACCESS, %g4
3991	ldxa	[%g4]ASI_DMMU, %g2
3992	ldxa	[%g4]ASI_IMMU, %g5
3993	move	%icc, %g5, %g2
3994	cmp	%g7, T_INSTR_MMU_MISS
3995	move	%icc, %g5, %g2
3996	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3997#endif /* sun4v */
3998	brnz,pn	%g4, 3f				/* skip if not kernel */
3999	  rdpr	%tl, %g5
4000
4001	add	%sp, STACK_BIAS, %g3
4002	srlx	%g3, MMU_PAGESHIFT, %g3
4003	srlx	%g2, MMU_PAGESHIFT, %g4
4004	cmp	%g3, %g4
4005	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
4006	  mov	PTL1_BAD_STACK, %g1
4007
4008	cmp	%g5, 1
4009	ble,pt	%icc, 2f
4010	  nop
4011	TSTAT_CHECK_TL1(2f, %g1, %g2)
4012	rdpr	%tt, %g2
4013	cmp	%g2, FAST_PROT_TT
4014	mov	PTL1_BAD_KPROT_FAULT, %g1
4015	movne	%icc, PTL1_BAD_KMISS, %g1
4016	ba,pt	%icc, ptl1_panic
4017	  nop
4018
40192:
4020	/*
4021	 * We are taking a pagefault in the kernel on a kernel address.  If
4022	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
4023	 * want to call sfmmu_pagefault -- we will instead note that a fault
4024	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
4025	 * (instead of a "retry").  This will step over the faulting
4026	 * instruction.
4027	 */
4028	CPU_INDEX(%g1, %g2)
4029	set	cpu_core, %g2
4030	sllx	%g1, CPU_CORE_SHIFT, %g1
4031	add	%g1, %g2, %g1
4032	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
4033	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
4034	bz	sfmmu_pagefault
4035	or	%g2, CPU_DTRACE_BADADDR, %g2
4036	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
4037	GET_MMU_D_ADDR(%g3, %g4)
4038	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
4039	done
4040
40413:
4042	cmp	%g5, 1
4043	ble,pt	%icc, 4f
4044	  nop
4045	TSTAT_CHECK_TL1(4f, %g1, %g2)
4046	ba,pt	%icc, sfmmu_window_trap
4047	  nop
4048
40494:
4050	/*
4051	 * We are taking a pagefault on a non-kernel address.  If we are in
4052	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
4053	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
4054	 */
4055	CPU_INDEX(%g1, %g2)
4056	set	cpu_core, %g2
4057	sllx	%g1, CPU_CORE_SHIFT, %g1
4058	add	%g1, %g2, %g1
4059	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
4060	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
4061	bz	sfmmu_mmu_trap
4062	or	%g2, CPU_DTRACE_BADADDR, %g2
4063	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
4064	GET_MMU_D_ADDR(%g3, %g4)
4065	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
4066
4067	/*
4068	 * Be sure that we're actually taking this miss from the kernel --
4069	 * otherwise we have managed to return to user-level with
4070	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
4071	 */
4072	rdpr	%tstate, %g2
4073	btst	TSTATE_PRIV, %g2
4074	bz,a	ptl1_panic
4075	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
4076	done
4077
4078	ALTENTRY(tsb_tl0_noctxt)
4079	/*
4080	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
	 * if it is, indicate that we have faulted and issue a done.
4082	 */
4083	CPU_INDEX(%g5, %g6)
4084	set	cpu_core, %g6
4085	sllx	%g5, CPU_CORE_SHIFT, %g5
4086	add	%g5, %g6, %g5
4087	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
4088	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
4089	bz	1f
4090	or	%g6, CPU_DTRACE_BADADDR, %g6
4091	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
4092	GET_MMU_D_ADDR(%g3, %g4)
4093	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
4094
4095	/*
4096	 * Be sure that we're actually taking this miss from the kernel --
4097	 * otherwise we have managed to return to user-level with
4098	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
4099	 */
4100	rdpr	%tstate, %g5
4101	btst	TSTATE_PRIV, %g5
4102	bz,a	ptl1_panic
4103	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
4104	TSTAT_CHECK_TL1(2f, %g1, %g2);
41052:
4106	done
4107
41081:
4109	rdpr	%tt, %g5
4110	cmp	%g5, FAST_IMMU_MISS_TT
4111#ifdef sun4v
4112	MMU_FAULT_STATUS_AREA(%g2)
4113	be,a,pt	%icc, 2f
4114	  ldx	[%g2 + MMFSA_I_CTX], %g3
4115	cmp	%g5, T_INSTR_MMU_MISS
4116	be,a,pt	%icc, 2f
4117	  ldx	[%g2 + MMFSA_I_CTX], %g3
4118	ldx	[%g2 + MMFSA_D_CTX], %g3
41192:
4120#else
4121	mov	MMU_TAG_ACCESS, %g2
4122	be,a,pt	%icc, 2f
4123	  ldxa	[%g2]ASI_IMMU, %g3
4124	ldxa	[%g2]ASI_DMMU, %g3
41252:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
4126#endif /* sun4v */
4127	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
4128	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
4129	rdpr	%tl, %g5
4130	cmp	%g5, 1
4131	ble,pt	%icc, sfmmu_mmu_trap
4132	  nop
4133	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
4134	ba,pt	%icc, sfmmu_window_trap
4135	  nop
4136	SET_SIZE(sfmmu_tsb_miss)
4137#endif  /* lint */
4138
4139#if defined (lint)
4140/*
4141 * This routine will look for a user or kernel vaddr in the hash
4142 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
4143 * grab any locks.  It should only be used by other sfmmu routines.
4144 */
4145/* ARGSUSED */
4146pfn_t
4147sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
4148{
4149	return(0);
4150}
4151
4152/* ARGSUSED */
4153pfn_t
4154sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
4155{
4156	return(0);
4157}
4158
4159#else /* lint */
4160
4161	ENTRY_NP(sfmmu_vatopfn)
4162 	/*
4163 	 * disable interrupts
4164 	 */
4165 	rdpr	%pstate, %o3
4166#ifdef DEBUG
4167	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
4168#endif
4169	/*
4170	 * disable interrupts to protect the TSBMISS area
4171	 */
4172	andn    %o3, PSTATE_IE, %o5
4173	wrpr    %o5, 0, %pstate
4174
4175	/*
4176	 * o0 = vaddr
4177	 * o1 = sfmmup
4178	 * o2 = ttep
4179	 */
4180	CPU_TSBMISS_AREA(%g1, %o5)
4181	ldn	[%g1 + TSBMISS_KHATID], %o4
4182	cmp	%o4, %o1
4183	bne,pn	%ncc, vatopfn_nokernel
4184	  mov	TTE64K, %g5			/* g5 = rehash # */
4185	mov %g1,%o5				/* o5 = tsbmiss_area */
4186	/*
4187	 * o0 = vaddr
4188	 * o1 & o4 = hatid
4189	 * o2 = ttep
4190	 * o5 = tsbmiss area
4191	 */
4192	mov	HBLK_RANGE_SHIFT, %g6
41931:
4194
4195	/*
4196	 * o0 = vaddr
4197	 * o1 = sfmmup
4198	 * o2 = ttep
4199	 * o3 = old %pstate
4200	 * o4 = hatid
4201	 * o5 = tsbmiss
4202	 * g5 = rehash #
4203	 * g6 = hmeshift
4204	 *
	 * The first arg to GET_TTE is actually the tag access register,
	 * not just the vaddr. Since this call is for the kernel we need
	 * to clear any lower vaddr bits that would be interpreted as ctx
	 * bits.
4208	 */
4209	set     TAGACC_CTX_MASK, %g1
4210	andn    %o0, %g1, %o0
4211	GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5,
4212		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
4213
4214kvtop_hblk_found:
4215	/*
4216	 * o0 = vaddr
4217	 * o1 = sfmmup
4218	 * o2 = ttep
4219	 * g1 = tte
4220	 * g2 = tte pa
4221	 * g3 = tte va
	 * o5 = tsbmiss area
	 * o1 = hatid (same as sfmmup here)
4224	 */
4225	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
4226	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4227	stx %g1,[%o2]				/* put tte into *ttep */
4228	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
4229	/*
4230	 * o0 = vaddr
4231	 * o1 = sfmmup
4232	 * o2 = ttep
4233	 * g1 = pfn
4234	 */
4235	ba,pt	%xcc, 6f
4236	  mov	%g1, %o0
4237
4238kvtop_nohblk:
4239	/*
	 * We get here if we couldn't find a valid hblk in the hash.  We
	 * rehash if necessary.
4242	 */
4243	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
4244#ifdef sun4v
4245	cmp	%g5, MAX_HASHCNT
4246#else
4247	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
4248#endif /* sun4v */
4249	be,a,pn	%icc, 6f
4250	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4251	mov	%o1, %o4			/* restore hatid */
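	/*
	 * Bump the rehash value and recompute the hmeshift for the next
	 * GET_TTE probe: sun4v walks the 64K, 4M and 256M hashes, while
	 * sun4u walks 64K, 512K and 4M.
	 */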
4252#ifdef sun4v
4253        add	%g5, 2, %g5
4254	cmp	%g5, 3
4255	move	%icc, MMU_PAGESHIFT4M, %g6
4256	ba,pt	%icc, 1b
4257	movne	%icc, MMU_PAGESHIFT256M, %g6
4258#else
4259        inc	%g5
4260	cmp	%g5, 2
4261	move	%icc, MMU_PAGESHIFT512K, %g6
4262	ba,pt	%icc, 1b
4263	movne	%icc, MMU_PAGESHIFT4M, %g6
4264#endif /* sun4v */
42656:
4266	retl
4267 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4268
4269tsb_suspend:
4270	/*
4271	 * o0 = vaddr
4272	 * o1 = sfmmup
4273	 * o2 = ttep
4274	 * g1 = tte
4275	 * g2 = tte pa
4276	 * g3 = tte va
	 * o5 = tsbmiss area (o2 still holds ttep)
4278	 */
4279	stx %g1,[%o2]				/* put tte into *ttep */
4280	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
4281	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
4282	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
42838:
4284	retl
4285	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
4286
4287vatopfn_nokernel:
4288	/*
	 * This routine does NOT support user addresses;
	 * there is a C routine that does.  The only reason
	 * the C routine doesn't support kernel addresses
	 * as well is that we do va_to_pa while holding
	 * the hashlock.
4294	 */
4295 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4296	save	%sp, -SA(MINFRAME), %sp
4297	sethi	%hi(sfmmu_panic3), %o0
4298	call	panic
4299	 or	%o0, %lo(sfmmu_panic3), %o0
4300
4301	SET_SIZE(sfmmu_vatopfn)
4302
4303	/*
4304	 * %o0 = vaddr
4305	 * %o1 = hashno (aka szc)
4306	 *
4307	 *
4308	 * This routine is similar to sfmmu_vatopfn() but will only look for
4309	 * a kernel vaddr in the hash structure for the specified rehash value.
4310	 * It's just an optimization for the case when pagesize for a given
4311	 * va range is already known (e.g. large page heap) and we don't want
4312	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4313	 *
4314	 * Returns valid pfn or PFN_INVALID if
4315	 * tte for specified rehash # is not found, invalid or suspended.
4316	 */
4317	ENTRY_NP(sfmmu_kvaszc2pfn)
4318 	/*
4319 	 * disable interrupts
4320 	 */
4321 	rdpr	%pstate, %o3
4322#ifdef DEBUG
4323	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4324#endif
4325	/*
4326	 * disable interrupts to protect the TSBMISS area
4327	 */
4328	andn    %o3, PSTATE_IE, %o5
4329	wrpr    %o5, 0, %pstate
4330
4331	CPU_TSBMISS_AREA(%g1, %o5)
4332	ldn	[%g1 + TSBMISS_KHATID], %o4
4333	sll	%o1, 1, %g6
4334	add	%g6, %o1, %g6
4335	add	%g6, MMU_PAGESHIFT, %g6
4336	/*
4337	 * %o0 = vaddr
4338	 * %o1 = hashno
4339	 * %o3 = old %pstate
4340	 * %o4 = ksfmmup
4341	 * %g1 = tsbmiss area
4342	 * %g6 = hmeshift
4343	 */
4344
4345	/*
	 * The first arg to GET_TTE is actually the tag access register,
	 * not just the vaddr. Since this call is for the kernel we need
	 * to clear any lower vaddr bits that would be interpreted as ctx
	 * bits.
4349	 */
4350	srlx	%o0, MMU_PAGESHIFT, %o0
4351	sllx	%o0, MMU_PAGESHIFT, %o0
4352	GET_TTE(%o0, %o4, %g3, %g4, %g5, %g1, %o5, %g6, %o1,
4353		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4354		kvaszc2pfn_nohblk)
4355
4356kvaszc2pfn_hblk_found:
4357	/*
4358	 * %g3 = tte
4359	 * %o0 = vaddr
4360	 */
4361	brgez,a,pn %g3, 1f			/* check if tte is invalid */
4362	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4363	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4364	/*
4365	 * g3 = pfn
4366	 */
4367	ba,pt	%xcc, 1f
4368	  mov	%g3, %o0
4369
4370kvaszc2pfn_nohblk:
4371	mov	-1, %o0
4372
43731:
4374	retl
4375 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4376
4377	SET_SIZE(sfmmu_kvaszc2pfn)
4378
4379#endif /* lint */
4380
4381
4382
4383#if !defined(lint)
4384
4385/*
4386 * kpm lock used between trap level tsbmiss handler and kpm C level.
4387 */
4388#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
4389	mov     0xff, tmp1						;\
4390label1:									;\
4391	casa    [kpmlckp]asi, %g0, tmp1					;\
4392	brnz,pn tmp1, label1						;\
4393	mov     0xff, tmp1						;\
4394	membar  #LoadLoad
4395
4396#define KPMLOCK_EXIT(kpmlckp, asi)					\
4397	membar  #LoadStore|#StoreStore					;\
4398	sta     %g0, [kpmlckp]asi
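
/*
 * C-level sketch of the protocol above (illustration only):
 *
 *	enter:	while (casa(kpmlckp, 0, 0xff) != 0)
 *			continue;
 *		membar #LoadLoad;
 *
 *	exit:	membar #LoadStore | #StoreStore;
 *		*kpmlckp = 0;
 */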
4399
4400/*
4401 * Lookup a memseg for a given pfn and if found, return the physical
4402 * address of the corresponding struct memseg in mseg, otherwise
4403 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
 * tsbmp; %asi is assumed to be ASI_MEM.
 * This lookup strictly traverses only the physical memseg linkage.
 * The more generic approach, checking the virtual linkage before
 * using the physical one (used e.g. with hmehash buckets), cannot be
 * used here: memory DR operations can run in parallel to this lookup
 * without any locks, and updates of the physical and virtual linkage
 * cannot be done atomically with respect to each other. Because
 * physical address zero can be a valid physical address,
 * MSEG_NULLPTR_PA acts as the "physical NULL" pointer.
4413 */
4414#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
4415	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
4416	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
4417	udivx	pfn, mseg, mseg						;\
4418	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
4419	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
4420	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
4421	add	tmp1, mseg, tmp1					;\
4422	ldxa	[tmp1]%asi, mseg					;\
4423	cmp	mseg, MSEG_NULLPTR_PA					;\
4424	be,pn	%xcc, label/**/1		/* if not found */	;\
4425	  nop								;\
4426	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4427	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4428	blu,pn	%xcc, label/**/1					;\
4429	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4430	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4431	bgeu,pn	%xcc, label/**/1					;\
4432	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
4433	mulx	tmp1, PAGE_SIZE, tmp1					;\
4434	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
4435	add	tmp2, tmp1, tmp1			/* pp */	;\
4436	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
4437	cmp	tmp2, pfn						;\
4438	be,pt	%xcc, label/**/_ok			/* found */	;\
4439label/**/1:								;\
4440	/* brute force lookup */					;\
4441	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
4442	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
4443label/**/2:								;\
4444	cmp	mseg, MSEG_NULLPTR_PA					;\
4445	be,pn	%xcc, label/**/_ok		/* if not found */	;\
4446	  nop								;\
4447	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4448	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4449	blu,a,pt %xcc, label/**/2					;\
4450	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4451	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4452	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4453	bgeu,a,pt %xcc, label/**/2					;\
4454	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4455label/**/_ok:
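
/*
 * C-level sketch of the lookup above (illustration only; field names
 * follow the assym offsets, and all loads go through ASI_MEM, i.e.
 * operate on physical addresses):
 *
 *	mseg = mseg_phash[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg != MSEG_NULLPTR_PA &&
 *	    pfn >= mseg->pages_base && pfn < mseg->pages_end &&
 *	    pagenum(pagespa + (pfn - pages_base) * PAGE_SIZE) == pfn)
 *		return (mseg);
 *	for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA; mseg = nextpa)
 *		if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *			return (mseg);
 *	return (MSEG_NULLPTR_PA);
 */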
4456
4457	/*
4458	 * kpm tsb miss handler large pages
4459	 * g1 = 8K kpm TSB entry pointer
4460	 * g2 = tag access register
4461	 * g3 = 4M kpm TSB entry pointer
4462	 */
4463	ALTENTRY(sfmmu_kpm_dtsb_miss)
4464	TT_TRACE(trace_tsbmiss)
4465
4466	CPU_INDEX(%g7, %g6)
4467	sethi	%hi(kpmtsbm_area), %g6
4468	sllx	%g7, KPMTSBM_SHIFT, %g7
4469	or	%g6, %lo(kpmtsbm_area), %g6
4470	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4471
4472	/* check enable flag */
4473	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4474	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4475	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4476	  nop
4477
4478	/* VA range check */
4479	ldx	[%g6 + KPMTSBM_VBASE], %g7
4480	cmp	%g2, %g7
4481	blu,pn	%xcc, sfmmu_tsb_miss
4482	  ldx	[%g6 + KPMTSBM_VEND], %g5
4483	cmp	%g2, %g5
4484	bgeu,pn	%xcc, sfmmu_tsb_miss
4485	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
4486
4487	/*
4488	 * check TL tsbmiss handling flag
4489	 * bump tsbmiss counter
4490	 */
4491	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4492#ifdef	DEBUG
4493	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
4494	inc	%g5
4495	brz,pn	%g3, sfmmu_kpm_exception
4496	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4497#else
4498	inc	%g5
4499	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4500#endif
4501	/*
4502	 * At this point:
4503	 *  g1 = 8K kpm TSB pointer (not used)
4504	 *  g2 = tag access register
4505	 *  g3 = clobbered
4506	 *  g6 = per-CPU kpm tsbmiss area
4507	 *  g7 = kpm_vbase
4508	 */
4509
4510	/* vaddr2pfn */
4511	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4512	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4513	srax    %g4, %g3, %g2			/* which alias range (r) */
4514	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4515	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4516
4517	/*
4518	 * Setup %asi
4519	 * mseg_pa = page_numtomemseg_nolock(pfn)
4520	 * if (mseg_pa == NULL) sfmmu_kpm_exception
4521	 * g2=pfn
4522	 */
4523	mov	ASI_MEM, %asi
4524	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4525	cmp	%g3, MSEG_NULLPTR_PA
4526	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4527	  nop
4528
4529	/*
4530	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4531	 * g2=pfn g3=mseg_pa
4532	 */
4533	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
4534	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4535	srlx	%g2, %g5, %g4
4536	sllx	%g4, %g5, %g4
4537	sub	%g4, %g7, %g4
4538	srlx	%g4, %g5, %g4
4539
4540	/*
4541	 * Validate inx value
4542	 * g2=pfn g3=mseg_pa g4=inx
4543	 */
4544#ifdef	DEBUG
4545	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4546	cmp	%g4, %g5			/* inx - nkpmpgs */
4547	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4548	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4549#else
4550	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4551#endif
4552	/*
4553	 * kp = &mseg_pa->kpm_pages[inx]
4554	 */
4555	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
4556	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4557	add	%g5, %g4, %g5			/* kp */
4558
4559	/*
4560	 * KPMP_HASH(kp)
4561	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4562	 */
4563	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4564	sub	%g7, 1, %g7			/* mask */
	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
	add	%g5, %g1, %g5			/* y = kp + x */
4567	and 	%g5, %g7, %g5			/* hashinx = y & mask */
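	/* i.e. hashinx = (kp + (kp >> kpmp_shift)) & (kpmp_table_sz - 1) */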
4568
4569	/*
4570	 * Calculate physical kpm_page pointer
4571	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4572	 */
4573	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4574	add	%g1, %g4, %g1			/* kp_pa */
4575
4576	/*
4577	 * Calculate physical hash lock address
4578	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
4579	 */
4580	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4581	sllx	%g5, KPMHLK_SHIFT, %g5
4582	add	%g4, %g5, %g3
4583	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
4584
4585	/*
4586	 * Assemble tte
4587	 * g1=kp_pa g2=pfn g3=hlck_pa
4588	 */
4589#ifdef sun4v
4590	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4591	sllx	%g5, 32, %g5
4592	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4593	or	%g4, TTE4M, %g4
4594	or	%g5, %g4, %g5
4595#else
4596	sethi	%hi(TTE_VALID_INT), %g4
4597	mov	TTE4M, %g5
4598	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4599	or	%g5, %g4, %g5			/* upper part */
4600	sllx	%g5, 32, %g5
4601	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4602	or	%g5, %g4, %g5
4603#endif
4604	sllx	%g2, MMU_PAGESHIFT, %g4
4605	or	%g5, %g4, %g5			/* tte */
4606	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4607	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4608
4609	/*
4610	 * tsb dropin
4611	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4612	 */
4613
4614	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4615	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4616
4617	/* use C-handler if there's no go for dropin */
4618	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4619	cmp	%g7, -1
	bne,pn	%xcc, 5f
4621	  nop
4622
4623#ifdef	DEBUG
4624	/* double check refcnt */
4625	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4626	brz,pn	%g7, 5f			/* let C-handler deal with this */
4627	  nop
4628#endif
4629
4630#ifndef sun4v
4631	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4632	mov	ASI_N, %g1
4633	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4634	movnz	%icc, ASI_MEM, %g1
4635	mov	%g1, %asi
4636#endif
4637
4638	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4639	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4640
4641	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4642	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4643
4644	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4645
4646	/* KPMLOCK_EXIT(kpmlckp, asi) */
4647	KPMLOCK_EXIT(%g3, ASI_MEM)
4648
4649	/*
4650	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4651	 * point to trapstat's TSB miss return code (note that trapstat
4652	 * itself will patch the correct offset to add).
4653	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4654	 */
4655	rdpr	%tl, %g7
4656	cmp	%g7, 1
4657	ble	%icc, 0f
4658	sethi	%hi(KERNELBASE), %g6
4659	rdpr	%tpc, %g7
4660	or	%g6, %lo(KERNELBASE), %g6
4661	cmp	%g7, %g6
4662	bgeu	%xcc, 0f
4663	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4664	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4665	wrpr	%g7, %tpc
4666	add	%g7, 4, %g7
4667	wrpr	%g7, %tnpc
46680:
4669	retry
46705:
4671	/* g3=hlck_pa */
4672	KPMLOCK_EXIT(%g3, ASI_MEM)
4673	ba,pt	%icc, sfmmu_kpm_exception
4674	  nop
4675	SET_SIZE(sfmmu_kpm_dtsb_miss)
4676
4677	/*
4678	 * kpm tsbmiss handler for smallpages
4679	 * g1 = 8K kpm TSB pointer
4680	 * g2 = tag access register
4681	 * g3 = 4M kpm TSB pointer
4682	 */
4683	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4684	TT_TRACE(trace_tsbmiss)
4685	CPU_INDEX(%g7, %g6)
4686	sethi	%hi(kpmtsbm_area), %g6
4687	sllx	%g7, KPMTSBM_SHIFT, %g7
4688	or	%g6, %lo(kpmtsbm_area), %g6
4689	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4690
4691	/* check enable flag */
4692	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4693	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4694	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4695	  nop
4696
4697	/*
4698	 * VA range check
4699	 * On fail: goto sfmmu_tsb_miss
4700	 */
4701	ldx	[%g6 + KPMTSBM_VBASE], %g7
4702	cmp	%g2, %g7
4703	blu,pn	%xcc, sfmmu_tsb_miss
4704	  ldx	[%g6 + KPMTSBM_VEND], %g5
4705	cmp	%g2, %g5
4706	bgeu,pn	%xcc, sfmmu_tsb_miss
4707	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4708
4709	/*
4710	 * check TL tsbmiss handling flag
4711	 * bump tsbmiss counter
4712	 */
4713	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4714#ifdef	DEBUG
4715	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4716	inc	%g5
4717	brz,pn	%g1, sfmmu_kpm_exception
4718	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4719#else
4720	inc	%g5
4721	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4722#endif
4723	/*
4724	 * At this point:
4725	 *  g1 = clobbered
4726	 *  g2 = tag access register
4727	 *  g3 = 4M kpm TSB pointer (not used)
4728	 *  g6 = per-CPU kpm tsbmiss area
4729	 *  g7 = kpm_vbase
4730	 */
4731
4732	/* vaddr2pfn */
4733	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4734	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4735	srax    %g4, %g3, %g2			/* which alias range (r) */
4736	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4737	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4738
4739	/*
4740	 * Setup %asi
4741	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4742	 * if (mseg not found) sfmmu_kpm_exception
4743	 * g2=pfn
4744	 */
4745	mov	ASI_MEM, %asi
4746	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4747	cmp	%g3, MSEG_NULLPTR_PA
4748	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4749	  nop
4750
4751	/*
4752	 * inx = pfn - mseg_pa->kpm_pbase
4753	 * g2=pfn g3=mseg_pa
4754	 */
4755	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4756	sub	%g2, %g7, %g4
4757
4758#ifdef	DEBUG
4759	/*
4760	 * Validate inx value
4761	 * g2=pfn g3=mseg_pa g4=inx
4762	 */
4763	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4764	cmp	%g4, %g5			/* inx - nkpmpgs */
4765	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4766	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4767#else
4768	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4769#endif
4770	/* ksp = &mseg_pa->kpm_spages[inx] */
4771	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4772	add	%g5, %g4, %g5			/* ksp */
4773
4774	/*
	 * KPMP_SHASH(ksp)
4776	 * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
4777	 */
4778	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4779	sub	%g7, 1, %g7			/* mask */
4780	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4781	add	%g5, %g1, %g5			/* y = ksp + x */
4782	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4783
4784	/*
4785	 * Calculate physical kpm_spage pointer
4786	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4787	 */
4788	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4789	add	%g1, %g4, %g1			/* ksp_pa */
4790
4791	/*
4792	 * Calculate physical hash lock address.
4793	 * Note: Changes in kpm_shlk_t must be reflected here.
4794	 * g1=ksp_pa g2=pfn g5=hashinx
4795	 */
4796	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4797	sllx	%g5, KPMSHLK_SHIFT, %g5
4798	add	%g4, %g5, %g3			/* hlck_pa */
4799
4800	/*
4801	 * Assemble tte
4802	 * g1=ksp_pa g2=pfn g3=hlck_pa
4803	 */
4804	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4805	sllx	%g5, 32, %g5
4806	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4807	or	%g5, %g4, %g5
4808	sllx	%g2, MMU_PAGESHIFT, %g4
4809	or	%g5, %g4, %g5			/* tte */
4810	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4811	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4812
4813	/*
4814	 * tsb dropin
4815	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
4816	 */
4817
4818	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4819	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4820
4821	/* use C-handler if there's no go for dropin */
4822	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
4823	cmp	%g7, -1
4824	bne,pn	%xcc, 5f
4825	  nop
4826
4827#ifndef sun4v
4828	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4829	mov	ASI_N, %g1
4830	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4831	movnz	%icc, ASI_MEM, %g1
4832	mov	%g1, %asi
4833#endif
4834
4835	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4836	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4837
4838	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4839	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4840
4841	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4842
4843	/* KPMLOCK_EXIT(kpmlckp, asi) */
4844	KPMLOCK_EXIT(%g3, ASI_MEM)
4845
4846	/*
4847	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4848	 * point to trapstat's TSB miss return code (note that trapstat
4849	 * itself will patch the correct offset to add).
4850	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4851	 */
4852	rdpr	%tl, %g7
4853	cmp	%g7, 1
4854	ble	%icc, 0f
4855	sethi	%hi(KERNELBASE), %g6
4856	rdpr	%tpc, %g7
4857	or	%g6, %lo(KERNELBASE), %g6
4858	cmp	%g7, %g6
4859	bgeu	%xcc, 0f
4860	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4861	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4862	wrpr	%g7, %tpc
4863	add	%g7, 4, %g7
4864	wrpr	%g7, %tnpc
48650:
4866	retry
48675:
4868	/* g3=hlck_pa */
4869	KPMLOCK_EXIT(%g3, ASI_MEM)
4870	ba,pt	%icc, sfmmu_kpm_exception
4871	  nop
4872	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4873
#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4875#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4876#endif
4877
4878#endif /* lint */
4879
4880#ifdef	lint
4881/*
4882 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4883 * Called from C-level, sets/clears "go" indication for trap level handler.
4884 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4885 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4886 * Assumes khl_mutex is held when called from C-level.
4887 */
4888/* ARGSUSED */
4889void
4890sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4891{
4892}
4893
4894/*
 * For kpm_smallpages: stores val to the byte at address mapped,
 * within low-level lock brackets, and returns the old value.
 * Called from C-level.
4898 */
4899/* ARGSUSED */
4900int
4901sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
4902{
4903	return (0);
4904}
4905
4906#else /* lint */
4907
4908	.seg	".data"
4909sfmmu_kpm_tsbmtl_panic:
4910	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4911	.byte	0
4912sfmmu_kpm_stsbmtl_panic:
4913	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4914	.byte	0
4915	.align	4
4916	.seg	".text"
4917
4918	ENTRY_NP(sfmmu_kpm_tsbmtl)
4919	rdpr	%pstate, %o3
4920	/*
4921	 * %o0 = &kp_refcntc
4922	 * %o1 = &khl_lock
4923	 * %o2 = 0/1 (off/on)
4924	 * %o3 = pstate save
4925	 */
4926#ifdef DEBUG
4927	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4928	bnz,pt %icc, 1f				/* disabled, panic	 */
4929	  nop
4930	save	%sp, -SA(MINFRAME), %sp
4931	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4932	call	panic
4933	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4934	ret
4935	restore
49361:
4937#endif /* DEBUG */
4938	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4939
4940	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4941	mov	-1, %o5
4942	brz,a	%o2, 2f
4943	  mov	0, %o5
49442:
4945	sth	%o5, [%o0]
4946	KPMLOCK_EXIT(%o1, ASI_N)
4947
4948	retl
4949	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4950	SET_SIZE(sfmmu_kpm_tsbmtl)
4951
4952	ENTRY_NP(sfmmu_kpm_stsbmtl)
4953	rdpr	%pstate, %o3
4954	/*
4955	 * %o0 = &mapped
4956	 * %o1 = &kshl_lock
4957	 * %o2 = val
4958	 * %o3 = pstate save
4959	 */
4960#ifdef DEBUG
4961	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4962	bnz,pt %icc, 1f				/* disabled, panic	 */
4963	  nop
4964	save	%sp, -SA(MINFRAME), %sp
4965	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4966	call	panic
4967	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4968	ret
4969	restore
49701:
4971#endif /* DEBUG */
4972	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4973
4974	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4975	ldsb	[%o0], %o5
4976	stb	%o2, [%o0]
4977	KPMLOCK_EXIT(%o1, ASI_N)
4978
4979	mov	%o5, %o0			/* return old val */
4980	retl
4981	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4982	SET_SIZE(sfmmu_kpm_stsbmtl)
4983
4984#endif /* lint */
4985
4986#ifndef lint
4987#ifdef sun4v
4988	/*
	 * User/kernel data miss w/ multiple TSBs
	 * The first probe covers 8K, 64K, and 512K page sizes,
	 * because 64K and 512K mappings are replicated off the 8K
	 * pointer.  The second probe covers the 4M page size only.
4993	 *
4994	 * MMU fault area contains miss address and context.
4995	 */
4996	ALTENTRY(sfmmu_slow_dmmu_miss)
4997	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
4998
4999slow_miss_common:
5000	/*
5001	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
5002	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
5003	 */
5004	brnz,pt	%g3, 8f			! check for user context
5005	  nop
5006
5007	/*
5008	 * Kernel miss
5009	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
5010	 * branch to sfmmu_tsb_miss_tt to handle it.
5011	 */
5012	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
5013sfmmu_dslow_patch_ktsb_base:
5014	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
5015sfmmu_dslow_patch_ktsb_szcode:
5016	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
5017
5018	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
5019	! %g1 = First TSB entry pointer, as TSB miss handler expects
5020
5021	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
5022sfmmu_dslow_patch_ktsb4m_base:
5023	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
5024sfmmu_dslow_patch_ktsb4m_szcode:
5025	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
5026
5027	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
5028	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
5029	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
5030	.empty
5031
50328:
5033	/*
5034	 * User miss
5035	 * Get first TSB pointer in %g1
5036	 * Get second TSB pointer (or NULL if no second TSB) in %g3
5037	 * Branch to sfmmu_tsb_miss_tt to handle it
5038	 */
5039	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
5040	/* %g1 = first TSB entry ptr now, %g2 preserved */
5041
5042	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
5043	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
5044	  nop
5045
5046	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
5047	/* %g3 = second TSB entry ptr now, %g2 preserved */
50489:
5049	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
5050	.empty
5051	SET_SIZE(sfmmu_slow_dmmu_miss)
5052
5053
5054	/*
5055	 * User/kernel instruction miss w/ multiple TSBs
5056	 * The first probe covers 8K, 64K, and 512K page sizes,
	 * because 64K and 512K mappings are replicated off the 8K
	 * pointer.  The second probe covers the 4M page size only.
5059	 *
5060	 * MMU fault area contains miss address and context.
5061	 */
5062	ALTENTRY(sfmmu_slow_immu_miss)
5063	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
5064	ba,a,pt	%xcc, slow_miss_common
5065	SET_SIZE(sfmmu_slow_immu_miss)
5066
5067#endif /* sun4v */
5068#endif	/* lint */
5069
5070#ifndef lint
5071
5072/*
5073 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
5074 */
5075	.seg	".data"
5076	.align	64
5077	.global tsbmiss_area
5078tsbmiss_area:
5079	.skip	(TSBMISS_SIZE * NCPU)
5080
5081	.align	64
5082	.global kpmtsbm_area
5083kpmtsbm_area:
5084	.skip	(KPMTSBM_SIZE * NCPU)
5085#endif	/* lint */
5086