/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being invalidated
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 * Note %g1 is the only input argument used by this xcall handler.
 */
	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT ||
	!     current CPU tsbmiss->usfmmup == victim sfmmup) {
	!       if (shctx_on) {
	!               shctx = INVALID;
	!       }
	!	if (sec-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	!	if (pri-ctx > INVALID_CONTEXT) {
	!		write INVALID_CONTEXT to pri-ctx
	!	}
	! }

	sethi   %hi(ksfmmup), %g3
        ldx     [%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn %xcc, ptl1_panic		/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2
	cmp	%g1, INVALID_CONTEXT
	be,pn	%xcc, 0f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5     /* load usfmmup */
	cmp	%g5, %g1			/* is to-be-invalidated hat running here? */
	bne,pt	%xcc, 3f
	  nop

0:
	sethi   %hi(shctx_on), %g5
        ld      [%g5 + %lo(shctx_on)], %g5
        brz     %g5, 1f
          mov     MMU_SHARED_CONTEXT, %g5
        sethi   %hi(FLUSH_ADDR), %g4
        stxa    %g0, [%g5]ASI_MMU_CTX
        flush   %g4

1:
	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
	set     CTXREG_CTX_MASK, %g4
	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 2f			/* yes, no need to change */
	  mov   MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

2:
	ldxa	[%g7]ASI_MMU_CTX, %g3		/* get pgsz | pri-ctx */
	and     %g3, %g4, %g5			/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
	ble,pn	%xcc, 3f			/* yes, no need to change */
	  srlx	%g3, CTXREG_NEXT_SHIFT, %g3	/* %g3 = nucleus pgsz */
	sllx	%g3, CTXREG_NEXT_SHIFT, %g3	/* need to preserve nucleus pgsz */
	or	%g3, %g2, %g2			/* %g2 = nucleus pgsz | INVALID_CONTEXT */

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
3:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)



	/*
	 * Load an entry into the ITLB.
	 *
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
	ENTRY_NP(sfmmu_itlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset

	ldx	[%o1], %g1
	set	MMU_TAG_ACCESS, %o5
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is Cheetah or derivative and the specified TTE is locked
	! and hence to be loaded into the T16, fully-associative TLB, we
	! must avoid Cheetah+ erratum 34.  In Cheetah+ erratum 34, under
	! certain conditions an ITLB locked index 0 TTE will erroneously be
	! displaced when a new TTE is loaded via ASI_ITLB_IN.  To avoid
	! this erratum, we scan the T16 top down for an unlocked TTE and
	! explicitly load the specified TTE into that index.
	!
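	! A rough sketch of the scan that follows (not an authoritative
	! restatement):
	!	for (idx = 15; idx >= 1; idx--)
	!		if (T16[idx] is invalid or unlocked)
	!			load the new TTE at idx and finish;
	!	panic(sfmmu_panic5);		! no displaceable entry found
	! Index 0 is intentionally never considered, per the erratum.
	!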
	GET_CPU_IMPL(%g2)
	cmp	%g2, CHEETAH_IMPL
	bl,pn	%icc, 0f
	  nop

	andcc	%g1, TTE_LCK_INT, %g0
	bz	%icc, 0f			! Lock bit is not set;
						!   load normally.
	  or	%g0, (15 << 3), %g3		! Start searching from the
						!   top down.

1:
	ldxa	[%g3]ASI_ITLB_ACCESS, %g4	! Load TTE from t16

	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	cmp	%g4, %g0
	bge	%xcc, 2f			! TTE is >= 0 iff not valid
	  andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz	%icc, 2f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3
	brgz	%g3, 1b				! Still more TLB entries
	  nop					! to search

	sethi   %hi(sfmmu_panic5), %o0          ! We searched all entries and
	call    panic                           ! found no unlocked TTE so
	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.


2:
	!
	! We have found an unlocked or non-valid entry; we'll explicitly load
	! our locked entry here.
	!
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g3]ASI_ITLB_ACCESS
	flush	%o1				! Flush required for I-MMU
	ba	3f				! Delay slot of ba is empty
	  nop					!   per Erratum 64

0:
#endif	/* CHEETAHPLUS_ERRATUM_34 */
	sethi	%hi(FLUSH_ADDR), %o1		! Flush addr doesn't matter
	stxa	%o0, [%o5]ASI_IMMU
	stxa	%g1, [%g0]ASI_ITLB_IN
	flush	%o1				! Flush required for I-MMU
3:
	retl
	  wrpr	%g0, %o3, %pstate		! Enable interrupts
	SET_SIZE(sfmmu_itlb_ld_kva)

	/*
	 * Load an entry into the DTLB.
	 *
	 * Special handling is required for locked entries since there
	 * are some TLB slots that are reserved for the kernel but not
	 * always held locked.  We want to avoid loading locked TTEs
	 * into those slots since they could be displaced.
	 *
	 * %o0 = virtual address
	 * %o1 = address of TTE to be loaded
	 */
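	/*
	 * Rough outline of the code below (a sketch, not an authoritative
	 * restatement):
	 *
	 *	if the TTE is not locked:
	 *		set up tag access and load it via ASI_DTLB_IN
	 *		(hardware picks the slot; it may be displaced later);
	 *	else:
	 *		scan TLB 0 downward from index dtlb_resv_ttenum - 1
	 *		for an entry that is invalid or unlocked, and load
	 *		the TTE at that index;
	 *		panic if no such entry is found (on Cheetah and
	 *		later, index 0 is never used, per Erratum 34).
	 */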
	ENTRY_NP(sfmmu_dtlb_ld_kva)
	rdpr	%pstate, %o3
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
	srln	%o0, MMU_PAGESHIFT, %o0
	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset

	ldx	[%o1], %g1

	set	MMU_TAG_ACCESS, %o5

	set	cpu_impl_dual_pgsz, %o2
	ld	[%o2], %o2
	brz	%o2, 1f
	  nop

	sethi	%hi(ksfmmup), %o2
	ldx	[%o2 + %lo(ksfmmup)], %o2
	ldub    [%o2 + SFMMU_CEXT], %o2
        sll     %o2, TAGACCEXT_SHIFT, %o2

	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
	stxa	%o2,[%o4]ASI_DMMU
	membar	#Sync
1:
	andcc	%g1, TTE_LCK_INT, %g0		! Locked entries require
	bnz,pn	%icc, 2f			! special handling
	  sethi	%hi(dtlb_resv_ttenum), %g3
	stxa	%o0,[%o5]ASI_DMMU		! Load unlocked TTE
	stxa	%g1,[%g0]ASI_DTLB_IN		! via DTLB_IN
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
2:
#ifdef	CHEETAHPLUS_ERRATUM_34
	GET_CPU_IMPL(%g2)
#endif
	ld	[%g3 + %lo(dtlb_resv_ttenum)], %g3
	sll	%g3, 3, %g3			! First reserved idx in TLB 0
	sub	%g3, (1 << 3), %g3		! Decrement idx
	! Erratum 15 workaround due to ld [%g3 + %lo(dtlb_resv_ttenum)], %g3
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
3:
	ldxa	[%g3]ASI_DTLB_ACCESS, %g4	! Load TTE from TLB 0
	!
	! If this entry isn't valid, we'll choose to displace it (regardless
	! of the lock bit).
	!
	brgez,pn %g4, 4f			! TTE is >= 0 iff not valid
	  nop
	andcc	%g4, TTE_LCK_INT, %g0		! Check for lock bit
	bz,pn	%icc, 4f			! If unlocked, go displace
	  nop
	sub	%g3, (1 << 3), %g3		! Decrement idx
#ifdef	CHEETAHPLUS_ERRATUM_34
	!
	! If this is a Cheetah or derivative, we must work around Erratum 34
	! for the DTLB.  Erratum 34 states that under certain conditions,
	! a locked entry 0 TTE may be improperly displaced.  To avoid this,
	! we do not place a locked TTE in entry 0.
	!
	brgz	%g3, 3b
	  nop
	cmp	%g2, CHEETAH_IMPL
	bge,pt	%icc, 5f
	  nop
	brz	%g3, 3b
	 nop
#else	/* CHEETAHPLUS_ERRATUM_34 */
	brgez	%g3, 3b
	  nop
#endif	/* CHEETAHPLUS_ERRATUM_34 */
5:
	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
	call	panic				! found no unlocked TTE so
	  or	%o0, %lo(sfmmu_panic5), %o0	! give up.
4:
	stxa	%o0,[%o5]ASI_DMMU		! Setup tag access
#ifdef	OLYMPUS_SHARED_FTLB
	stxa	%g1,[%g0]ASI_DTLB_IN
#else
	stxa	%g1,[%g3]ASI_DTLB_ACCESS	! Displace entry at idx
#endif
	membar	#Sync
	retl
	  wrpr	%g0, %o3, %pstate		! enable interrupts
	SET_SIZE(sfmmu_dtlb_ld_kva)

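	/*
	 * Return the contents of the primary context register in %o0
	 * (page size bits included).
	 */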
	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	  ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

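	/*
	 * Return the current secondary context number in %o0; the page size
	 * bits are masked off with CTXREG_CTX_MASK.
	 */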
	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	set	CTXREG_CTX_MASK, %o1
	ldxa	[%o0]ASI_MMU_CTX, %o0
	retl
	  and	%o0, %o1, %o0
	SET_SIZE(sfmmu_getctx_sec)

	/*
	 * Set the secondary context register for this process.
	 * %o0 = page_size | context number for this process.
	 */
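	/*
	 * In outline (a sketch of the code below):
	 *
	 *	save %pstate; if interrupts are enabled, disable them;
	 *	write %o0 to the secondary context register and flush;
	 *	if (shctx_on && (%o0 & CTXREG_CTX_MASK) == INVALID_CONTEXT)
	 *		clear the shared context register and flush;
	 *	restore the caller's interrupt state.
	 */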
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	  wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */

1:
	mov	MMU_SCONTEXT, %o1

	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	flush	%o4
        sethi   %hi(shctx_on), %g3
        ld      [%g3 + %lo(shctx_on)], %g3
	brz     %g3, 2f
	  nop
	set	CTXREG_CTX_MASK, %o4
	and	%o0,%o4,%o1
	cmp	%o1, INVALID_CONTEXT
	bne,pn %icc, 2f
	  mov     MMU_SHARED_CONTEXT, %o1
        sethi   %hi(FLUSH_ADDR), %o4
        stxa    %g0, [%o1]ASI_MMU_CTX           /* set shared context reg. */
        flush   %o4

	/*
	 * If the routine was entered with interrupts enabled, re-enable them
	 * now; otherwise leave them disabled and return.
	 * %g1 - old intr state
	 */
2:	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 3f
	  wrpr	%g0, %g1, %pstate		/* enable interrupts */
3:	retl
	  nop
	SET_SIZE(sfmmu_setctx_sec)

	/*
	 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
	 * returns the detection value in %o0.
	 *
	 * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
	 *  - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
	 *  - FJ OPL Olympus-C and later (implementation codes less than SPITFIRE_IMPL)
	 *
	 */
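	/*
	 * Roughly equivalent C (a sketch of the detection logic below):
	 *
	 *	impl = cpu implementation code (GET_CPU_IMPL);
	 *	if (impl >= CHEETAH_PLUS_IMPL || impl < SPITFIRE_IMPL)
	 *		return (ktsb_phys = 1);
	 *	return (0);	(ktsb_phys is left unchanged in this case)
	 */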
	ENTRY_NP(sfmmu_setup_4lp)
	GET_CPU_IMPL(%o0);
	cmp	%o0, CHEETAH_PLUS_IMPL
	bge,pt	%icc, 4f
	  mov	1, %o1
	cmp	%o0, SPITFIRE_IMPL
	bge,a,pn %icc, 3f
	  clr	%o1
4:
	set	ktsb_phys, %o2
	st	%o1, [%o2]
3:	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)


	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 *
	 */
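	/*
	 * In outline (a sketch; the UTSB_PHYS and non-UTSB_PHYS variants
	 * below differ in detail):
	 *
	 *	if (sfmmup == ksfmmup)
	 *		return;		(kernel hat: nothing to do)
	 *	program the TSB base register(s) from the hat's tsbinfo list
	 *	(plus, for UTSB_PHYS, the scratchpad pseudo TSB base regs);
	 *	copy the hat fields the miss handlers need (uhatid, tteflags,
	 *	ismblkpa, and for UTSB_PHYS the region maps) into this CPU's
	 *	tsbmiss area.
	 */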
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
        PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
#endif /* DEBUG */

        sethi   %hi(ksfmmup), %o3
        ldx     [%o3 + %lo(ksfmmup)], %o3
        cmp     %o3, %o0
        be,pn   %xcc, 8f			! if kernel as, do nothing
          nop
        /*
         * We need to set up the TSB base register, tsbmiss
         * area, and load locked TTE(s) for the TSB.
         */
        ldx     [%o0 + SFMMU_TSB], %o1          ! %o1 = first tsbinfo
        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second tsbinfo

#ifdef UTSB_PHYS
        /*
         * UTSB_PHYS accesses user TSBs via physical addresses.  The first
         * TSB is in the MMU I/D TSB Base registers.  The 2nd, 3rd and
	 * 4th TSBs use designated ASI_SCRATCHPAD regs as pseudo TSB base regs.
	 */

        /* create/set first UTSBREG actually loaded into MMU_TSB  */
        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = first utsbreg
	LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register

        brz,a,pt  %g2, 2f
          mov   -1, %o2                         ! use -1 if no second TSB

        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = second utsbreg
2:
        SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

	/* make 3rd and 4th TSB */
	CPU_TSBMISS_AREA(%o4, %o3) 		! %o4 = tsbmiss area

        ldx     [%o0 + SFMMU_SCDP], %g2         ! %g2 = sfmmu_scd
        brz,pt  %g2, 3f
          mov   -1, %o2                         ! use -1 if no third TSB

        ldx     [%g2 + SCD_SFMMUP], %g3         ! %g3 = scdp->scd_sfmmup
        ldx     [%g3 + SFMMU_TSB], %o1          ! %o1 = first scd tsbinfo
        brz,pn %o1, 5f
          nop                                   ! panic if no third TSB

	/* make 3rd UTSBREG */
        MAKE_UTSBREG(%o1, %o2, %o3)             ! %o2 = third utsbreg
3:
        SET_UTSBREG(SCRATCHPAD_UTSBREG3, %o2, %o3)
	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR]

        brz,pt  %g2, 4f
          mov   -1, %o2                         ! use -1 if no 3rd or 4th TSB

        ldx     [%o1 + TSBINFO_NEXTPTR], %g2    ! %g2 = second scd tsbinfo
        brz,pt  %g2, 4f
          mov   -1, %o2                         ! use -1 if no 4th TSB

	/* make 4th UTSBREG */
        MAKE_UTSBREG(%g2, %o2, %o3)             ! %o2 = fourth utsbreg
4:
        SET_UTSBREG(SCRATCHPAD_UTSBREG4, %o2, %o3)
	stn	%o2, [%o4 + TSBMISS_TSBSCDPTR4M]
	ba,pt	%icc, 6f
	  mov	%o4, %o2			! %o2 = tsbmiss area
5:
        sethi   %hi(panicstr), %g1              ! panic if no 3rd TSB
        ldx     [%g1 + %lo(panicstr)], %g1
        tst     %g1

        bnz,pn  %xcc, 8f
          nop

        sethi   %hi(sfmmu_panic10), %o0
        call    panic
          or     %o0, %lo(sfmmu_panic10), %o0

#else /* UTSB_PHYS */

        brz,pt  %g2, 4f
          nop
        /*
         * We have a second TSB for this process, so we need to
         * encode data for both the first and second TSB in our single
         * TSB base register.  See hat_sfmmu.h for details on what bits
         * correspond to which TSB.
         * We also need to load a locked TTE into the TLB for the second TSB
         * in this case.
         */
        MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
        ! %o2 = tsbreg
        sethi   %hi(utsb4m_dtlb_ttenum), %o3
        sethi   %hi(utsb4m_vabase), %o4
        ld      [%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
        ldx     [%o4 + %lo(utsb4m_vabase)], %o4 ! %o4 = TLB tag for sec TSB
        sll     %o3, DTACC_SHIFT, %o3           ! %o3 = sec TSB TLB index
        RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd)       ! or-in bits of TSB VA
        LOAD_TSBTTE(%g2, %o3, %o4, %g3)         ! load sec TSB locked TTE
        sethi   %hi(utsb_vabase), %g3
        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
        ba,pt   %xcc, 5f
          nop

4:      sethi   %hi(utsb_vabase), %g3
        ldx     [%g3 + %lo(utsb_vabase)], %g3   ! %g3 = TLB tag for first TSB
        MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st)     ! %o2 = tsbreg

5:      LOAD_TSBREG(%o2, %o3, %o4)              ! write TSB base register

        /*
         * Load the TTE for the first TSB at the appropriate location in
         * the TLB
         */
        sethi   %hi(utsb_dtlb_ttenum), %o2
        ld      [%o2 + %lo(utsb_dtlb_ttenum)], %o2
        sll     %o2, DTACC_SHIFT, %o2           ! %o2 = first TSB TLB index
        RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st)       ! or-in bits of TSB VA
        LOAD_TSBTTE(%o1, %o2, %g3, %o4)         ! load first TSB locked TTE
	CPU_TSBMISS_AREA(%o2, %o3)
#endif /* UTSB_PHYS */
6:
	ldx     [%o0 + SFMMU_ISMBLKPA], %o1     ! copy members of sfmmu
	              				! we need to access from
        stx     %o1, [%o2 + TSBMISS_ISMBLKPA]   ! sfmmu_tsb_miss into the
        ldub    [%o0 + SFMMU_TTEFLAGS], %o3     ! per-CPU tsbmiss area.
        stx     %o0, [%o2 + TSBMISS_UHATID]
        stub    %o3, [%o2 + TSBMISS_UTTEFLAGS]
#ifdef UTSB_PHYS
        ldx     [%o0 + SFMMU_SRDP], %o1
        ldub    [%o0 + SFMMU_RTTEFLAGS], %o4
        stub    %o4,  [%o2 + TSBMISS_URTTEFLAGS]
        stx     %o1, [%o2 +  TSBMISS_SHARED_UHATID]
        brz,pn  %o1, 8f				! check for sfmmu_srdp
          add   %o0, SFMMU_HMERMAP, %o1
        add     %o2, TSBMISS_SHMERMAP, %o2
        mov     SFMMU_HMERGNMAP_WORDS, %o3
                                                ! set tsbmiss shmermap
        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)

	ldx     [%o0 + SFMMU_SCDP], %o4         ! %o4 = sfmmu_scd
        CPU_TSBMISS_AREA(%o2, %o3)              ! %o2 = tsbmiss area
        mov     SFMMU_HMERGNMAP_WORDS, %o3
        brnz,pt %o4, 7f                       ! check for sfmmu_scdp else
          add   %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
        ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
	ba 8f
	  nop
7:
        add     %o4, SCD_HMERMAP, %o1
        SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
#endif /* UTSB_PHYS */

8:
	retl
          nop
        SET_SIZE(sfmmu_load_mmustate)

/*
 * Invalidate all of the entries within the TSB, by setting the inv bit
 * in the tte_tag field of each tsbe.
 *
 * We take advantage of the fact that the TSBs are page aligned and a
 * multiple of PAGESIZE to use ASI_BLK_INIT_xxx ASI.
 *
 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 * (in short, we set all bits in the upper word of the tag, and we give the
 * invalid bit precedence over other tag bits in both places).
 */
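
/*
 * Conceptually (a sketch; the loop below does this 256 bytes at a time with
 * block-initializing stores from the floating point registers):
 *
 *	for (each 16-byte tsbe in the TSB)
 *		tsbe->tte_tag = all ones (invalid), tsbe->tte = 0;
 */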

#define	VIS_BLOCKSIZE	64

	ENTRY(sfmmu_inv_tsb_fast)

	! Get space for aligned block of saved fp regs.
	save	%sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

	! kpreempt_disable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	inc	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]

	! See if fpu was in use.  If it was, we need to save off the
	! floating point registers to the stack.
	rd	%fprs, %l0			! %l0 = cached copy of fprs
	btst	FPRS_FEF, %l0
	bz,pt	%icc, 4f
	  nop

	! save in-use fpregs on stack
	membar	#Sync				! make sure tranx to fp regs
						! have completed
	add	%fp, STACK_BIAS - 65, %l1	! get stack frame for fp regs
	and	%l1, -VIS_BLOCKSIZE, %l1	! block align frame
	stda	%d0, [%l1]ASI_BLK_P		! %l1 = addr of saved fp regs

	! enable fp
4:	membar	#StoreStore|#StoreLoad|#LoadStore
	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_P, %asi

	! load up FP registers with invalid TSB tag.
	fone	%d0			! ones in tag
	fzero	%d2			! zeros in TTE
	fone	%d4			! ones in tag
	fzero	%d6			! zeros in TTE
	fone	%d8			! ones in tag
	fzero	%d10			! zeros in TTE
	fone	%d12			! ones in tag
	fzero	%d14			! zeros in TTE
	ba,pt	%xcc, .sfmmu_inv_doblock
	  mov	(4*VIS_BLOCKSIZE), %i4	! we do 4 stda's each loop below

.sfmmu_inv_blkstart:
      ! stda	%d0, [%i0+192]%asi  ! in dly slot of branch that got us here
	stda	%d0, [%i0+128]%asi
	stda	%d0, [%i0+64]%asi
	stda	%d0, [%i0]%asi

	add	%i0, %i4, %i0
	sub	%i1, %i4, %i1

.sfmmu_inv_doblock:
	cmp	%i1, (4*VIS_BLOCKSIZE)	! check for completion
	bgeu,a	%icc, .sfmmu_inv_blkstart
	  stda	%d0, [%i0+192]%asi

.sfmmu_inv_finish:
	membar	#Sync
	btst	FPRS_FEF, %l0		! saved from above
	bz,a	.sfmmu_inv_finished
	  wr	%l0, 0, %fprs		! restore fprs

	! restore fpregs from stack
	ldda    [%l1]ASI_BLK_P, %d0
	membar	#Sync
	wr	%l0, 0, %fprs		! restore fprs

.sfmmu_inv_finished:
	! kpreempt_enable();
	ldsb	[THREAD_REG + T_PREEMPT], %l3
	dec	%l3
	stb	%l3, [THREAD_REG + T_PREEMPT]
	ret
	  restore
	SET_SIZE(sfmmu_inv_tsb_fast)

/*
 * Prefetch "struct tsbe" while walking TSBs.
 * Prefetch 7 cache lines (448 = 7 * 64 bytes) ahead of the current address.
 * #n_reads is used because #one_read applies only to floating point reads,
 * and we are not doing floating point reads here.  This has the negative
 * side effect of polluting the ecache.
 */
	ENTRY(prefetch_tsbe_read)
	retl
	  prefetch	[%o0+448], #n_reads
	SET_SIZE(prefetch_tsbe_read)

/* Prefetch the tsbe that we are about to write */
	ENTRY(prefetch_tsbe_write)
	retl
	  prefetch	[%o0], #n_writes
	SET_SIZE(prefetch_tsbe_write)
