/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 Joyent, Inc.  All rights reserved.
 */

#include <sys/mmu.h>
#include <sys/systm.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/vtrace.h>
#include <sys/prsystm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/simulate.h>
#include <sys/ftrace.h>
#include <sys/ontrap.h>
#include <sys/kcpc.h>
#include <sys/kobj.h>
#include <sys/procfs.h>
#include <sys/sun4asi.h>
#include <sys/sdt.h>
#include <sys/fpras.h>
#include <sys/contract/process_impl.h>

#ifdef  TRAPTRACE
#include <sys/traptrace.h>
#endif

int tudebug = 0;
static int tudebugbpt = 0;
static int tudebugfpe = 0;

static int alignfaults = 0;

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;
#else
#define	lodebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */


int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
#pragma weak vis1_partial_support

void showregs(unsigned, struct regs *, caddr_t, uint_t);
#pragma weak showregs

void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void trap_async_berr_bto(int, struct regs *);
#pragma weak trap_async_berr_bto

static enum seg_rw get_accesstype(struct regs *);
static int nfload(struct regs *, int *);
static int swap_nc(struct regs *, int);
static int ldstub_nc(struct regs *, int);
void	trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
void	trap_rtt(void);

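/*
 * Record the trap state in curthread->t_panic_trap for the debugger,
 * freeze the trap trace ring (if trap tracing is configured), and panic
 * the system.  This routine never returns.
 */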
static void __NORETURN
die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
{
	struct panic_trap_info ti;

#ifdef TRAPTRACE
	TRAPTRACE_FREEZE;
#endif

	ti.trap_regs = rp;
	ti.trap_type = type;
	ti.trap_addr = addr;
	ti.trap_mmu_fsr = mmu_fsr;

	curthread->t_panic_trap = &ti;

	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
		    "occurred in module \"%s\" due to %s",
		    type, (void *)rp, (void *)addr, mmu_fsr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
		    type, (void *)rp, (void *)addr, mmu_fsr);
	}
}

#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
int	ill_calls;
#endif

/*
 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
 * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
 */
#define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)

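/*
 * The macros below pick apart the opcode fields (op and op3) of the
 * 32-bit instruction image fetched from the trapping pc, so that the
 * trap handler can recognize the few instructions it must special-case
 * without a full disassembler.
 */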
#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_STORE(i)	(((i) >> 21) & 1)

/*
 * Called from the trap handler when a processor trap occurs.  For data
 * faults, "addr" is really the MMU tag access register value, which
 * contains both the faulting context and the virtual address.
 */
/*VARARGS2*/
void
trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	struct machpcb *mpcb = NULL;
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int stepped = 0;
	greg_t oldpc;
	int mstate;
	char *badaddr;
	faultcode_t res;
	enum fault_type fault_type;
	enum seg_rw rw;
	uintptr_t lofault;
	label_t *onfault;
	int instr;
	int iskernel;
	int watchcode;
	int watchpage;
	extern faultcode_t pagefault(caddr_t, enum fault_type,
	    enum seg_rw, int);
#ifdef sun4v
	extern boolean_t tick_stick_emulation_active;
#endif	/* sun4v */

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

#ifdef SF_ERRATA_23 /* call causes illegal-insn */
	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
	    (type == T_UNIMP_INSTR));
#else
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
#endif /* SF_ERRATA_23 */

	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		type |= T_USER;
		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
		    lwp->lwp_regs == rp);
		mpcb = lwptompcb(lwp);
		switch (type) {
		case T_WIN_OVERFLOW + T_USER:
		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_DATA_MMU_MISS + T_USER:
			mstate = LMS_DFAULT;
			break;
		case T_INSTR_MMU_MISS + T_USER:
			mstate = LMS_TFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, (char)mstate);
		mstate = new_mstate(curthread, mstate);
		siginfo.si_signo = 0;
		stepped =
		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
		    ((oldpc = rp->r_pc), prundostep()) &&
		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
		/* this assignment must not precede call to prundostep() */
		oldpc = rp->r_pc;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_trap_handler_enter:type %x", type);

#ifdef	F_DEFERRED
	/*
	 * Take any pending floating point exceptions now.
	 * If the floating point unit has an exception to handle,
	 * just return to user-level to let the signal handler run.
	 * The instruction that got us to trap() will be reexecuted on
	 * return from the signal handler and we will trap to here again.
	 * This is necessary to disambiguate simultaneous traps which
	 * happen when a floating-point exception is pending and a
	 * machine fault is incurred.
	 */
	if (type & T_USER) {
		/*
		 * FP_TRAPPED is set only by sendsig() when it copies
		 * out the floating-point queue for the signal handler.
		 * It is set there so we can test it here and in syscall().
		 */
		mpcb->mpcb_flags &= ~FP_TRAPPED;
		syncfpu();
		if (mpcb->mpcb_flags & FP_TRAPPED) {
			/*
			 * trap() has been called recursively and may
			 * have stopped the process, so do single step
			 * support for /proc.
			 */
			mpcb->mpcb_flags &= ~FP_TRAPPED;
			goto out;
		}
	}
#endif
	switch (type) {
	case T_DATA_MMU_MISS:
	case T_INSTR_MMU_MISS + T_USER:
	case T_DATA_MMU_MISS + T_USER:
	case T_DATA_PROT + T_USER:
	case T_AST + T_USER:
	case T_SYS_RTT_PAGE + T_USER:
	case T_FLUSH_PCB + T_USER:
	case T_FLUSHW + T_USER:
		break;

	default:
		FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
		    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
		break;
	}

	switch (type) {

	default:
		/*
		 * Check for user software trap.
		 */
		if (type & T_USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0, 0);
			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code  = ILL_ILLTRP;
				siginfo.si_addr  = (caddr_t)rp->r_pc;
				siginfo.si_trapno = type &~ T_USER;
				fault = FLTILL;
				break;
			}
		}
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_ALIGNMENT:	/* supv alignment error */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, mmu_fsr);
		/*NOTREACHED*/

	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_DATA_EXCEPTION:		/* system data access exception */
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_RANGE:
			/*
			 * This happens when we attempt to dereference an
			 * address in the address hole.  If t_ontrap is set,
			 * then break and fall through to T_DATA_MMU_MISS /
			 * T_DATA_PROT case below.  If lofault is set, then
			 * honour it (perhaps the user gave us a bogus
			 * address in the hole to copyin from or copyout to?)
			 */

			if (curthread->t_ontrap != NULL)
				break;

			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_PRIV:
			/*
			 * This can happen if we access ASI_USER from a kernel
			 * thread.  To support pxfs, we need to honor lofault if
			 * we're doing a copyin/copyout from a kernel thread.
			 */

			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		default:
			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_NFO:
			break;
		}
		/* fall into ... */

	case T_DATA_MMU_MISS:		/* system data mmu miss */
	case T_DATA_PROT:		/* system data protection fault */
		if (nfload(rp, &instr))
			goto cleanup;

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and return from the trap to the trampoline.
		 */
		if (curthread->t_ontrap != NULL) {
			on_trap_data_t *otp = curthread->t_ontrap;

			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
			    "C_trap_handler_exit");
			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");

			if (otp->ot_prot & OT_DATA_ACCESS) {
				otp->ot_trap |= OT_DATA_ACCESS;
				rp->r_pc = otp->ot_trampoline;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
		}
		lofault = curthread->t_lofault;
		onfault = curthread->t_onfault;
		curthread->t_lofault = 0;

		mstate = new_mstate(curthread, LMS_KFAULT);

		switch (type) {
		case T_DATA_PROT:
			fault_type = F_PROT;
			rw = S_WRITE;
			break;
		case T_INSTR_MMU_MISS:
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;
		case T_DATA_MMU_MISS:
		case T_DATA_EXCEPTION:
			/*
			 * The hardware doesn't update the sfsr on mmu
			 * misses so it is not easy to find out whether
			 * the access was a read or a write so we need
			 * to decode the actual instruction.
			 */
			fault_type = F_INVAL;
			rw = get_accesstype(rp);
			break;
		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}
		/*
		 * We determine if access was done to kernel or user
		 * address space.  The addr passed into trap is really the
		 * tag access register.
		 */
		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);

		res = pagefault(addr, fault_type, rw, iskernel);
		if (!iskernel && res == FC_NOMAP &&
		    addr < p->p_usrstack && grow(addr))
			res = 0;

		(void) new_mstate(curthread, mstate);

		/*
		 * Restore lofault and onfault.  If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		curthread->t_lofault = lofault;
		curthread->t_onfault = onfault;

		if (res == 0)
			goto cleanup;

		if (IS_PREFETCH(instr)) {
			/* skip prefetch instructions in kernel-land */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
		}

		if ((lofault == 0 || lodebug) &&
		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
			addr = badaddr;
		if (lofault == 0)
			(void) die(type, rp, addr, 0);
		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr, 0);
			traceback((caddr_t)rp->r_sp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_g1 = res;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = curthread->t_lofault + 4;
		goto cleanup;

	case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = (caddr_t)rp->r_pc;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
		    SEGV_ACCERR : SEGV_MAPERR;
		fault = FLTBOUNDS;
		break;

	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
	case T_DATA_PROT + T_USER:	/* user data protection fault */
		switch (type) {
		case T_INSTR_MMU_MISS + T_USER:
			addr = (caddr_t)rp->r_pc;
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;

		case T_DATA_MMU_MISS + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			/*
			 * The hardware doesn't update the sfsr on mmu misses
			 * so it is not easy to find out whether the access
			 * was a read or a write so we need to decode the
			 * actual instruction.  XXX BUGLY HW
			 */
			rw = get_accesstype(rp);
			break;

		case T_DATA_PROT + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_PROT;
			rw = S_WRITE;
			break;

		case T_WIN_OVERFLOW + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_WRITE;
			break;

		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_READ;
			break;

		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}

		/*
		 * If we are single stepping do not call pagefault
		 */
		if (stepped) {
			res = FC_NOMAP;
		} else {
			caddr_t vaddr = addr;
			size_t sz;
			int ta;

			ASSERT(!(curthread->t_flag & T_WATCHPT));
			watchpage = (pr_watch_active(p) &&
			    type != T_WIN_OVERFLOW + T_USER &&
			    type != T_WIN_UNDERFLOW + T_USER &&
			    type != T_SYS_RTT_PAGE + T_USER &&
			    pr_is_watchpage(addr, rw));

			if (!watchpage ||
			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
				/* EMPTY */;
			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
			    sz, NULL, rw)) != 0) {
				if (ta) {
					do_watch_step(vaddr, sz, rw,
					    watchcode, rp->r_pc);
					fault_type = F_INVAL;
				} else {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGTRAP;
					siginfo.si_code = watchcode;
					siginfo.si_addr = vaddr;
					siginfo.si_trapafter = 0;
					siginfo.si_pc = (caddr_t)rp->r_pc;
					fault = FLTWATCH;
					break;
				}
			} else {
				if (rw != S_EXEC &&
				    pr_watch_emul(rp, vaddr, rw))
					goto out;
				do_watch_step(vaddr, sz, rw, 0, 0);
				fault_type = F_INVAL;
			}

			if (pr_watch_active(p) &&
			    (type == T_WIN_OVERFLOW + T_USER ||
			    type == T_WIN_UNDERFLOW + T_USER ||
			    type == T_SYS_RTT_PAGE + T_USER)) {
				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
				if (copy_return_window(dotwo))
					goto out;
				fault_type = F_INVAL;
			}

			res = pagefault(addr, fault_type, rw, 0);

			/*
			 * If pagefault() succeeded, ok.
			 * Otherwise, try to grow the stack automatically.
			 */
			if (res == 0 ||
			    (res == FC_NOMAP &&
			    type != T_INSTR_MMU_MISS + T_USER &&
			    addr < p->p_usrstack &&
			    grow(addr))) {
				int ismem = prismember(&p->p_fltmask, FLTPAGE);

				/*
				 * instr_size() is used to get the exact
				 * address of the fault, instead of the
				 * page of the fault.  Unfortunately it is
				 * very slow, and this is an important
				 * code path.  Don't call it unless
				 * correctness is needed, i.e. if FLTPAGE
				 * is set, or we're profiling.
				 */

				if (curthread->t_rprof != NULL || ismem)
					(void) instr_size(rp, &addr, rw);

				lwp->lwp_lastfault = FLTPAGE;
				lwp->lwp_lastfaddr = addr;

				if (ismem) {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_addr = addr;
					(void) stop_on_fault(FLTPAGE, &siginfo);
				}
				goto out;
			}

			if (type != (T_INSTR_MMU_MISS + T_USER)) {
				/*
				 * check for non-faulting loads, also
				 * fetch the instruction to check for
				 * flush
				 */
				if (nfload(rp, &instr))
					goto out;

				/* skip userland prefetch instructions */
				if (IS_PREFETCH(instr)) {
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}

				/*
				 * check if the instruction was a
				 * flush.  ABI allows users to specify
				 * an illegal address on the flush
				 * instruction so we simply return in
				 * this case.
				 *
				 * NB: the hardware should set a bit
				 * indicating this trap was caused by
				 * a flush instruction.  Instruction
				 * decoding is bugly!
				 */
				if (IS_FLUSH(instr)) {
					/* skip the flush instruction */
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}
			} else if (res == FC_PROT) {
				report_stack_exec(p, addr);
			}

			if (tudebug)
				showregs(type, rp, addr, 0);
		}

		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 */
		(void) instr_size(rp, &addr, rw);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		if (FC_CODE(res) == FC_OBJERR) {
			siginfo.si_errno = FC_ERRNO(res);
			if (siginfo.si_errno != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
		} else { /* FC_NOMAP || FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = (res == FC_NOMAP) ?
			    SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
		}
		/*
		 * If this is the culmination of a single-step,
		 * reset the addr, code, signal and fault to
		 * indicate a hardware trace trap.
		 */
		if (stepped) {
			pcb_t *pcb = &lwp->lwp_pcb;

			siginfo.si_signo = 0;
			fault = 0;
			if (pcb->pcb_step == STEP_WASACTIVE) {
				pcb->pcb_step = STEP_NONE;
				pcb->pcb_tracepc = NULL;
				oldpc = rp->r_pc - 4;
			}
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in
			 * effect, give precedence to WATCH_STEP.
			 * One or the other must be set at this point.
			 */
			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
			if ((fault = undo_watch_step(&siginfo)) == 0 &&
			    (pcb->pcb_flags & NORMAL_STEP)) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */

		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, &instr))
			goto out;
		if (IS_FLUSH(instr)) {
			/* skip the flush instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/
		}
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_ATOMIC_NC:
			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
				/* skip the atomic */
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			/* FALLTHROUGH */
		case FT_PRIV:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		case FT_SPEC_LD:
		case FT_ILL_ALT:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLADR;
			fault = FLTILL;
			break;
		default:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
	case T_ALIGNMENT + T_USER:	/* user alignment error */
		if (tudebug)
			showregs(type, rp, addr, 0);
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 */
		alignfaults++;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		bzero(&siginfo, sizeof (siginfo));
		if (type == T_SYS_RTT_ALIGN + T_USER) {
			if (nfload(rp, NULL))
				goto out;
			/*
			 * Can't do unaligned stack access
			 */
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = addr;
			fault = FLTACCESS;
			break;
		}

		/*
		 * Try to fix alignment before non-faulting load test.
		 */
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);

		bzero(&siginfo, sizeof (siginfo));
#ifdef	sun4v
		/*
		 * If this instruction fault is a non-privileged %tick
		 * or %stick trap, and %tick/%stick user emulation is
		 * enabled as a result of an OS suspend, then simulate
		 * the register read. We rely on simulate_rdtick to fail
		 * if the instruction is not a %tick or %stick read,
		 * causing us to fall through to the normal privileged
		 * instruction handling.
		 */
		if (tick_stick_emulation_active &&
		    (X_FAULT_TYPE(mmu_fsr) == FT_NEW_PRVACT) &&
		    simulate_rdtick(rp) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
#endif
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_PRVOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
		if (fpras_implemented) {
			/*
			 * Call fpras_chktrap indicating that
			 * we've come from a trap handler and pass
			 * the regs.  That function may choose to panic
			 * (in which case it won't return) or it may
			 * determine that a reboot is desired.  In the
			 * latter case it must alter pc/npc to skip
			 * the illegal instruction and continue at
			 * a controlled address.
			 */
			if (&fpras_chktrap) {
				if (fpras_chktrap(rp))
					goto cleanup;
			}
		}
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
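		/*
		 * On CPUs with these errata a "call" can be delivered as
		 * an illegal-instruction trap.  Emulate the call by hand:
		 * save the return address in %o7, then redirect pc/npc to
		 * the call target.
		 */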
		instr = *(int *)rp->r_pc;
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto cleanup;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		/*
		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
		 */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
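		/*
		 * As in the T_UNIMP_INSTR case above: emulate a "call"
		 * that was erroneously delivered as an illegal-instruction
		 * trap, this time for a user instruction.
		 */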
		instr = fetch_user_instr((caddr_t)rp->r_pc);
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto out;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		/*
		 * Try to simulate the instruction.
		 */
		switch (simulate_unimp(rp, &badaddr)) {
		case SIMU_RETRY:
			goto out;	/* regs are already set up */
			/*NOTREACHED*/

		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_DZERO:
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = FPE_INTDIV;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTIZDIV;
			break;

		case SIMU_UNALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			op3 = (instr >> 19) & 0x3F;
			if (IS_FLOAT(instr) &&
			    ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA)))
				siginfo.si_code = ILL_ILLADR;
			else
				siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD + T_USER:
	case T_UNIMP_STD + T_USER:
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		switch (simulate_lddstd(rp, &badaddr)) {
		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_UNALIGN:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD:
	case T_UNIMP_STD:
		if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
			/*NOTREACHED*/
		}
		/*
		 * A third party driver executed an {LDD,STD,LDDA,STDA}
		 * that we couldn't simulate.
		 */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_IDIV0 + T_USER:		/* integer divide by zero */
	case T_DIV0 + T_USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code  = FPE_INTOVF;
		siginfo.si_addr  = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGEMT;
		siginfo.si_code = EMT_TAGOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
	case T_FLUSHW + T_USER:		/* finish user window flush */
		/*
		 * This trap is entered from sys_rtt in locore.s when,
		 * upon return to user, it is found that there are user
		 * windows in pcb_wbuf.  This happens because they could
		 * not be saved on the user stack, either because it
		 * wasn't resident or because it was misaligned.
		 */
	{
		int error;
		caddr_t sp;

		error = flush_user_windows_to_stack(&sp);
		/*
		 * Possible errors:
		 *	error copying out
		 *	unaligned stack pointer
		 * The first is given to us as the return value
		 * from flush_user_windows_to_stack().  The second
		 * results in residual windows in the pcb.
		 */
		if (error != 0) {
			/*
			 * EINTR comes from a signal during copyout;
			 * we should not post another signal.
			 */
			if (error != EINTR) {
				/*
				 * Zap the process with a SIGSEGV - process
				 * may be managing its own stack growth by
				 * taking SIGSEGVs on a different signal stack.
				 */
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGSEGV;
				siginfo.si_code  = SEGV_MAPERR;
				siginfo.si_addr  = sp;
				fault = FLTBOUNDS;
			}
			break;
		} else if (mpcb->mpcb_wbcnt) {
			bzero(&siginfo, sizeof (siginfo));
			siginfo.si_signo = SIGILL;
			siginfo.si_code  = ILL_BADSTK;
			siginfo.si_addr  = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
	}

		/*
		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
		 * window trap -- which is implemented by executing the
		 * flushw instruction. The flushw can trap if any of the
		 * stack pages are not writable for whatever reason. In this
		 * case only, we advance the pc to the next instruction so
		 * that the user thread doesn't needlessly execute the trap
		 * again. Normally this wouldn't be a problem -- we'll
		 * usually only end up here if this is the first touch to a
		 * stack page -- since the second execution won't trap, but
		 * if there's a watchpoint on the stack page the user thread
		 * would spin, continuously executing the trap instruction.
		 */
		if (type == T_FLUSHW + T_USER) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		}
		goto out;

	case T_AST + T_USER:		/* profiling or resched pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0, 0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				/* for trap_cleanup(), below */
				oldpc = rp->r_pc - 4;
				fault = FLTCPCOVF;
			}
		}

		/*
		 * The CPC_OVERFLOW check above may already have populated
		 * siginfo and set fault, so the checks below must not
		 * touch these and the functions they call must use
		 * trapsig() directly.
		 */

		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			trap_async_hwerr();
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
			trap_async_berr_bto(ASYNC_BERR, rp);
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
			trap_async_berr_bto(ASYNC_BTO, rp);
		}

		break;
	}

	if (fault) {
		/* We took a fault so abort single step. */
		lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
	}
	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);

out:	/* We can't get here from a system trap */
	ASSERT(type & T_USER);
	trap_rtt();
	(void) new_mstate(curthread, mstate);
	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		tnf_microstate, state, LMS_USER);

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & T_USER));

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

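/*
 * Common completion code for user-level traps: record the fault for
 * real-time profiling, let a debugger stop the lwp if it has expressed
 * interest in the fault, post the pending signal (if any), and then
 * handle AST work (rebinding, kaio cleanup, hold requests, and pending
 * signals) before the return to userland.
 */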
void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * For kaio requests on the per-process poll queue,
		 * aiop->aio_pollq, the AIO_POLL bit is set, and the kernel
		 * should copy out their result_t to user memory.  By copying
		 * out the result_t, the user can poll on memory, waiting
		 * for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0, 0);
			curthread->t_sig_check = 1;
		}
	}
}

/*
 * Called from fp_traps when a floating point trap occurs.
 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
 * because mmu_fsr (now changed to code) is always 0.
 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
 * because the simulator only simulates multiply and divide instructions,
 * which would not cause floating point traps in the first place.
 * XXX - Supervisor mode floating point traps?
 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;
	char *badaddr;
	kfpu_t *fp;
	struct _fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;

	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 *	came here directly from _fp_ieee_exception,
		 *	which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct _fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28. Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER: /* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER: /* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:		/* user alignment error */
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:		/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}

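/*
 * Common return-from-trap processing for user traps: restore any
 * register state a debugger modified, return the thread to its
 * user-level priority, honor preemption and /proc stop requests, and
 * arm single-stepping if it was requested.
 */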
void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
		preempt();
	prunstop();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

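/*
 * The macros below recognize the load-alternate-space opcode groups,
 * determine whether the ASI comes from the instruction's immediate field
 * or from the %asi register (the i bit), and match the non-faulting ASIs
 * (ASI_PNF/ASI_SNF and their little-endian variants).
 */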
#define	IS_LDASI(o)	\
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 ||	\
	(o) == (uint32_t)0xC1800000)
#define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
#define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
#define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)

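/*
 * A load through a non-faulting ASI trapped.  Such a load from an
 * unmapped address must return zero rather than fault, so if there is a
 * hole in the address space at the target address, map a non-fault
 * segment (segnf) over it, write zero to the destination register(s),
 * and skip the instruction.  Returns 1 if the trap was handled here,
 * 0 if the trapping instruction was not a non-faulting load.
 */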
static int
nfload(struct regs *rp, int *instrp)
{
	uint_t	instr, asi, op3, rd;
	size_t	len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}

kmutex_t atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
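/*
 * Note that atomic_nc_mutex only makes the emulated sequences below
 * atomic with respect to one another; it cannot make them atomic with
 * respect to hardware atomics or ordinary stores to the same address.
 */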
static int
swap_nc(struct regs *rp, int instr)
{
	uint64_t rdata, mdata;
	caddr_t addr, badaddr;
	uint_t tmp, rd;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	if (getreg(rp, rd, &rdata, &badaddr))
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword32(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword32(addr, (uint32_t)rdata) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

static int
ldstub_nc(struct regs *rp, int instr)
{
	uint64_t mdata;
	caddr_t addr, badaddr;
	uint_t rd;
	uint8_t tmp;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword8(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword8(addr, (uint8_t)0xff) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

/*
 * This function helps instr_size() determine the operand size.
 * It is called for the extended ldda/stda asi's.
 */
int
extended_asi_size(int asi)
{
	switch (asi) {
	case ASI_PST8_P:
	case ASI_PST8_S:
	case ASI_PST16_P:
	case ASI_PST16_S:
	case ASI_PST32_P:
	case ASI_PST32_S:
	case ASI_PST8_PL:
	case ASI_PST8_SL:
	case ASI_PST16_PL:
	case ASI_PST16_SL:
	case ASI_PST32_PL:
	case ASI_PST32_SL:
		return (8);
	case ASI_FL8_P:
	case ASI_FL8_S:
	case ASI_FL8_PL:
	case ASI_FL8_SL:
		return (1);
	case ASI_FL16_P:
	case ASI_FL16_S:
	case ASI_FL16_PL:
	case ASI_FL16_SL:
		return (2);
	case ASI_BLK_P:
	case ASI_BLK_S:
	case ASI_BLK_PL:
	case ASI_BLK_SL:
	case ASI_BLK_COMMIT_P:
	case ASI_BLK_COMMIT_S:
		return (64);
	}

	return (0);
}

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {	/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
}	kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling
 *	preempt the running kernel thread.
 */
void
kpreempt(int asyncspl)
{
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * Either this is a privileged thread (idle, panic,
			 * interrupt) or the thread will check for preemption
			 * itself when t_preempt is lowered.  We must
			 * specifically handle the case where the thread is
			 * in the middle of swtch (resume has been called)
			 * with t_preempt set [the idle thread, or a thread
			 * already in kpreempt] and a high priority thread
			 * is available on the local dispatch queue.  In that
			 * case the resumed thread needs to take a trap so
			 * that it can call kpreempt; we achieve this by
			 * using siron().
			 * How do we detect this condition?  The idle thread
			 * is running and is in the midst of resume when
			 * curthread->t_pri == -1 and CPU->cpu_dispthread !=
			 * CPU->cpu_thread.  We need to ensure this happens
			 * only at high pil: resume is called at high pil,
			 * and only resume_from_idle changes the pil.
			 */
			if (curthread->t_pri < 0) {
				kpreempt_cnts.kpc_idle++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			} else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else {
				kpreempt_cnts.kpc_blocked++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			}
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}

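/*
 * Decode the instruction at the trapping pc to classify the access for
 * pagefault(): a flush is S_OTHER, a store is S_WRITE, and anything
 * else is treated as S_READ.
 */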
static enum seg_rw
get_accesstype(struct regs *rp)
{
	uint32_t instr;

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(uint32_t *)rp->r_pc;

	if (IS_FLUSH(instr))
		return (S_OTHER);

	if (IS_STORE(instr))
		return (S_WRITE);
	else
		return (S_READ);
}

/*
 * Handle an asynchronous hardware error.
 * The policy is currently to send a hardware error contract event to
 * the process's process contract and to kill the process.  Eventually
 * we may want to instead send a special signal whose default
 * disposition is to generate the contract event.
 */
void
trap_async_hwerr(void)
{
	k_siginfo_t si;
	proc_t *p = ttoproc(curthread);
	extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);

	errorq_drain(ue_queue); /* flush pending async error messages */

	print_msg_hwerr(p->p_ct_process->conp_contract.ct_id, p);

	contract_process_hwerr(p->p_ct_process, p);

	bzero(&si, sizeof (k_siginfo_t));
	si.si_signo = SIGKILL;
	si.si_code = SI_NOINFO;
	trapsig(&si, 1);
}

/*
 * Handle a bus error or bus timeout for a user process by sending SIGBUS.
 * The type is either ASYNC_BERR or ASYNC_BTO.
 */
void
trap_async_berr_bto(int type, struct regs *rp)
{
	k_siginfo_t si;

	errorq_drain(ue_queue); /* flush pending async error messages */
	bzero(&si, sizeof (k_siginfo_t));

	si.si_signo = SIGBUS;
	si.si_code = (type == ASYNC_BERR ? BUS_OBJERR : BUS_ADRERR);
	si.si_addr = (caddr_t)rp->r_pc; /* AFAR unavailable - future RFE */
	si.si_errno = ENXIO;

	trapsig(&si, 1);
}