/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/brand.h>
#include <sys/ucontext.h>
#include <sys/asm_linkage.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/archsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/debug.h>
#include <sys/model.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/privregs.h>
#include <sys/schedctl.h>


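/*
 * Kernel support for getcontext(2)/setcontext(2) and the [gs]etustack
 * operations on SPARC.  libc's getcontext(3C) and setcontext(3C) ultimately
 * reach getsetcontext() below; a minimal user-level sketch of the interface
 * this file services (illustrative only):
 *
 *	ucontext_t uc;
 *	(void) getcontext(&uc);
 *	...			modify uc, e.g. uc.uc_sigmask
 *	(void) setcontext(&uc);	resumes where getcontext() returned
 */
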
/*
 * Save user context.
 */
void
savecontext(ucontext_t *ucp, const k_sigset_t *mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There are unused holes in the ucontext_t structure, zero-fill
	 * them so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_flags)[1] = 0;
	(&ucp->uc_stack.ss_flags)[1] = 0;
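	/*
	 * Each of the stores above clears the 32-bit pad word that follows
	 * the named int-sized field in the LP64 ucontext_t layout.
	 */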

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (ucontext_t *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == (uintptr_t)NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	getasrs(lwp, ucp->uc_mcontext.asrs);

	getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	getfpasrs(lwp, ucp->uc_mcontext.asrs);
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
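	/*
	 * No kernel-saved register windows accompany this context; the
	 * user's windows were already flushed to the user stack above.
	 */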
	ucp->uc_mcontext.gwins = (gwindows_t *)NULL;

	/*
	 * Save signal mask.
	 */
	sigktou(mask, &ucp->uc_sigmask);
}


void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	mcontext_t *mcp = &ucp->uc_mcontext;
	model_t model = lwp_getdatamodel(lwp);

	(void) flush_user_windows_to_stack(NULL);
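	/*
	 * If the pcb is holding extra register state for this LWP, restore
	 * it before we begin overwriting state from the ucontext.
	 */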
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		if (mcp->gwins != 0)
			setgwins(lwp, mcp->gwins);
		setgregs(lwp, mcp->gregs);
		if (model == DATAMODEL_LP64)
			setasrs(lwp, mcp->asrs);
		else
			xregs_setgregs(lwp, xregs_getptr(lwp, ucp));
	}

	if (ucp->uc_flags & UC_FPU) {
		fpregset_t *fp = &ucp->uc_mcontext.fpregs;

		setfpregs(lwp, fp);
		if (model == DATAMODEL_LP64)
			setfpasrs(lwp, mcp->asrs);
		else
			xregs_setfpregs(lwp, xregs_getptr(lwp, ucp));
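		/*
		 * Re-execute any instructions still pending on the restored
		 * floating point queue.
		 */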
		run_fpq(lwp, fp);
	}

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
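		/*
		 * If the new mask leaves a deliverable signal pending, make
		 * sure the thread notices it on its way back to userland.
		 */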
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}


int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	struct _fq fpu_q[MAXFPQ]; /* to hold floating queue */
	fpregset_t *fpp;
	gwindows_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs = NULL;
	int xregs_size = 0;
	extern int nwindows;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags.  That way, the structure can grow
	 * and still be binary compatible with all .o's, which will only
	 * have the old fields defined in uc_flags.
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, &curthread->t_hold);
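		/*
		 * For branded processes, translate the saved signal mask
		 * from the native numbering to the brand's view before it
		 * reaches userland.
		 */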
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point, it should not be possible to
		 * get here with an fpu_qcnt other than zero, since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed.  However, we
		 * clear fpu_q and fpu_qcnt here before the copyout anyway;
		 * this prevents us from interpreting the garbage we get
		 * back (when FP is not enabled) as valid queue data on a
		 * later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (struct _fq *)NULL;

		if (copyout(&uc, arg, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.asrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
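		/*
		 * The pieces skipped above (xrs, asrs and the FP state) are
		 * copied in separately below, the FP state only when UC_FPU
		 * is set.
		 */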
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
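				/*
				 * Sanity-check the user-supplied queue
				 * bounds before trusting them for a copyin.
				 */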
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct _fq))
					return (set_errno(EINVAL));
				if (copyin(fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
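				/*
				 * Point fpu_q at the kernel copy so that the
				 * user address is never dereferenced again.
				 */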
				fpp->fpu_q = fpu_q;
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}
		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * To determine how many bytes of the gwindows_t to
			 * copy in, we do the same computation that sendsig()
			 * does to decide how many bytes to copy out.  We
			 * just *know* that wbcnt is the first element of
			 * the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows_t), KM_SLEEP);
			if (copyin(uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow) +
			    SPARC_MAXREGWINDOW * sizeof (int *) + sizeof (long);
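			/*
			 * gwin_size covers the leading wbcnt field, the
			 * spbuf pointer array and wbcnt saved register
			 * windows; it must never exceed the size of the
			 * gwindows_t allocated above.
			 */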
			if (gwin_size > sizeof (gwindows_t) ||
			    copyin(uc.uc_mcontext.gwins, gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
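			/*
			 * Hand restorecontext() the kernel copy; setgwins()
			 * will write these windows back to the LWP.
			 */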
			uc.uc_mcontext.gwins = gwin;
		}

		/*
		 * Get the extra register state or ASRs, if any exist;
		 * there is no extra register state for _LP64 user programs.
		 */
		xregs_clrptr(lwp, &uc);
		if (copyin(&ucp->uc_mcontext.asrs, &uc.uc_mcontext.asrs,
		    sizeof (asrset_t))) {
			/* Free up gwin structure if used */
			if (gwin)
				kmem_free(gwin, sizeof (gwindows_t));
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

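		/*
		 * Keep the registered ustack in step with the stack we just
		 * restored, so user-level stack bounds checking stays
		 * accurate.
		 */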
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows_t));

		return (0);

	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));

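		/*
		 * Only readability of the stack_t was verified above; the
		 * LWP records the user address and savecontext() reads
		 * through it on each getcontext().
		 */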
		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}


#ifdef _SYSCALL32_IMPL

/*
 * Save user context for 32-bit processes.
 */
void
savecontext32(ucontext32_t *ucp, const k_sigset_t *mask, struct fq32 *dfq)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	fpregset_t fpregs;

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There is an unused hole in the ucontext32_t structure; zero-fill
	 * it so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == (uintptr_t)NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size =
			    (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	getfpregs(lwp, &fpregs);
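	/*
	 * Convert the native FP state to the ILP32 layout; dfq, when
	 * non-NULL, receives the 32-bit form of the floating queue.
	 */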
	fpuregset_nto32(&fpregs, &ucp->uc_mcontext.fpregs, dfq);

	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (caddr32_t)(uintptr_t)NULL;

	/*
	 * Save signal mask (the 32- and 64-bit sigset_t structures are
	 * identical).
	 */
	sigktou(mask, (sigset_t *)&ucp->uc_sigmask);
}

int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t   ucnat;
	struct _fq fpu_qnat[MAXFPQ]; /* to hold "native" floating queue */
	struct fq32 fpu_q[MAXFPQ]; /* to hold 32 bit floating queue */
	fpregset32_t *fpp;
	gwindows32_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs;
	int xregs_size = 0;
	extern int nwindows;
	klwp_t *lwp = ttolwp(curthread);
	ucontext32_t *ucp;
	uint32_t ustack32;
	stack32_t dummy_stk32;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags.  That way, the structure can grow
	 * and still be binary compatible with all .o's, which will only
	 * have the old fields defined in uc_flags.
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, &curthread->t_hold, NULL);
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_NATIVE_TO_BRAND(&uc.uc_sigmask);
		/*
		 * When using floating point, it should not be possible to
		 * get here with an fpu_qcnt other than zero, since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed.  However, we
		 * clear fpu_q and fpu_qcnt here before the copyout anyway;
		 * this prevents us from interpreting the garbage we get
		 * back (when FP is not enabled) as valid queue data on a
		 * later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (caddr32_t)(uintptr_t)NULL;

		if (copyout(&uc, arg, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (uc) - sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (uc.uc_flags & UC_SIGMASK)
			SIGSET_BRAND_TO_NATIVE(&uc.uc_sigmask);
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq32))
					return (set_errno(EINVAL));
				if (copyin((void *)(uintptr_t)fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
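				/*
				 * Unlike the 64-bit path, fpu_q is not
				 * repointed here; ucontext_32ton() below
				 * converts the 32-bit queue in fpu_q[] into
				 * fpu_qnat[] for restorecontext().
				 */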
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}

		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * To determine how many bytes of the gwindows_t to
			 * copy in, we do the same computation that sendsig()
			 * does to decide how many bytes to copy out.  We
			 * just *know* that wbcnt is the first element of
			 * the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows32_t), KM_SLEEP);
			if (copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow32) +
			    SPARC_MAXREGWINDOW * sizeof (caddr32_t) +
			    sizeof (int32_t);
			if (gwin_size > sizeof (gwindows32_t) ||
			    copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() should ignore this */
			uc.uc_mcontext.gwins = (caddr32_t)0;
		}

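		/*
		 * Convert the 32-bit context (and its floating queue) into
		 * the native form that restorecontext() consumes.
		 */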
		ucontext_32ton(&uc, &ucnat, fpu_q, fpu_qnat);

		/*
		 * get extra register state if any exists
		 */
		if (xregs_hasptr32(lwp, &uc) &&
		    ((xregs_size = xregs_getsize(curproc)) > 0)) {
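			/*
			 * Pull the user's extra register state into a kernel
			 * buffer and attach it to the native context.
			 */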
			xregs = kmem_zalloc(xregs_size, KM_SLEEP);
			if (copyin((void *)(uintptr_t)xregs_getptr32(lwp, &uc),
			    xregs, xregs_size)) {
				kmem_free(xregs, xregs_size);
				if (gwin)
					kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			xregs_setptr(lwp, &ucnat, xregs);
		} else {
			xregs_clrptr(lwp, &ucnat);
		}

		restorecontext(&ucnat);

		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (stack32_t));
		}

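		/*
		 * Apply the 32-bit register windows directly; gwins was
		 * deliberately cleared in the native context above so that
		 * restorecontext() would skip them.
		 */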
		if (gwin)
			setgwins32(lwp, gwin);

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows32_t));

		return (0);

	case GETUSTACK:
		ustack32 = (uint32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (caddr32_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}

#endif	/* _SYSCALL32_IMPL */
