/*
 * xref: /illumos-gate/usr/src/uts/sparc/syscall/getcontext.c
 * (revision 7be238fce69ba74b2163fc0ea898dfdc01a4aa22)
 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/ucontext.h>
#include <sys/asm_linkage.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/archsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/debug.h>
#include <sys/model.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/privregs.h>
#include <sys/schedctl.h>


/*
 * Save user context.
 */
void
savecontext(ucontext_t *ucp, k_sigset_t mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There are unused holes in the ucontext_t structure, zero-fill
	 * them so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_flags)[1] = 0;
	(&ucp->uc_stack.ss_flags)[1] = 0;
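	/*
	 * The "[1]" stores above assume the LP64 layout leaves a 32-bit
	 * pad word immediately after uc_flags (before the uc_link pointer)
	 * and after ss_flags (at the tail of the stack_t); indexing one
	 * past each int-sized field zeroes exactly that hole.
	 */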

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (ucontext_t *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	getasrs(lwp, ucp->uc_mcontext.asrs);

	getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	getfpasrs(lwp, ucp->uc_mcontext.asrs);
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (gwindows_t *)NULL;

	/*
	 * Save signal mask.
	 */
	sigktou(&mask, &ucp->uc_sigmask);
}


void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	mcontext_t *mcp = &ucp->uc_mcontext;
	model_t model = lwp_getdatamodel(lwp);

	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		if (mcp->gwins != 0)
			setgwins(lwp, mcp->gwins);
		setgregs(lwp, mcp->gregs);
		if (model == DATAMODEL_LP64)
			setasrs(lwp, mcp->asrs);
		else
			xregs_setgregs(lwp, xregs_getptr(lwp, ucp));
	}

	if (ucp->uc_flags & UC_FPU) {
		fpregset_t *fp = &ucp->uc_mcontext.fpregs;

		setfpregs(lwp, fp);
		if (model == DATAMODEL_LP64)
			setfpasrs(lwp, mcp->asrs);
		else
			xregs_setfpregs(lwp, xregs_getptr(lwp, ucp));
		run_fpq(lwp, fp);
	}

	if (ucp->uc_flags & UC_SIGMASK) {
		/*
		 * We don't need to acquire p->p_lock here;
		 * we are manipulating thread-private data.
		 */
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		if (sigcheck(ttoproc(t), t))
			t->t_sig_check = 1;
	}
}


int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	struct fq fpu_q[MAXFPQ]; /* to hold floating queue */
	fpregset_t *fpp;
	gwindows_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs = NULL;
	int xregs_size = 0;
	extern int nwindows;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags.  That way, the structure can grow
	 * and still be binary compatible with all .o's which will only
	 * have old fields defined in uc_flags.
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext(&uc, curthread->t_hold);
		/*
		 * When using floating point it should not be possible to
		 * get here with an fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However, we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (struct fq *)NULL;

		if (copyout(&uc, arg, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
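		/*
		 * A null ucp asks for a normal exit rather than being an
		 * error; presumably this is the path taken when a context
		 * created with makecontext(3C) returns with a null uc_link.
		 */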
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.asrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
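		/*
		 * The size arithmetic above works on the assumption that
		 * the omitted members (fpregs, xrs, asrs and the two filler
		 * arrays) sit contiguously at the tail of the structure, so
		 * everything from uc_flags through uc_mcontext.gwins comes
		 * in with one copyin; xrs and asrs are fetched explicitly
		 * below and the FP state only when UC_FPU is set.
		 */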
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq))
					return (set_errno(EINVAL));
				if (copyin(fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
				fpp->fpu_q = fpu_q;
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}
		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out.  We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows_t), KM_SLEEP);
			if (copyin(uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow) +
			    SPARC_MAXREGWINDOW * sizeof (int *) + sizeof (long);
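			/*
			 * The three terms above are assumed to mirror the
			 * gwindows_t layout: the wbcnt word (padded out to
			 * a long), the spbuf[] array of SPARC_MAXREGWINDOW
			 * window stack pointers, and wbcnt saved windows in
			 * wbuf[]; windows past the last valid one are never
			 * copied.
			 */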
			if (gwin_size > sizeof (gwindows_t) ||
			    copyin(uc.uc_mcontext.gwins, gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			uc.uc_mcontext.gwins = gwin;
		}

		/*
		 * get extra register state or asrs if any exists
		 * there is no extra register state for _LP64 user programs
		 */
		xregs_clrptr(lwp, &uc);
		if (copyin(&ucp->uc_mcontext.asrs, &uc.uc_mcontext.asrs,
		    sizeof (asrset_t))) {
			/* Free up gwin structure if used */
			if (gwin)
				kmem_free(gwin, sizeof (gwindows_t));
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows_t));

		return (0);

	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
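
/*
 * Illustrative user-level view (a sketch, not something defined in this
 * file): libc's getcontext(3C) and setcontext(3C) are expected to arrive
 * here with the GETCONTEXT and SETCONTEXT subcodes, roughly:
 *
 *	ucontext_t uc;
 *	(void) getcontext(&uc);		returns again when uc is resumed
 *	...
 *	(void) setcontext(&uc);		does not return on success
 *
 * GETUSTACK and SETUSTACK query and register the address of the per-LWP
 * stack_t that the kernel consults for stack bounds checking.
 */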


#ifdef _SYSCALL32_IMPL

/*
 * Save user context for 32-bit processes.
 */
void
savecontext32(ucontext32_t *ucp, k_sigset_t mask, struct fq32 *dfq)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	fpregset_t fpregs;

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There is an unused hole in the ucontext32_t structure; zero-fill
	 * it so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size =
			    (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	getfpregs(lwp, &fpregs);
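	/*
	 * fpuregset_nto32() narrows the native FP state gathered above
	 * into the ILP32 image; dfq, when non-null, receives the converted
	 * floating queue (the GETCONTEXT path below passes NULL since it
	 * clears the queue before copyout anyway).
	 */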
	fpuregset_nto32(&fpregs, &ucp->uc_mcontext.fpregs, dfq);

	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	ucp->uc_mcontext.gwins = (caddr32_t)NULL;

	/*
	 * Save signal mask (the 32- and 64-bit sigset_t structures are
	 * identical).
	 */
	sigktou(&mask, (sigset_t *)&ucp->uc_sigmask);
}

int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t   ucnat;
	struct fq fpu_qnat[MAXFPQ]; /* to hold "native" floating queue */
	struct fq32 fpu_q[MAXFPQ]; /* to hold 32 bit floating queue */
	fpregset32_t *fpp;
	gwindows32_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs;
	int xregs_size = 0;
	extern int nwindows;
	klwp_t *lwp = ttolwp(curthread);
	ucontext32_t *ucp;
	uint32_t ustack32;
	stack32_t dummy_stk32;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags.  That way, the structure can grow
	 * and still be binary compatible with all .o's which will only
	 * have old fields defined in uc_flags.
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		schedctl_finish_sigblock(curthread);
		savecontext32(&uc, curthread->t_hold, NULL);
		/*
		 * When using floating point it should not be possible to
		 * get here with an fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However, we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (caddr32_t)NULL;

		if (copyout(&uc, arg, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (uc) - sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq32))
					return (set_errno(EINVAL));
				if (copyin((void *)(uintptr_t)fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}

		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out.  We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows32_t), KM_SLEEP);
			if (copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow32) +
			    SPARC_MAXREGWINDOW * sizeof (caddr32_t) +
			    sizeof (int32_t);
			if (gwin_size > sizeof (gwindows32_t) ||
			    copyin((void *)(uintptr_t)uc.uc_mcontext.gwins,
			    gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() should ignore this */
			uc.uc_mcontext.gwins = (caddr32_t)0;
		}

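		/*
		 * ucontext_32ton() rewrites the ILP32 image as the native
		 * ucontext_t that restorecontext() consumes, converting the
		 * 32-bit floating queue entries in fpu_q (if any) into the
		 * native fpu_qnat array along the way.
		 */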
		ucontext_32ton(&uc, &ucnat, fpu_q, fpu_qnat);

		/*
		 * get extra register state if any exists
		 */
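		/*
		 * For a 32-bit process this is, roughly, the v8plus state
		 * that has no home in the ILP32 mcontext: the upper halves
		 * of the %g and %o registers plus the additional
		 * floating-point state (see the prxregset definitions);
		 * the 64-bit handler above has no such state to restore.
		 */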
		if (xregs_hasptr32(lwp, &uc) &&
		    ((xregs_size = xregs_getsize(curproc)) > 0)) {
			xregs = kmem_zalloc(xregs_size, KM_SLEEP);
			if (copyin((void *)(uintptr_t)xregs_getptr32(lwp, &uc),
			    xregs, xregs_size)) {
				kmem_free(xregs, xregs_size);
				if (gwin)
					kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			xregs_setptr(lwp, &ucnat, xregs);
		} else {
			xregs_clrptr(lwp, &ucnat);
		}

		restorecontext(&ucnat);

		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (stack32_t));
		}

		if (gwin)
			setgwins32(lwp, gwin);

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows32_t));

		return (0);

	case GETUSTACK:
		ustack32 = (uint32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (caddr32_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}

#endif	/* _SYSCALL32_IMPL */