xref: /illumos-gate/usr/src/lib/libc/port/threads/scalls.c (revision 2e14588420ccfbaa5be20605ed2be8b9802d1d49)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <stdarg.h>
32 #include <poll.h>
33 #include <stropts.h>
34 #include <dlfcn.h>
35 #include <sys/uio.h>
36 
37 /*
38  * fork_lock_enter() does triple-duty.  Not only does it (and atfork_lock)
39  * serialize calls to fork() and forkall(), but it also serializes calls to
40  * thr_suspend() and thr_continue() (fork() and forkall() also suspend other
41  * threads), and furthermore it serializes I18N calls to functions in other
42  * dlopen()ed L10N objects that might be calling malloc()/free().
43  */
44 void
45 fork_lock_enter(void)
46 {
47 	ulwp_t *self = curthread;
48 
49 	ASSERT(self->ul_critical == 0);
50 	(void) _private_mutex_lock(&self->ul_uberdata->fork_lock);
51 }
52 
53 void
54 fork_lock_exit(void)
55 {
56 	ulwp_t *self = curthread;
57 
58 	ASSERT(self->ul_critical == 0);
59 	(void) _private_mutex_unlock(&self->ul_uberdata->fork_lock);
60 }
61 
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
/*
 * forkx(): fork1() semantics with flags passed through to the
 * __forkx() trap.  Runs the pthread_atfork() handlers, blocks all
 * signals, and suspends every other thread across the trap so that
 * no library-internal locks are held at the instant of the fork.
 * Returns the child's pid to the parent, 0 to the child, or -1
 * with errno set on failure.
 */
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;	/* lets nested fork/pthread_atfork detect EDEADLK */
	(void) _private_mutex_lock(&udp->atfork_lock);

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), it will detect the error and
	 * fail with EDEADLK.  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 * This also blocks every other thread attempting calls to I18N
	 * functions in dlopen()ed L10N objects, but this is benign;
	 * the other threads will soon be suspended anyway.
	 */
	fork_lock_enter();

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		fork_lock_exit();
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		fork_lock_exit();
		_postfork_parent_handler();
	}

	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
173 
174 /*
175  * fork() is fork1() for both Posix threads and Solaris threads.
176  * The forkall() interface exists for applications that require
177  * the semantics of replicating all threads.
178  */
179 #pragma weak fork1 = _fork
180 #pragma weak _fork1 = _fork
181 #pragma weak fork = _fork
182 pid_t
183 _fork(void)
184 {
185 	return (_private_forkx(0));
186 }
187 
188 /*
189  * Much of the logic here is the same as in forkx().
190  * See the comments in forkx(), above.
191  */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
/*
 * forkallx(): like forkx() but replicates all threads in the child.
 * Note that the pthread_atfork() handlers are NOT run here; they
 * apply only to fork1() semantics.  Much of the logic is the same
 * as in forkx() above -- see the comments there.
 */
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/* vfork() child: go straight to the trap (see forkx()) */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/* cannot fork from within a fork handler */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	fork_lock_enter();
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {		/* child: all threads were replicated */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		unregister_locks();
		continue_fork(1);	/* 1: threads exist in the child too */
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	fork_lock_exit();
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
246 
247 #pragma weak forkall = _forkall
248 pid_t
249 _forkall(void)
250 {
251 	return (_private_forkallx(0));
252 }
253 
/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 *
 * NOTE: PROLOGUE opens a brace that EPILOGUE closes; the two must
 * always be used as a matched pair.  PROLOGUE declares 'self' and
 * 'nocancel' for the code in between.  When cancellation is enabled
 * and a cancel is already pending, PROLOGUE exits the thread via
 * _pthread_exit(PTHREAD_CANCELED) before the system call is issued.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 *
 * The caller must declare a local 'rv' of the appropriate return type.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
293 
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 *
 * Like PROLOGUE, this opens a brace that EPILOGUE_MASK closes,
 * and declares 'self' and 'nocancel' for the code in between.
 * 'sigmask' may be NULL, in which case no mask manipulation occurs.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
351 
352 /*
353  * Cancellation prologue and epilogue functions,
354  * for cancellation points too complex to include here.
355  */
356 void
357 _cancel_prologue(void)
358 {
359 	ulwp_t *self = curthread;
360 
361 	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
362 	if (self->ul_cancel_prologue == 0) {
363 		self->ul_save_async = self->ul_cancel_async;
364 		if (!self->ul_cancel_disabled) {
365 			self->ul_cancel_async = 1;
366 			if (self->ul_cancel_pending)
367 				_pthread_exit(PTHREAD_CANCELED);
368 		}
369 		self->ul_sp = stkptr();
370 	}
371 }
372 
373 void
374 _cancel_epilogue(void)
375 {
376 	ulwp_t *self = curthread;
377 
378 	if (self->ul_cancel_prologue == 0) {
379 		self->ul_sp = 0;
380 		self->ul_cancel_async = self->ul_save_async;
381 	}
382 }
383 
/*
 * Called from _thrp_join() (thr_join() is a cancellation point).
 * Retries __lwp_wait() on EINTR; returns 0 or an error number.
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}
398 
/*
 * Cancellation-point wrapper for read(2).
 */
ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_read(fd, buf, size))
}
407 
/*
 * Cancellation-point wrapper for write(2).
 */
ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_write(fd, buf, size))
}
416 
/*
 * Cancellation-point wrapper for getmsg(2).
 */
int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;		/* set by PERFORM */

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}
426 
/*
 * Cancellation-point wrapper for getpmsg(2).
 */
int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;		/* set by PERFORM */

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}
437 
/*
 * Cancellation-point wrapper for putmsg(2).
 */
int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;		/* set by PERFORM */

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}
448 
/*
 * XPG4 variant of putmsg(): identical but passes MSG_XPG4 to get
 * the XPG4.2 error semantics from the kernel.
 */
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;		/* set by PERFORM */

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}
459 
/*
 * Cancellation-point wrapper for putpmsg(2).
 */
int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;		/* set by PERFORM */

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}
470 
/*
 * XPG4 variant of putpmsg(); see __xpg4_putmsg() above.
 */
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;		/* set by PERFORM */

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}
481 
#pragma weak nanosleep = _nanosleep
/*
 * Cancellation-point wrapper for nanosleep(3RT).  __nanosleep()
 * returns an error number; convert to the -1/errno convention.
 */
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
497 
#pragma weak clock_nanosleep = _clock_nanosleep
/*
 * clock_nanosleep(3RT): sleep against a specified clock, optionally
 * until an absolute time (TIMER_ABSTIME).  Unlike nanosleep(), this
 * returns the error number directly rather than setting errno.
 * Restarts the underlying __nanosleep() when it returns early for
 * reasons other than a genuine timeout (e.g. the system clock was
 * reset).
 */
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;	/* no remaining time for absolute sleeps */
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 *
		 * NOTE(review): the (uint32_t) cast truncates tv_sec
		 * before widening; presumably assumes sleeps shorter
		 * than 2^32 seconds -- confirm before touching.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
574 
#pragma weak sleep = _sleep
/*
 * sleep(3C): sleep for 'sec' seconds; if interrupted by a signal,
 * return the unslept time rounded to the nearest whole second.
 */
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)	/* round half-second up */
			rem++;
	}
	return (rem);
}
596 
#pragma weak usleep = _usleep
/*
 * usleep(3C): sleep for 'usec' microseconds; always returns 0,
 * even when the sleep is interrupted.
 */
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}
610 
/*
 * Cancellation-point wrapper for close(2).  First cancels any
 * outstanding asynchronous I/O on the file descriptor.
 */
int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;		/* set by PERFORM */

	_aio_close(fildes);
	PERFORM(_close(fildes))
}
621 
/*
 * Cancellation-point wrapper for creat(2).
 */
int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;		/* set by PERFORM */

	PERFORM(_creat(path, mode))
}
630 
#if !defined(_LP64)
/*
 * Large-file variant of creat(); only needed in 32-bit builds.
 */
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;		/* set by PERFORM */

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */
641 
/*
 * fcntl(2): only F_SETLKW (blocking record lock) is a cancellation
 * point; every other command goes straight through.
 */
int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;		/* set by PERFORM */
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);	/* int or pointer, widened */
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}
657 
/*
 * Cancellation-point wrapper for fdatasync(3RT).
 */
int
fdatasync(int fildes)
{
	extern int _fdatasync(int);
	int rv;		/* set by PERFORM */

	PERFORM(_fdatasync(fildes))
}
666 
/*
 * Cancellation-point wrapper for fsync(3C).
 */
int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;		/* set by PERFORM */

	PERFORM(_fsync(fildes))
}
675 
/*
 * Cancellation-point wrapper for lockf(3C).
 */
int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;		/* set by PERFORM */

	PERFORM(_lockf(fildes, function, size))
}
684 
#if !defined(_LP64)
/*
 * Large-file variant of lockf(); only needed in 32-bit builds.
 */
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;		/* set by PERFORM */

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */
695 
/*
 * Cancellation-point wrapper for msgrcv(2).
 */
ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}
704 
/*
 * Cancellation-point wrapper for msgsnd(2).
 */
int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;		/* set by PERFORM */

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}
713 
/*
 * Cancellation-point wrapper for msync(3C).
 */
int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;		/* set by PERFORM */

	PERFORM(_msync(addr, len, flags))
}
722 
/*
 * Cancellation-point wrapper for open(2).  The optional third
 * argument (mode) is always fetched; it is ignored by the kernel
 * unless O_CREAT is specified.
 */
int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;		/* set by PERFORM */
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}
736 
#if !defined(_LP64)
/*
 * Large-file variant of open(); only needed in 32-bit builds.
 */
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;		/* set by PERFORM */
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */
752 
/*
 * Cancellation-point wrapper for pause(2).
 */
int
pause(void)
{
	extern int _pause(void);
	int rv;		/* set by PERFORM */

	PERFORM(_pause())
}
761 
/*
 * Cancellation-point wrapper for pread(2).
 */
ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_pread(fildes, buf, nbyte, offset))
}
770 
#if !defined(_LP64)
/*
 * Large-file variant of pread(); only needed in 32-bit builds.
 */
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
781 
/*
 * Cancellation-point wrapper for pwrite(2).
 */
ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}
790 
#if !defined(_LP64)
/*
 * Large-file variant of pwrite(); only needed in 32-bit builds.
 */
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */
801 
/*
 * Cancellation-point wrapper for readv(2).
 */
ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_readv(fildes, iov, iovcnt))
}
810 
/*
 * Cancellation-point wrapper for sigpause(3C).
 */
int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;		/* set by PERFORM */

	PERFORM(_sigpause(sig))
}
819 
#pragma weak sigsuspend = _sigsuspend
/*
 * sigsuspend(2): atomically replace the signal mask and wait for a
 * signal.  Uses PROLOGUE_MASK/EPILOGUE_MASK to deliver the temporary
 * mask to the kernel race-free (see the comment above those macros).
 */
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}
832 
/*
 * pollsys(): poll with an optional temporary signal mask, the
 * backing system call for ppoll()-style semantics.  Uses
 * PROLOGUE_MASK/EPILOGUE_MASK for the same race-free mask handoff
 * as sigsuspend().  'sigmask' may be NULL.
 */
int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}
846 
#pragma weak sigtimedwait = _sigtimedwait
/*
 * sigtimedwait(2): wait for a signal in 'set', with optional timeout.
 * If the signal received is SIGCANCEL sent by the kernel or via
 * lwp_kill() (SI_LWP), it is a cancellation request: act on it and
 * report EINTR instead of returning SIGCANCEL to the caller.
 */
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	/* copy out the siginfo only on success */
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}
869 
870 #pragma weak sigwait = _sigwait
871 int
872 _sigwait(sigset_t *set)
873 {
874 	return (_sigtimedwait(set, NULL, NULL));
875 }
876 
877 #pragma weak sigwaitinfo = _sigwaitinfo
878 int
879 _sigwaitinfo(const sigset_t *set, siginfo_t *info)
880 {
881 	return (_sigtimedwait(set, info, NULL));
882 }
883 
884 #pragma weak sigqueue = _sigqueue
885 int
886 _sigqueue(pid_t pid, int signo, const union sigval value)
887 {
888 	extern int __sigqueue(pid_t pid, int signo,
889 	    /* const union sigval */ void *value, int si_code, int block);
890 	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
891 }
892 
/*
 * Cancellation-point wrapper for tcdrain(3C).
 */
int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;		/* set by PERFORM */

	PERFORM(_tcdrain(fildes))
}
901 
/*
 * Cancellation-point wrapper for wait(2).
 */
pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;	/* set by PERFORM */

	PERFORM(_wait(stat_loc))
}
910 
/*
 * Cancellation-point wrapper for wait3(3C).
 */
pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;	/* set by PERFORM */

	PERFORM(_wait3(statusp, options, rusage))
}
919 
/*
 * Cancellation-point wrapper for waitid(2).
 */
int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;		/* set by PERFORM */

	PERFORM(_waitid(idtype, id, infop, options))
}
928 
/*
 * Cancellation-point wrapper for waitpid(2).
 *
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;	/* set by PERFORM */

	PERFORM(_waitpid(pid, stat_loc, options))
}
942 
/*
 * Cancellation-point wrapper for writev(2).
 */
ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;	/* set by PERFORM */

	PERFORM(_writev(fildes, iov, iovcnt))
}
951