xref: /illumos-gate/usr/src/lib/libc/port/threads/cancel.c (revision 4a38094c)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"

/*
 * pthread_cancel: tries to cancel the targeted thread.
 * If the target thread has already exited, no action is taken.
 * Otherwise, SIGCANCEL is sent to request that the target thread
 * cancel itself.
 */
int
pthread_cancel(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (ulwp->ul_cancel_pending) {
		/*
		 * Don't send SIGCANCEL more than once.
		 */
		ulwp_unlock(ulwp, udp);
	} else if (ulwp == self) {
		/*
		 * Unlock self before cancelling.
		 */
		ulwp_unlock(self, udp);
		self->ul_nocancel = 0;	/* cancellation is now possible */
		if (self->ul_sigdefer == 0)
			do_sigcancel();
		else {
			self->ul_cancel_pending = 1;
			set_cancel_pending_flag(self, 0);
		}
	} else if (ulwp->ul_cancel_disabled) {
		/*
		 * Don't send SIGCANCEL if cancellation is disabled;
		 * just set the thread's ulwp->ul_cancel_pending flag.
		 * This avoids a potential EINTR for the target thread.
		 * We don't call set_cancel_pending_flag() here because
		 * we cannot modify another thread's schedctl data.
		 */
		ulwp->ul_cancel_pending = 1;
		ulwp_unlock(ulwp, udp);
	} else {
		/*
		 * Request the other thread to cancel itself.
		 */
		error = _lwp_kill(tid, SIGCANCEL);
		ulwp_unlock(ulwp, udp);
	}

	return (error);
}

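/*
 * Example (an illustrative sketch, not part of this library): a
 * typical caller cancels a worker thread and then joins it.  With
 * the default deferred type, the cancellation is acted upon at the
 * worker's next cancellation point (sleep() here), after which
 * pthread_join() reports PTHREAD_CANCELED as the exit status.
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		for (;;)
 *			(void) sleep(1);
 *		return (NULL);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		pthread_t tid;
 *		void *status;
 *
 *		(void) pthread_create(&tid, NULL, worker, NULL);
 *		(void) pthread_cancel(tid);
 *		(void) pthread_join(tid, &status);
 *		if (status == PTHREAD_CANCELED)
 *			(void) printf("worker was cancelled\n");
 *		return (0);
 *	}
 */
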
/*
 * pthread_setcancelstate: sets the cancellation state to ENABLED
 * or DISABLED.  If the state is already ENABLED or is being set to
 * ENABLED, the type of cancellation is ASYNCHRONOUS, and a cancel
 * request is pending, then the thread is cancelled right here.
 * Otherwise, pthread_setcancelstate() is not a cancellation point.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int was_disabled;

	/*
	 * Grab ulwp_lock(self) to protect the setting of ul_cancel_disabled
	 * since it is tested under this lock by pthread_cancel(), above.
	 * This has the side-effect of calling enter_critical() and this
	 * defers SIGCANCEL until ulwp_unlock(self) when exit_critical()
	 * is called.  (self->ul_cancel_pending is set in the SIGCANCEL
	 * handler and we must be async-signal safe here.)
	 */
	ulwp_lock(self, udp);

	was_disabled = self->ul_cancel_disabled;
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		self->ul_cancel_disabled = 0;
		break;
	case PTHREAD_CANCEL_DISABLE:
		self->ul_cancel_disabled = 1;
		break;
	default:
		ulwp_unlock(self, udp);
		return (EINVAL);
	}
	set_cancel_pending_flag(self, 0);

	/*
	 * If this thread has been requested to be canceled and
	 * is in async mode and is or was enabled, then exit.
	 */
	if ((!self->ul_cancel_disabled || !was_disabled) &&
	    self->ul_cancel_async && self->ul_cancel_pending) {
		ulwp_unlock(self, udp);
		pthread_exit(PTHREAD_CANCELED);
	}

	ulwp_unlock(self, udp);

	if (oldstate != NULL) {
		if (was_disabled)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}
	return (0);
}

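/*
 * Example (an illustrative sketch, not part of this library):
 * disabling cancellation around a region that must not be torn
 * down halfway, then restoring the caller's previous state.  A
 * cancellation request arriving meanwhile stays pending and is
 * acted upon at the next cancellation point after the old state
 * is restored.  update_shared_state() and shared_counter are
 * hypothetical names for illustration.
 *
 *	#include <pthread.h>
 *
 *	static int shared_counter;
 *
 *	void
 *	update_shared_state(void)
 *	{
 *		int oldstate;
 *
 *		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
 *		    &oldstate);
 *		shared_counter++;
 *		(void) pthread_setcancelstate(oldstate, NULL);
 *	}
 */
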
/*
 * pthread_setcanceltype: sets the type to DEFERRED or ASYNCHRONOUS.
 * If the type is being set to ASYNCHRONOUS, then this becomes
 * a cancellation point if there is a cancellation pending.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	ulwp_t *self = curthread;
	int was_async;

	/*
	 * Call enter_critical() to defer SIGCANCEL until exit_critical().
	 * We do this because curthread->ul_cancel_pending is set in the
	 * SIGCANCEL handler and we must be async-signal safe here.
	 */
	enter_critical(self);

	was_async = self->ul_cancel_async;
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		self->ul_cancel_async = 1;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		self->ul_cancel_async = 0;
		break;
	default:
		exit_critical(self);
		return (EINVAL);
	}
	self->ul_save_async = self->ul_cancel_async;

	/*
	 * If this thread has been requested to be canceled and
	 * is in enabled mode and is or was in async mode, exit.
	 */
	if ((self->ul_cancel_async || was_async) &&
	    self->ul_cancel_pending && !self->ul_cancel_disabled) {
		exit_critical(self);
		pthread_exit(PTHREAD_CANCELED);
	}

	exit_critical(self);

	if (oldtype != NULL) {
		if (was_async)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}
	return (0);
}

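/*
 * Example (an illustrative sketch, not part of this library):
 * asynchronous cancellation is only safe around pure computation
 * that holds no locks and owns no resources.  compute_step() is a
 * hypothetical stand-in for such work.
 *
 *	#include <pthread.h>
 *
 *	extern int compute_step(void);
 *
 *	void
 *	long_computation(void)
 *	{
 *		int oldtype;
 *
 *		(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,
 *		    &oldtype);
 *		while (compute_step() != 0)
 *			continue;
 *		(void) pthread_setcanceltype(oldtype, NULL);
 *	}
 */
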
/*
 * pthread_testcancel: tests for a pending cancellation.
 * If cancellation is enabled and a cancellation is pending, act on
 * it by calling pthread_exit(PTHREAD_CANCELED), which takes care
 * of calling the cleanup handlers.
 */
void
pthread_testcancel(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_pending && !self->ul_cancel_disabled)
		pthread_exit(PTHREAD_CANCELED);
}

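/*
 * Example (an illustrative sketch, not part of this library): a
 * compute loop that never blocks in a cancellation point can poll
 * for a pending cancellation once per iteration.  work_remains()
 * and do_one_unit() are hypothetical stand-ins for the real work.
 *
 *	#include <pthread.h>
 *
 *	extern int work_remains(void);
 *	extern void do_one_unit(void);
 *
 *	void
 *	drain_work(void)
 *	{
 *		while (work_remains()) {
 *			do_one_unit();
 *			pthread_testcancel();
 *		}
 *	}
 */
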
/*
 * For deferred mode, this routine makes a thread cancelable.
 * It is called from functions that want to be cancellation
 * points and are about to block, such as cond_wait().
 */
void
_cancelon()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		ASSERT(self->ul_cancelable >= 0);
		self->ul_cancelable++;
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
	}
}

/*
 * This routine turns cancelability off and possibly calls pthread_exit().
 * It is called from functions that are cancellation points, like cond_wait().
 */
void
_canceloff()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		if (self->ul_cancel_pending)
			pthread_exit(PTHREAD_CANCELED);
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * Same as _canceloff() but doesn't actually cancel the thread.
 * This is used by cond_wait() and sema_wait() when they don't get EINTR.
 */
void
_canceloff_nocancel()
{
	ulwp_t *self = curthread;

	ASSERT(!(self->ul_cancelable && self->ul_cancel_disabled));
	if (!self->ul_cancel_disabled) {
		self->ul_cancelable--;
		ASSERT(self->ul_cancelable >= 0);
	}
}

/*
 * __pthread_cleanup_push: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_push().  The macro allocates the cleanup
 * struct and calls this routine to push the handler onto the
 * curthread's cleanup list.
 */
void
__pthread_cleanup_push(void (*routine)(void *),
    void *args, caddr_t fp, _cleanup_t *clnup_info)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = (__cleanup_t *)clnup_info;

	infop->func = routine;
	infop->arg = args;
	infop->fp = fp;
	infop->next = self->ul_clnup_hdr;
	self->ul_clnup_hdr = infop;
}

/*
 * __pthread_cleanup_pop: called by the macro in pthread.h which defines
 * POSIX.1c pthread_cleanup_pop().  It calls this routine to pop the
 * handler off the curthread's cleanup list and execute it if necessary.
 */
void
__pthread_cleanup_pop(int ex, _cleanup_t *clnup_info __unused)
{
	ulwp_t *self = curthread;
	__cleanup_t *infop = self->ul_clnup_hdr;

	self->ul_clnup_hdr = infop->next;
	if (ex)
		(*infop->func)(infop->arg);
}

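/*
 * Example (an illustrative sketch, not part of this library): the
 * classic use of the pthread_cleanup_push()/pthread_cleanup_pop()
 * macro pair is to guarantee that a mutex is dropped if the thread
 * is cancelled while blocked in pthread_cond_wait(), a cancellation
 * point.  The two macros must appear as a lexically balanced pair
 * in the same scope.  unlock_it() and wait_for_condition() are
 * hypothetical names for illustration.
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void
 *	unlock_it(void *mp)
 *	{
 *		(void) pthread_mutex_unlock(mp);
 *	}
 *
 *	void
 *	wait_for_condition(pthread_cond_t *cvp)
 *	{
 *		(void) pthread_mutex_lock(&lock);
 *		pthread_cleanup_push(unlock_it, &lock);
 *		(void) pthread_cond_wait(cvp, &lock);
 *		pthread_cleanup_pop(1);
 *	}
 */
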
/*
 * Called when either self->ul_cancel_disabled or self->ul_cancel_pending
 * is modified.  Setting SC_CANCEL_FLG informs the kernel that we have
 * a pending cancellation and we do not have cancellation disabled.
 * In this situation, we will not go to sleep on any system call but
 * will instead return EINTR immediately on any attempt to sleep,
 * with SC_EINTR_FLG set in sc_flgs.  Clearing SC_CANCEL_FLG rescinds
 * this condition, but SC_EINTR_FLG never goes away until the thread
 * terminates (indicated by clear_flags != 0).
 */
void
set_cancel_pending_flag(ulwp_t *self, int clear_flags)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (clear_flags)
			scp->sc_flgs &= ~(SC_CANCEL_FLG | SC_EINTR_FLG);
		else if (self->ul_cancel_pending && !self->ul_cancel_disabled)
			scp->sc_flgs |= SC_CANCEL_FLG;
		else
			scp->sc_flgs &= ~SC_CANCEL_FLG;
	}
	exit_critical(self);
}

/*
 * Called from the PROLOGUE macro in scalls.c to inform subsequent
 * code that a cancellation point has been called and that the
 * current thread should cancel itself as soon as all of its locks
 * have been dropped (see safe_mutex_unlock()).
 */
void
set_cancel_eintr_flag(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_vfork | self->ul_nocancel)
		return;
	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL)
		scp->sc_flgs |= SC_EINTR_FLG;
	exit_critical(self);
}

/*
 * Calling set_parking_flag(curthread, 1) informs the kernel that we are
 * calling __lwp_park() or ___lwp_cond_wait().  If we take a signal in
 * the unprotected (from signals) interval before reaching the kernel,
 * sigacthandler() will call set_parking_flag(curthread, 0) to inform
 * the kernel to return immediately from these system calls, giving us
 * a spurious wakeup but not a deadlock.
 */
void
set_parking_flag(ulwp_t *self, int park)
{
	volatile sc_shared_t *scp;

	enter_critical(self);
	if ((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) {
		if (park) {
			scp->sc_flgs |= SC_PARK_FLG;
			/*
			 * We are parking; allow the __lwp_park() call to
			 * block even if we have a pending cancellation.
			 */
			scp->sc_flgs &= ~SC_CANCEL_FLG;
		} else {
			scp->sc_flgs &= ~(SC_PARK_FLG | SC_CANCEL_FLG);
			/*
			 * We are no longer parking; restore the
			 * pending cancellation flag if necessary.
			 */
			if (self->ul_cancel_pending &&
			    !self->ul_cancel_disabled)
				scp->sc_flgs |= SC_CANCEL_FLG;
		}
	} else if (park == 0) {	/* schedctl failed, do it the long way */
		(void) __lwp_unpark(self->ul_lwpid);
	}
	exit_critical(self);
}

/*
 * Test if the current thread is due to exit because of cancellation.
 */
int
cancel_active(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	int exit_soon;

	/*
	 * If there is a pending cancellation and cancellation
	 * is not disabled (SC_CANCEL_FLG) and we received
	 * EINTR from a recent system call (SC_EINTR_FLG),
	 * then we will soon be exiting.
	 */
	enter_critical(self);
	exit_soon =
	    (((scp = self->ul_schedctl) != NULL ||
	    (scp = setup_schedctl()) != NULL) &&
	    (scp->sc_flgs & (SC_CANCEL_FLG | SC_EINTR_FLG)) ==
	    (SC_CANCEL_FLG | SC_EINTR_FLG));
	exit_critical(self);

	return (exit_soon);
}