17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
57257d1b4Sraf * Common Development and Distribution License (the "License").
67257d1b4Sraf * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
217257d1b4Sraf
227c478bd9Sstevel@tonic-gate /*
2349b225e1SGavin Maltby * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
247c478bd9Sstevel@tonic-gate * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate */
267c478bd9Sstevel@tonic-gate
277257d1b4Sraf #include "lint.h"
287c478bd9Sstevel@tonic-gate #include "thr_uberdata.h"
297c478bd9Sstevel@tonic-gate #include "libc.h"
307c478bd9Sstevel@tonic-gate
317c478bd9Sstevel@tonic-gate #include <alloca.h>
327c478bd9Sstevel@tonic-gate #include <unistd.h>
337c478bd9Sstevel@tonic-gate #include <thread.h>
347c478bd9Sstevel@tonic-gate #include <pthread.h>
357c478bd9Sstevel@tonic-gate #include <stdio.h>
367c478bd9Sstevel@tonic-gate #include <errno.h>
377c478bd9Sstevel@tonic-gate #include <door.h>
387c478bd9Sstevel@tonic-gate #include <signal.h>
397c478bd9Sstevel@tonic-gate #include <ucred.h>
4049b225e1SGavin Maltby #include <strings.h>
4149b225e1SGavin Maltby #include <ucontext.h>
427c478bd9Sstevel@tonic-gate #include <sys/ucred.h>
4349b225e1SGavin Maltby #include <atomic.h>
447c478bd9Sstevel@tonic-gate
/* Default server-thread creation function (defined below). */
static door_server_func_t door_create_server;

/*
 * Global state -- the non-statics are accessed from the __door_return()
 * syscall wrapper.
 */
static mutex_t door_state_lock = DEFAULTMUTEX;
/* Hook invoked to spawn server threads; replaceable via door_server_create(). */
door_server_func_t *door_server_func = door_create_server;
/* pid that last created any door; reset detection across forkall(). */
pid_t door_create_pid = 0;
/* pid that created the first non-private door (global server pool exists). */
static pid_t door_create_first_pid = 0;
/* pid that created the first unref door (unref listener thread exists). */
static pid_t door_create_unref_pid = 0;
567c478bd9Sstevel@tonic-gate
/*
 * The raw system call interfaces
 */
extern int __door_create(void (*)(void *, char *, size_t, door_desc_t *,
    uint_t), void *, uint_t);
extern int __door_return(caddr_t, size_t, door_return_desc_t *, caddr_t,
    size_t);
extern int __door_ucred(ucred_t *);
extern int __door_unref(void);
extern int __door_unbind(void);

/*
 * Key for per-door data for doors created with door_xcreate.
 */
static pthread_key_t privdoor_key = PTHREAD_ONCE_KEY_NP;

/*
 * Each door_xcreate'd door has a struct privdoor_data allocated for it,
 * and each of the initial pool of service threads for the door
 * has TSD for the privdoor_key set to point to this structure.
 * When a thread in door_return decides it is time to perform a
 * thread depletion callback we can retrieve this door information
 * via a TSD lookup on the privdoor key.
 */
struct privdoor_data {
	int pd_dfd;			/* door descriptor */
	door_id_t pd_uniqid;		/* door uniquifier, from door_info */
	volatile uint32_t pd_refcnt;	/* managed via atomic_{inc,dec}_32 */
	door_xcreate_server_func_t *pd_crf;	/* thread-creation callback */
	void *pd_crcookie;		/* cookie passed to pd_crf/pd_setupf */
	door_xcreate_thrsetup_func_t *pd_setupf; /* optional thread setup */
};

static int door_xcreate_n(door_info_t *, struct privdoor_data *, int);
9149b225e1SGavin Maltby
/*
 * door_create_cmn holds the privdoor data before kicking off server
 * thread creation, all of which must succeed; if they don't then
 * they return leaving the refcnt unchanged overall, and door_create_cmn
 * releases its hold after revoking the door and we're done. Otherwise
 * all n threads created add one each to the refcnt, and door_create_cmn
 * drops its hold. If and when a server thread exits the key destructor
 * function will be called, and we use that to decrement the reference
 * count. We also decrement the reference count on door_unbind().
 * If ever we get the reference count to 0 then we will free that data.
 */

/* Take an additional reference on the per-door data. */
static void
privdoor_data_hold(struct privdoor_data *pdd)
{
	atomic_inc_32(&pdd->pd_refcnt);
}
10849b225e1SGavin Maltby
10949b225e1SGavin Maltby static void
privdoor_data_rele(struct privdoor_data * pdd)11049b225e1SGavin Maltby privdoor_data_rele(struct privdoor_data *pdd)
11149b225e1SGavin Maltby {
11249b225e1SGavin Maltby if (atomic_dec_32_nv(&pdd->pd_refcnt) == 0)
11349b225e1SGavin Maltby free(pdd);
11449b225e1SGavin Maltby }
11549b225e1SGavin Maltby
/*
 * TSD destructor for privdoor_key: called when a door_xcreate'd server
 * thread exits; releases that thread's hold on the per-door data.
 */
void
privdoor_destructor(void *data)
{
	struct privdoor_data *pdd = data;

	privdoor_data_rele(pdd);
}
1217c478bd9Sstevel@tonic-gate
1227c478bd9Sstevel@tonic-gate /*
1237c478bd9Sstevel@tonic-gate * We park the ourselves in the kernel to serve as the "caller" for
1247c478bd9Sstevel@tonic-gate * unreferenced upcalls for this process. If the call returns with
1257c478bd9Sstevel@tonic-gate * EINTR (e.g., someone did a forkall), we repeat as long as we're still
1267c478bd9Sstevel@tonic-gate * in the parent. If the child creates an unref door it will create
1277c478bd9Sstevel@tonic-gate * a new thread.
1287c478bd9Sstevel@tonic-gate */
1297c478bd9Sstevel@tonic-gate static void *
door_unref_func(void * arg)1307c478bd9Sstevel@tonic-gate door_unref_func(void *arg)
1317c478bd9Sstevel@tonic-gate {
1327c478bd9Sstevel@tonic-gate pid_t mypid = (pid_t)(uintptr_t)arg;
1337c478bd9Sstevel@tonic-gate
1347c478bd9Sstevel@tonic-gate sigset_t fillset;
1357c478bd9Sstevel@tonic-gate
1367c478bd9Sstevel@tonic-gate /* mask signals before diving into the kernel */
1377c478bd9Sstevel@tonic-gate (void) sigfillset(&fillset);
1387c478bd9Sstevel@tonic-gate (void) thr_sigsetmask(SIG_SETMASK, &fillset, NULL);
1397c478bd9Sstevel@tonic-gate
1407c478bd9Sstevel@tonic-gate while (getpid() == mypid && __door_unref() && errno == EINTR)
1417c478bd9Sstevel@tonic-gate continue;
1427c478bd9Sstevel@tonic-gate
1437c478bd9Sstevel@tonic-gate return (NULL);
1447c478bd9Sstevel@tonic-gate }
1457c478bd9Sstevel@tonic-gate
/*
 * Common implementation behind door_create() and door_xcreate().
 *
 * Creates the door with __door_create() and then performs the
 * per-process bookkeeping: starting the unref listener thread on the
 * first unref door, kicking the server-thread creation function for
 * the first non-private door, and -- for private doors -- creating
 * the initial pool of bound service threads.
 *
 * f/cookie/flags are the usual door_create arguments.  crf, setupf,
 * crcookie and nthread are only meaningful for door_xcreate (crf
 * non-NULL); door_create passes NULL/NULL/NULL/1.
 *
 * Returns the door descriptor on success, or -1 with errno set.
 */
static int
door_create_cmn(door_server_procedure_t *f, void *cookie, uint_t flags,
    door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
    void *crcookie, int nthread)
{
	int d;

	int is_private = (flags & DOOR_PRIVATE);
	int is_unref = (flags & (DOOR_UNREF | DOOR_UNREF_MULTI));
	int do_create_first = 0;
	int do_create_unref = 0;

	ulwp_t *self = curthread;

	pid_t mypid;

	/* Doors cannot be created between vfork() and exec()/exit(). */
	if (self->ul_vfork) {
		errno = ENOTSUP;
		return (-1);
	}

	/* A creation function implies kernel-visible DOOR_PRIVCREATE. */
	if (crf)
		flags |= DOOR_PRIVCREATE;

	/*
	 * Doors are associated with the processes which created them. In
	 * the face of forkall(), this gets quite complicated. To simplify
	 * it somewhat, we include the call to __door_create() in a critical
	 * section, and figure out what additional actions to take while
	 * still in the critical section.
	 */
	enter_critical(self);
	if ((d = __door_create(f, cookie, flags)) < 0) {
		exit_critical(self);
		return (-1);	/* errno is set */
	}
	mypid = getpid();
	/*
	 * Cheap unlocked pre-check; re-checked under door_state_lock.
	 * A pid mismatch means we are the first door (of this kind)
	 * created in this process since a fork.
	 */
	if (mypid != door_create_pid ||
	    (!is_private && mypid != door_create_first_pid) ||
	    (is_unref && mypid != door_create_unref_pid)) {

		lmutex_lock(&door_state_lock);
		door_create_pid = mypid;

		if (!is_private && mypid != door_create_first_pid) {
			do_create_first = 1;
			door_create_first_pid = mypid;
		}
		if (is_unref && mypid != door_create_unref_pid) {
			do_create_unref = 1;
			door_create_unref_pid = mypid;
		}
		lmutex_unlock(&door_state_lock);
	}
	exit_critical(self);

	if (do_create_unref) {
		/*
		 * Create an unref thread the first time we create an
		 * unref door for this process. Create it as a daemon
		 * thread, so that it doesn't interfere with normal exit
		 * processing.
		 */
		(void) thr_create(NULL, 0, door_unref_func,
		    (void *)(uintptr_t)mypid, THR_DAEMON, NULL);
	}

	if (is_private) {
		door_info_t di;

		/*
		 * Create the first thread(s) for this private door.
		 * NOTE(review): on __door_info failure the new door d is
		 * returned -1 without door_revoke(d) -- descriptor appears
		 * to be leaked on this (unlikely) path; confirm intent.
		 */
		if (__door_info(d, &di) < 0)
			return (-1);	/* errno is set */

		/*
		 * This key must be available for lookup for all private
		 * door threads, whether associated with a door created via
		 * door_create or door_xcreate.
		 */
		(void) pthread_key_create_once_np(&privdoor_key,
		    privdoor_destructor);

		if (crf == NULL) {
			(*door_server_func)(&di);
		} else {
			struct privdoor_data *pdd = malloc(sizeof (*pdd));

			if (pdd == NULL) {
				(void) door_revoke(d);
				errno = ENOMEM;
				return (-1);
			}

			pdd->pd_dfd = d;
			pdd->pd_uniqid = di.di_uniquifier;
			pdd->pd_refcnt = 1; /* prevent free during xcreate_n */
			pdd->pd_crf = crf;
			pdd->pd_crcookie = crcookie;
			pdd->pd_setupf = setupf;

			if (!door_xcreate_n(&di, pdd, nthread)) {
				int errnocp = errno;

				(void) door_revoke(d);
				privdoor_data_rele(pdd);
				errno = errnocp;
				return (-1);
			} else {
				privdoor_data_rele(pdd);
			}
		}
	} else if (do_create_first) {
		/* First non-private door created in the process */
		(*door_server_func)(NULL);
	}

	return (d);
}
2667c478bd9Sstevel@tonic-gate
26749b225e1SGavin Maltby int
door_create(door_server_procedure_t * f,void * cookie,uint_t flags)26849b225e1SGavin Maltby door_create(door_server_procedure_t *f, void *cookie, uint_t flags)
26949b225e1SGavin Maltby {
27049b225e1SGavin Maltby if (flags & (DOOR_NO_DEPLETION_CB | DOOR_PRIVCREATE)) {
27149b225e1SGavin Maltby errno = EINVAL;
27249b225e1SGavin Maltby return (-1);
27349b225e1SGavin Maltby }
27449b225e1SGavin Maltby
27549b225e1SGavin Maltby return (door_create_cmn(f, cookie, flags, NULL, NULL, NULL, 1));
27649b225e1SGavin Maltby }
27749b225e1SGavin Maltby
27849b225e1SGavin Maltby int
door_xcreate(door_server_procedure_t * f,void * cookie,uint_t flags,door_xcreate_server_func_t * crf,door_xcreate_thrsetup_func_t * setupf,void * crcookie,int nthread)27949b225e1SGavin Maltby door_xcreate(door_server_procedure_t *f, void *cookie, uint_t flags,
28049b225e1SGavin Maltby door_xcreate_server_func_t *crf, door_xcreate_thrsetup_func_t *setupf,
28149b225e1SGavin Maltby void *crcookie, int nthread)
28249b225e1SGavin Maltby {
28349b225e1SGavin Maltby if (flags & DOOR_PRIVCREATE || nthread < 1 || crf == NULL) {
28449b225e1SGavin Maltby errno = EINVAL;
28549b225e1SGavin Maltby return (-1);
28649b225e1SGavin Maltby }
28749b225e1SGavin Maltby
28849b225e1SGavin Maltby return (door_create_cmn(f, cookie, flags | DOOR_PRIVATE,
28949b225e1SGavin Maltby crf, setupf, crcookie, nthread));
29049b225e1SGavin Maltby }
29149b225e1SGavin Maltby
/*
 * Fetch the credentials of the door caller into *uc, allocating a
 * ucred_t on the caller's behalf if *uc is NULL.  A ucred we allocated
 * ourselves is freed again on failure; a caller-supplied one is left
 * untouched.  Returns 0 on success, -1 with errno set on failure.
 */
int
door_ucred(ucred_t **uc)
{
	ucred_t *ucp = *uc;
	int caller_supplied = (ucp != NULL);

	if (!caller_supplied) {
		ucp = _ucred_alloc();
		if (ucp == NULL)
			return (-1);
	}

	if (__door_ucred(ucp) != 0) {
		if (!caller_supplied)
			ucred_free(ucp);
		return (-1);
	}

	*uc = ucp;

	return (0);
}
3137c478bd9Sstevel@tonic-gate
/*
 * Legacy door_cred(3C): retrieve the caller's credentials into the
 * fixed door_cred_t structure.  Implemented on top of __door_ucred.
 */
int
door_cred(door_cred_t *dc)
{
	/*
	 * Ucred size is small and alloca is fast
	 * and cannot fail.
	 */
	ucred_t *ucp = alloca(ucred_size());
	int rv = __door_ucred(ucp);

	if (rv != 0)
		return (rv);

	dc->dc_euid = ucred_geteuid(ucp);
	dc->dc_ruid = ucred_getruid(ucp);
	dc->dc_egid = ucred_getegid(ucp);
	dc->dc_rgid = ucred_getrgid(ucp);
	dc->dc_pid = ucred_getpid(ucp);

	return (rv);
}
3337c478bd9Sstevel@tonic-gate
33449b225e1SGavin Maltby int
door_unbind(void)33549b225e1SGavin Maltby door_unbind(void)
33649b225e1SGavin Maltby {
33749b225e1SGavin Maltby struct privdoor_data *pdd;
33849b225e1SGavin Maltby int rv = __door_unbind();
33949b225e1SGavin Maltby
34049b225e1SGavin Maltby /*
34149b225e1SGavin Maltby * If we were indeed bound to the door then check to see whether
34249b225e1SGavin Maltby * we are part of a door_xcreate'd door by checking for our TSD.
34349b225e1SGavin Maltby * If so, then clear the TSD for this key to avoid destructor
34449b225e1SGavin Maltby * callback on future thread exit, and release the private door data.
34549b225e1SGavin Maltby */
34649b225e1SGavin Maltby if (rv == 0 && (pdd = pthread_getspecific(privdoor_key)) != NULL) {
34749b225e1SGavin Maltby (void) pthread_setspecific(privdoor_key, NULL);
34849b225e1SGavin Maltby privdoor_data_rele(pdd);
34949b225e1SGavin Maltby }
35049b225e1SGavin Maltby
35149b225e1SGavin Maltby return (rv);
35249b225e1SGavin Maltby }
35349b225e1SGavin Maltby
/*
 * door_return(3C): return results to the door caller and park this
 * thread awaiting the next invocation.
 *
 * Before trapping into the kernel we compute the stack region (sp,
 * ssize) the kernel may use for the next invocation: start from
 * thr_stksegment(), optionally carve out a fractional reservation so
 * the post-return code has stack to run on, then subtract the
 * historical "slop" the syscall wrapper used to subtract.
 *
 * Returns -1 with errno set on error; on success the call does not
 * return here in the normal sense (the thread resumes in the door
 * server procedure for the next invocation).
 */
int
door_return(char *data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t num_desc)
{
	caddr_t sp;
	size_t ssize;
	size_t reserve;
	ulwp_t *self = curthread;

	{
		stack_t s;
		if (thr_stksegment(&s) != 0) {
			errno = EINVAL;
			return (-1);
		}
		sp = s.ss_sp;
		ssize = s.ss_size;
	}

	if (!self->ul_door_noreserve) {
		/*
		 * When we return from the kernel, we must have enough stack
		 * available to handle the request. Since the creator of
		 * the thread has control over its stack size, and larger
		 * stacks generally indicate bigger request queues, we
		 * use the heuristic of reserving 1/32nd of the stack size
		 * (up to the default stack size), with a minimum of 1/8th
		 * of MINSTACK. Currently, this translates to:
		 *
		 *			_ILP32		_LP64
		 *	min resv	512 bytes	1024 bytes
		 *	max resv	32k bytes	64k bytes
		 *
		 * This reservation can be disabled by setting
		 *	_THREAD_DOOR_NORESERVE=1
		 * in the environment, but shouldn't be.
		 */

#define	STACK_FRACTION		32
#define	MINSTACK_FRACTION	8

		if (ssize < (MINSTACK * (STACK_FRACTION/MINSTACK_FRACTION)))
			reserve = MINSTACK / MINSTACK_FRACTION;
		else if (ssize < DEFAULTSTACK)
			reserve = ssize / STACK_FRACTION;
		else
			reserve = DEFAULTSTACK / STACK_FRACTION;

#undef STACK_FRACTION
#undef MINSTACK_FRACTION

		if (ssize > reserve)
			ssize -= reserve;
		else
			ssize = 0;
	}

	/*
	 * Historically, the __door_return() syscall wrapper subtracted
	 * some "slop" from the stack pointer before trapping into the
	 * kernel.  We now do this here, so that ssize can be adjusted
	 * correctly.  Eventually, this should be removed, since it is
	 * unnecessary.  (note that TNF on x86 currently relies upon this
	 * idiocy)
	 */
#if defined(__sparc)
	reserve = SA(MINFRAME);
#elif defined(__x86)
	reserve = SA(512);
#else
#error need to define stack base reserve
#endif

#ifdef _STACK_GROWS_DOWNWARD
	sp -= reserve;
#else
#error stack does not grow downwards, routine needs update
#endif

	if (ssize > reserve)
		ssize -= reserve;
	else
		ssize = 0;

	/*
	 * Normally, the above will leave plenty of space in sp for a
	 * request.  Just in case some bozo overrides thr_stksegment() to
	 * return an uncommonly small stack size, we turn off stack size
	 * checking if there is less than 1k remaining.
	 */
#define	MIN_DOOR_STACK	1024
	if (ssize < MIN_DOOR_STACK)
		ssize = 0;

#undef MIN_DOOR_STACK

	/*
	 * We have to wrap the desc_* arguments for the syscall.  If there are
	 * no descriptors being returned, we can skip the wrapping.
	 */
	if (num_desc != 0) {
		door_return_desc_t d;

		d.desc_ptr = desc_ptr;
		d.desc_num = num_desc;
		return (__door_return(data_ptr, data_size, &d, sp, ssize));
	}
	return (__door_return(data_ptr, data_size, NULL, sp, ssize));
}
4637c478bd9Sstevel@tonic-gate
/*
 * To start and synchronize a number of door service threads at once
 * we use a struct door_xsync_shared shared by all threads, and
 * a struct door_xsync for each thread.  While each thread
 * has its own startup state, all such state are protected by the same
 * shared lock.  This could cause a little contention but it is a one-off
 * cost at door creation.
 */
enum door_xsync_state {
	DOOR_XSYNC_CREATEWAIT = 0x1c8c8c80, /* awaits creation handshake */
	DOOR_XSYNC_ABORT,	/* aborting door_xcreate */
	DOOR_XSYNC_ABORTED,	/* thread heeded abort request */
	DOOR_XSYNC_MAXCONCUR,	/* create func decided no more */
	DOOR_XSYNC_CREATEFAIL,	/* thr_create/pthread_create failure */
	DOOR_XSYNC_SETSPEC_FAIL, /* setspecific failed */
	DOOR_XSYNC_BINDFAIL,	/* door_bind failed */
	DOOR_XSYNC_BOUND,	/* door_bind succeeded */
	DOOR_XSYNC_ENTER_SERVICE /* Go on to door_return */
};

/* These stats are incremented non-atomically - indicative only */
uint64_t door_xcreate_n_stats[DOOR_XSYNC_ENTER_SERVICE -
    DOOR_XSYNC_CREATEWAIT + 1];

/* Handshake state shared by door_xcreate_n and all threads it spawns. */
struct door_xsync_shared {
	pthread_mutex_t lock;		/* protects all xsync state */
	pthread_cond_t cv_m2s;		/* master-to-server wakeups */
	pthread_cond_t cv_s2m;		/* server-to-master wakeups */
	struct privdoor_data *pdd;	/* door the threads will serve */
	volatile uint32_t waiting;	/* threads yet to check in */
};

/* Per-thread handshake slot; points back at the shared state. */
struct door_xsync {
	volatile enum door_xsync_state state;
	struct door_xsync_shared *sharedp;
};
50049b225e1SGavin Maltby
/*
 * Thread start function that xcreated private doors must use in
 * thr_create or pthread_create.  They must also use the argument we
 * provide.  We:
 *
 *	o call a thread setup function if supplied, or apply sensible defaults
 *	o bind the newly-created thread to the door it will service
 *	o synchronize with door_xcreate to indicate that we have successfully
 *	  bound to the door;  door_xcreate will not return until all
 *	  requested threads have at least bound
 *	o enter service with door_return quoting magic sentinel args
 */
void *
door_xcreate_startf(void *arg)
{
	struct door_xsync *xsp = (struct door_xsync *)arg;
	struct door_xsync_shared *xssp = xsp->sharedp;
	struct privdoor_data *pdd = xssp->pdd;
	enum door_xsync_state next_state;

	/*
	 * Hold the per-door data first: once our TSD is set, the key
	 * destructor at thread exit will release this hold.
	 */
	privdoor_data_hold(pdd);
	if (pthread_setspecific(privdoor_key, (const void *)pdd) != 0) {
		next_state = DOOR_XSYNC_SETSPEC_FAIL;
		privdoor_data_rele(pdd);	/* destructor won't run */
		goto handshake;
	}

	if (pdd->pd_setupf != NULL) {
		/* Caller-provided per-thread setup. */
		(pdd->pd_setupf)(pdd->pd_crcookie);
	} else {
		/* Default setup: door server threads are not cancellable. */
		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
		(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
	}

	if (door_bind(pdd->pd_dfd) == 0)
		next_state = DOOR_XSYNC_BOUND;
	else
		next_state = DOOR_XSYNC_BINDFAIL;

handshake:
	(void) pthread_mutex_lock(&xssp->lock);

	ASSERT(xsp->state == DOOR_XSYNC_CREATEWAIT ||
	    xsp->state == DOOR_XSYNC_ABORT);

	/* An abort requested by the master overrides our own outcome. */
	if (xsp->state == DOOR_XSYNC_ABORT)
		next_state = DOOR_XSYNC_ABORTED;

	xsp->state = next_state;

	/* Last thread to check in wakes the master (phase 1). */
	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	if (next_state != DOOR_XSYNC_BOUND) {
		(void) pthread_mutex_unlock(&xssp->lock);
		return (NULL);	/* thread exits, key destructor called */
	}

	/* Wait for the master's go/abort decision (phase 2). */
	while (xsp->state == DOOR_XSYNC_BOUND)
		(void) pthread_cond_wait(&xssp->cv_m2s, &xssp->lock);

	next_state = xsp->state;
	ASSERT(next_state == DOOR_XSYNC_ENTER_SERVICE ||
	    next_state == DOOR_XSYNC_ABORT);

	/* Last thread to acknowledge wakes the master (phase 2). */
	if (--xssp->waiting == 0)
		(void) pthread_cond_signal(&xssp->cv_s2m);

	(void) pthread_mutex_unlock(&xssp->lock); /* xssp/xsp can be freed */

	if (next_state == DOOR_XSYNC_ABORT)
		return (NULL);	/* thread exits, key destructor called */

	/* Magic sentinel args: park awaiting the first door invocation. */
	(void) door_return(NULL, 0, NULL, 0);
	return (NULL);
}
57749b225e1SGavin Maltby
/*
 * Create and synchronize with n new private server threads for the door
 * described by dip, using the application-supplied thread creation
 * function pdd->pd_crf.  Per-thread mailboxes and the shared sync state
 * live on *our* stack, so we must not return until every created thread
 * has completed its startup handshake.  Returns 1 if all n threads were
 * created and bound to the door's private pool; otherwise returns 0 with
 * errno set.  Called from door_xcreate() with the privdoor_data, and
 * from the depletion-callback path with pdd == NULL (in which case the
 * privdoor_data is fetched from this thread's TSD).
 */
static int
door_xcreate_n(door_info_t *dip, struct privdoor_data *pdd, int n)
{
	struct door_xsync_shared *xssp;	/* shared sync state, on our stack */
	struct door_xsync *xsp;		/* per-thread mailboxes, on our stack */
	int i, failidx = -1;		/* index of first creation failure */
	int isdepcb = 0;		/* nonzero on depletion-callback path */
	int failerrno;			/* errno to report when returning 0 */
	int bound = 0;			/* threads that reached BOUND state */
#ifdef _STACK_GROWS_DOWNWARD
	int stkdir = -1;
#else
	int stkdir = 1;
#endif
	int rv = 0;

	/*
	 * If we're called during door creation then we have the
	 * privdoor_data. If we're called as part of a depletion callback
	 * then the current thread has the privdoor_data as TSD.
	 */
	if (pdd == NULL) {
		isdepcb = 1;
		if ((pdd = pthread_getspecific(privdoor_key)) == NULL)
			thr_panic("door_xcreate_n - no privdoor_data "
			    "on existing server thread");
	}

	/*
	 * Allocate on our stack. We'll pass pointers to this to the
	 * newly-created threads, therefore this function must not return until
	 * we have synced with server threads that are created.
	 * We do not limit the number of threads so begin by checking
	 * that we have space on the stack for this.
	 */
	{
		size_t sz = sizeof (*xssp) + n * sizeof (*xsp) + 32;
		char dummy;

		/* probe in the direction the stack grows */
		if (!stack_inbounds(&dummy + stkdir * sz)) {
			errno = E2BIG;
			return (0);
		}
	}

	if ((xssp = alloca(sizeof (*xssp))) == NULL ||
	    (xsp = alloca(n * sizeof (*xsp))) == NULL) {
		errno = E2BIG;
		return (0);
	}

	/* cv_m2s: master-to-server wakeups; cv_s2m: server-to-master */
	(void) pthread_mutex_init(&xssp->lock, NULL);
	(void) pthread_cond_init(&xssp->cv_m2s, NULL);
	(void) pthread_cond_init(&xssp->cv_s2m, NULL);
	xssp->pdd = pdd;
	xssp->waiting = 0;

	/* Hold the lock across creation and both handshake rounds. */
	(void) pthread_mutex_lock(&xssp->lock);

	for (i = 0; failidx == -1 && i < n; i++) {
		xsp[i].sharedp = xssp;
		membar_producer(); /* xssp and xsp[i] for new thread */

		switch ((pdd->pd_crf)(dip, door_xcreate_startf,
		    (void *)&xsp[i], pdd->pd_crcookie)) {
		case 1:
			/*
			 * Thread successfully created. Set mailbox
			 * state and increment the number we have to
			 * sync with.
			 */
			xsp[i].state = DOOR_XSYNC_CREATEWAIT;
			xssp->waiting++;
			break;
		case 0:
			/*
			 * Elected to create no further threads. OK for
			 * a depletion callback, but not during door_xcreate.
			 */
			xsp[i].state = DOOR_XSYNC_MAXCONCUR;
			if (!isdepcb) {
				failidx = i;
				failerrno = EINVAL;
			}
			break;
		case -1:
			/*
			 * Thread creation was attempted but failed.
			 */
			xsp[i].state = DOOR_XSYNC_CREATEFAIL;
			failidx = i;
			failerrno = EPIPE;
			break;
		default:
			/*
			 * The application-supplied function did not return
			 * -1/0/1 - best we can do is panic because anything
			 * else is harder to debug.
			 */
			thr_panic("door server create function illegal return");
			/*NOTREACHED*/
		}
	}

	/*
	 * On initial creation all must succeed; if not then abort
	 * the threads that were created before the failing index.
	 */
	if (!isdepcb && failidx != -1) {
		for (i = 0; i < failidx; i++)
			if (xsp[i].state == DOOR_XSYNC_CREATEWAIT)
				xsp[i].state = DOOR_XSYNC_ABORT;
	}

	/*
	 * Wait for thread startup handshake to complete for all threads
	 */
	while (xssp->waiting)
		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);

	/*
	 * If we are aborting for a failed thread create in door_xcreate
	 * then we're done.
	 */
	if (!isdepcb && failidx != -1) {
		rv = 0;
		goto out; /* lock held, failerrno is set */
	}

	/*
	 * Did we all succeed in binding?
	 */
	for (i = 0; i < n; i++) {
		/* bump the stat bucket for the state this thread reached */
		int statidx = xsp[i].state - DOOR_XSYNC_CREATEWAIT;

		door_xcreate_n_stats[statidx]++;
		if (xsp[i].state == DOOR_XSYNC_BOUND)
			bound++;
	}

	if (bound == n) {
		rv = 1;
	} else {
		failerrno = EBADF;
		rv = 0;
	}

	/*
	 * During door_xcreate all must succeed in binding - if not then
	 * we command even those that did bind to abort. Threads that
	 * did not get as far as binding have already exited.
	 */
	for (i = 0; i < n; i++) {
		if (xsp[i].state == DOOR_XSYNC_BOUND) {
			xsp[i].state = (rv == 1 || isdepcb) ?
			    DOOR_XSYNC_ENTER_SERVICE : DOOR_XSYNC_ABORT;
			xssp->waiting++;
		}
	}

	/* Release the bound threads, then wait for them to acknowledge. */
	(void) pthread_cond_broadcast(&xssp->cv_m2s);

	while (xssp->waiting)
		(void) pthread_cond_wait(&xssp->cv_s2m, &xssp->lock);

out:
	/* After this unlock the server threads may no longer touch xssp/xsp */
	(void) pthread_mutex_unlock(&xssp->lock);
	(void) pthread_mutex_destroy(&xssp->lock);
	(void) pthread_cond_destroy(&xssp->cv_m2s);
	(void) pthread_cond_destroy(&xssp->cv_s2m);

	if (rv == 0)
		errno = failerrno;

	return (rv);
}
75349b225e1SGavin Maltby
75449b225e1SGavin Maltby /*
75549b225e1SGavin Maltby * Call the server creation function to give it the opportunity to
75649b225e1SGavin Maltby * create more threads. Called during a door invocation when we
75749b225e1SGavin Maltby * return from door_return(NULL,0, NULL, 0) and notice that we're
75849b225e1SGavin Maltby * running on the last available thread.
75949b225e1SGavin Maltby */
76049b225e1SGavin Maltby void
door_depletion_cb(door_info_t * dip)76149b225e1SGavin Maltby door_depletion_cb(door_info_t *dip)
76249b225e1SGavin Maltby {
76349b225e1SGavin Maltby if (dip == NULL) {
76449b225e1SGavin Maltby /*
76549b225e1SGavin Maltby * Non-private doors always use door_server_func.
76649b225e1SGavin Maltby */
76749b225e1SGavin Maltby (*door_server_func)(NULL);
76849b225e1SGavin Maltby return;
76949b225e1SGavin Maltby }
77049b225e1SGavin Maltby
77149b225e1SGavin Maltby if (dip->di_attributes & DOOR_NO_DEPLETION_CB) {
77249b225e1SGavin Maltby /*
77349b225e1SGavin Maltby * Private, door_xcreate'd door specified no callbacks.
77449b225e1SGavin Maltby */
77549b225e1SGavin Maltby return;
77649b225e1SGavin Maltby } else if (!(dip->di_attributes & DOOR_PRIVCREATE)) {
77749b225e1SGavin Maltby /*
77849b225e1SGavin Maltby * Private door with standard/legacy creation semantics.
77949b225e1SGavin Maltby */
78049b225e1SGavin Maltby dip->di_attributes |= DOOR_DEPLETION_CB;
78149b225e1SGavin Maltby (*door_server_func)(dip);
78249b225e1SGavin Maltby return;
78349b225e1SGavin Maltby } else {
78449b225e1SGavin Maltby /*
78549b225e1SGavin Maltby * Private, door_xcreate'd door.
78649b225e1SGavin Maltby */
78749b225e1SGavin Maltby dip->di_attributes |= DOOR_DEPLETION_CB;
78849b225e1SGavin Maltby (void) door_xcreate_n(dip, NULL, 1);
78949b225e1SGavin Maltby }
79049b225e1SGavin Maltby }
79149b225e1SGavin Maltby
79249b225e1SGavin Maltby /*
79349b225e1SGavin Maltby * Install a new server creation function. The appointed function
79449b225e1SGavin Maltby * will receieve depletion callbacks for non-private doors and private
79549b225e1SGavin Maltby * doors created with door_create(..., DOOR_PRIVATE).
7967c478bd9Sstevel@tonic-gate */
7977c478bd9Sstevel@tonic-gate door_server_func_t *
door_server_create(door_server_func_t * create_func)7987c478bd9Sstevel@tonic-gate door_server_create(door_server_func_t *create_func)
7997c478bd9Sstevel@tonic-gate {
8007c478bd9Sstevel@tonic-gate door_server_func_t *prev;
8017c478bd9Sstevel@tonic-gate
8027c478bd9Sstevel@tonic-gate lmutex_lock(&door_state_lock);
8037c478bd9Sstevel@tonic-gate prev = door_server_func;
8047c478bd9Sstevel@tonic-gate door_server_func = create_func;
8057c478bd9Sstevel@tonic-gate lmutex_unlock(&door_state_lock);
8067c478bd9Sstevel@tonic-gate
8077c478bd9Sstevel@tonic-gate return (prev);
8087c478bd9Sstevel@tonic-gate }
8097c478bd9Sstevel@tonic-gate
/*
 * Thread start function for door_create_server() below.
 * Create door server threads with cancellation(7) disabled.
 */
static void *
door_create_func(void *arg)
{
	/*
	 * Disable cancellation before parking in door_return(); a door
	 * server thread must not be torn down mid-invocation by
	 * pthread_cancel().
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
	/* Hand this thread to the kernel to service door invocations. */
	(void) door_return(NULL, 0, NULL, 0);

	/* door_return() only returns on error; the thread then exits */
	return (arg);
}
8227c478bd9Sstevel@tonic-gate
/*
 * The default door_server_func_t: spawn one detached server thread
 * running door_create_func() above.  Thread creation errors are
 * deliberately ignored (best-effort pool growth).
 */
static void
door_create_server(door_info_t *dip __unused)
{
	(void) thr_create(NULL, 0, door_create_func, NULL, THR_DETACHED, NULL);
	yield();	/* Gives server thread a chance to run */
}
832