xref: /illumos-gate/usr/src/lib/libnsl/rpc/svc.c (revision 7c478bd9)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate /*
27*7c478bd9Sstevel@tonic-gate  * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
28*7c478bd9Sstevel@tonic-gate  */
29*7c478bd9Sstevel@tonic-gate /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
30*7c478bd9Sstevel@tonic-gate /* All Rights Reserved */
31*7c478bd9Sstevel@tonic-gate /*
32*7c478bd9Sstevel@tonic-gate  * Portions of this source code were derived from Berkeley
33*7c478bd9Sstevel@tonic-gate  * 4.3 BSD under license from the Regents of the University of
34*7c478bd9Sstevel@tonic-gate  * California.
35*7c478bd9Sstevel@tonic-gate  */
36*7c478bd9Sstevel@tonic-gate 
37*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
38*7c478bd9Sstevel@tonic-gate 
39*7c478bd9Sstevel@tonic-gate /*
40*7c478bd9Sstevel@tonic-gate  * svc.c, Server-side remote procedure call interface.
41*7c478bd9Sstevel@tonic-gate  *
42*7c478bd9Sstevel@tonic-gate  * There are two sets of procedures here.  The xprt routines are
43*7c478bd9Sstevel@tonic-gate  * for handling transport handles.  The svc routines handle the
44*7c478bd9Sstevel@tonic-gate  * list of service routines.
45*7c478bd9Sstevel@tonic-gate  *
46*7c478bd9Sstevel@tonic-gate  */
47*7c478bd9Sstevel@tonic-gate 
48*7c478bd9Sstevel@tonic-gate 
49*7c478bd9Sstevel@tonic-gate #include "mt.h"
50*7c478bd9Sstevel@tonic-gate #include "rpc_mt.h"
51*7c478bd9Sstevel@tonic-gate #include <assert.h>
52*7c478bd9Sstevel@tonic-gate #include <errno.h>
53*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
54*7c478bd9Sstevel@tonic-gate #include <stropts.h>
55*7c478bd9Sstevel@tonic-gate #include <sys/conf.h>
56*7c478bd9Sstevel@tonic-gate #include <rpc/trace.h>
57*7c478bd9Sstevel@tonic-gate #include <rpc/rpc.h>
58*7c478bd9Sstevel@tonic-gate #ifdef PORTMAP
59*7c478bd9Sstevel@tonic-gate #include <rpc/pmap_clnt.h>
60*7c478bd9Sstevel@tonic-gate #endif
61*7c478bd9Sstevel@tonic-gate #include <sys/poll.h>
62*7c478bd9Sstevel@tonic-gate #include <netconfig.h>
63*7c478bd9Sstevel@tonic-gate #include <syslog.h>
64*7c478bd9Sstevel@tonic-gate #include <stdlib.h>
65*7c478bd9Sstevel@tonic-gate #include <unistd.h>
66*7c478bd9Sstevel@tonic-gate #include <string.h>
67*7c478bd9Sstevel@tonic-gate #include <limits.h>
68*7c478bd9Sstevel@tonic-gate 
69*7c478bd9Sstevel@tonic-gate extern bool_t __svc_get_door_cred();
70*7c478bd9Sstevel@tonic-gate extern bool_t __rpc_get_local_cred();
71*7c478bd9Sstevel@tonic-gate 
72*7c478bd9Sstevel@tonic-gate SVCXPRT **svc_xports;
73*7c478bd9Sstevel@tonic-gate static int nsvc_xports; 	/* total number of svc_xports allocated */
74*7c478bd9Sstevel@tonic-gate 
75*7c478bd9Sstevel@tonic-gate XDR **svc_xdrs;		/* common XDR receive area */
76*7c478bd9Sstevel@tonic-gate int nsvc_xdrs;		/* total number of svc_xdrs allocated */
77*7c478bd9Sstevel@tonic-gate 
78*7c478bd9Sstevel@tonic-gate int __rpc_use_pollfd_done;	/* set to lift the limit on connections */
79*7c478bd9Sstevel@tonic-gate 
80*7c478bd9Sstevel@tonic-gate #define	NULL_SVC ((struct svc_callout *)0)
81*7c478bd9Sstevel@tonic-gate #define	RQCRED_SIZE	400		/* this size is excessive */
82*7c478bd9Sstevel@tonic-gate 
83*7c478bd9Sstevel@tonic-gate /*
84*7c478bd9Sstevel@tonic-gate  * The services list
85*7c478bd9Sstevel@tonic-gate  * Each entry represents a set of procedures (an rpc program).
86*7c478bd9Sstevel@tonic-gate  * The dispatch routine takes request structs and runs the
87*7c478bd9Sstevel@tonic-gate  * appropriate procedure.
88*7c478bd9Sstevel@tonic-gate  */
89*7c478bd9Sstevel@tonic-gate static struct svc_callout {
90*7c478bd9Sstevel@tonic-gate 	struct svc_callout *sc_next;
91*7c478bd9Sstevel@tonic-gate 	rpcprog_t	    sc_prog;
92*7c478bd9Sstevel@tonic-gate 	rpcvers_t	    sc_vers;
93*7c478bd9Sstevel@tonic-gate 	char		   *sc_netid;
94*7c478bd9Sstevel@tonic-gate 	void		    (*sc_dispatch)();
95*7c478bd9Sstevel@tonic-gate } *svc_head;
96*7c478bd9Sstevel@tonic-gate extern rwlock_t	svc_lock;
97*7c478bd9Sstevel@tonic-gate 
98*7c478bd9Sstevel@tonic-gate static struct svc_callout *svc_find();
99*7c478bd9Sstevel@tonic-gate int _svc_prog_dispatch();
100*7c478bd9Sstevel@tonic-gate void svc_getreq_common();
101*7c478bd9Sstevel@tonic-gate char *strdup();
102*7c478bd9Sstevel@tonic-gate 
103*7c478bd9Sstevel@tonic-gate extern mutex_t	svc_door_mutex;
104*7c478bd9Sstevel@tonic-gate extern cond_t	svc_door_waitcv;
105*7c478bd9Sstevel@tonic-gate extern int	svc_ndoorfds;
106*7c478bd9Sstevel@tonic-gate extern SVCXPRT_LIST *_svc_xprtlist;
107*7c478bd9Sstevel@tonic-gate extern mutex_t xprtlist_lock;
108*7c478bd9Sstevel@tonic-gate extern void __svc_rm_from_xlist();
109*7c478bd9Sstevel@tonic-gate 
110*7c478bd9Sstevel@tonic-gate extern fd_set _new_svc_fdset;
111*7c478bd9Sstevel@tonic-gate 
112*7c478bd9Sstevel@tonic-gate /*
113*7c478bd9Sstevel@tonic-gate  * If the allocated array of user fd entries is too small, it is grown by
114*7c478bd9Sstevel@tonic-gate  * this margin, which reduces the number of reallocations.
115*7c478bd9Sstevel@tonic-gate  */
116*7c478bd9Sstevel@tonic-gate #define	USER_FD_INCREMENT 5
117*7c478bd9Sstevel@tonic-gate 
118*7c478bd9Sstevel@tonic-gate static void add_pollfd(int fd, short events);
119*7c478bd9Sstevel@tonic-gate static void remove_pollfd(int fd);
120*7c478bd9Sstevel@tonic-gate static void __svc_remove_input_of_fd(int fd);
121*7c478bd9Sstevel@tonic-gate 
122*7c478bd9Sstevel@tonic-gate 
123*7c478bd9Sstevel@tonic-gate /*
124*7c478bd9Sstevel@tonic-gate  * Data used to handle a user fd reactor entry:
125*7c478bd9Sstevel@tonic-gate  * 	- the file descriptor we listen to,
126*7c478bd9Sstevel@tonic-gate  *	- a callback invoked when the fd becomes ready,
127*7c478bd9Sstevel@tonic-gate  *	- and a cookie passed as a parameter to the callback.
128*7c478bd9Sstevel@tonic-gate  *
129*7c478bd9Sstevel@tonic-gate  * The structure is an array indexed by file descriptor.  Each entry points
130*7c478bd9Sstevel@tonic-gate  * to the head of a doubly linked list of callbacks.
131*7c478bd9Sstevel@tonic-gate  * Only one callback may be associated with a given (fd, event) pair.
132*7c478bd9Sstevel@tonic-gate  */
133*7c478bd9Sstevel@tonic-gate 
134*7c478bd9Sstevel@tonic-gate struct _svc_user_fd_head;
135*7c478bd9Sstevel@tonic-gate 
136*7c478bd9Sstevel@tonic-gate typedef struct
137*7c478bd9Sstevel@tonic-gate {
138*7c478bd9Sstevel@tonic-gate 	struct _svc_user_fd_node *next;
139*7c478bd9Sstevel@tonic-gate 	struct _svc_user_fd_node *previous;
140*7c478bd9Sstevel@tonic-gate } _svc_user_link;
141*7c478bd9Sstevel@tonic-gate 
142*7c478bd9Sstevel@tonic-gate typedef struct _svc_user_fd_node
143*7c478bd9Sstevel@tonic-gate {
144*7c478bd9Sstevel@tonic-gate 	/* The lnk field must be the first field. */
145*7c478bd9Sstevel@tonic-gate 	_svc_user_link lnk;
146*7c478bd9Sstevel@tonic-gate 	svc_input_id_t id;
147*7c478bd9Sstevel@tonic-gate 	int	    fd;
148*7c478bd9Sstevel@tonic-gate 	unsigned int   events;
149*7c478bd9Sstevel@tonic-gate 	svc_callback_t callback;
150*7c478bd9Sstevel@tonic-gate 	void*	  cookie;
151*7c478bd9Sstevel@tonic-gate } _svc_user_fd_node;
152*7c478bd9Sstevel@tonic-gate 
153*7c478bd9Sstevel@tonic-gate typedef struct _svc_user_fd_head
154*7c478bd9Sstevel@tonic-gate {
155*7c478bd9Sstevel@tonic-gate 	/* The lnk field must be the first field. */
156*7c478bd9Sstevel@tonic-gate 	_svc_user_link lnk;
157*7c478bd9Sstevel@tonic-gate 	unsigned int mask;    /* logical OR of all sub-masks */
158*7c478bd9Sstevel@tonic-gate } _svc_user_fd_head;
159*7c478bd9Sstevel@tonic-gate 
160*7c478bd9Sstevel@tonic-gate 
161*7c478bd9Sstevel@tonic-gate /* Define some macros to manage the linked list. */
162*7c478bd9Sstevel@tonic-gate #define	LIST_ISEMPTY(l) ((_svc_user_fd_node *) &(l.lnk) == l.lnk.next)
163*7c478bd9Sstevel@tonic-gate #define	LIST_CLR(l) \
164*7c478bd9Sstevel@tonic-gate 	(l.lnk.previous = l.lnk.next = (_svc_user_fd_node *) &(l.lnk))
165*7c478bd9Sstevel@tonic-gate 
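/*
 * Illustrative note (not part of the library code): the per-fd list is
 * circular through a sentinel that lives in svc_userfds[fd] itself.  With
 * two callbacks registered on an fd:
 *
 *	sentinel.next -> nodeA,  nodeA.next -> nodeB,  nodeB.next -> sentinel
 *	sentinel.mask == (nodeA->events | nodeB->events)
 *
 * LIST_ISEMPTY() and LIST_CLR() above rely on an empty list being the
 * sentinel pointing at itself.
 */
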
166*7c478bd9Sstevel@tonic-gate /* Array of user fd reactor entries, indexed by file descriptor */
167*7c478bd9Sstevel@tonic-gate static _svc_user_fd_head *svc_userfds  = NULL;
168*7c478bd9Sstevel@tonic-gate 
169*7c478bd9Sstevel@tonic-gate /* current number of entries in svc_userfds */
170*7c478bd9Sstevel@tonic-gate static int svc_nuserfds = 0;
171*7c478bd9Sstevel@tonic-gate 
172*7c478bd9Sstevel@tonic-gate /* Mutex to ensure MT safe operations for user fds callbacks. */
173*7c478bd9Sstevel@tonic-gate static mutex_t svc_userfds_lock = DEFAULTMUTEX;
174*7c478bd9Sstevel@tonic-gate 
175*7c478bd9Sstevel@tonic-gate 
176*7c478bd9Sstevel@tonic-gate /*
177*7c478bd9Sstevel@tonic-gate  * This structure is used to get constant-time algorithms.  There is an array
178*7c478bd9Sstevel@tonic-gate  * of these structures, svc_nmgtuserfds entries long.  When the user registers
179*7c478bd9Sstevel@tonic-gate  * a new callback, the address of the created node is stored in a cell of
180*7c478bd9Sstevel@tonic-gate  * this array; the index of that cell is returned as the unique identifier.
181*7c478bd9Sstevel@tonic-gate  *
182*7c478bd9Sstevel@tonic-gate  * On removal, the id is given by the user, and the free flag tells whether
183*7c478bd9Sstevel@tonic-gate  * the cell is in use.  If it is free, we return an error.  Otherwise,
184*7c478bd9Sstevel@tonic-gate  * we can free the structure pointed to by fd_node.
185*7c478bd9Sstevel@tonic-gate  *
186*7c478bd9Sstevel@tonic-gate  * On insertion, we take an index from the free list threaded through
187*7c478bd9Sstevel@tonic-gate  * (first_free, next_free).  In this way, with a constant-time computation,
188*7c478bd9Sstevel@tonic-gate  * we can hand a correct index back to the user.
189*7c478bd9Sstevel@tonic-gate  */
190*7c478bd9Sstevel@tonic-gate 
191*7c478bd9Sstevel@tonic-gate typedef struct _svc_management_user_fd
192*7c478bd9Sstevel@tonic-gate {
193*7c478bd9Sstevel@tonic-gate 	bool_t free;
194*7c478bd9Sstevel@tonic-gate 	union {
195*7c478bd9Sstevel@tonic-gate 		svc_input_id_t next_free;
196*7c478bd9Sstevel@tonic-gate 		_svc_user_fd_node *fd_node;
197*7c478bd9Sstevel@tonic-gate 	} data;
198*7c478bd9Sstevel@tonic-gate } _svc_management_user_fd;
199*7c478bd9Sstevel@tonic-gate 
200*7c478bd9Sstevel@tonic-gate /* index to the first free elem */
201*7c478bd9Sstevel@tonic-gate static svc_input_id_t first_free = (svc_input_id_t)-1;
202*7c478bd9Sstevel@tonic-gate /* the size of this array is svc_nmgtuserfds */
203*7c478bd9Sstevel@tonic-gate static _svc_management_user_fd* user_fd_mgt_array = NULL;
204*7c478bd9Sstevel@tonic-gate 
205*7c478bd9Sstevel@tonic-gate /* current size of user_fd_mgt_array */
206*7c478bd9Sstevel@tonic-gate static int svc_nmgtuserfds = 0;
207*7c478bd9Sstevel@tonic-gate 
208*7c478bd9Sstevel@tonic-gate 
209*7c478bd9Sstevel@tonic-gate /* Define some macros to access data associated to registration ids. */
210*7c478bd9Sstevel@tonic-gate #define	node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
211*7c478bd9Sstevel@tonic-gate #define	is_free_id(id) (user_fd_mgt_array[(int)id].free)
212*7c478bd9Sstevel@tonic-gate 
213*7c478bd9Sstevel@tonic-gate #ifndef POLLSTANDARD
214*7c478bd9Sstevel@tonic-gate #define	POLLSTANDARD \
215*7c478bd9Sstevel@tonic-gate 	(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
216*7c478bd9Sstevel@tonic-gate 	POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
217*7c478bd9Sstevel@tonic-gate #endif
218*7c478bd9Sstevel@tonic-gate 
219*7c478bd9Sstevel@tonic-gate /*
220*7c478bd9Sstevel@tonic-gate  * To free an id, we mark its cell as free and push it onto the head of the
221*7c478bd9Sstevel@tonic-gate  * free list.
222*7c478bd9Sstevel@tonic-gate  */
223*7c478bd9Sstevel@tonic-gate 
224*7c478bd9Sstevel@tonic-gate static void
225*7c478bd9Sstevel@tonic-gate _svc_free_id(const svc_input_id_t id)
226*7c478bd9Sstevel@tonic-gate {
227*7c478bd9Sstevel@tonic-gate 	assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
228*7c478bd9Sstevel@tonic-gate 	user_fd_mgt_array[(int)id].free = TRUE;
229*7c478bd9Sstevel@tonic-gate 	user_fd_mgt_array[(int)id].data.next_free = first_free;
230*7c478bd9Sstevel@tonic-gate 	first_free = id;
231*7c478bd9Sstevel@tonic-gate }
232*7c478bd9Sstevel@tonic-gate 
233*7c478bd9Sstevel@tonic-gate /*
234*7c478bd9Sstevel@tonic-gate  * To get a free cell, we just have to take it from the free linked list and
235*7c478bd9Sstevel@tonic-gate  * set the flag to "not free".  This function also allocates new memory if
236*7c478bd9Sstevel@tonic-gate  * necessary.
237*7c478bd9Sstevel@tonic-gate  */
238*7c478bd9Sstevel@tonic-gate static svc_input_id_t
239*7c478bd9Sstevel@tonic-gate _svc_attribute_new_id(_svc_user_fd_node *node)
240*7c478bd9Sstevel@tonic-gate {
241*7c478bd9Sstevel@tonic-gate 	int selected_index = (int)first_free;
242*7c478bd9Sstevel@tonic-gate 	assert(node != NULL);
243*7c478bd9Sstevel@tonic-gate 
244*7c478bd9Sstevel@tonic-gate 	if (selected_index == -1) {
245*7c478bd9Sstevel@tonic-gate 		/* Allocate new entries */
246*7c478bd9Sstevel@tonic-gate 		int L_inOldSize = svc_nmgtuserfds;
247*7c478bd9Sstevel@tonic-gate 		int i;
248*7c478bd9Sstevel@tonic-gate 		_svc_management_user_fd* alloc_array;
249*7c478bd9Sstevel@tonic-gate 
250*7c478bd9Sstevel@tonic-gate 		svc_nmgtuserfds += USER_FD_INCREMENT;
251*7c478bd9Sstevel@tonic-gate 
252*7c478bd9Sstevel@tonic-gate 		user_fd_mgt_array = (_svc_management_user_fd *)
253*7c478bd9Sstevel@tonic-gate 		    realloc(user_fd_mgt_array, svc_nmgtuserfds
254*7c478bd9Sstevel@tonic-gate 			* sizeof (_svc_management_user_fd));
255*7c478bd9Sstevel@tonic-gate 
256*7c478bd9Sstevel@tonic-gate 		if (user_fd_mgt_array == NULL) {
257*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
258*7c478bd9Sstevel@tonic-gate 			errno = ENOMEM;
259*7c478bd9Sstevel@tonic-gate 			return ((svc_input_id_t)-1);
260*7c478bd9Sstevel@tonic-gate 		}
261*7c478bd9Sstevel@tonic-gate 
262*7c478bd9Sstevel@tonic-gate 		for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
263*7c478bd9Sstevel@tonic-gate 			_svc_free_id((svc_input_id_t)i);
264*7c478bd9Sstevel@tonic-gate 		selected_index = (int)first_free;
265*7c478bd9Sstevel@tonic-gate 	}
266*7c478bd9Sstevel@tonic-gate 
267*7c478bd9Sstevel@tonic-gate 	node->id = (svc_input_id_t)selected_index;
268*7c478bd9Sstevel@tonic-gate 	first_free = user_fd_mgt_array[selected_index].data.next_free;
269*7c478bd9Sstevel@tonic-gate 
270*7c478bd9Sstevel@tonic-gate 	user_fd_mgt_array[selected_index].data.fd_node = node;
271*7c478bd9Sstevel@tonic-gate 	user_fd_mgt_array[selected_index].free = FALSE;
272*7c478bd9Sstevel@tonic-gate 
273*7c478bd9Sstevel@tonic-gate 	return ((svc_input_id_t)selected_index);
274*7c478bd9Sstevel@tonic-gate }
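
/*
 * Worked example of the free list above: starting from an empty manager
 * (svc_nmgtuserfds == 0, first_free == -1), the first call to
 * _svc_attribute_new_id() grows the array by USER_FD_INCREMENT (5) and
 * pushes indexes 4, 3, 2, 1, 0 onto the free list in that order.  The
 * caller is therefore handed id 0 and first_free becomes 1.  Freeing an
 * id pushes it back onto the head of the list, so both allocation and
 * release stay O(1).
 */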
275*7c478bd9Sstevel@tonic-gate 
276*7c478bd9Sstevel@tonic-gate /*
277*7c478bd9Sstevel@tonic-gate  * Process a pollfd entry: scan all the callbacks associated with this fd
278*7c478bd9Sstevel@tonic-gate  * whose event mask has at least one bit in common with the received events.
279*7c478bd9Sstevel@tonic-gate  *
280*7c478bd9Sstevel@tonic-gate  * If POLLNVAL is received and no callback handles it, remove the file
281*7c478bd9Sstevel@tonic-gate  * descriptor from the poll set.  If a callback does handle it, let the
282*7c478bd9Sstevel@tonic-gate  * user do the work.
283*7c478bd9Sstevel@tonic-gate  */
284*7c478bd9Sstevel@tonic-gate void
285*7c478bd9Sstevel@tonic-gate __svc_getreq_user(struct pollfd *pfd)
286*7c478bd9Sstevel@tonic-gate {
287*7c478bd9Sstevel@tonic-gate 	int fd = pfd->fd;
288*7c478bd9Sstevel@tonic-gate 	short revents = pfd->revents;
289*7c478bd9Sstevel@tonic-gate 	bool_t invalHandled = FALSE;
290*7c478bd9Sstevel@tonic-gate 	_svc_user_fd_node *node;
291*7c478bd9Sstevel@tonic-gate 
292*7c478bd9Sstevel@tonic-gate 	mutex_lock(&svc_userfds_lock);
293*7c478bd9Sstevel@tonic-gate 
294*7c478bd9Sstevel@tonic-gate 	if ((fd < 0) || (fd >= svc_nuserfds)) {
295*7c478bd9Sstevel@tonic-gate 		mutex_unlock(&svc_userfds_lock);
296*7c478bd9Sstevel@tonic-gate 		return;
297*7c478bd9Sstevel@tonic-gate 	}
298*7c478bd9Sstevel@tonic-gate 
299*7c478bd9Sstevel@tonic-gate 	node = svc_userfds[fd].lnk.next;
300*7c478bd9Sstevel@tonic-gate 
301*7c478bd9Sstevel@tonic-gate 	/* check whether at least one registered mask matches */
302*7c478bd9Sstevel@tonic-gate 	if (0 == (revents & svc_userfds[fd].mask)) {
303*7c478bd9Sstevel@tonic-gate 		mutex_unlock(&svc_userfds_lock);
304*7c478bd9Sstevel@tonic-gate 		return;
305*7c478bd9Sstevel@tonic-gate 	}
306*7c478bd9Sstevel@tonic-gate 
307*7c478bd9Sstevel@tonic-gate 	while ((svc_userfds[fd].mask != 0) &&
308*7c478bd9Sstevel@tonic-gate 	    ((_svc_user_link *)node != &(svc_userfds[fd].lnk))) {
309*7c478bd9Sstevel@tonic-gate 		/*
310*7c478bd9Sstevel@tonic-gate 		 * If one of the received events matches the events the node
311*7c478bd9Sstevel@tonic-gate 		 * listens for
312*7c478bd9Sstevel@tonic-gate 		 */
313*7c478bd9Sstevel@tonic-gate 		_svc_user_fd_node *next = node->lnk.next;
314*7c478bd9Sstevel@tonic-gate 
315*7c478bd9Sstevel@tonic-gate 		if (node->callback != NULL) {
316*7c478bd9Sstevel@tonic-gate 			if (node->events & revents) {
317*7c478bd9Sstevel@tonic-gate 				if (revents & POLLNVAL) {
318*7c478bd9Sstevel@tonic-gate 					invalHandled = TRUE;
319*7c478bd9Sstevel@tonic-gate 				}
320*7c478bd9Sstevel@tonic-gate 
321*7c478bd9Sstevel@tonic-gate 				/*
322*7c478bd9Sstevel@tonic-gate 				 * The lock must be released before calling the
323*7c478bd9Sstevel@tonic-gate 				 * user function, as this function can call
324*7c478bd9Sstevel@tonic-gate 				 * svc_remove_input() for example.
325*7c478bd9Sstevel@tonic-gate 				 */
326*7c478bd9Sstevel@tonic-gate 				mutex_unlock(&svc_userfds_lock);
327*7c478bd9Sstevel@tonic-gate 				node->callback(node->id, node->fd,
328*7c478bd9Sstevel@tonic-gate 				    node->events & revents, node->cookie);
329*7c478bd9Sstevel@tonic-gate 				/*
330*7c478bd9Sstevel@tonic-gate 				 * Do not use the node structure anymore, as it
331*7c478bd9Sstevel@tonic-gate 				 * could have been deallocated by the previous
332*7c478bd9Sstevel@tonic-gate 				 * callback.
333*7c478bd9Sstevel@tonic-gate 				 */
334*7c478bd9Sstevel@tonic-gate 				mutex_lock(&svc_userfds_lock);
335*7c478bd9Sstevel@tonic-gate 			}
336*7c478bd9Sstevel@tonic-gate 		}
337*7c478bd9Sstevel@tonic-gate 		node = next;
338*7c478bd9Sstevel@tonic-gate 	}
339*7c478bd9Sstevel@tonic-gate 
340*7c478bd9Sstevel@tonic-gate 	if ((revents & POLLNVAL) && !invalHandled)
341*7c478bd9Sstevel@tonic-gate 		__svc_remove_input_of_fd(fd);
342*7c478bd9Sstevel@tonic-gate 	mutex_unlock(&svc_userfds_lock);
343*7c478bd9Sstevel@tonic-gate }
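
/*
 * Shape of a user callback (illustrative only; my_input_handler is a
 * hypothetical name): callbacks registered through svc_add_input() are
 * invoked from the loop above with the registration id, the fd, the
 * subset of revents that matched the registered events, and the cookie:
 *
 *	static void
 *	my_input_handler(svc_input_id_t id, int fd, unsigned int revents,
 *	    void *cookie)
 *	{
 *		if (revents & POLLIN) {
 *			... consume data from fd, or call svc_remove_input(id)
 *		}
 *	}
 */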
344*7c478bd9Sstevel@tonic-gate 
345*7c478bd9Sstevel@tonic-gate 
346*7c478bd9Sstevel@tonic-gate /*
347*7c478bd9Sstevel@tonic-gate  * Check if a file descriptor is associated with a user reactor.
348*7c478bd9Sstevel@tonic-gate  * To do this, just check that the entry indexed by fd has a non-empty
349*7c478bd9Sstevel@tonic-gate  * event mask (i.e. at least one callback is still registered).
350*7c478bd9Sstevel@tonic-gate  */
351*7c478bd9Sstevel@tonic-gate bool_t
352*7c478bd9Sstevel@tonic-gate __is_a_userfd(int fd)
353*7c478bd9Sstevel@tonic-gate {
354*7c478bd9Sstevel@tonic-gate 	/* Checks argument */
355*7c478bd9Sstevel@tonic-gate 	if ((fd < 0) || (fd >= svc_nuserfds))
356*7c478bd9Sstevel@tonic-gate 		return (FALSE);
357*7c478bd9Sstevel@tonic-gate 	return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
358*7c478bd9Sstevel@tonic-gate }
359*7c478bd9Sstevel@tonic-gate 
360*7c478bd9Sstevel@tonic-gate /* Free everything related to user fds. */
361*7c478bd9Sstevel@tonic-gate /* Used in svc_run.c, hence not static. */
362*7c478bd9Sstevel@tonic-gate 
363*7c478bd9Sstevel@tonic-gate void
364*7c478bd9Sstevel@tonic-gate __destroy_userfd()
365*7c478bd9Sstevel@tonic-gate {
366*7c478bd9Sstevel@tonic-gate 	int one_fd;
367*7c478bd9Sstevel@tonic-gate 	/* Clean user fd */
368*7c478bd9Sstevel@tonic-gate 	if (svc_userfds != NULL) {
369*7c478bd9Sstevel@tonic-gate 		for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
370*7c478bd9Sstevel@tonic-gate 			_svc_user_fd_node *node;
371*7c478bd9Sstevel@tonic-gate 
372*7c478bd9Sstevel@tonic-gate 			node = svc_userfds[one_fd].lnk.next;
373*7c478bd9Sstevel@tonic-gate 			while ((_svc_user_link *) node
374*7c478bd9Sstevel@tonic-gate 			    != (_svc_user_link *) &(svc_userfds[one_fd])) {
375*7c478bd9Sstevel@tonic-gate 				_svc_free_id(node->id);
376*7c478bd9Sstevel@tonic-gate 				node = node->lnk.next;
377*7c478bd9Sstevel@tonic-gate 				free(node->lnk.previous);
378*7c478bd9Sstevel@tonic-gate 			}
379*7c478bd9Sstevel@tonic-gate 		}
380*7c478bd9Sstevel@tonic-gate 
381*7c478bd9Sstevel@tonic-gate 		free(user_fd_mgt_array);
382*7c478bd9Sstevel@tonic-gate 		user_fd_mgt_array = NULL;
383*7c478bd9Sstevel@tonic-gate 		first_free = (svc_input_id_t)-1;
384*7c478bd9Sstevel@tonic-gate 
385*7c478bd9Sstevel@tonic-gate 		free(svc_userfds);
386*7c478bd9Sstevel@tonic-gate 		svc_userfds = NULL;
387*7c478bd9Sstevel@tonic-gate 		svc_nuserfds = 0;
388*7c478bd9Sstevel@tonic-gate 	}
389*7c478bd9Sstevel@tonic-gate }
390*7c478bd9Sstevel@tonic-gate 
391*7c478bd9Sstevel@tonic-gate /*
392*7c478bd9Sstevel@tonic-gate  * Remove all the callbacks associated with an fd; useful, for instance,
393*7c478bd9Sstevel@tonic-gate  * when the fd is closed.
394*7c478bd9Sstevel@tonic-gate  */
395*7c478bd9Sstevel@tonic-gate static void
396*7c478bd9Sstevel@tonic-gate __svc_remove_input_of_fd(int fd)
397*7c478bd9Sstevel@tonic-gate {
398*7c478bd9Sstevel@tonic-gate 	_svc_user_fd_node *one_node;
399*7c478bd9Sstevel@tonic-gate 
400*7c478bd9Sstevel@tonic-gate 	if ((fd < 0) || (fd >= svc_nuserfds))
401*7c478bd9Sstevel@tonic-gate 		return;
402*7c478bd9Sstevel@tonic-gate 
403*7c478bd9Sstevel@tonic-gate 	one_node = svc_userfds[fd].lnk.next;
404*7c478bd9Sstevel@tonic-gate 	while ((_svc_user_link *) one_node
405*7c478bd9Sstevel@tonic-gate 	    != (_svc_user_link *) &(svc_userfds[fd].lnk)) {
406*7c478bd9Sstevel@tonic-gate 		_svc_free_id(one_node->id);
407*7c478bd9Sstevel@tonic-gate 		one_node = one_node->lnk.next;
408*7c478bd9Sstevel@tonic-gate 		free(one_node->lnk.previous);
409*7c478bd9Sstevel@tonic-gate 	}
410*7c478bd9Sstevel@tonic-gate 
411*7c478bd9Sstevel@tonic-gate 	LIST_CLR(svc_userfds[fd]);
412*7c478bd9Sstevel@tonic-gate 	svc_userfds[fd].mask = 0;
413*7c478bd9Sstevel@tonic-gate }
414*7c478bd9Sstevel@tonic-gate 
415*7c478bd9Sstevel@tonic-gate /*
416*7c478bd9Sstevel@tonic-gate  * Allow the user to add an fd to the poll list.  On failure return
417*7c478bd9Sstevel@tonic-gate  * (svc_input_id_t)-1; otherwise return the registration id.
418*7c478bd9Sstevel@tonic-gate  */
419*7c478bd9Sstevel@tonic-gate 
420*7c478bd9Sstevel@tonic-gate svc_input_id_t
421*7c478bd9Sstevel@tonic-gate svc_add_input(int user_fd, unsigned int events,
422*7c478bd9Sstevel@tonic-gate     svc_callback_t user_callback, void *cookie)
423*7c478bd9Sstevel@tonic-gate {
424*7c478bd9Sstevel@tonic-gate 	_svc_user_fd_node *new_node;
425*7c478bd9Sstevel@tonic-gate 
426*7c478bd9Sstevel@tonic-gate 	if (user_fd < 0) {
427*7c478bd9Sstevel@tonic-gate 		errno = EINVAL;
428*7c478bd9Sstevel@tonic-gate 		return ((svc_input_id_t)-1);
429*7c478bd9Sstevel@tonic-gate 	}
430*7c478bd9Sstevel@tonic-gate 
431*7c478bd9Sstevel@tonic-gate 	if ((events == 0x0000) ||
432*7c478bd9Sstevel@tonic-gate 	    (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
433*7c478bd9Sstevel@tonic-gate 	    POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
434*7c478bd9Sstevel@tonic-gate 		errno = EINVAL;
435*7c478bd9Sstevel@tonic-gate 		return ((svc_input_id_t)-1);
436*7c478bd9Sstevel@tonic-gate 	}
437*7c478bd9Sstevel@tonic-gate 
438*7c478bd9Sstevel@tonic-gate 	mutex_lock(&svc_userfds_lock);
439*7c478bd9Sstevel@tonic-gate 
440*7c478bd9Sstevel@tonic-gate 	if ((user_fd < svc_nuserfds) &&
441*7c478bd9Sstevel@tonic-gate 	    (svc_userfds[user_fd].mask & events) != 0) {
442*7c478bd9Sstevel@tonic-gate 		/* A callback is already registered for one of these events */
443*7c478bd9Sstevel@tonic-gate 		errno = EEXIST;
444*7c478bd9Sstevel@tonic-gate 		mutex_unlock(&svc_userfds_lock);
445*7c478bd9Sstevel@tonic-gate 		return ((svc_input_id_t)-1);
446*7c478bd9Sstevel@tonic-gate 	}
447*7c478bd9Sstevel@tonic-gate 
448*7c478bd9Sstevel@tonic-gate 	/* Handle memory allocation. */
449*7c478bd9Sstevel@tonic-gate 	if (user_fd >= svc_nuserfds) {
450*7c478bd9Sstevel@tonic-gate 		int oldSize = svc_nuserfds;
451*7c478bd9Sstevel@tonic-gate 		int i;
452*7c478bd9Sstevel@tonic-gate 
453*7c478bd9Sstevel@tonic-gate 		svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
454*7c478bd9Sstevel@tonic-gate 
455*7c478bd9Sstevel@tonic-gate 		svc_userfds = (_svc_user_fd_head *)
456*7c478bd9Sstevel@tonic-gate 		    realloc(svc_userfds,
457*7c478bd9Sstevel@tonic-gate 			svc_nuserfds * sizeof (_svc_user_fd_head));
458*7c478bd9Sstevel@tonic-gate 
459*7c478bd9Sstevel@tonic-gate 		if (svc_userfds == NULL) {
460*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "svc_add_input: out of memory");
461*7c478bd9Sstevel@tonic-gate 			errno = ENOMEM;
462*7c478bd9Sstevel@tonic-gate 			mutex_unlock(&svc_userfds_lock);
463*7c478bd9Sstevel@tonic-gate 			return ((svc_input_id_t)-1);
464*7c478bd9Sstevel@tonic-gate 		}
465*7c478bd9Sstevel@tonic-gate 
466*7c478bd9Sstevel@tonic-gate 		for (i = oldSize; i < svc_nuserfds; i++) {
467*7c478bd9Sstevel@tonic-gate 			LIST_CLR(svc_userfds[i]);
468*7c478bd9Sstevel@tonic-gate 			svc_userfds[i].mask = 0;
469*7c478bd9Sstevel@tonic-gate 		}
470*7c478bd9Sstevel@tonic-gate 	}
471*7c478bd9Sstevel@tonic-gate 
472*7c478bd9Sstevel@tonic-gate 	new_node = (_svc_user_fd_node *)malloc(sizeof (_svc_user_fd_node));
473*7c478bd9Sstevel@tonic-gate 	if (new_node == NULL) {
474*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "svc_add_input: out of memory");
475*7c478bd9Sstevel@tonic-gate 		errno = ENOMEM;
476*7c478bd9Sstevel@tonic-gate 		mutex_unlock(&svc_userfds_lock);
477*7c478bd9Sstevel@tonic-gate 		return ((svc_input_id_t)-1);
478*7c478bd9Sstevel@tonic-gate 	}
479*7c478bd9Sstevel@tonic-gate 
480*7c478bd9Sstevel@tonic-gate 	/* initialize the new node */
481*7c478bd9Sstevel@tonic-gate 	new_node->fd		= user_fd;
482*7c478bd9Sstevel@tonic-gate 	new_node->events	= events;
483*7c478bd9Sstevel@tonic-gate 	new_node->callback	= user_callback;
484*7c478bd9Sstevel@tonic-gate 	new_node->cookie	= cookie;
485*7c478bd9Sstevel@tonic-gate 
486*7c478bd9Sstevel@tonic-gate 	(void) _svc_attribute_new_id(new_node);
487*7c478bd9Sstevel@tonic-gate 
488*7c478bd9Sstevel@tonic-gate 	/* Add the new element at the beginning of the list. */
489*7c478bd9Sstevel@tonic-gate 	if (LIST_ISEMPTY(svc_userfds[user_fd])) {
490*7c478bd9Sstevel@tonic-gate 		svc_userfds[user_fd].lnk.previous = new_node;
491*7c478bd9Sstevel@tonic-gate 	}
492*7c478bd9Sstevel@tonic-gate 	new_node->lnk.next = svc_userfds[user_fd].lnk.next;
493*7c478bd9Sstevel@tonic-gate 	new_node->lnk.previous = (_svc_user_fd_node *)&(svc_userfds[user_fd]);
494*7c478bd9Sstevel@tonic-gate 
495*7c478bd9Sstevel@tonic-gate 	svc_userfds[user_fd].lnk.next = new_node;
496*7c478bd9Sstevel@tonic-gate 
497*7c478bd9Sstevel@tonic-gate 	/* refresh global mask for this file descriptor */
498*7c478bd9Sstevel@tonic-gate 	svc_userfds[user_fd].mask |= events;
499*7c478bd9Sstevel@tonic-gate 
500*7c478bd9Sstevel@tonic-gate 	/* refresh mask for the poll */
501*7c478bd9Sstevel@tonic-gate 	add_pollfd(user_fd, (svc_userfds[user_fd].mask));
502*7c478bd9Sstevel@tonic-gate 
503*7c478bd9Sstevel@tonic-gate 	mutex_unlock(&svc_userfds_lock);
504*7c478bd9Sstevel@tonic-gate 	return (new_node->id);
505*7c478bd9Sstevel@tonic-gate }
506*7c478bd9Sstevel@tonic-gate 
507*7c478bd9Sstevel@tonic-gate 
508*7c478bd9Sstevel@tonic-gate int
509*7c478bd9Sstevel@tonic-gate svc_remove_input(svc_input_id_t id)
510*7c478bd9Sstevel@tonic-gate {
511*7c478bd9Sstevel@tonic-gate 	_svc_user_fd_node* node;
512*7c478bd9Sstevel@tonic-gate 	_svc_user_fd_node* next;
513*7c478bd9Sstevel@tonic-gate 	_svc_user_fd_node* previous;
514*7c478bd9Sstevel@tonic-gate 	int fd;		/* caching optim */
515*7c478bd9Sstevel@tonic-gate 
516*7c478bd9Sstevel@tonic-gate 	mutex_lock(&svc_userfds_lock);
517*7c478bd9Sstevel@tonic-gate 
518*7c478bd9Sstevel@tonic-gate 	/* Immediately update data for id management */
519*7c478bd9Sstevel@tonic-gate 	if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
520*7c478bd9Sstevel@tonic-gate 	    is_free_id(id)) {
521*7c478bd9Sstevel@tonic-gate 		errno = EINVAL;
522*7c478bd9Sstevel@tonic-gate 		mutex_unlock(&svc_userfds_lock);
523*7c478bd9Sstevel@tonic-gate 		return (-1);
524*7c478bd9Sstevel@tonic-gate 	}
525*7c478bd9Sstevel@tonic-gate 
526*7c478bd9Sstevel@tonic-gate 	node = node_from_id(id);
527*7c478bd9Sstevel@tonic-gate 	assert(node != NULL);
528*7c478bd9Sstevel@tonic-gate 
529*7c478bd9Sstevel@tonic-gate 	_svc_free_id(id);
530*7c478bd9Sstevel@tonic-gate 	next		= node->lnk.next;
531*7c478bd9Sstevel@tonic-gate 	previous	= node->lnk.previous;
532*7c478bd9Sstevel@tonic-gate 	fd		= node->fd; /* caching optim */
533*7c478bd9Sstevel@tonic-gate 
534*7c478bd9Sstevel@tonic-gate 	/* Remove this node from the list. */
535*7c478bd9Sstevel@tonic-gate 	previous->lnk.next = next;
536*7c478bd9Sstevel@tonic-gate 	next->lnk.previous = previous;
537*7c478bd9Sstevel@tonic-gate 
538*7c478bd9Sstevel@tonic-gate 	/* Remove the node flags from the global mask */
539*7c478bd9Sstevel@tonic-gate 	svc_userfds[fd].mask ^= node->events;
540*7c478bd9Sstevel@tonic-gate 
541*7c478bd9Sstevel@tonic-gate 	free(node);
542*7c478bd9Sstevel@tonic-gate 	if (svc_userfds[fd].mask == 0) {
543*7c478bd9Sstevel@tonic-gate 		LIST_CLR(svc_userfds[fd]);
544*7c478bd9Sstevel@tonic-gate 		assert(LIST_ISEMPTY(svc_userfds[fd]));
545*7c478bd9Sstevel@tonic-gate 		remove_pollfd(fd);
546*7c478bd9Sstevel@tonic-gate 	}
547*7c478bd9Sstevel@tonic-gate 	/* A cleanup pass would be needed here to shrink memory usage. */
548*7c478bd9Sstevel@tonic-gate 
549*7c478bd9Sstevel@tonic-gate 	mutex_unlock(&svc_userfds_lock);
550*7c478bd9Sstevel@tonic-gate 	return (0);
551*7c478bd9Sstevel@tonic-gate }
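
/*
 * Usage sketch for the two entry points above (pipe_fd, my_pipe_handler
 * and cookie are hypothetical names):
 *
 *	svc_input_id_t id;
 *
 *	id = svc_add_input(pipe_fd, POLLIN, my_pipe_handler, cookie);
 *	if (id == (svc_input_id_t)-1)
 *		syslog(LOG_ERR, "svc_add_input failed");
 *	...
 *	if (svc_remove_input(id) == -1)
 *		syslog(LOG_ERR, "svc_remove_input failed");
 *
 * The callback runs from svc_run()/__svc_getreq_user() with
 * svc_userfds_lock released, so it may itself call svc_add_input() or
 * svc_remove_input().
 */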
552*7c478bd9Sstevel@tonic-gate 
553*7c478bd9Sstevel@tonic-gate 
554*7c478bd9Sstevel@tonic-gate /*
555*7c478bd9Sstevel@tonic-gate  * Provides default service-side functions for authentication flavors
556*7c478bd9Sstevel@tonic-gate  * that do not use all the fields in struct svc_auth_ops.
557*7c478bd9Sstevel@tonic-gate  */
558*7c478bd9Sstevel@tonic-gate 
559*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
560*7c478bd9Sstevel@tonic-gate static int
561*7c478bd9Sstevel@tonic-gate authany_wrap(auth, xdrs, xfunc, xwhere)
562*7c478bd9Sstevel@tonic-gate 	AUTH		*auth;
563*7c478bd9Sstevel@tonic-gate 	XDR		*xdrs;
564*7c478bd9Sstevel@tonic-gate 	xdrproc_t	xfunc;
565*7c478bd9Sstevel@tonic-gate 	caddr_t		xwhere;
566*7c478bd9Sstevel@tonic-gate {
567*7c478bd9Sstevel@tonic-gate 	return (*xfunc)(xdrs, xwhere);
568*7c478bd9Sstevel@tonic-gate }
569*7c478bd9Sstevel@tonic-gate 
570*7c478bd9Sstevel@tonic-gate struct svc_auth_ops svc_auth_any_ops = {
571*7c478bd9Sstevel@tonic-gate 	authany_wrap,
572*7c478bd9Sstevel@tonic-gate 	authany_wrap,
573*7c478bd9Sstevel@tonic-gate };
574*7c478bd9Sstevel@tonic-gate 
575*7c478bd9Sstevel@tonic-gate /*
576*7c478bd9Sstevel@tonic-gate  * Return pointer to server authentication structure.
577*7c478bd9Sstevel@tonic-gate  */
578*7c478bd9Sstevel@tonic-gate SVCAUTH *
579*7c478bd9Sstevel@tonic-gate __svc_get_svcauth(xprt)
580*7c478bd9Sstevel@tonic-gate 	SVCXPRT	*xprt;
581*7c478bd9Sstevel@tonic-gate {
582*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
583*7c478bd9Sstevel@tonic-gate 	return (&SVC_XP_AUTH(xprt));
584*7c478bd9Sstevel@tonic-gate }
585*7c478bd9Sstevel@tonic-gate 
586*7c478bd9Sstevel@tonic-gate /*
587*7c478bd9Sstevel@tonic-gate  * A callback routine to cleanup after a procedure is executed.
588*7c478bd9Sstevel@tonic-gate  */
589*7c478bd9Sstevel@tonic-gate void (*__proc_cleanup_cb)() = NULL;
590*7c478bd9Sstevel@tonic-gate 
591*7c478bd9Sstevel@tonic-gate void *
592*7c478bd9Sstevel@tonic-gate __svc_set_proc_cleanup_cb(cb)
593*7c478bd9Sstevel@tonic-gate 	void	*cb;
594*7c478bd9Sstevel@tonic-gate {
595*7c478bd9Sstevel@tonic-gate 	void	*tmp = (void *)__proc_cleanup_cb;
596*7c478bd9Sstevel@tonic-gate 
597*7c478bd9Sstevel@tonic-gate 	__proc_cleanup_cb = (void (*)())cb;
598*7c478bd9Sstevel@tonic-gate 	return (tmp);
599*7c478bd9Sstevel@tonic-gate }
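
/*
 * Usage sketch (my_cleanup is a hypothetical function): install a hook
 * that is invoked after a procedure has been executed, and restore the
 * previous hook when done.
 *
 *	extern void my_cleanup();
 *	void *prev = __svc_set_proc_cleanup_cb((void *)my_cleanup);
 *	...
 *	(void) __svc_set_proc_cleanup_cb(prev);
 */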
600*7c478bd9Sstevel@tonic-gate 
601*7c478bd9Sstevel@tonic-gate /* ***************  SVCXPRT related stuff **************** */
602*7c478bd9Sstevel@tonic-gate 
603*7c478bd9Sstevel@tonic-gate 
604*7c478bd9Sstevel@tonic-gate static int pollfd_shrinking = 1;
605*7c478bd9Sstevel@tonic-gate 
606*7c478bd9Sstevel@tonic-gate 
607*7c478bd9Sstevel@tonic-gate /*
608*7c478bd9Sstevel@tonic-gate  * Add fd to svc_pollfd
609*7c478bd9Sstevel@tonic-gate  */
610*7c478bd9Sstevel@tonic-gate static void
611*7c478bd9Sstevel@tonic-gate add_pollfd(int fd, short events)
612*7c478bd9Sstevel@tonic-gate {
613*7c478bd9Sstevel@tonic-gate 	if (fd < FD_SETSIZE) {
614*7c478bd9Sstevel@tonic-gate 		FD_SET(fd, &svc_fdset);
615*7c478bd9Sstevel@tonic-gate #if !defined(_LP64)
616*7c478bd9Sstevel@tonic-gate 		FD_SET(fd, &_new_svc_fdset);
617*7c478bd9Sstevel@tonic-gate #endif
618*7c478bd9Sstevel@tonic-gate 		svc_nfds++;
619*7c478bd9Sstevel@tonic-gate 		svc_nfds_set++;
620*7c478bd9Sstevel@tonic-gate 		if (fd >= svc_max_fd)
621*7c478bd9Sstevel@tonic-gate 			svc_max_fd = fd + 1;
622*7c478bd9Sstevel@tonic-gate 	}
623*7c478bd9Sstevel@tonic-gate 	if (fd >= svc_max_pollfd)
624*7c478bd9Sstevel@tonic-gate 		svc_max_pollfd = fd + 1;
625*7c478bd9Sstevel@tonic-gate 	if (svc_max_pollfd > svc_pollfd_allocd) {
626*7c478bd9Sstevel@tonic-gate 		int i = svc_pollfd_allocd;
627*7c478bd9Sstevel@tonic-gate 		pollfd_t *tmp;
628*7c478bd9Sstevel@tonic-gate 		do {
629*7c478bd9Sstevel@tonic-gate 			svc_pollfd_allocd += POLLFD_EXTEND;
630*7c478bd9Sstevel@tonic-gate 		} while (svc_max_pollfd > svc_pollfd_allocd);
631*7c478bd9Sstevel@tonic-gate 		tmp = realloc(svc_pollfd,
632*7c478bd9Sstevel@tonic-gate 					sizeof (pollfd_t) * svc_pollfd_allocd);
633*7c478bd9Sstevel@tonic-gate 		if (tmp != NULL) {
634*7c478bd9Sstevel@tonic-gate 			svc_pollfd = tmp;
635*7c478bd9Sstevel@tonic-gate 			for (; i < svc_pollfd_allocd; i++)
636*7c478bd9Sstevel@tonic-gate 				POLLFD_CLR(i, tmp);
637*7c478bd9Sstevel@tonic-gate 		} else {
638*7c478bd9Sstevel@tonic-gate 			/*
639*7c478bd9Sstevel@tonic-gate 			 * Give an error message; undo the fdset setting
640*7c478bd9Sstevel@tonic-gate 			 * above; reset the pollfd_shrinking flag.
641*7c478bd9Sstevel@tonic-gate 			 * Because of this, poll will not be done
642*7c478bd9Sstevel@tonic-gate 			 * on these fds.
643*7c478bd9Sstevel@tonic-gate 			 */
644*7c478bd9Sstevel@tonic-gate 			if (fd < FD_SETSIZE) {
645*7c478bd9Sstevel@tonic-gate 				FD_CLR(fd, &svc_fdset);
646*7c478bd9Sstevel@tonic-gate #if !defined(_LP64)
647*7c478bd9Sstevel@tonic-gate 				FD_CLR(fd, &_new_svc_fdset);
648*7c478bd9Sstevel@tonic-gate #endif
649*7c478bd9Sstevel@tonic-gate 				svc_nfds--;
650*7c478bd9Sstevel@tonic-gate 				svc_nfds_set--;
651*7c478bd9Sstevel@tonic-gate 				if (fd == (svc_max_fd - 1))
652*7c478bd9Sstevel@tonic-gate 					svc_max_fd--;
653*7c478bd9Sstevel@tonic-gate 			}
654*7c478bd9Sstevel@tonic-gate 			if (fd == (svc_max_pollfd - 1))
655*7c478bd9Sstevel@tonic-gate 				svc_max_pollfd--;
656*7c478bd9Sstevel@tonic-gate 			pollfd_shrinking = 0;
657*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "add_pollfd: out of memory");
658*7c478bd9Sstevel@tonic-gate 			_exit(1);
659*7c478bd9Sstevel@tonic-gate 		}
660*7c478bd9Sstevel@tonic-gate 	}
661*7c478bd9Sstevel@tonic-gate 	svc_pollfd[fd].fd	= fd;
662*7c478bd9Sstevel@tonic-gate 	svc_pollfd[fd].events	= events;
663*7c478bd9Sstevel@tonic-gate 	svc_npollfds++;
664*7c478bd9Sstevel@tonic-gate 	svc_npollfds_set++;
665*7c478bd9Sstevel@tonic-gate }
666*7c478bd9Sstevel@tonic-gate 
667*7c478bd9Sstevel@tonic-gate /*
668*7c478bd9Sstevel@tonic-gate  * The fd is still active; only its bit in the fdset is cleared.
669*7c478bd9Sstevel@tonic-gate  * Do not decrement svc_nfds or svc_npollfds.
670*7c478bd9Sstevel@tonic-gate  */
671*7c478bd9Sstevel@tonic-gate void
672*7c478bd9Sstevel@tonic-gate clear_pollfd(int fd)
673*7c478bd9Sstevel@tonic-gate {
674*7c478bd9Sstevel@tonic-gate 	if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
675*7c478bd9Sstevel@tonic-gate 		FD_CLR(fd, &svc_fdset);
676*7c478bd9Sstevel@tonic-gate #if !defined(_LP64)
677*7c478bd9Sstevel@tonic-gate 		FD_CLR(fd, &_new_svc_fdset);
678*7c478bd9Sstevel@tonic-gate #endif
679*7c478bd9Sstevel@tonic-gate 		svc_nfds_set--;
680*7c478bd9Sstevel@tonic-gate 	}
681*7c478bd9Sstevel@tonic-gate 	if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
682*7c478bd9Sstevel@tonic-gate 		POLLFD_CLR(fd, svc_pollfd);
683*7c478bd9Sstevel@tonic-gate 		svc_npollfds_set--;
684*7c478bd9Sstevel@tonic-gate 	}
685*7c478bd9Sstevel@tonic-gate }
686*7c478bd9Sstevel@tonic-gate 
687*7c478bd9Sstevel@tonic-gate /*
688*7c478bd9Sstevel@tonic-gate  * Sets the bit in the fdset for an active fd so that poll() is done for it.
689*7c478bd9Sstevel@tonic-gate  */
690*7c478bd9Sstevel@tonic-gate void
691*7c478bd9Sstevel@tonic-gate set_pollfd(int fd, short events)
692*7c478bd9Sstevel@tonic-gate {
693*7c478bd9Sstevel@tonic-gate 	if (fd < FD_SETSIZE) {
694*7c478bd9Sstevel@tonic-gate 		FD_SET(fd, &svc_fdset);
695*7c478bd9Sstevel@tonic-gate #if !defined(_LP64)
696*7c478bd9Sstevel@tonic-gate 		FD_SET(fd, &_new_svc_fdset);
697*7c478bd9Sstevel@tonic-gate #endif
698*7c478bd9Sstevel@tonic-gate 		svc_nfds_set++;
699*7c478bd9Sstevel@tonic-gate 	}
700*7c478bd9Sstevel@tonic-gate 	if (fd < svc_pollfd_allocd) {
701*7c478bd9Sstevel@tonic-gate 		svc_pollfd[fd].fd	= fd;
702*7c478bd9Sstevel@tonic-gate 		svc_pollfd[fd].events	= events;
703*7c478bd9Sstevel@tonic-gate 		svc_npollfds_set++;
704*7c478bd9Sstevel@tonic-gate 	}
705*7c478bd9Sstevel@tonic-gate }
706*7c478bd9Sstevel@tonic-gate 
707*7c478bd9Sstevel@tonic-gate /*
708*7c478bd9Sstevel@tonic-gate  * remove a svc_pollfd entry; it does not shrink the memory
709*7c478bd9Sstevel@tonic-gate  */
710*7c478bd9Sstevel@tonic-gate static void
711*7c478bd9Sstevel@tonic-gate remove_pollfd(fd)
712*7c478bd9Sstevel@tonic-gate 	int fd;
713*7c478bd9Sstevel@tonic-gate {
714*7c478bd9Sstevel@tonic-gate 	clear_pollfd(fd);
715*7c478bd9Sstevel@tonic-gate 	if (fd == (svc_max_fd - 1))
716*7c478bd9Sstevel@tonic-gate 		svc_max_fd--;
717*7c478bd9Sstevel@tonic-gate 	svc_nfds--;
718*7c478bd9Sstevel@tonic-gate 	if (fd == (svc_max_pollfd - 1))
719*7c478bd9Sstevel@tonic-gate 		svc_max_pollfd--;
720*7c478bd9Sstevel@tonic-gate 	svc_npollfds--;
721*7c478bd9Sstevel@tonic-gate }
722*7c478bd9Sstevel@tonic-gate 
723*7c478bd9Sstevel@tonic-gate /*
724*7c478bd9Sstevel@tonic-gate  * delete a svc_pollfd entry; it shrinks the memory
725*7c478bd9Sstevel@tonic-gate  * use remove_pollfd if you do not want to shrink
726*7c478bd9Sstevel@tonic-gate  */
727*7c478bd9Sstevel@tonic-gate static void
728*7c478bd9Sstevel@tonic-gate delete_pollfd(int fd)
729*7c478bd9Sstevel@tonic-gate {
730*7c478bd9Sstevel@tonic-gate 	remove_pollfd(fd);
731*7c478bd9Sstevel@tonic-gate 	if (pollfd_shrinking && svc_max_pollfd <
732*7c478bd9Sstevel@tonic-gate 			(svc_pollfd_allocd - POLLFD_SHRINK)) {
733*7c478bd9Sstevel@tonic-gate 		do {
734*7c478bd9Sstevel@tonic-gate 			svc_pollfd_allocd -= POLLFD_SHRINK;
735*7c478bd9Sstevel@tonic-gate 		} while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
736*7c478bd9Sstevel@tonic-gate 		svc_pollfd = realloc(svc_pollfd,
737*7c478bd9Sstevel@tonic-gate 				sizeof (pollfd_t) * svc_pollfd_allocd);
738*7c478bd9Sstevel@tonic-gate 		if (svc_pollfd == NULL) {
739*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "delete_pollfd: out of memory");
740*7c478bd9Sstevel@tonic-gate 			_exit(1);
741*7c478bd9Sstevel@tonic-gate 		}
742*7c478bd9Sstevel@tonic-gate 	}
743*7c478bd9Sstevel@tonic-gate }
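
/*
 * Usage sketch: the tables maintained above are what svc_run() consumes.
 * A minimal hand-rolled dispatch loop over the same public globals could
 * look like the following (error handling is only sketched):
 *
 *	for (;;) {
 *		int n = poll(svc_pollfd, svc_max_pollfd, -1);
 *
 *		if (n <= 0) {
 *			if (n == -1 && errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		svc_getreq_poll(svc_pollfd, n);
 *	}
 */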
744*7c478bd9Sstevel@tonic-gate 
745*7c478bd9Sstevel@tonic-gate 
746*7c478bd9Sstevel@tonic-gate /*
747*7c478bd9Sstevel@tonic-gate  * Activate a transport handle.
748*7c478bd9Sstevel@tonic-gate  */
749*7c478bd9Sstevel@tonic-gate void
750*7c478bd9Sstevel@tonic-gate xprt_register(xprt)
751*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
752*7c478bd9Sstevel@tonic-gate {
753*7c478bd9Sstevel@tonic-gate 	int fd = xprt->xp_fd;
754*7c478bd9Sstevel@tonic-gate #ifdef CALLBACK
755*7c478bd9Sstevel@tonic-gate 	extern void (*_svc_getreqset_proc)();
756*7c478bd9Sstevel@tonic-gate #endif
757*7c478bd9Sstevel@tonic-gate /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
758*7c478bd9Sstevel@tonic-gate 
759*7c478bd9Sstevel@tonic-gate 	trace1(TR_xprt_register, 0);
760*7c478bd9Sstevel@tonic-gate 	rw_wrlock(&svc_fd_lock);
761*7c478bd9Sstevel@tonic-gate 	if (svc_xports == NULL) {
762*7c478bd9Sstevel@tonic-gate 		/* allocate some small amount first */
763*7c478bd9Sstevel@tonic-gate 		svc_xports = calloc(FD_INCREMENT,  sizeof (SVCXPRT *));
764*7c478bd9Sstevel@tonic-gate 		if (svc_xports == NULL) {
765*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "xprt_register: out of memory");
766*7c478bd9Sstevel@tonic-gate 			_exit(1);
767*7c478bd9Sstevel@tonic-gate 		}
768*7c478bd9Sstevel@tonic-gate 		nsvc_xports = FD_INCREMENT;
769*7c478bd9Sstevel@tonic-gate 
770*7c478bd9Sstevel@tonic-gate #ifdef CALLBACK
771*7c478bd9Sstevel@tonic-gate 		/*
772*7c478bd9Sstevel@tonic-gate 		 * XXX: This code does not keep track of the server state.
773*7c478bd9Sstevel@tonic-gate 		 *
774*7c478bd9Sstevel@tonic-gate 		 * This provides for callback support.  When a client
775*7c478bd9Sstevel@tonic-gate 		 * receives a call from another client on one of the server
776*7c478bd9Sstevel@tonic-gate 		 * fds, it calls _svc_getreqset_proc(), which returns only
777*7c478bd9Sstevel@tonic-gate 		 * after serving all the pending server requests.  Also see
778*7c478bd9Sstevel@tonic-gate 		 * clnt_dg.c and clnt_vc.c (the clnt_call part of them).
779*7c478bd9Sstevel@tonic-gate 		 */
780*7c478bd9Sstevel@tonic-gate 		_svc_getreqset_proc = svc_getreq_poll;
781*7c478bd9Sstevel@tonic-gate #endif
782*7c478bd9Sstevel@tonic-gate 	}
783*7c478bd9Sstevel@tonic-gate 
784*7c478bd9Sstevel@tonic-gate 	while (fd >= nsvc_xports) {
785*7c478bd9Sstevel@tonic-gate 		SVCXPRT **tmp_xprts = svc_xports;
786*7c478bd9Sstevel@tonic-gate 
787*7c478bd9Sstevel@tonic-gate 		/* time to expand svc_xports */
788*7c478bd9Sstevel@tonic-gate 		tmp_xprts = realloc(svc_xports,
789*7c478bd9Sstevel@tonic-gate 			sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
790*7c478bd9Sstevel@tonic-gate 		if (tmp_xprts == NULL) {
791*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "xprt_register : out of memory.");
792*7c478bd9Sstevel@tonic-gate 			_exit(1);
793*7c478bd9Sstevel@tonic-gate 		}
794*7c478bd9Sstevel@tonic-gate 
795*7c478bd9Sstevel@tonic-gate 		svc_xports = tmp_xprts;
796*7c478bd9Sstevel@tonic-gate 		(void) memset(&svc_xports[nsvc_xports], 0,
797*7c478bd9Sstevel@tonic-gate 					sizeof (SVCXPRT *) * FD_INCREMENT);
798*7c478bd9Sstevel@tonic-gate 		nsvc_xports += FD_INCREMENT;
799*7c478bd9Sstevel@tonic-gate 	}
800*7c478bd9Sstevel@tonic-gate 
801*7c478bd9Sstevel@tonic-gate 	svc_xports[fd] = (SVCXPRT *)xprt;
802*7c478bd9Sstevel@tonic-gate 
803*7c478bd9Sstevel@tonic-gate 	add_pollfd(fd, MASKVAL);
804*7c478bd9Sstevel@tonic-gate 
805*7c478bd9Sstevel@tonic-gate 	if (svc_polling) {
806*7c478bd9Sstevel@tonic-gate 		char dummy;
807*7c478bd9Sstevel@tonic-gate 
808*7c478bd9Sstevel@tonic-gate 		/*
809*7c478bd9Sstevel@tonic-gate 		 * This happens only in one of the MT modes.
810*7c478bd9Sstevel@tonic-gate 		 * Wake up poller.
811*7c478bd9Sstevel@tonic-gate 		 */
812*7c478bd9Sstevel@tonic-gate 		write(svc_pipe[1], &dummy, sizeof (dummy));
813*7c478bd9Sstevel@tonic-gate 	}
814*7c478bd9Sstevel@tonic-gate 	/*
815*7c478bd9Sstevel@tonic-gate 	 * If already dispatching door based services, start
816*7c478bd9Sstevel@tonic-gate 	 * dispatching TLI based services now.
817*7c478bd9Sstevel@tonic-gate 	 */
818*7c478bd9Sstevel@tonic-gate 	mutex_lock(&svc_door_mutex);
819*7c478bd9Sstevel@tonic-gate 	if (svc_ndoorfds > 0)
820*7c478bd9Sstevel@tonic-gate 		cond_signal(&svc_door_waitcv);
821*7c478bd9Sstevel@tonic-gate 	mutex_unlock(&svc_door_mutex);
822*7c478bd9Sstevel@tonic-gate 
823*7c478bd9Sstevel@tonic-gate 	if (svc_xdrs == NULL) {
824*7c478bd9Sstevel@tonic-gate 		/* allocate initial chunk */
825*7c478bd9Sstevel@tonic-gate 		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
826*7c478bd9Sstevel@tonic-gate 		if (svc_xdrs != NULL)
827*7c478bd9Sstevel@tonic-gate 			nsvc_xdrs = FD_INCREMENT;
828*7c478bd9Sstevel@tonic-gate 		else {
829*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "xprt_register : out of memory.");
830*7c478bd9Sstevel@tonic-gate 			_exit(1);
831*7c478bd9Sstevel@tonic-gate 		}
832*7c478bd9Sstevel@tonic-gate 	}
833*7c478bd9Sstevel@tonic-gate 	rw_unlock(&svc_fd_lock);
834*7c478bd9Sstevel@tonic-gate 
835*7c478bd9Sstevel@tonic-gate 	trace1(TR_xprt_register, 1);
836*7c478bd9Sstevel@tonic-gate }
837*7c478bd9Sstevel@tonic-gate 
838*7c478bd9Sstevel@tonic-gate /*
839*7c478bd9Sstevel@tonic-gate  * De-activate a transport handle.
840*7c478bd9Sstevel@tonic-gate  */
841*7c478bd9Sstevel@tonic-gate void
842*7c478bd9Sstevel@tonic-gate __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
843*7c478bd9Sstevel@tonic-gate {
844*7c478bd9Sstevel@tonic-gate 	int fd = xprt->xp_fd;
845*7c478bd9Sstevel@tonic-gate 
846*7c478bd9Sstevel@tonic-gate 	trace1(TR_xprt_unregister, 0);
847*7c478bd9Sstevel@tonic-gate 	if (lock_not_held)
848*7c478bd9Sstevel@tonic-gate 		rw_wrlock(&svc_fd_lock);
849*7c478bd9Sstevel@tonic-gate 	if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
850*7c478bd9Sstevel@tonic-gate 		svc_xports[fd] = (SVCXPRT *)NULL;
851*7c478bd9Sstevel@tonic-gate 		delete_pollfd(fd);
852*7c478bd9Sstevel@tonic-gate 	}
853*7c478bd9Sstevel@tonic-gate 	if (lock_not_held)
854*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_fd_lock);
855*7c478bd9Sstevel@tonic-gate 	__svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
856*7c478bd9Sstevel@tonic-gate 	trace1(TR_xprt_unregister, 1);
857*7c478bd9Sstevel@tonic-gate }
858*7c478bd9Sstevel@tonic-gate 
859*7c478bd9Sstevel@tonic-gate void
860*7c478bd9Sstevel@tonic-gate xprt_unregister(xprt)
861*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
862*7c478bd9Sstevel@tonic-gate {
863*7c478bd9Sstevel@tonic-gate 	__xprt_unregister_private(xprt, TRUE);
864*7c478bd9Sstevel@tonic-gate }
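
/*
 * Usage note (illustrative): a transport creation routine such as the
 * svc_vc_create()/svc_dg_create() family elsewhere in this library
 * allocates an SVCXPRT, fills in xp_fd and xp_ops, and then makes the
 * handle visible to the poll machinery:
 *
 *	xprt->xp_fd = fd;
 *	...
 *	xprt_register(xprt);
 *
 * The matching destroy routine calls xprt_unregister() (or the private
 * __xprt_unregister_private() when svc_fd_lock is already held) before
 * freeing the handle.
 */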
865*7c478bd9Sstevel@tonic-gate 
866*7c478bd9Sstevel@tonic-gate /* ********************** CALLOUT list related stuff ************* */
867*7c478bd9Sstevel@tonic-gate 
868*7c478bd9Sstevel@tonic-gate /*
869*7c478bd9Sstevel@tonic-gate  * Add a service program to the callout list.
870*7c478bd9Sstevel@tonic-gate  * The dispatch routine will be called when an rpc request for this
871*7c478bd9Sstevel@tonic-gate  * program number comes in.
872*7c478bd9Sstevel@tonic-gate  */
873*7c478bd9Sstevel@tonic-gate bool_t
874*7c478bd9Sstevel@tonic-gate svc_reg(xprt, prog, vers, dispatch, nconf)
875*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
876*7c478bd9Sstevel@tonic-gate 	rpcprog_t prog;
877*7c478bd9Sstevel@tonic-gate 	rpcvers_t vers;
878*7c478bd9Sstevel@tonic-gate 	void (*dispatch)();
879*7c478bd9Sstevel@tonic-gate 	const struct netconfig *nconf;
880*7c478bd9Sstevel@tonic-gate {
881*7c478bd9Sstevel@tonic-gate 	bool_t dummy;
882*7c478bd9Sstevel@tonic-gate 	struct svc_callout *prev;
883*7c478bd9Sstevel@tonic-gate 	struct svc_callout *s, **s2;
884*7c478bd9Sstevel@tonic-gate 	struct netconfig *tnconf;
885*7c478bd9Sstevel@tonic-gate 	char *netid = NULL;
886*7c478bd9Sstevel@tonic-gate 	int flag = 0;
887*7c478bd9Sstevel@tonic-gate 
888*7c478bd9Sstevel@tonic-gate /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
889*7c478bd9Sstevel@tonic-gate 
890*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_reg, 0, prog, vers);
891*7c478bd9Sstevel@tonic-gate 	if (xprt->xp_netid) {
892*7c478bd9Sstevel@tonic-gate 		netid = strdup(xprt->xp_netid);
893*7c478bd9Sstevel@tonic-gate 		flag = 1;
894*7c478bd9Sstevel@tonic-gate 	} else if (nconf && nconf->nc_netid) {
895*7c478bd9Sstevel@tonic-gate 		netid = strdup(nconf->nc_netid);
896*7c478bd9Sstevel@tonic-gate 		flag = 1;
897*7c478bd9Sstevel@tonic-gate 	} else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
898*7c478bd9Sstevel@tonic-gate 			!= NULL) {
899*7c478bd9Sstevel@tonic-gate 		netid = strdup(tnconf->nc_netid);
900*7c478bd9Sstevel@tonic-gate 		flag = 1;
901*7c478bd9Sstevel@tonic-gate 		freenetconfigent(tnconf);
902*7c478bd9Sstevel@tonic-gate 	} /* must have been created with svc_raw_create */
903*7c478bd9Sstevel@tonic-gate 	if ((netid == NULL) && (flag == 1)) {
904*7c478bd9Sstevel@tonic-gate 		trace3(TR_svc_reg, 1, prog, vers);
905*7c478bd9Sstevel@tonic-gate 		return (FALSE);
906*7c478bd9Sstevel@tonic-gate 	}
907*7c478bd9Sstevel@tonic-gate 
908*7c478bd9Sstevel@tonic-gate 	rw_wrlock(&svc_lock);
909*7c478bd9Sstevel@tonic-gate 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
910*7c478bd9Sstevel@tonic-gate 		if (netid)
911*7c478bd9Sstevel@tonic-gate 			free(netid);
912*7c478bd9Sstevel@tonic-gate 		if (s->sc_dispatch == dispatch)
913*7c478bd9Sstevel@tonic-gate 			goto rpcb_it; /* it is registering another xprt */
914*7c478bd9Sstevel@tonic-gate 		trace3(TR_svc_reg, 1, prog, vers);
915*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_lock);
916*7c478bd9Sstevel@tonic-gate 		return (FALSE);
917*7c478bd9Sstevel@tonic-gate 	}
918*7c478bd9Sstevel@tonic-gate 	s = (struct svc_callout *)mem_alloc(sizeof (struct svc_callout));
919*7c478bd9Sstevel@tonic-gate 	if (s == (struct svc_callout *)NULL) {
920*7c478bd9Sstevel@tonic-gate 		if (netid)
921*7c478bd9Sstevel@tonic-gate 			free(netid);
922*7c478bd9Sstevel@tonic-gate 		trace3(TR_svc_reg, 1, prog, vers);
923*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_lock);
924*7c478bd9Sstevel@tonic-gate 		return (FALSE);
925*7c478bd9Sstevel@tonic-gate 	}
926*7c478bd9Sstevel@tonic-gate 
927*7c478bd9Sstevel@tonic-gate 	s->sc_prog = prog;
928*7c478bd9Sstevel@tonic-gate 	s->sc_vers = vers;
929*7c478bd9Sstevel@tonic-gate 	s->sc_dispatch = dispatch;
930*7c478bd9Sstevel@tonic-gate 	s->sc_netid = netid;
931*7c478bd9Sstevel@tonic-gate 	s->sc_next = NULL;
932*7c478bd9Sstevel@tonic-gate 
933*7c478bd9Sstevel@tonic-gate 	/*
934*7c478bd9Sstevel@tonic-gate 	 * The ordering of transports is such that the most frequently used
935*7c478bd9Sstevel@tonic-gate 	 * one appears first.  So add the new entry to the end of the list.
936*7c478bd9Sstevel@tonic-gate 	 */
937*7c478bd9Sstevel@tonic-gate 	for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
938*7c478bd9Sstevel@tonic-gate 		;
939*7c478bd9Sstevel@tonic-gate 	*s2 = s;
940*7c478bd9Sstevel@tonic-gate 
941*7c478bd9Sstevel@tonic-gate 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
942*7c478bd9Sstevel@tonic-gate 		if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
943*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "svc_reg : strdup failed.");
944*7c478bd9Sstevel@tonic-gate 			free(netid);
945*7c478bd9Sstevel@tonic-gate 			mem_free((char *)s,
946*7c478bd9Sstevel@tonic-gate 				(uint_t)sizeof (struct svc_callout));
947*7c478bd9Sstevel@tonic-gate 			*s2 = NULL;
948*7c478bd9Sstevel@tonic-gate 			rw_unlock(&svc_lock);
949*7c478bd9Sstevel@tonic-gate 			trace3(TR_svc_reg, 1, prog, vers);
950*7c478bd9Sstevel@tonic-gate 			return (FALSE);
951*7c478bd9Sstevel@tonic-gate 		}
952*7c478bd9Sstevel@tonic-gate 
953*7c478bd9Sstevel@tonic-gate rpcb_it:
954*7c478bd9Sstevel@tonic-gate 	rw_unlock(&svc_lock);
955*7c478bd9Sstevel@tonic-gate 	/* now register the information with the local binder service */
956*7c478bd9Sstevel@tonic-gate 	if (nconf) {
957*7c478bd9Sstevel@tonic-gate 		dummy = rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr);
958*7c478bd9Sstevel@tonic-gate 		trace3(TR_svc_reg, 1, prog, vers);
959*7c478bd9Sstevel@tonic-gate 		return (dummy);
960*7c478bd9Sstevel@tonic-gate 	}
961*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_reg, 1, prog, vers);
962*7c478bd9Sstevel@tonic-gate 	return (TRUE);
963*7c478bd9Sstevel@tonic-gate }
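
/*
 * Usage sketch (PROGNUM, VERSNUM and prog_dispatch are hypothetical): the
 * TI-RPC way to wire a dispatch routine to a transport, whether done by
 * svc_create() or by hand:
 *
 *	struct netconfig *nconf = getnetconfigent("tcp");
 *	SVCXPRT *xprt = svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
 *
 *	if (xprt == NULL ||
 *	    !svc_reg(xprt, PROGNUM, VERSNUM, prog_dispatch, nconf))
 *		syslog(LOG_ERR, "unable to register (PROGNUM, VERSNUM).");
 *
 * Passing a NULL nconf skips the rpcbind registration at the end of
 * svc_reg() above; svc_unreg(PROGNUM, VERSNUM) undoes both steps.
 */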
964*7c478bd9Sstevel@tonic-gate 
965*7c478bd9Sstevel@tonic-gate /*
966*7c478bd9Sstevel@tonic-gate  * Remove a service program from the callout list.
967*7c478bd9Sstevel@tonic-gate  */
968*7c478bd9Sstevel@tonic-gate void
969*7c478bd9Sstevel@tonic-gate svc_unreg(prog, vers)
970*7c478bd9Sstevel@tonic-gate 	rpcprog_t prog;
971*7c478bd9Sstevel@tonic-gate 	rpcvers_t vers;
972*7c478bd9Sstevel@tonic-gate {
973*7c478bd9Sstevel@tonic-gate 	struct svc_callout *prev;
974*7c478bd9Sstevel@tonic-gate 	struct svc_callout *s;
975*7c478bd9Sstevel@tonic-gate 
976*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_unreg, 0, prog, vers);
977*7c478bd9Sstevel@tonic-gate 	/* unregister the information anyway */
978*7c478bd9Sstevel@tonic-gate 	(void) rpcb_unset(prog, vers, NULL);
979*7c478bd9Sstevel@tonic-gate 	rw_wrlock(&svc_lock);
980*7c478bd9Sstevel@tonic-gate 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
981*7c478bd9Sstevel@tonic-gate 		if (prev == NULL_SVC) {
982*7c478bd9Sstevel@tonic-gate 			svc_head = s->sc_next;
983*7c478bd9Sstevel@tonic-gate 		} else {
984*7c478bd9Sstevel@tonic-gate 			prev->sc_next = s->sc_next;
985*7c478bd9Sstevel@tonic-gate 		}
986*7c478bd9Sstevel@tonic-gate 		s->sc_next = NULL_SVC;
987*7c478bd9Sstevel@tonic-gate 		if (s->sc_netid)
988*7c478bd9Sstevel@tonic-gate 			mem_free((char *)s->sc_netid,
989*7c478bd9Sstevel@tonic-gate 					(uint_t)sizeof (s->sc_netid) + 1);
990*7c478bd9Sstevel@tonic-gate 		mem_free((char *)s, (uint_t)sizeof (struct svc_callout));
991*7c478bd9Sstevel@tonic-gate 	}
992*7c478bd9Sstevel@tonic-gate 	rw_unlock(&svc_lock);
993*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_unreg, 1, prog, vers);
994*7c478bd9Sstevel@tonic-gate }
995*7c478bd9Sstevel@tonic-gate 
996*7c478bd9Sstevel@tonic-gate #ifdef PORTMAP
997*7c478bd9Sstevel@tonic-gate /*
998*7c478bd9Sstevel@tonic-gate  * Add a service program to the callout list.
999*7c478bd9Sstevel@tonic-gate  * The dispatch routine will be called when an rpc request for this
1000*7c478bd9Sstevel@tonic-gate  * program number comes in.
1001*7c478bd9Sstevel@tonic-gate  * For version 2 portmappers.
1002*7c478bd9Sstevel@tonic-gate  */
1003*7c478bd9Sstevel@tonic-gate #ifdef KERNEL
1004*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
1005*7c478bd9Sstevel@tonic-gate #endif
1006*7c478bd9Sstevel@tonic-gate bool_t
1007*7c478bd9Sstevel@tonic-gate svc_register(xprt, prog, vers, dispatch, protocol)
1008*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1009*7c478bd9Sstevel@tonic-gate 	rpcprog_t prog;
1010*7c478bd9Sstevel@tonic-gate 	rpcvers_t vers;
1011*7c478bd9Sstevel@tonic-gate 	void (*dispatch)();
1012*7c478bd9Sstevel@tonic-gate 	int protocol;
1013*7c478bd9Sstevel@tonic-gate {
1014*7c478bd9Sstevel@tonic-gate 	bool_t dummy;
1015*7c478bd9Sstevel@tonic-gate 	struct svc_callout *prev;
1016*7c478bd9Sstevel@tonic-gate 	struct svc_callout *s;
1017*7c478bd9Sstevel@tonic-gate 	struct netconfig *nconf;
1018*7c478bd9Sstevel@tonic-gate 	char *netid = NULL;
1019*7c478bd9Sstevel@tonic-gate 	int flag = 0;
1020*7c478bd9Sstevel@tonic-gate 
1021*7c478bd9Sstevel@tonic-gate 	trace4(TR_svc_register, 0, prog, vers, protocol);
1022*7c478bd9Sstevel@tonic-gate 	if (xprt->xp_netid) {
1023*7c478bd9Sstevel@tonic-gate 		netid = strdup(xprt->xp_netid);
1024*7c478bd9Sstevel@tonic-gate 		flag = 1;
1025*7c478bd9Sstevel@tonic-gate 	} else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
1026*7c478bd9Sstevel@tonic-gate 	__rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
1027*7c478bd9Sstevel@tonic-gate 		/* fill in missing netid field in SVCXPRT */
1028*7c478bd9Sstevel@tonic-gate 		netid = strdup(nconf->nc_netid);
1029*7c478bd9Sstevel@tonic-gate 		flag = 1;
1030*7c478bd9Sstevel@tonic-gate 		freenetconfigent(nconf);
1031*7c478bd9Sstevel@tonic-gate 	} /* must be svc_raw_create */
1032*7c478bd9Sstevel@tonic-gate 
1033*7c478bd9Sstevel@tonic-gate 	if ((netid == NULL) && (flag == 1)) {
1034*7c478bd9Sstevel@tonic-gate 		trace4(TR_svc_register, 1, prog, vers, protocol);
1035*7c478bd9Sstevel@tonic-gate 		return (FALSE);
1036*7c478bd9Sstevel@tonic-gate 	}
1037*7c478bd9Sstevel@tonic-gate 
1038*7c478bd9Sstevel@tonic-gate 	rw_wrlock(&svc_lock);
1039*7c478bd9Sstevel@tonic-gate 	if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
1040*7c478bd9Sstevel@tonic-gate 		if (netid)
1041*7c478bd9Sstevel@tonic-gate 			free(netid);
1042*7c478bd9Sstevel@tonic-gate 		if (s->sc_dispatch == dispatch)
1043*7c478bd9Sstevel@tonic-gate 			goto pmap_it;  /* the caller is registering another xprt */
1044*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_lock);
1045*7c478bd9Sstevel@tonic-gate 		trace4(TR_svc_register, 1, prog, vers, protocol);
1046*7c478bd9Sstevel@tonic-gate 		return (FALSE);
1047*7c478bd9Sstevel@tonic-gate 	}
1048*7c478bd9Sstevel@tonic-gate 	s = (struct svc_callout *)mem_alloc(sizeof (struct svc_callout));
1049*7c478bd9Sstevel@tonic-gate #ifndef KERNEL
1050*7c478bd9Sstevel@tonic-gate 	if (s == (struct svc_callout *)0) {
1051*7c478bd9Sstevel@tonic-gate 		if (netid)
1052*7c478bd9Sstevel@tonic-gate 			free(netid);
1053*7c478bd9Sstevel@tonic-gate 		trace4(TR_svc_register, 1, prog, vers, protocol);
1054*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_lock);
1055*7c478bd9Sstevel@tonic-gate 		return (FALSE);
1056*7c478bd9Sstevel@tonic-gate 	}
1057*7c478bd9Sstevel@tonic-gate #endif
1058*7c478bd9Sstevel@tonic-gate 	s->sc_prog = prog;
1059*7c478bd9Sstevel@tonic-gate 	s->sc_vers = vers;
1060*7c478bd9Sstevel@tonic-gate 	s->sc_dispatch = dispatch;
1061*7c478bd9Sstevel@tonic-gate 	s->sc_netid = netid;
1062*7c478bd9Sstevel@tonic-gate 	s->sc_next = svc_head;
1063*7c478bd9Sstevel@tonic-gate 	svc_head = s;
1064*7c478bd9Sstevel@tonic-gate 
1065*7c478bd9Sstevel@tonic-gate 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1066*7c478bd9Sstevel@tonic-gate 		if ((xprt->xp_netid = strdup(netid)) == NULL) {
1067*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR, "svc_register: strdup failed.");
1068*7c478bd9Sstevel@tonic-gate 			free(netid);
1069*7c478bd9Sstevel@tonic-gate 			svc_head = s->sc_next;
1070*7c478bd9Sstevel@tonic-gate 			mem_free((char *)s,
1071*7c478bd9Sstevel@tonic-gate 				(uint_t)sizeof (struct svc_callout));
1072*7c478bd9Sstevel@tonic-gate 			rw_unlock(&svc_lock);
1073*7c478bd9Sstevel@tonic-gate 			trace4(TR_svc_register, 1, prog, vers, protocol);
1074*7c478bd9Sstevel@tonic-gate 			return (FALSE);
1075*7c478bd9Sstevel@tonic-gate 		}
1076*7c478bd9Sstevel@tonic-gate 
1077*7c478bd9Sstevel@tonic-gate pmap_it:
1078*7c478bd9Sstevel@tonic-gate 	rw_unlock(&svc_lock);
1079*7c478bd9Sstevel@tonic-gate #ifndef KERNEL
1080*7c478bd9Sstevel@tonic-gate 	/* now register the information with the local binder service */
1081*7c478bd9Sstevel@tonic-gate 	if (protocol) {
1082*7c478bd9Sstevel@tonic-gate 		dummy = pmap_set(prog, vers, protocol, xprt->xp_port);
1083*7c478bd9Sstevel@tonic-gate 		trace4(TR_svc_register, 1, prog, vers, protocol);
1084*7c478bd9Sstevel@tonic-gate 		return (dummy);
1085*7c478bd9Sstevel@tonic-gate 	}
1086*7c478bd9Sstevel@tonic-gate #endif
1087*7c478bd9Sstevel@tonic-gate 	trace4(TR_svc_register, 1, prog, vers, protocol);
1088*7c478bd9Sstevel@tonic-gate 	return (TRUE);
1089*7c478bd9Sstevel@tonic-gate }
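
/*
 * Illustrative usage sketch (not part of the original file): registering a
 * dispatch routine with a version 2 portmapper.  EXAMPLE_PROG, EXAMPLE_VERS,
 * example_dispatch and example_register are hypothetical names; svcudp_create,
 * pmap_unset and IPPROTO_UDP are assumed to come from the classic
 * <rpc/rpc.h> / <netinet/in.h> interfaces.
 */
#if 0
#define	EXAMPLE_PROG	((rpcprog_t)0x20000099)
#define	EXAMPLE_VERS	((rpcvers_t)1)

static void example_dispatch(struct svc_req *, SVCXPRT *);

static int
example_register(void)
{
	SVCXPRT *xprt;

	/* create a UDP transport on an anonymous port */
	if ((xprt = svcudp_create(RPC_ANYSOCK)) == NULL)
		return (-1);

	/* clear any stale binding, then register with the portmapper */
	(void) pmap_unset(EXAMPLE_PROG, EXAMPLE_VERS);
	if (!svc_register(xprt, EXAMPLE_PROG, EXAMPLE_VERS,
	    example_dispatch, IPPROTO_UDP)) {
		svc_destroy(xprt);
		return (-1);
	}
	return (0);	/* svc_unregister() undoes this at shutdown */
}
#endif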
1090*7c478bd9Sstevel@tonic-gate 
1091*7c478bd9Sstevel@tonic-gate /*
1092*7c478bd9Sstevel@tonic-gate  * Remove a service program from the callout list.
1093*7c478bd9Sstevel@tonic-gate  * For version 2 portmappers.
1094*7c478bd9Sstevel@tonic-gate  */
1095*7c478bd9Sstevel@tonic-gate void
1096*7c478bd9Sstevel@tonic-gate svc_unregister(prog, vers)
1097*7c478bd9Sstevel@tonic-gate 	rpcprog_t prog;
1098*7c478bd9Sstevel@tonic-gate 	rpcvers_t vers;
1099*7c478bd9Sstevel@tonic-gate {
1100*7c478bd9Sstevel@tonic-gate 	struct svc_callout *prev;
1101*7c478bd9Sstevel@tonic-gate 	struct svc_callout *s;
1102*7c478bd9Sstevel@tonic-gate 
1103*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_unregister, 0, prog, vers);
1104*7c478bd9Sstevel@tonic-gate 	rw_wrlock(&svc_lock);
1105*7c478bd9Sstevel@tonic-gate 	while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1106*7c478bd9Sstevel@tonic-gate 		if (prev == NULL_SVC) {
1107*7c478bd9Sstevel@tonic-gate 			svc_head = s->sc_next;
1108*7c478bd9Sstevel@tonic-gate 		} else {
1109*7c478bd9Sstevel@tonic-gate 			prev->sc_next = s->sc_next;
1110*7c478bd9Sstevel@tonic-gate 		}
1111*7c478bd9Sstevel@tonic-gate 		s->sc_next = NULL_SVC;
1112*7c478bd9Sstevel@tonic-gate 		if (s->sc_netid)
1113*7c478bd9Sstevel@tonic-gate 			mem_free((char *)s->sc_netid,
1114*7c478bd9Sstevel@tonic-gate 					(uint_t)strlen(s->sc_netid) + 1);
1115*7c478bd9Sstevel@tonic-gate 		mem_free((char *)s, (uint_t)sizeof (struct svc_callout));
1116*7c478bd9Sstevel@tonic-gate #ifndef KERNEL
1117*7c478bd9Sstevel@tonic-gate 		/* unregister the information with the local binder service */
1118*7c478bd9Sstevel@tonic-gate 		(void) pmap_unset(prog, vers);
1119*7c478bd9Sstevel@tonic-gate #endif
1120*7c478bd9Sstevel@tonic-gate 	}
1121*7c478bd9Sstevel@tonic-gate 	rw_unlock(&svc_lock);
1122*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_unregister, 1, prog, vers);
1123*7c478bd9Sstevel@tonic-gate }
1124*7c478bd9Sstevel@tonic-gate 
1125*7c478bd9Sstevel@tonic-gate #endif /* PORTMAP */
1126*7c478bd9Sstevel@tonic-gate /*
1127*7c478bd9Sstevel@tonic-gate  * Search the callout list for a program number, return the callout
1128*7c478bd9Sstevel@tonic-gate  * struct.
1129*7c478bd9Sstevel@tonic-gate  * Also check the transport.  Many routines such as svc_unreg
1130*7c478bd9Sstevel@tonic-gate  * don't supply a corresponding transport, so don't check the transport
1131*7c478bd9Sstevel@tonic-gate  * if netid == NULL.
1132*7c478bd9Sstevel@tonic-gate  */
1133*7c478bd9Sstevel@tonic-gate static struct svc_callout *
1134*7c478bd9Sstevel@tonic-gate svc_find(prog, vers, prev, netid)
1135*7c478bd9Sstevel@tonic-gate 	rpcprog_t prog;
1136*7c478bd9Sstevel@tonic-gate 	rpcvers_t vers;
1137*7c478bd9Sstevel@tonic-gate 	struct svc_callout **prev;
1138*7c478bd9Sstevel@tonic-gate 	char *netid;
1139*7c478bd9Sstevel@tonic-gate {
1140*7c478bd9Sstevel@tonic-gate 	struct svc_callout *s, *p;
1141*7c478bd9Sstevel@tonic-gate 
1142*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_find, 0, prog, vers);
1143*7c478bd9Sstevel@tonic-gate 
1144*7c478bd9Sstevel@tonic-gate /* WRITE LOCK HELD ON ENTRY: svc_lock */
1145*7c478bd9Sstevel@tonic-gate 
1146*7c478bd9Sstevel@tonic-gate /*	assert(RW_WRITE_HELD(&svc_lock)); */
1147*7c478bd9Sstevel@tonic-gate 	p = NULL_SVC;
1148*7c478bd9Sstevel@tonic-gate 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1149*7c478bd9Sstevel@tonic-gate 		if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1150*7c478bd9Sstevel@tonic-gate 			((netid == NULL) || (s->sc_netid == NULL) ||
1151*7c478bd9Sstevel@tonic-gate 			(strcmp(netid, s->sc_netid) == 0)))
1152*7c478bd9Sstevel@tonic-gate 				break;
1153*7c478bd9Sstevel@tonic-gate 		p = s;
1154*7c478bd9Sstevel@tonic-gate 	}
1155*7c478bd9Sstevel@tonic-gate 	*prev = p;
1156*7c478bd9Sstevel@tonic-gate 	trace3(TR_svc_find, 1, prog, vers);
1157*7c478bd9Sstevel@tonic-gate 	return (s);
1158*7c478bd9Sstevel@tonic-gate }
1159*7c478bd9Sstevel@tonic-gate 
1160*7c478bd9Sstevel@tonic-gate 
1161*7c478bd9Sstevel@tonic-gate /* ******************* REPLY GENERATION ROUTINES  ************ */
1162*7c478bd9Sstevel@tonic-gate 
1163*7c478bd9Sstevel@tonic-gate /*
1164*7c478bd9Sstevel@tonic-gate  * Send a reply to an rpc request
1165*7c478bd9Sstevel@tonic-gate  */
1166*7c478bd9Sstevel@tonic-gate bool_t
1167*7c478bd9Sstevel@tonic-gate svc_sendreply(xprt, xdr_results, xdr_location)
1168*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1169*7c478bd9Sstevel@tonic-gate 	xdrproc_t xdr_results;
1170*7c478bd9Sstevel@tonic-gate 	caddr_t xdr_location;
1171*7c478bd9Sstevel@tonic-gate {
1172*7c478bd9Sstevel@tonic-gate 	bool_t dummy;
1173*7c478bd9Sstevel@tonic-gate 	struct rpc_msg rply;
1174*7c478bd9Sstevel@tonic-gate 
1175*7c478bd9Sstevel@tonic-gate 	trace1(TR_svc_sendreply, 0);
1176*7c478bd9Sstevel@tonic-gate 	rply.rm_direction = REPLY;
1177*7c478bd9Sstevel@tonic-gate 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1178*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1179*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_stat = SUCCESS;
1180*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_results.where = xdr_location;
1181*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_results.proc = xdr_results;
1182*7c478bd9Sstevel@tonic-gate 	dummy = SVC_REPLY((SVCXPRT *)xprt, &rply);
1183*7c478bd9Sstevel@tonic-gate 	trace1(TR_svc_sendreply, 1);
1184*7c478bd9Sstevel@tonic-gate 	return (dummy);
1185*7c478bd9Sstevel@tonic-gate }
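
/*
 * Illustrative dispatch sketch (not part of the original file): a typical
 * service procedure decodes its arguments, replies with svc_sendreply() on
 * success, and uses the svcerr_*() routines below for error replies.
 * example_dispatch and the procedure number/argument types are hypothetical.
 */
#if 0
static void
example_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
{
	int arg, res;

	switch (rqstp->rq_proc) {
	case NULLPROC:
		/* ping: no arguments, no results */
		(void) svc_sendreply(transp, (xdrproc_t)xdr_void, NULL);
		return;
	case 1:			/* hypothetical "add one" procedure */
		if (!svc_getargs(transp, (xdrproc_t)xdr_int, (caddr_t)&arg)) {
			svcerr_decode(transp);	/* GARBAGE_ARGS */
			return;
		}
		res = arg + 1;
		if (!svc_sendreply(transp, (xdrproc_t)xdr_int, (caddr_t)&res))
			svcerr_systemerr(transp);
		(void) svc_freeargs(transp, (xdrproc_t)xdr_int, (caddr_t)&arg);
		return;
	default:
		svcerr_noproc(transp);	/* PROC_UNAVAIL */
		return;
	}
}
#endif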
1186*7c478bd9Sstevel@tonic-gate 
1187*7c478bd9Sstevel@tonic-gate /*
1188*7c478bd9Sstevel@tonic-gate  * No procedure error reply
1189*7c478bd9Sstevel@tonic-gate  */
1190*7c478bd9Sstevel@tonic-gate void
1191*7c478bd9Sstevel@tonic-gate svcerr_noproc(xprt)
1192*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1193*7c478bd9Sstevel@tonic-gate {
1194*7c478bd9Sstevel@tonic-gate 	struct rpc_msg rply;
1195*7c478bd9Sstevel@tonic-gate 
1196*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_noproc, 0);
1197*7c478bd9Sstevel@tonic-gate 	rply.rm_direction = REPLY;
1198*7c478bd9Sstevel@tonic-gate 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1199*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1200*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1201*7c478bd9Sstevel@tonic-gate 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1202*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_noproc, 1);
1203*7c478bd9Sstevel@tonic-gate }
1204*7c478bd9Sstevel@tonic-gate 
1205*7c478bd9Sstevel@tonic-gate /*
1206*7c478bd9Sstevel@tonic-gate  * Can't decode args error reply
1207*7c478bd9Sstevel@tonic-gate  */
1208*7c478bd9Sstevel@tonic-gate void
1209*7c478bd9Sstevel@tonic-gate svcerr_decode(xprt)
1210*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1211*7c478bd9Sstevel@tonic-gate {
1212*7c478bd9Sstevel@tonic-gate 	struct rpc_msg rply;
1213*7c478bd9Sstevel@tonic-gate 
1214*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_decode, 0);
1215*7c478bd9Sstevel@tonic-gate 	rply.rm_direction = REPLY;
1216*7c478bd9Sstevel@tonic-gate 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1217*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1218*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1219*7c478bd9Sstevel@tonic-gate 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1220*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_decode, 1);
1221*7c478bd9Sstevel@tonic-gate }
1222*7c478bd9Sstevel@tonic-gate 
1223*7c478bd9Sstevel@tonic-gate /*
1224*7c478bd9Sstevel@tonic-gate  * Some system error
1225*7c478bd9Sstevel@tonic-gate  */
1226*7c478bd9Sstevel@tonic-gate void
1227*7c478bd9Sstevel@tonic-gate svcerr_systemerr(xprt)
1228*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1229*7c478bd9Sstevel@tonic-gate {
1230*7c478bd9Sstevel@tonic-gate 	struct rpc_msg rply;
1231*7c478bd9Sstevel@tonic-gate 
1232*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_systemerr, 0);
1233*7c478bd9Sstevel@tonic-gate 	rply.rm_direction = REPLY;
1234*7c478bd9Sstevel@tonic-gate 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1235*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1236*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
1237*7c478bd9Sstevel@tonic-gate 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1238*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_systemerr, 1);
1239*7c478bd9Sstevel@tonic-gate }
1240*7c478bd9Sstevel@tonic-gate 
1241*7c478bd9Sstevel@tonic-gate /*
1242*7c478bd9Sstevel@tonic-gate  * Tell the RPC package not to complain about version errors to the client.
1243*7c478bd9Sstevel@tonic-gate  * This is useful when revving broadcast protocols that sit on a fixed address.
1244*7c478bd9Sstevel@tonic-gate  * There really is only one (or should be only one) example of this kind of
1245*7c478bd9Sstevel@tonic-gate  * protocol: the portmapper (or rpc binder).
1246*7c478bd9Sstevel@tonic-gate  */
1247*7c478bd9Sstevel@tonic-gate void
1248*7c478bd9Sstevel@tonic-gate __svc_versquiet_on(xprt)
1249*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1250*7c478bd9Sstevel@tonic-gate {
1251*7c478bd9Sstevel@tonic-gate 	trace1(TR___svc_versquiet_on, 0);
1252*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1253*7c478bd9Sstevel@tonic-gate 	svc_flags(xprt) |= SVC_VERSQUIET;
1254*7c478bd9Sstevel@tonic-gate 	trace1(TR___svc_versquiet_on, 1);
1255*7c478bd9Sstevel@tonic-gate }
1256*7c478bd9Sstevel@tonic-gate 
1257*7c478bd9Sstevel@tonic-gate void
1258*7c478bd9Sstevel@tonic-gate __svc_versquiet_off(xprt)
1259*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1260*7c478bd9Sstevel@tonic-gate {
1261*7c478bd9Sstevel@tonic-gate 	trace1(TR___svc_versquiet_off, 0);
1262*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1263*7c478bd9Sstevel@tonic-gate 	svc_flags(xprt) &= ~SVC_VERSQUIET;
1264*7c478bd9Sstevel@tonic-gate 	trace1(TR___svc_versquiet_off, 1);
1265*7c478bd9Sstevel@tonic-gate }
1266*7c478bd9Sstevel@tonic-gate 
1267*7c478bd9Sstevel@tonic-gate void
1268*7c478bd9Sstevel@tonic-gate svc_versquiet(xprt)
1269*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1270*7c478bd9Sstevel@tonic-gate {
1271*7c478bd9Sstevel@tonic-gate 	trace1(TR_svc_versquiet, 0);
1272*7c478bd9Sstevel@tonic-gate 	__svc_versquiet_on(xprt);
1273*7c478bd9Sstevel@tonic-gate 	trace1(TR_svc_versquiet, 1);
1274*7c478bd9Sstevel@tonic-gate }
1275*7c478bd9Sstevel@tonic-gate 
1276*7c478bd9Sstevel@tonic-gate int
1277*7c478bd9Sstevel@tonic-gate __svc_versquiet_get(xprt)
1278*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1279*7c478bd9Sstevel@tonic-gate {
1280*7c478bd9Sstevel@tonic-gate 	trace1(TR___svc_versquiet_get, 0);
1281*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1282*7c478bd9Sstevel@tonic-gate 	trace2(TR___svc_versquiet_get, 1, svc_flags(xprt) & SVC_VERSQUIET);
1283*7c478bd9Sstevel@tonic-gate 	return (svc_flags(xprt) & SVC_VERSQUIET);
1284*7c478bd9Sstevel@tonic-gate }
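
/*
 * Illustrative sketch (not part of the original file): a binder-like
 * service that sits on a fixed address can suppress PROG_MISMATCH replies
 * on its transport right after creating it, e.g.
 *
 *	svc_versquiet(xprt);		turn version errors off
 *	__svc_versquiet_off(xprt);	turn them back on later
 *
 * where xprt is assumed to come from one of the svc_*_create() routines.
 */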
1285*7c478bd9Sstevel@tonic-gate 
1286*7c478bd9Sstevel@tonic-gate /*
1287*7c478bd9Sstevel@tonic-gate  * Authentication error reply
1288*7c478bd9Sstevel@tonic-gate  */
1289*7c478bd9Sstevel@tonic-gate void
1290*7c478bd9Sstevel@tonic-gate svcerr_auth(xprt, why)
1291*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1292*7c478bd9Sstevel@tonic-gate 	enum auth_stat why;
1293*7c478bd9Sstevel@tonic-gate {
1294*7c478bd9Sstevel@tonic-gate 	struct rpc_msg rply;
1295*7c478bd9Sstevel@tonic-gate 
1296*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_auth, 0);
1297*7c478bd9Sstevel@tonic-gate 	rply.rm_direction = REPLY;
1298*7c478bd9Sstevel@tonic-gate 	rply.rm_reply.rp_stat = MSG_DENIED;
1299*7c478bd9Sstevel@tonic-gate 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
1300*7c478bd9Sstevel@tonic-gate 	rply.rjcted_rply.rj_why = why;
1301*7c478bd9Sstevel@tonic-gate 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1302*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_auth, 1);
1303*7c478bd9Sstevel@tonic-gate }
1304*7c478bd9Sstevel@tonic-gate 
1305*7c478bd9Sstevel@tonic-gate /*
1306*7c478bd9Sstevel@tonic-gate  * Auth too weak error reply
1307*7c478bd9Sstevel@tonic-gate  */
1308*7c478bd9Sstevel@tonic-gate void
1309*7c478bd9Sstevel@tonic-gate svcerr_weakauth(xprt)
1310*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1311*7c478bd9Sstevel@tonic-gate {
1312*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_weakauth, 0);
1313*7c478bd9Sstevel@tonic-gate 	svcerr_auth(xprt, AUTH_TOOWEAK);
1314*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_weakauth, 1);
1315*7c478bd9Sstevel@tonic-gate }
1316*7c478bd9Sstevel@tonic-gate 
1317*7c478bd9Sstevel@tonic-gate /*
1318*7c478bd9Sstevel@tonic-gate  * Program unavailable error reply
1319*7c478bd9Sstevel@tonic-gate  */
1320*7c478bd9Sstevel@tonic-gate void
1321*7c478bd9Sstevel@tonic-gate svcerr_noprog(xprt)
1322*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1323*7c478bd9Sstevel@tonic-gate {
1324*7c478bd9Sstevel@tonic-gate 	struct rpc_msg rply;
1325*7c478bd9Sstevel@tonic-gate 
1326*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_noprog, 0);
1327*7c478bd9Sstevel@tonic-gate 	rply.rm_direction = REPLY;
1328*7c478bd9Sstevel@tonic-gate 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1329*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1330*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1331*7c478bd9Sstevel@tonic-gate 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1332*7c478bd9Sstevel@tonic-gate 	trace1(TR_svcerr_noprog, 1);
1333*7c478bd9Sstevel@tonic-gate }
1334*7c478bd9Sstevel@tonic-gate 
1335*7c478bd9Sstevel@tonic-gate /*
1336*7c478bd9Sstevel@tonic-gate  * Program version mismatch error reply
1337*7c478bd9Sstevel@tonic-gate  */
1338*7c478bd9Sstevel@tonic-gate void
1339*7c478bd9Sstevel@tonic-gate svcerr_progvers(xprt, low_vers, high_vers)
1340*7c478bd9Sstevel@tonic-gate 	const SVCXPRT *xprt;
1341*7c478bd9Sstevel@tonic-gate 	rpcvers_t low_vers;
1342*7c478bd9Sstevel@tonic-gate 	rpcvers_t high_vers;
1343*7c478bd9Sstevel@tonic-gate {
1344*7c478bd9Sstevel@tonic-gate 	struct rpc_msg rply;
1345*7c478bd9Sstevel@tonic-gate 
1346*7c478bd9Sstevel@tonic-gate 	trace3(TR_svcerr_progvers, 0, low_vers, high_vers);
1347*7c478bd9Sstevel@tonic-gate 	rply.rm_direction = REPLY;
1348*7c478bd9Sstevel@tonic-gate 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
1349*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_verf = xprt->xp_verf;
1350*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
1351*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_vers.low = low_vers;
1352*7c478bd9Sstevel@tonic-gate 	rply.acpted_rply.ar_vers.high = high_vers;
1353*7c478bd9Sstevel@tonic-gate 	SVC_REPLY((SVCXPRT *)xprt, &rply);
1354*7c478bd9Sstevel@tonic-gate 	trace3(TR_svcerr_progvers, 1, low_vers, high_vers);
1355*7c478bd9Sstevel@tonic-gate }
1356*7c478bd9Sstevel@tonic-gate 
1357*7c478bd9Sstevel@tonic-gate /* ******************* SERVER INPUT STUFF ******************* */
1358*7c478bd9Sstevel@tonic-gate 
1359*7c478bd9Sstevel@tonic-gate /*
1360*7c478bd9Sstevel@tonic-gate  * Get server side input from some transport.
1361*7c478bd9Sstevel@tonic-gate  *
1362*7c478bd9Sstevel@tonic-gate  * Statement of authentication parameters management:
1363*7c478bd9Sstevel@tonic-gate  * This function owns and manages all authentication parameters, specifically
1364*7c478bd9Sstevel@tonic-gate  * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1365*7c478bd9Sstevel@tonic-gate  * the "cooked" credentials (rqst->rq_clntcred).
1366*7c478bd9Sstevel@tonic-gate  * However, this function does not know the structure of the cooked
1367*7c478bd9Sstevel@tonic-gate  * credentials, so it makes the following assumptions:
1368*7c478bd9Sstevel@tonic-gate  *   a) the structure is contiguous (no pointers), and
1369*7c478bd9Sstevel@tonic-gate  *   b) the cred structure size does not exceed RQCRED_SIZE bytes.
1370*7c478bd9Sstevel@tonic-gate  * In all events, all three parameters are freed upon exit from this routine.
1371*7c478bd9Sstevel@tonic-gate  * The storage is trivially managed on the call stack in user land, but
1372*7c478bd9Sstevel@tonic-gate  * is malloc'd in kernel land.
1373*7c478bd9Sstevel@tonic-gate  */
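
/*
 * Layout of the per-transport cred_area referenced above (offsets are
 * illustrative, derived from svc_getreq_common() and svc_xprt_alloc()
 * below):
 *
 *	cred_area[0 .. MAX_AUTH_BYTES-1]		raw credentials
 *	cred_area[MAX_AUTH_BYTES .. 2*MAX_AUTH_BYTES-1]	raw verifier
 *	cred_area[2*MAX_AUTH_BYTES ..
 *	    2*MAX_AUTH_BYTES+RQCRED_SIZE-1]		cooked credentials
 */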
1374*7c478bd9Sstevel@tonic-gate 
1375*7c478bd9Sstevel@tonic-gate void
1376*7c478bd9Sstevel@tonic-gate svc_getreq(rdfds)
1377*7c478bd9Sstevel@tonic-gate 	int rdfds;
1378*7c478bd9Sstevel@tonic-gate {
1379*7c478bd9Sstevel@tonic-gate 	fd_set readfds;
1380*7c478bd9Sstevel@tonic-gate 
1381*7c478bd9Sstevel@tonic-gate 	trace2(TR_svc_getreq, 0, rdfds);
1382*7c478bd9Sstevel@tonic-gate 	FD_ZERO(&readfds);
1383*7c478bd9Sstevel@tonic-gate 	readfds.fds_bits[0] = rdfds;
1384*7c478bd9Sstevel@tonic-gate 	svc_getreqset(&readfds);
1385*7c478bd9Sstevel@tonic-gate 	trace2(TR_svc_getreq, 1, rdfds);
1386*7c478bd9Sstevel@tonic-gate }
1387*7c478bd9Sstevel@tonic-gate 
1388*7c478bd9Sstevel@tonic-gate void
1389*7c478bd9Sstevel@tonic-gate svc_getreqset(readfds)
1390*7c478bd9Sstevel@tonic-gate 	fd_set *readfds;
1391*7c478bd9Sstevel@tonic-gate {
1392*7c478bd9Sstevel@tonic-gate 	int i;
1393*7c478bd9Sstevel@tonic-gate 
1394*7c478bd9Sstevel@tonic-gate 	trace1(TR_svc_getreqset, 0);
1395*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < svc_max_fd; i++) {
1396*7c478bd9Sstevel@tonic-gate 		/* fd has input waiting */
1397*7c478bd9Sstevel@tonic-gate 		if (FD_ISSET(i, readfds))
1398*7c478bd9Sstevel@tonic-gate 			svc_getreq_common(i);
1399*7c478bd9Sstevel@tonic-gate 	}
1400*7c478bd9Sstevel@tonic-gate 	trace1(TR_svc_getreqset, 1);
1401*7c478bd9Sstevel@tonic-gate }
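
/*
 * Illustrative sketch (not part of the original file): a single-threaded
 * server loop built on select(2) and svc_getreqset().  svc_fdset and
 * svc_max_fd are the library globals maintained by the code above;
 * select() modifies its argument, so the global set is copied first.
 * example_run_select is a hypothetical name.
 */
#if 0
static void
example_run_select(void)
{
	fd_set readfds;

	for (;;) {
		readfds = svc_fdset;	/* snapshot the global fd set */
		switch (select(svc_max_fd + 1, &readfds, NULL, NULL, NULL)) {
		case -1:
			if (errno == EINTR)
				continue;
			return;		/* unexpected select() failure */
		case 0:
			continue;	/* nothing ready */
		default:
			svc_getreqset(&readfds);
		}
	}
}
#endif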
1402*7c478bd9Sstevel@tonic-gate 
1403*7c478bd9Sstevel@tonic-gate void
1404*7c478bd9Sstevel@tonic-gate svc_getreq_poll(pfdp, pollretval)
1405*7c478bd9Sstevel@tonic-gate 	struct pollfd	*pfdp;
1406*7c478bd9Sstevel@tonic-gate 	int	pollretval;
1407*7c478bd9Sstevel@tonic-gate {
1408*7c478bd9Sstevel@tonic-gate 	int i;
1409*7c478bd9Sstevel@tonic-gate 	int fds_found;
1410*7c478bd9Sstevel@tonic-gate 
1411*7c478bd9Sstevel@tonic-gate 	trace2(TR_svc_getreq_poll, 0, pollretval);
1412*7c478bd9Sstevel@tonic-gate 	for (i = fds_found = 0; fds_found < pollretval; i++) {
1413*7c478bd9Sstevel@tonic-gate 		struct pollfd *p = &pfdp[i];
1414*7c478bd9Sstevel@tonic-gate 
1415*7c478bd9Sstevel@tonic-gate 		if (p->revents) {
1416*7c478bd9Sstevel@tonic-gate 			/* fd has input waiting */
1417*7c478bd9Sstevel@tonic-gate 			fds_found++;
1418*7c478bd9Sstevel@tonic-gate 			/*
1419*7c478bd9Sstevel@tonic-gate 			 *	We assume that this function is only called
1420*7c478bd9Sstevel@tonic-gate 			 *	via someone select()ing from svc_fdset or
1421*7c478bd9Sstevel@tonic-gate 			 *	poll()ing from svc_pollset[].  Thus it's safe
1422*7c478bd9Sstevel@tonic-gate 			 *	to handle the POLLNVAL event by simply turning
1423*7c478bd9Sstevel@tonic-gate 			 *	the corresponding bit off in svc_fdset.  The
1424*7c478bd9Sstevel@tonic-gate 			 *	svc_pollset[] array is derived from svc_fdset
1425*7c478bd9Sstevel@tonic-gate 			 *	and so will also be updated eventually.
1426*7c478bd9Sstevel@tonic-gate 			 *
1427*7c478bd9Sstevel@tonic-gate 			 *	XXX Should we do an xprt_unregister() instead?
1428*7c478bd9Sstevel@tonic-gate 			 */
1429*7c478bd9Sstevel@tonic-gate 			/* Handle user callback */
1430*7c478bd9Sstevel@tonic-gate 			if (__is_a_userfd(p->fd) == TRUE) {
1431*7c478bd9Sstevel@tonic-gate 				rw_rdlock(&svc_fd_lock);
1432*7c478bd9Sstevel@tonic-gate 				__svc_getreq_user(p);
1433*7c478bd9Sstevel@tonic-gate 				rw_unlock(&svc_fd_lock);
1434*7c478bd9Sstevel@tonic-gate 			} else {
1435*7c478bd9Sstevel@tonic-gate 				if (p->revents & POLLNVAL) {
1436*7c478bd9Sstevel@tonic-gate 					rw_wrlock(&svc_fd_lock);
1437*7c478bd9Sstevel@tonic-gate 					remove_pollfd(p->fd);	/* XXX */
1438*7c478bd9Sstevel@tonic-gate 					rw_unlock(&svc_fd_lock);
1439*7c478bd9Sstevel@tonic-gate 				} else
1440*7c478bd9Sstevel@tonic-gate 					svc_getreq_common(p->fd);
1441*7c478bd9Sstevel@tonic-gate 			}
1442*7c478bd9Sstevel@tonic-gate 		}
1443*7c478bd9Sstevel@tonic-gate 	}
1444*7c478bd9Sstevel@tonic-gate 	trace2(TR_svc_getreq_poll, 1, pollretval);
1445*7c478bd9Sstevel@tonic-gate }
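
/*
 * Illustrative sketch (not part of the original file): the poll(2)
 * counterpart of the loop above, feeding ready descriptors to
 * svc_getreq_poll().  svc_pollfd and svc_max_pollfd are assumed to be the
 * library's global pollfd array and its size (see rpc_svc_calls(3NSL));
 * example_run_poll is a hypothetical name.
 */
#if 0
static void
example_run_poll(void)
{
	int nready;

	for (;;) {
		nready = poll(svc_pollfd, svc_max_pollfd, -1);
		if (nready == -1) {
			if (errno == EINTR)
				continue;
			return;		/* unexpected poll() failure */
		}
		if (nready > 0)
			svc_getreq_poll(svc_pollfd, nready);
	}
}
#endif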
1446*7c478bd9Sstevel@tonic-gate 
1447*7c478bd9Sstevel@tonic-gate void
1448*7c478bd9Sstevel@tonic-gate svc_getreq_common(fd)
1449*7c478bd9Sstevel@tonic-gate 	int fd;
1450*7c478bd9Sstevel@tonic-gate {
1451*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1452*7c478bd9Sstevel@tonic-gate 	enum xprt_stat stat;
1453*7c478bd9Sstevel@tonic-gate 	struct rpc_msg *msg;
1454*7c478bd9Sstevel@tonic-gate 	struct svc_req *r;
1455*7c478bd9Sstevel@tonic-gate 	char *cred_area;
1456*7c478bd9Sstevel@tonic-gate 
1457*7c478bd9Sstevel@tonic-gate 	trace2(TR_svc_getreq_common, 0, fd);
1458*7c478bd9Sstevel@tonic-gate 
1459*7c478bd9Sstevel@tonic-gate 	rw_rdlock(&svc_fd_lock);
1460*7c478bd9Sstevel@tonic-gate 
1461*7c478bd9Sstevel@tonic-gate 	/* HANDLE USER CALLBACK */
1462*7c478bd9Sstevel@tonic-gate 	if (__is_a_userfd(fd) == TRUE) {
1463*7c478bd9Sstevel@tonic-gate 		struct pollfd virtual_fd;
1464*7c478bd9Sstevel@tonic-gate 
1465*7c478bd9Sstevel@tonic-gate 		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1466*7c478bd9Sstevel@tonic-gate 		virtual_fd.fd = fd;
1467*7c478bd9Sstevel@tonic-gate 		__svc_getreq_user(&virtual_fd);
1468*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_fd_lock);
1469*7c478bd9Sstevel@tonic-gate 		return;
1470*7c478bd9Sstevel@tonic-gate 	}
1471*7c478bd9Sstevel@tonic-gate 
1472*7c478bd9Sstevel@tonic-gate 	/*
1473*7c478bd9Sstevel@tonic-gate 	 * The transport associated with this fd could have been
1474*7c478bd9Sstevel@tonic-gate 	 * removed by svc_timeout_nonblock_xprt_and_LRU(), for instance.
1475*7c478bd9Sstevel@tonic-gate 	 * This can happen if two or more fds get read events and are
1476*7c478bd9Sstevel@tonic-gate 	 * passed to svc_getreq_poll/set; the first fd is serviced by
1477*7c478bd9Sstevel@tonic-gate 	 * the dispatch routine, which cleans up any dead transports.  If
1478*7c478bd9Sstevel@tonic-gate 	 * one of the dead transports removed is the other fd that
1479*7c478bd9Sstevel@tonic-gate 	 * had a read event then svc_getreq_common() will be called with no
1480*7c478bd9Sstevel@tonic-gate 	 * xprt associated with the fd that had the original read event.
1481*7c478bd9Sstevel@tonic-gate 	 */
1482*7c478bd9Sstevel@tonic-gate 	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1483*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_fd_lock);
1484*7c478bd9Sstevel@tonic-gate 		trace2(TR_svc_getreq_common, 1, fd);
1485*7c478bd9Sstevel@tonic-gate 		return;
1486*7c478bd9Sstevel@tonic-gate 	}
1487*7c478bd9Sstevel@tonic-gate 	rw_unlock(&svc_fd_lock);
1488*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1489*7c478bd9Sstevel@tonic-gate 	msg = SVCEXT(xprt)->msg;
1490*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1491*7c478bd9Sstevel@tonic-gate 	r = SVCEXT(xprt)->req;
1492*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1493*7c478bd9Sstevel@tonic-gate 	cred_area = SVCEXT(xprt)->cred_area;
1494*7c478bd9Sstevel@tonic-gate 	msg->rm_call.cb_cred.oa_base = cred_area;
1495*7c478bd9Sstevel@tonic-gate 	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1496*7c478bd9Sstevel@tonic-gate 	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1497*7c478bd9Sstevel@tonic-gate 
1498*7c478bd9Sstevel@tonic-gate 	/* receive msgs from xprt (support batch calls) */
1499*7c478bd9Sstevel@tonic-gate 	do {
1500*7c478bd9Sstevel@tonic-gate 		bool_t dispatch;
1501*7c478bd9Sstevel@tonic-gate 
1502*7c478bd9Sstevel@tonic-gate 		if ((dispatch = SVC_RECV(xprt, msg)))
1503*7c478bd9Sstevel@tonic-gate 			(void) _svc_prog_dispatch(xprt, msg, r);
1504*7c478bd9Sstevel@tonic-gate 		/*
1505*7c478bd9Sstevel@tonic-gate 		 * Check if the xprt has been disconnected in a recursive call
1506*7c478bd9Sstevel@tonic-gate 		 * in the service dispatch routine. If so, then break
1507*7c478bd9Sstevel@tonic-gate 		 */
1508*7c478bd9Sstevel@tonic-gate 		rw_rdlock(&svc_fd_lock);
1509*7c478bd9Sstevel@tonic-gate 		if (xprt != svc_xports[fd]) {
1510*7c478bd9Sstevel@tonic-gate 			rw_unlock(&svc_fd_lock);
1511*7c478bd9Sstevel@tonic-gate 			break;
1512*7c478bd9Sstevel@tonic-gate 		}
1513*7c478bd9Sstevel@tonic-gate 		rw_unlock(&svc_fd_lock);
1514*7c478bd9Sstevel@tonic-gate 
1515*7c478bd9Sstevel@tonic-gate 		/*
1516*7c478bd9Sstevel@tonic-gate 		 * Call cleanup procedure if set.
1517*7c478bd9Sstevel@tonic-gate 		 */
1518*7c478bd9Sstevel@tonic-gate 		if (__proc_cleanup_cb != NULL && dispatch)
1519*7c478bd9Sstevel@tonic-gate 			(*__proc_cleanup_cb)(xprt);
1520*7c478bd9Sstevel@tonic-gate 
1521*7c478bd9Sstevel@tonic-gate 		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1522*7c478bd9Sstevel@tonic-gate 			SVC_DESTROY(xprt);
1523*7c478bd9Sstevel@tonic-gate 			break;
1524*7c478bd9Sstevel@tonic-gate 		}
1525*7c478bd9Sstevel@tonic-gate 	} while (stat == XPRT_MOREREQS);
1526*7c478bd9Sstevel@tonic-gate 	trace2(TR_svc_getreq_common, 1, fd);
1527*7c478bd9Sstevel@tonic-gate }
1528*7c478bd9Sstevel@tonic-gate 
1529*7c478bd9Sstevel@tonic-gate int
1530*7c478bd9Sstevel@tonic-gate _svc_prog_dispatch(xprt, msg, r)
1531*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1532*7c478bd9Sstevel@tonic-gate 	struct rpc_msg *msg;
1533*7c478bd9Sstevel@tonic-gate 	struct svc_req *r;
1534*7c478bd9Sstevel@tonic-gate {
1535*7c478bd9Sstevel@tonic-gate 	struct svc_callout *s;
1536*7c478bd9Sstevel@tonic-gate 	enum auth_stat why;
1537*7c478bd9Sstevel@tonic-gate 	int prog_found;
1538*7c478bd9Sstevel@tonic-gate 	rpcvers_t low_vers;
1539*7c478bd9Sstevel@tonic-gate 	rpcvers_t high_vers;
1540*7c478bd9Sstevel@tonic-gate 	void (*disp_fn)();
1541*7c478bd9Sstevel@tonic-gate 
1542*7c478bd9Sstevel@tonic-gate 	trace1(TR_prog_dispatch, 0);
1543*7c478bd9Sstevel@tonic-gate 	r->rq_xprt = xprt;
1544*7c478bd9Sstevel@tonic-gate 	r->rq_prog = msg->rm_call.cb_prog;
1545*7c478bd9Sstevel@tonic-gate 	r->rq_vers = msg->rm_call.cb_vers;
1546*7c478bd9Sstevel@tonic-gate 	r->rq_proc = msg->rm_call.cb_proc;
1547*7c478bd9Sstevel@tonic-gate 	r->rq_cred = msg->rm_call.cb_cred;
1548*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1549*7c478bd9Sstevel@tonic-gate 	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1550*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1551*7c478bd9Sstevel@tonic-gate 	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1552*7c478bd9Sstevel@tonic-gate 
1553*7c478bd9Sstevel@tonic-gate 	/* first authenticate the message */
1554*7c478bd9Sstevel@tonic-gate 	/* Check for null flavor and bypass these calls if possible */
1555*7c478bd9Sstevel@tonic-gate 
1556*7c478bd9Sstevel@tonic-gate 	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1557*7c478bd9Sstevel@tonic-gate 		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1558*7c478bd9Sstevel@tonic-gate 		r->rq_xprt->xp_verf.oa_length = 0;
1559*7c478bd9Sstevel@tonic-gate 	} else {
1560*7c478bd9Sstevel@tonic-gate 		bool_t no_dispatch;
1561*7c478bd9Sstevel@tonic-gate 
1562*7c478bd9Sstevel@tonic-gate 		if ((why = __gss_authenticate(r, msg,
1563*7c478bd9Sstevel@tonic-gate 			&no_dispatch)) != AUTH_OK) {
1564*7c478bd9Sstevel@tonic-gate 			svcerr_auth(xprt, why);
1565*7c478bd9Sstevel@tonic-gate 			trace1(TR_prog_dispatch, 1);
1566*7c478bd9Sstevel@tonic-gate 			return (0);
1567*7c478bd9Sstevel@tonic-gate 		}
1568*7c478bd9Sstevel@tonic-gate 		if (no_dispatch)
1569*7c478bd9Sstevel@tonic-gate 			return (0);
1570*7c478bd9Sstevel@tonic-gate 	}
1571*7c478bd9Sstevel@tonic-gate 	/* match message with a registered service */
1572*7c478bd9Sstevel@tonic-gate 	prog_found = FALSE;
1573*7c478bd9Sstevel@tonic-gate 	low_vers = (rpcvers_t)(0 - 1);
1574*7c478bd9Sstevel@tonic-gate 	high_vers = 0;
1575*7c478bd9Sstevel@tonic-gate 	rw_rdlock(&svc_lock);
1576*7c478bd9Sstevel@tonic-gate 	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1577*7c478bd9Sstevel@tonic-gate 		if (s->sc_prog == r->rq_prog) {
1578*7c478bd9Sstevel@tonic-gate 			prog_found = TRUE;
1579*7c478bd9Sstevel@tonic-gate 			if (s->sc_vers == r->rq_vers) {
1580*7c478bd9Sstevel@tonic-gate 				if ((xprt->xp_netid == NULL) ||
1581*7c478bd9Sstevel@tonic-gate 				    (s->sc_netid == NULL) ||
1582*7c478bd9Sstevel@tonic-gate 				    (strcmp(xprt->xp_netid,
1583*7c478bd9Sstevel@tonic-gate 					    s->sc_netid) == 0)) {
1584*7c478bd9Sstevel@tonic-gate 					disp_fn = (*s->sc_dispatch);
1585*7c478bd9Sstevel@tonic-gate 					rw_unlock(&svc_lock);
1586*7c478bd9Sstevel@tonic-gate 					disp_fn(r, xprt);
1587*7c478bd9Sstevel@tonic-gate 					trace1(TR_prog_dispatch, 1);
1588*7c478bd9Sstevel@tonic-gate 					return (1);
1589*7c478bd9Sstevel@tonic-gate 				} else {
1590*7c478bd9Sstevel@tonic-gate 					prog_found = FALSE;
1591*7c478bd9Sstevel@tonic-gate 				}
1592*7c478bd9Sstevel@tonic-gate 			}
1593*7c478bd9Sstevel@tonic-gate 			if (s->sc_vers < low_vers)
1594*7c478bd9Sstevel@tonic-gate 				low_vers = s->sc_vers;
1595*7c478bd9Sstevel@tonic-gate 			if (s->sc_vers > high_vers)
1596*7c478bd9Sstevel@tonic-gate 				high_vers = s->sc_vers;
1597*7c478bd9Sstevel@tonic-gate 		}		/* found correct program */
1598*7c478bd9Sstevel@tonic-gate 	}
1599*7c478bd9Sstevel@tonic-gate 	rw_unlock(&svc_lock);
1600*7c478bd9Sstevel@tonic-gate 
1601*7c478bd9Sstevel@tonic-gate 	/*
1602*7c478bd9Sstevel@tonic-gate 	 * if we got here, the program or version
1603*7c478bd9Sstevel@tonic-gate 	 * is not served ...
1604*7c478bd9Sstevel@tonic-gate 	 */
1605*7c478bd9Sstevel@tonic-gate 	if (prog_found) {
1606*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1607*7c478bd9Sstevel@tonic-gate 		if (!version_keepquiet(xprt))
1608*7c478bd9Sstevel@tonic-gate 			svcerr_progvers(xprt, low_vers, high_vers);
1609*7c478bd9Sstevel@tonic-gate 	} else {
1610*7c478bd9Sstevel@tonic-gate 		svcerr_noprog(xprt);
1611*7c478bd9Sstevel@tonic-gate 	}
1612*7c478bd9Sstevel@tonic-gate 	trace1(TR_prog_dispatch, 1);
1613*7c478bd9Sstevel@tonic-gate 	return (0);
1614*7c478bd9Sstevel@tonic-gate }
1615*7c478bd9Sstevel@tonic-gate 
1616*7c478bd9Sstevel@tonic-gate /* ******************* SVCXPRT allocation and deallocation ***************** */
1617*7c478bd9Sstevel@tonic-gate 
1618*7c478bd9Sstevel@tonic-gate /*
1619*7c478bd9Sstevel@tonic-gate  * svc_xprt_alloc() - allocate a service transport handle
1620*7c478bd9Sstevel@tonic-gate  */
1621*7c478bd9Sstevel@tonic-gate SVCXPRT *
1622*7c478bd9Sstevel@tonic-gate svc_xprt_alloc()
1623*7c478bd9Sstevel@tonic-gate {
1624*7c478bd9Sstevel@tonic-gate 	SVCXPRT		*xprt = NULL;
1625*7c478bd9Sstevel@tonic-gate 	SVCXPRT_EXT	*xt = NULL;
1626*7c478bd9Sstevel@tonic-gate 	SVCXPRT_LIST	*xlist = NULL;
1627*7c478bd9Sstevel@tonic-gate 	struct rpc_msg	*msg = NULL;
1628*7c478bd9Sstevel@tonic-gate 	struct svc_req	*req = NULL;
1629*7c478bd9Sstevel@tonic-gate 	char		*cred_area = NULL;
1630*7c478bd9Sstevel@tonic-gate 
1631*7c478bd9Sstevel@tonic-gate 	if ((xprt = (SVCXPRT *)calloc(1, sizeof (SVCXPRT))) == NULL)
1632*7c478bd9Sstevel@tonic-gate 		goto err_exit;
1633*7c478bd9Sstevel@tonic-gate 
1634*7c478bd9Sstevel@tonic-gate 	if ((xt = (SVCXPRT_EXT *)calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1635*7c478bd9Sstevel@tonic-gate 		goto err_exit;
1636*7c478bd9Sstevel@tonic-gate 	xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1637*7c478bd9Sstevel@tonic-gate 
1638*7c478bd9Sstevel@tonic-gate 	if ((xlist = (SVCXPRT_LIST *)calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1639*7c478bd9Sstevel@tonic-gate 		goto err_exit;
1640*7c478bd9Sstevel@tonic-gate 	xt->my_xlist = xlist;
1641*7c478bd9Sstevel@tonic-gate 	xlist->xprt = xprt;
1642*7c478bd9Sstevel@tonic-gate 
1643*7c478bd9Sstevel@tonic-gate 	if ((msg = (struct rpc_msg *)malloc(sizeof (struct rpc_msg))) == NULL)
1644*7c478bd9Sstevel@tonic-gate 		goto err_exit;
1645*7c478bd9Sstevel@tonic-gate 	xt->msg = msg;
1646*7c478bd9Sstevel@tonic-gate 
1647*7c478bd9Sstevel@tonic-gate 	if ((req = (struct svc_req *)malloc(sizeof (struct svc_req))) == NULL)
1648*7c478bd9Sstevel@tonic-gate 		goto err_exit;
1649*7c478bd9Sstevel@tonic-gate 	xt->req = req;
1650*7c478bd9Sstevel@tonic-gate 
1651*7c478bd9Sstevel@tonic-gate 	if ((cred_area = (char *)malloc(2*MAX_AUTH_BYTES +
1652*7c478bd9Sstevel@tonic-gate 							RQCRED_SIZE)) == NULL)
1653*7c478bd9Sstevel@tonic-gate 		goto err_exit;
1654*7c478bd9Sstevel@tonic-gate 	xt->cred_area = cred_area;
1655*7c478bd9Sstevel@tonic-gate 
1656*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1657*7c478bd9Sstevel@tonic-gate 	mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1658*7c478bd9Sstevel@tonic-gate 	return (xprt);
1659*7c478bd9Sstevel@tonic-gate 
1660*7c478bd9Sstevel@tonic-gate err_exit:
1661*7c478bd9Sstevel@tonic-gate 	svc_xprt_free(xprt);
1662*7c478bd9Sstevel@tonic-gate 	return (NULL);
1663*7c478bd9Sstevel@tonic-gate }
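
/*
 * Illustrative sketch (not part of the original file): how a transport
 * creation routine typically pairs svc_xprt_alloc() with svc_xprt_free().
 * example_xprtcreate is a hypothetical name.
 */
#if 0
static SVCXPRT *
example_xprtcreate(int fd)
{
	SVCXPRT *xprt;

	if ((xprt = svc_xprt_alloc()) == NULL)
		return (NULL);
	xprt->xp_fd = fd;
	/*
	 * A real creation routine now sets xp_ops, xp_netid and its
	 * private data; if any later step fails it undoes the allocation
	 * with svc_xprt_free(xprt) before returning NULL.
	 */
	return (xprt);
}
#endif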
1664*7c478bd9Sstevel@tonic-gate 
1665*7c478bd9Sstevel@tonic-gate 
1666*7c478bd9Sstevel@tonic-gate /*
1667*7c478bd9Sstevel@tonic-gate  * svc_xprt_free() - free a service handle
1668*7c478bd9Sstevel@tonic-gate  */
1669*7c478bd9Sstevel@tonic-gate void
1670*7c478bd9Sstevel@tonic-gate svc_xprt_free(xprt)
1671*7c478bd9Sstevel@tonic-gate 	SVCXPRT	*xprt;
1672*7c478bd9Sstevel@tonic-gate {
1673*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1674*7c478bd9Sstevel@tonic-gate 	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
1675*7c478bd9Sstevel@tonic-gate 	SVCXPRT_LIST	*my_xlist = xt ? xt->my_xlist: NULL;
1676*7c478bd9Sstevel@tonic-gate 	struct rpc_msg	*msg = xt ? xt->msg : NULL;
1677*7c478bd9Sstevel@tonic-gate 	struct svc_req	*req = xt ? xt->req : NULL;
1678*7c478bd9Sstevel@tonic-gate 	char		*cred_area = xt ? xt->cred_area : NULL;
1679*7c478bd9Sstevel@tonic-gate 
1680*7c478bd9Sstevel@tonic-gate 	if (xprt)
1681*7c478bd9Sstevel@tonic-gate 		free((char *)xprt);
1682*7c478bd9Sstevel@tonic-gate 	if (xt)
1683*7c478bd9Sstevel@tonic-gate 		free((char *)xt);
1684*7c478bd9Sstevel@tonic-gate 	if (my_xlist)
1685*7c478bd9Sstevel@tonic-gate 		free((char *)my_xlist);
1686*7c478bd9Sstevel@tonic-gate 	if (msg)
1687*7c478bd9Sstevel@tonic-gate 		free((char *)msg);
1688*7c478bd9Sstevel@tonic-gate 	if (req)
1689*7c478bd9Sstevel@tonic-gate 		free((char *)req);
1690*7c478bd9Sstevel@tonic-gate 	if (cred_area)
1691*7c478bd9Sstevel@tonic-gate 		free((char *)cred_area);
1692*7c478bd9Sstevel@tonic-gate }
1693*7c478bd9Sstevel@tonic-gate 
1694*7c478bd9Sstevel@tonic-gate 
1695*7c478bd9Sstevel@tonic-gate /*
1696*7c478bd9Sstevel@tonic-gate  * svc_xprt_destroy() - free parent and child xprt list
1697*7c478bd9Sstevel@tonic-gate  */
1698*7c478bd9Sstevel@tonic-gate void
1699*7c478bd9Sstevel@tonic-gate svc_xprt_destroy(xprt)
1700*7c478bd9Sstevel@tonic-gate 	SVCXPRT		*xprt;
1701*7c478bd9Sstevel@tonic-gate {
1702*7c478bd9Sstevel@tonic-gate 	SVCXPRT_LIST	*xlist, *xnext = NULL;
1703*7c478bd9Sstevel@tonic-gate 	int		type;
1704*7c478bd9Sstevel@tonic-gate 
1705*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1706*7c478bd9Sstevel@tonic-gate 	if (SVCEXT(xprt)->parent)
1707*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1708*7c478bd9Sstevel@tonic-gate 		xprt = SVCEXT(xprt)->parent;
1709*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1710*7c478bd9Sstevel@tonic-gate 	type = svc_type(xprt);
1711*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1712*7c478bd9Sstevel@tonic-gate 	for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1713*7c478bd9Sstevel@tonic-gate 		xnext = xlist->next;
1714*7c478bd9Sstevel@tonic-gate 		xprt = xlist->xprt;
1715*7c478bd9Sstevel@tonic-gate 		switch (type) {
1716*7c478bd9Sstevel@tonic-gate 		case SVC_DGRAM:
1717*7c478bd9Sstevel@tonic-gate 			svc_dg_xprtfree(xprt);
1718*7c478bd9Sstevel@tonic-gate 			break;
1719*7c478bd9Sstevel@tonic-gate 		case SVC_RENDEZVOUS:
1720*7c478bd9Sstevel@tonic-gate 			svc_vc_xprtfree(xprt);
1721*7c478bd9Sstevel@tonic-gate 			break;
1722*7c478bd9Sstevel@tonic-gate 		case SVC_CONNECTION:
1723*7c478bd9Sstevel@tonic-gate 			svc_fd_xprtfree(xprt);
1724*7c478bd9Sstevel@tonic-gate 			break;
1725*7c478bd9Sstevel@tonic-gate 		case SVC_DOOR:
1726*7c478bd9Sstevel@tonic-gate 			svc_door_xprtfree(xprt);
1727*7c478bd9Sstevel@tonic-gate 			break;
1728*7c478bd9Sstevel@tonic-gate 		}
1729*7c478bd9Sstevel@tonic-gate 	}
1730*7c478bd9Sstevel@tonic-gate }
1731*7c478bd9Sstevel@tonic-gate 
1732*7c478bd9Sstevel@tonic-gate 
1733*7c478bd9Sstevel@tonic-gate /*
1734*7c478bd9Sstevel@tonic-gate  * svc_copy() - make a copy of parent
1735*7c478bd9Sstevel@tonic-gate  */
1736*7c478bd9Sstevel@tonic-gate SVCXPRT *
1737*7c478bd9Sstevel@tonic-gate svc_copy(xprt)
1738*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1739*7c478bd9Sstevel@tonic-gate {
1740*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1741*7c478bd9Sstevel@tonic-gate 	switch (svc_type(xprt)) {
1742*7c478bd9Sstevel@tonic-gate 	case SVC_DGRAM:
1743*7c478bd9Sstevel@tonic-gate 		return (svc_dg_xprtcopy(xprt));
1744*7c478bd9Sstevel@tonic-gate 	case SVC_RENDEZVOUS:
1745*7c478bd9Sstevel@tonic-gate 		return (svc_vc_xprtcopy(xprt));
1746*7c478bd9Sstevel@tonic-gate 	case SVC_CONNECTION:
1747*7c478bd9Sstevel@tonic-gate 		return (svc_fd_xprtcopy(xprt));
1748*7c478bd9Sstevel@tonic-gate 	}
1749*7c478bd9Sstevel@tonic-gate 	return ((SVCXPRT *)NULL);
1750*7c478bd9Sstevel@tonic-gate }
1751*7c478bd9Sstevel@tonic-gate 
1752*7c478bd9Sstevel@tonic-gate 
1753*7c478bd9Sstevel@tonic-gate /*
1754*7c478bd9Sstevel@tonic-gate  * _svc_destroy_private() - private SVC_DESTROY interface
1755*7c478bd9Sstevel@tonic-gate  */
1756*7c478bd9Sstevel@tonic-gate void
1757*7c478bd9Sstevel@tonic-gate _svc_destroy_private(xprt)
1758*7c478bd9Sstevel@tonic-gate 	SVCXPRT *xprt;
1759*7c478bd9Sstevel@tonic-gate {
1760*7c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
1761*7c478bd9Sstevel@tonic-gate 	switch (svc_type(xprt)) {
1762*7c478bd9Sstevel@tonic-gate 	case SVC_DGRAM:
1763*7c478bd9Sstevel@tonic-gate 		_svc_dg_destroy_private(xprt);
1764*7c478bd9Sstevel@tonic-gate 		break;
1765*7c478bd9Sstevel@tonic-gate 	case SVC_RENDEZVOUS:
1766*7c478bd9Sstevel@tonic-gate 	case SVC_CONNECTION:
1767*7c478bd9Sstevel@tonic-gate 		_svc_vc_destroy_private(xprt, TRUE);
1768*7c478bd9Sstevel@tonic-gate 		break;
1769*7c478bd9Sstevel@tonic-gate 	}
1770*7c478bd9Sstevel@tonic-gate }
1771*7c478bd9Sstevel@tonic-gate 
1772*7c478bd9Sstevel@tonic-gate /*
1773*7c478bd9Sstevel@tonic-gate  * svc_get_local_cred() - fetch local user credentials.  This always
1774*7c478bd9Sstevel@tonic-gate  * works over doors based transports.  For local transports, this
1775*7c478bd9Sstevel@tonic-gate  * does not yield correct results unless the __rpc_negotiate_uid()
1776*7c478bd9Sstevel@tonic-gate  * call has been invoked to enable this feature.
1777*7c478bd9Sstevel@tonic-gate  */
1778*7c478bd9Sstevel@tonic-gate bool_t
1779*7c478bd9Sstevel@tonic-gate svc_get_local_cred(xprt, lcred)
1780*7c478bd9Sstevel@tonic-gate 	SVCXPRT			*xprt;
1781*7c478bd9Sstevel@tonic-gate 	svc_local_cred_t	*lcred;
1782*7c478bd9Sstevel@tonic-gate {
1783*7c478bd9Sstevel@tonic-gate 	/* LINTED pointer alignment */
1784*7c478bd9Sstevel@tonic-gate 	if (svc_type(xprt) == SVC_DOOR)
1785*7c478bd9Sstevel@tonic-gate 		return (__svc_get_door_cred(xprt, lcred));
1786*7c478bd9Sstevel@tonic-gate 	return (__rpc_get_local_cred(xprt, lcred));
1787*7c478bd9Sstevel@tonic-gate }
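
/*
 * Illustrative sketch (not part of the original file): a service routine
 * checking the identity of a local caller.  The euid field is an assumption
 * about svc_local_cred_t; see <rpc/svc.h> for the actual layout.
 * example_is_local_root is a hypothetical name.
 */
#if 0
static bool_t
example_is_local_root(struct svc_req *rqstp)
{
	svc_local_cred_t lcred;

	if (!svc_get_local_cred(rqstp->rq_xprt, &lcred))
		return (FALSE);		/* credentials unavailable */
	return (lcred.euid == 0);
}
#endif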
1788*7c478bd9Sstevel@tonic-gate 
1789*7c478bd9Sstevel@tonic-gate 
1790*7c478bd9Sstevel@tonic-gate /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1791*7c478bd9Sstevel@tonic-gate 
1792*7c478bd9Sstevel@tonic-gate /*
1793*7c478bd9Sstevel@tonic-gate  * The dup caching routines below provide a cache of received
1794*7c478bd9Sstevel@tonic-gate  * transactions.  RPC service routines can use this to detect
1795*7c478bd9Sstevel@tonic-gate  * retransmissions and re-send a non-failure response.  An LRU
1796*7c478bd9Sstevel@tonic-gate  * scheme is used to choose entries to evict from the cache,
1797*7c478bd9Sstevel@tonic-gate  * though only DUP_DONE entries are placed on the LRU list.
1798*7c478bd9Sstevel@tonic-gate  * The routines were written towards development of a generic
1799*7c478bd9Sstevel@tonic-gate  * SVC_DUP() interface, which can be expanded to encompass the
1800*7c478bd9Sstevel@tonic-gate  * svc_dg_enablecache() routines as well.  The cache is currently
1801*7c478bd9Sstevel@tonic-gate  * private to the automounter.
1802*7c478bd9Sstevel@tonic-gate  */
1803*7c478bd9Sstevel@tonic-gate 
1804*7c478bd9Sstevel@tonic-gate 
1805*7c478bd9Sstevel@tonic-gate /* dupcache header contains xprt specific information */
1806*7c478bd9Sstevel@tonic-gate struct dupcache
1807*7c478bd9Sstevel@tonic-gate {
1808*7c478bd9Sstevel@tonic-gate 	rwlock_t	dc_lock;
1809*7c478bd9Sstevel@tonic-gate 	time_t		dc_time;
1810*7c478bd9Sstevel@tonic-gate 	int		dc_buckets;
1811*7c478bd9Sstevel@tonic-gate 	int		dc_maxsz;
1812*7c478bd9Sstevel@tonic-gate 	int		dc_basis;
1813*7c478bd9Sstevel@tonic-gate 	struct dupreq 	*dc_mru;
1814*7c478bd9Sstevel@tonic-gate 	struct dupreq	**dc_hashtbl;
1815*7c478bd9Sstevel@tonic-gate };
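
/*
 * Illustrative picture (derived from the code below): each bucket of
 * dc_hashtbl[] chains struct dupreq entries through dr_chain/dr_prevchain,
 * while completed (DUP_DONE/DUP_DROP) entries are also linked into a
 * circular MRU list rooted at dc_mru through dr_next/dr_prev, from which
 * victims older than dc_time seconds are reclaimed.
 */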
1816*7c478bd9Sstevel@tonic-gate 
1817*7c478bd9Sstevel@tonic-gate /*
1818*7c478bd9Sstevel@tonic-gate  * private duplicate cache request routines
1819*7c478bd9Sstevel@tonic-gate  */
1820*7c478bd9Sstevel@tonic-gate static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1821*7c478bd9Sstevel@tonic-gate 		struct dupcache *, uint32_t, uint32_t);
1822*7c478bd9Sstevel@tonic-gate static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1823*7c478bd9Sstevel@tonic-gate static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1824*7c478bd9Sstevel@tonic-gate 		struct dupcache *, uint32_t, uint32_t, time_t);
1825*7c478bd9Sstevel@tonic-gate static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1826*7c478bd9Sstevel@tonic-gate 		struct dupcache *, uint32_t, uint32_t);
1827*7c478bd9Sstevel@tonic-gate #ifdef DUP_DEBUG
1828*7c478bd9Sstevel@tonic-gate static void __svc_dupcache_debug(struct dupcache *);
1829*7c478bd9Sstevel@tonic-gate #endif /* DUP_DEBUG */
1830*7c478bd9Sstevel@tonic-gate 
1831*7c478bd9Sstevel@tonic-gate /* default parameters for the dupcache */
1832*7c478bd9Sstevel@tonic-gate #define	DUPCACHE_BUCKETS	257
1833*7c478bd9Sstevel@tonic-gate #define	DUPCACHE_TIME		900
1834*7c478bd9Sstevel@tonic-gate #define	DUPCACHE_MAXSZ		INT_MAX
1835*7c478bd9Sstevel@tonic-gate 
1836*7c478bd9Sstevel@tonic-gate /*
1837*7c478bd9Sstevel@tonic-gate  * __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1838*7c478bd9Sstevel@tonic-gate  * initialize the dup request cache and assign it to *xprt_cache.
1839*7c478bd9Sstevel@tonic-gate  * Use default values depending on the cache condition and basis.
1840*7c478bd9Sstevel@tonic-gate  * return TRUE on success and FALSE on failure
1841*7c478bd9Sstevel@tonic-gate  */
1842*7c478bd9Sstevel@tonic-gate bool_t
1843*7c478bd9Sstevel@tonic-gate __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1844*7c478bd9Sstevel@tonic-gate {
1845*7c478bd9Sstevel@tonic-gate 	static mutex_t initdc_lock = DEFAULTMUTEX;
1846*7c478bd9Sstevel@tonic-gate 	int i;
1847*7c478bd9Sstevel@tonic-gate 	struct dupcache *dc;
1848*7c478bd9Sstevel@tonic-gate 
1849*7c478bd9Sstevel@tonic-gate 	mutex_lock(&initdc_lock);
1850*7c478bd9Sstevel@tonic-gate 	if (*xprt_cache != NULL) { /* do only once per xprt */
1851*7c478bd9Sstevel@tonic-gate 		mutex_unlock(&initdc_lock);
1852*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR,
1853*7c478bd9Sstevel@tonic-gate 		"__svc_dupcache_init: multiply defined dup cache");
1854*7c478bd9Sstevel@tonic-gate 		return (FALSE);
1855*7c478bd9Sstevel@tonic-gate 	}
1856*7c478bd9Sstevel@tonic-gate 
1857*7c478bd9Sstevel@tonic-gate 	switch (basis) {
1858*7c478bd9Sstevel@tonic-gate 	case DUPCACHE_FIXEDTIME:
1859*7c478bd9Sstevel@tonic-gate 		dc = (struct dupcache *)mem_alloc(sizeof (struct dupcache));
1860*7c478bd9Sstevel@tonic-gate 		if (dc == NULL) {
1861*7c478bd9Sstevel@tonic-gate 			mutex_unlock(&initdc_lock);
1862*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR,
1863*7c478bd9Sstevel@tonic-gate 				"__svc_dupcache_init: memory alloc failed");
1864*7c478bd9Sstevel@tonic-gate 			return (FALSE);
1865*7c478bd9Sstevel@tonic-gate 		}
1866*7c478bd9Sstevel@tonic-gate 		rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1867*7c478bd9Sstevel@tonic-gate 		if (condition != NULL)
1868*7c478bd9Sstevel@tonic-gate 			dc->dc_time = *((time_t *)condition);
1869*7c478bd9Sstevel@tonic-gate 		else
1870*7c478bd9Sstevel@tonic-gate 			dc->dc_time = DUPCACHE_TIME;
1871*7c478bd9Sstevel@tonic-gate 		dc->dc_buckets = DUPCACHE_BUCKETS;
1872*7c478bd9Sstevel@tonic-gate 		dc->dc_maxsz = DUPCACHE_MAXSZ;
1873*7c478bd9Sstevel@tonic-gate 		dc->dc_basis = basis;
1874*7c478bd9Sstevel@tonic-gate 		dc->dc_mru = NULL;
1875*7c478bd9Sstevel@tonic-gate 		dc->dc_hashtbl = (struct dupreq **)mem_alloc(dc->dc_buckets *
1876*7c478bd9Sstevel@tonic-gate 						sizeof (struct dupreq *));
1877*7c478bd9Sstevel@tonic-gate 		if (dc->dc_hashtbl == NULL) {
1878*7c478bd9Sstevel@tonic-gate 			mem_free(dc, sizeof (struct dupcache));
1879*7c478bd9Sstevel@tonic-gate 			mutex_unlock(&initdc_lock);
1880*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR,
1881*7c478bd9Sstevel@tonic-gate 				"__svc_dupcache_init: memory alloc failed");
1882*7c478bd9Sstevel@tonic-gate 			return (FALSE);
1883*7c478bd9Sstevel@tonic-gate 		}
1884*7c478bd9Sstevel@tonic-gate 		for (i = 0; i < DUPCACHE_BUCKETS; i++)
1885*7c478bd9Sstevel@tonic-gate 			dc->dc_hashtbl[i] = NULL;
1886*7c478bd9Sstevel@tonic-gate 		*xprt_cache = (char *)dc;
1887*7c478bd9Sstevel@tonic-gate 		break;
1888*7c478bd9Sstevel@tonic-gate 	default:
1889*7c478bd9Sstevel@tonic-gate 		mutex_unlock(&initdc_lock);
1890*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR,
1891*7c478bd9Sstevel@tonic-gate 		"__svc_dupcache_init: undefined dup cache basis");
1892*7c478bd9Sstevel@tonic-gate 		return (FALSE);
1893*7c478bd9Sstevel@tonic-gate 	}
1894*7c478bd9Sstevel@tonic-gate 
1895*7c478bd9Sstevel@tonic-gate 	mutex_unlock(&initdc_lock);
1896*7c478bd9Sstevel@tonic-gate 
1897*7c478bd9Sstevel@tonic-gate 	return (TRUE);
1898*7c478bd9Sstevel@tonic-gate }
1899*7c478bd9Sstevel@tonic-gate 
1900*7c478bd9Sstevel@tonic-gate /*
1901*7c478bd9Sstevel@tonic-gate  * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1902*7c478bd9Sstevel@tonic-gate  *	char *xprt_cache)
1903*7c478bd9Sstevel@tonic-gate  * searches the request cache. Creates an entry and returns DUP_NEW if
1904*7c478bd9Sstevel@tonic-gate  * the request is not found in the cache.  If it is found, then it
1905*7c478bd9Sstevel@tonic-gate  * returns the state of the request (in progress, drop, or done) and
1906*7c478bd9Sstevel@tonic-gate  * also allocates, and passes back results to the user (if any) in
1907*7c478bd9Sstevel@tonic-gate  * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1908*7c478bd9Sstevel@tonic-gate  */
1909*7c478bd9Sstevel@tonic-gate int
1910*7c478bd9Sstevel@tonic-gate __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1911*7c478bd9Sstevel@tonic-gate 	char *xprt_cache)
1912*7c478bd9Sstevel@tonic-gate {
1913*7c478bd9Sstevel@tonic-gate 	uint32_t drxid, drhash;
1914*7c478bd9Sstevel@tonic-gate 	int rc;
1915*7c478bd9Sstevel@tonic-gate 	struct dupreq *dr = NULL;
1916*7c478bd9Sstevel@tonic-gate 	time_t timenow = time(NULL);
1917*7c478bd9Sstevel@tonic-gate 
1918*7c478bd9Sstevel@tonic-gate 	/* LINTED pointer alignment */
1919*7c478bd9Sstevel@tonic-gate 	struct dupcache *dc = (struct dupcache *)xprt_cache;
1920*7c478bd9Sstevel@tonic-gate 
1921*7c478bd9Sstevel@tonic-gate 	if (dc == NULL) {
1922*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "__svc_dup: undefined cache");
1923*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
1924*7c478bd9Sstevel@tonic-gate 	}
1925*7c478bd9Sstevel@tonic-gate 
1926*7c478bd9Sstevel@tonic-gate 	/* get the xid of the request */
1927*7c478bd9Sstevel@tonic-gate 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1928*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "__svc_dup: xid error");
1929*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
1930*7c478bd9Sstevel@tonic-gate 	}
1931*7c478bd9Sstevel@tonic-gate 	drhash = drxid % dc->dc_buckets;
1932*7c478bd9Sstevel@tonic-gate 
1933*7c478bd9Sstevel@tonic-gate 	if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1934*7c478bd9Sstevel@tonic-gate 			drhash)) != DUP_NEW)
1935*7c478bd9Sstevel@tonic-gate 		return (rc);
1936*7c478bd9Sstevel@tonic-gate 
1937*7c478bd9Sstevel@tonic-gate 	if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1938*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
1939*7c478bd9Sstevel@tonic-gate 
1940*7c478bd9Sstevel@tonic-gate 	if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1941*7c478bd9Sstevel@tonic-gate 			== DUP_ERROR)
1942*7c478bd9Sstevel@tonic-gate 		return (rc);
1943*7c478bd9Sstevel@tonic-gate 
1944*7c478bd9Sstevel@tonic-gate 	return (DUP_NEW);
1945*7c478bd9Sstevel@tonic-gate }
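
/*
 * Illustrative sketch (not part of the original file): the intended calling
 * pattern for the dup cache.  example_cache, example_dup_dispatch and
 * cache_time are hypothetical; __svc_dupdone() is the companion routine
 * defined later in this file and is assumed to take the completion status
 * plus the same request/cache arguments as __svc_dup().
 */
#if 0
static char *example_cache;		/* one cache per transport */

static void
example_dup_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
{
	time_t cache_time = 900;	/* seconds to remember requests */
	caddr_t resp = NULL;
	uint_t resp_sz = 0;

	if (example_cache == NULL &&
	    !__svc_dupcache_init(&cache_time, DUPCACHE_FIXEDTIME,
	    &example_cache))
		return;

	switch (__svc_dup(rqstp, &resp, &resp_sz, example_cache)) {
	case DUP_NEW:
		/* first time: service the request and send the reply, */
		/* then record completion (no results cached in this sketch) */
		(void) __svc_dupdone(rqstp, NULL, 0, DUP_DONE, example_cache);
		break;
	case DUP_DONE:
		/* retransmission of a completed request */
		if (resp != NULL)
			free(resp);	/* cached results, resp_sz bytes */
		break;
	case DUP_ERROR:
		svcerr_systemerr(transp);
		break;
	default:
		/* request in progress or dropped: send nothing */
		break;
	}
}
#endif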
1946*7c478bd9Sstevel@tonic-gate 
1947*7c478bd9Sstevel@tonic-gate 
1948*7c478bd9Sstevel@tonic-gate 
1949*7c478bd9Sstevel@tonic-gate /*
1950*7c478bd9Sstevel@tonic-gate  * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1951*7c478bd9Sstevel@tonic-gate  *		uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1952*7c478bd9Sstevel@tonic-gate  * 		uint32_t drhash)
1953*7c478bd9Sstevel@tonic-gate  * Checks to see whether an entry already exists in the cache. If it does,
1954*7c478bd9Sstevel@tonic-gate  * copy the results back into resp_buf, if appropriate. Return the status of
1955*7c478bd9Sstevel@tonic-gate  * the request, or DUP_NEW if the entry is not in the cache
1956*7c478bd9Sstevel@tonic-gate  */
1957*7c478bd9Sstevel@tonic-gate static int
1958*7c478bd9Sstevel@tonic-gate __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1959*7c478bd9Sstevel@tonic-gate 		struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1960*7c478bd9Sstevel@tonic-gate {
1961*7c478bd9Sstevel@tonic-gate 	struct dupreq *dr = NULL;
1962*7c478bd9Sstevel@tonic-gate 
1963*7c478bd9Sstevel@tonic-gate 	rw_rdlock(&(dc->dc_lock));
1964*7c478bd9Sstevel@tonic-gate 	dr = dc->dc_hashtbl[drhash];
1965*7c478bd9Sstevel@tonic-gate 	while (dr != NULL) {
1966*7c478bd9Sstevel@tonic-gate 		if (dr->dr_xid == drxid &&
1967*7c478bd9Sstevel@tonic-gate 		    dr->dr_proc == req->rq_proc &&
1968*7c478bd9Sstevel@tonic-gate 		    dr->dr_prog == req->rq_prog &&
1969*7c478bd9Sstevel@tonic-gate 		    dr->dr_vers == req->rq_vers &&
1970*7c478bd9Sstevel@tonic-gate 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1971*7c478bd9Sstevel@tonic-gate 		    memcmp((caddr_t)dr->dr_addr.buf,
1972*7c478bd9Sstevel@tonic-gate 				(caddr_t)req->rq_xprt->xp_rtaddr.buf,
1973*7c478bd9Sstevel@tonic-gate 				dr->dr_addr.len) == 0) { /* entry found */
1974*7c478bd9Sstevel@tonic-gate 			if (dr->dr_hash != drhash) {
1975*7c478bd9Sstevel@tonic-gate 				/* sanity check */
1976*7c478bd9Sstevel@tonic-gate 				rw_unlock((&dc->dc_lock));
1977*7c478bd9Sstevel@tonic-gate 				syslog(LOG_ERR,
1978*7c478bd9Sstevel@tonic-gate 					"__svc_dupcache_check: hashing error");
1979*7c478bd9Sstevel@tonic-gate 				return (DUP_ERROR);
1980*7c478bd9Sstevel@tonic-gate 			}
1981*7c478bd9Sstevel@tonic-gate 
1982*7c478bd9Sstevel@tonic-gate 			/*
1983*7c478bd9Sstevel@tonic-gate 			 * return results for requests on the lru list, if
1984*7c478bd9Sstevel@tonic-gate 			 * appropriate.  Requests must be DUP_DROP or DUP_DONE
1985*7c478bd9Sstevel@tonic-gate 			 * to have a result. A NULL buffer in the cache
1986*7c478bd9Sstevel@tonic-gate 			 * implies no results were sent during dupdone.
1987*7c478bd9Sstevel@tonic-gate 			 * A NULL buffer in the call implies not interested
1988*7c478bd9Sstevel@tonic-gate 			 * in results.
1989*7c478bd9Sstevel@tonic-gate 			 */
1990*7c478bd9Sstevel@tonic-gate 			if (((dr->dr_status == DUP_DONE) ||
1991*7c478bd9Sstevel@tonic-gate 				(dr->dr_status == DUP_DROP)) &&
1992*7c478bd9Sstevel@tonic-gate 				resp_buf != NULL &&
1993*7c478bd9Sstevel@tonic-gate 				dr->dr_resp.buf != NULL) {
1994*7c478bd9Sstevel@tonic-gate 				*resp_buf = (caddr_t)mem_alloc
1995*7c478bd9Sstevel@tonic-gate 					(dr->dr_resp.len);
1996*7c478bd9Sstevel@tonic-gate 				if (*resp_buf == NULL) {
1997*7c478bd9Sstevel@tonic-gate 					syslog(LOG_ERR,
1998*7c478bd9Sstevel@tonic-gate 					"__svc_dupcache_check: malloc failed");
1999*7c478bd9Sstevel@tonic-gate 					rw_unlock(&(dc->dc_lock));
2000*7c478bd9Sstevel@tonic-gate 					return (DUP_ERROR);
2001*7c478bd9Sstevel@tonic-gate 				}
2002*7c478bd9Sstevel@tonic-gate 				memset((caddr_t)*resp_buf, 0,
2003*7c478bd9Sstevel@tonic-gate 					dr->dr_resp.len);
2004*7c478bd9Sstevel@tonic-gate 				memcpy(*resp_buf, (caddr_t)dr->dr_resp.buf,
2005*7c478bd9Sstevel@tonic-gate 					dr->dr_resp.len);
2006*7c478bd9Sstevel@tonic-gate 				*resp_bufsz = dr->dr_resp.len;
2007*7c478bd9Sstevel@tonic-gate 			} else {
2008*7c478bd9Sstevel@tonic-gate 				/* no result */
2009*7c478bd9Sstevel@tonic-gate 				if (resp_buf)
2010*7c478bd9Sstevel@tonic-gate 					*resp_buf = NULL;
2011*7c478bd9Sstevel@tonic-gate 				if (resp_bufsz)
2012*7c478bd9Sstevel@tonic-gate 					*resp_bufsz = 0;
2013*7c478bd9Sstevel@tonic-gate 			}
2014*7c478bd9Sstevel@tonic-gate 			rw_unlock(&(dc->dc_lock));
2015*7c478bd9Sstevel@tonic-gate 			return (dr->dr_status);
2016*7c478bd9Sstevel@tonic-gate 		}
2017*7c478bd9Sstevel@tonic-gate 		dr = dr->dr_chain;
2018*7c478bd9Sstevel@tonic-gate 	}
2019*7c478bd9Sstevel@tonic-gate 	rw_unlock(&(dc->dc_lock));
2020*7c478bd9Sstevel@tonic-gate 	return (DUP_NEW);
2021*7c478bd9Sstevel@tonic-gate }
2022*7c478bd9Sstevel@tonic-gate 
2023*7c478bd9Sstevel@tonic-gate /*
2024*7c478bd9Sstevel@tonic-gate  * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
2025*7c478bd9Sstevel@tonic-gate  * Return a victim dupreq entry to the caller, depending on cache policy.
2026*7c478bd9Sstevel@tonic-gate  */
2027*7c478bd9Sstevel@tonic-gate static struct dupreq *
2028*7c478bd9Sstevel@tonic-gate __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
2029*7c478bd9Sstevel@tonic-gate {
2030*7c478bd9Sstevel@tonic-gate 	struct dupreq *dr = NULL;
2031*7c478bd9Sstevel@tonic-gate 
2032*7c478bd9Sstevel@tonic-gate 	switch (dc->dc_basis) {
2033*7c478bd9Sstevel@tonic-gate 	case DUPCACHE_FIXEDTIME:
2034*7c478bd9Sstevel@tonic-gate 		/*
2035*7c478bd9Sstevel@tonic-gate 		 * The hash policy is to free up a bit of the hash
2036*7c478bd9Sstevel@tonic-gate 		 * table before allocating a new entry as the victim.
2037*7c478bd9Sstevel@tonic-gate 		 * Freeing up the hash table each time should split
2038*7c478bd9Sstevel@tonic-gate 		 * the cost of keeping the hash table clean among threads.
2039*7c478bd9Sstevel@tonic-gate 		 * Note that only DONE or DROPPED entries are on the lru
2040*7c478bd9Sstevel@tonic-gate 		 * list but we do a sanity check anyway.
2041*7c478bd9Sstevel@tonic-gate 		 */
2042*7c478bd9Sstevel@tonic-gate 		rw_wrlock(&(dc->dc_lock));
2043*7c478bd9Sstevel@tonic-gate 		while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
2044*7c478bd9Sstevel@tonic-gate 				((timenow - dr->dr_time) > dc->dc_time)) {
2045*7c478bd9Sstevel@tonic-gate 			/* clean and then free the entry */
2046*7c478bd9Sstevel@tonic-gate 			if (dr->dr_status != DUP_DONE &&
2047*7c478bd9Sstevel@tonic-gate 				dr->dr_status != DUP_DROP) {
2048*7c478bd9Sstevel@tonic-gate 				/*
2049*7c478bd9Sstevel@tonic-gate 				 * The LRU list can't contain an
2050*7c478bd9Sstevel@tonic-gate 				 * entry where the status is other than
2051*7c478bd9Sstevel@tonic-gate 				 * DUP_DONE or DUP_DROP.
2052*7c478bd9Sstevel@tonic-gate 				 */
2053*7c478bd9Sstevel@tonic-gate 				syslog(LOG_ERR,
2054*7c478bd9Sstevel@tonic-gate 				"__svc_dupcache_victim: bad victim");
2055*7c478bd9Sstevel@tonic-gate #ifdef DUP_DEBUG
2056*7c478bd9Sstevel@tonic-gate 				/*
2057*7c478bd9Sstevel@tonic-gate 				 * __svc_dupcache_debug() must be called
2058*7c478bd9Sstevel@tonic-gate 				 * with the reader or writer lock held.
2059*7c478bd9Sstevel@tonic-gate 				 * Since we already hold the writer lock,
2060*7c478bd9Sstevel@tonic-gate 				 * we can call it directly here.
2061*7c478bd9Sstevel@tonic-gate 				 */
2062*7c478bd9Sstevel@tonic-gate 				__svc_dupcache_debug(dc);
2063*7c478bd9Sstevel@tonic-gate #endif /* DUP_DEBUG */
2064*7c478bd9Sstevel@tonic-gate 				rw_unlock(&(dc->dc_lock));
2065*7c478bd9Sstevel@tonic-gate 				return (NULL);
2066*7c478bd9Sstevel@tonic-gate 			}
2067*7c478bd9Sstevel@tonic-gate 			/* free buffers */
2068*7c478bd9Sstevel@tonic-gate 			if (dr->dr_resp.buf) {
2069*7c478bd9Sstevel@tonic-gate 				mem_free(dr->dr_resp.buf, dr->dr_resp.len);
2070*7c478bd9Sstevel@tonic-gate 				dr->dr_resp.buf = NULL;
2071*7c478bd9Sstevel@tonic-gate 			}
2072*7c478bd9Sstevel@tonic-gate 			if (dr->dr_addr.buf) {
2073*7c478bd9Sstevel@tonic-gate 				mem_free(dr->dr_addr.buf, dr->dr_addr.len);
2074*7c478bd9Sstevel@tonic-gate 				dr->dr_addr.buf = NULL;
2075*7c478bd9Sstevel@tonic-gate 			}
2076*7c478bd9Sstevel@tonic-gate 
2077*7c478bd9Sstevel@tonic-gate 			/* unhash the entry */
2078*7c478bd9Sstevel@tonic-gate 			if (dr->dr_chain)
2079*7c478bd9Sstevel@tonic-gate 				dr->dr_chain->dr_prevchain = dr->dr_prevchain;
2080*7c478bd9Sstevel@tonic-gate 			if (dr->dr_prevchain)
2081*7c478bd9Sstevel@tonic-gate 				dr->dr_prevchain->dr_chain = dr->dr_chain;
2082*7c478bd9Sstevel@tonic-gate 			if (dc->dc_hashtbl[dr->dr_hash] == dr)
2083*7c478bd9Sstevel@tonic-gate 				dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
2084*7c478bd9Sstevel@tonic-gate 
2085*7c478bd9Sstevel@tonic-gate 			/* modify the lru pointers */
2086*7c478bd9Sstevel@tonic-gate 			if (dc->dc_mru == dr)
2087*7c478bd9Sstevel@tonic-gate 				dc->dc_mru = NULL;
2088*7c478bd9Sstevel@tonic-gate 			else {
2089*7c478bd9Sstevel@tonic-gate 				dc->dc_mru->dr_next = dr->dr_next;
2090*7c478bd9Sstevel@tonic-gate 				dr->dr_next->dr_prev = dc->dc_mru;
2091*7c478bd9Sstevel@tonic-gate 			}
2092*7c478bd9Sstevel@tonic-gate 			mem_free(dr, sizeof (struct dupreq));
2093*7c478bd9Sstevel@tonic-gate 			dr = NULL;
2094*7c478bd9Sstevel@tonic-gate 		}
2095*7c478bd9Sstevel@tonic-gate 		rw_unlock(&(dc->dc_lock));
2096*7c478bd9Sstevel@tonic-gate 
2097*7c478bd9Sstevel@tonic-gate 		/*
2098*7c478bd9Sstevel@tonic-gate 		 * Allocate and return new clean entry as victim
2099*7c478bd9Sstevel@tonic-gate 		 */
2100*7c478bd9Sstevel@tonic-gate 		if ((dr = (struct dupreq *)mem_alloc(sizeof (*dr))) == NULL) {
2101*7c478bd9Sstevel@tonic-gate 			syslog(LOG_ERR,
2102*7c478bd9Sstevel@tonic-gate 				"__svc_dupcache_victim: mem_alloc failed");
2103*7c478bd9Sstevel@tonic-gate 			return (NULL);
2104*7c478bd9Sstevel@tonic-gate 		}
2105*7c478bd9Sstevel@tonic-gate 		memset((caddr_t)dr, 0, sizeof (*dr));
2106*7c478bd9Sstevel@tonic-gate 		return (dr);
2107*7c478bd9Sstevel@tonic-gate 	default:
2108*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR,
2109*7c478bd9Sstevel@tonic-gate 		"__svc_dupcache_victim: undefined dup cache basis");
2110*7c478bd9Sstevel@tonic-gate 		return (NULL);
2111*7c478bd9Sstevel@tonic-gate 	}
2112*7c478bd9Sstevel@tonic-gate }
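
/*
 * Illustrative sketch, not part of the library: the lru list manipulated
 * above is a circular, doubly linked ring anchored at dc->dc_mru, and
 * dc->dc_mru->dr_next is the least recently used entry, i.e. the first
 * victim candidate.  A minimal walker over that ring, assuming the caller
 * already holds dc->dc_lock; the function name and DUPCACHE_USAGE_EXAMPLE
 * guard are hypothetical.
 */
#ifdef DUPCACHE_USAGE_EXAMPLE
static int
example_count_lru_entries(struct dupcache *dc)
{
	struct dupreq *dr;
	int count = 0;

	if (dc->dc_mru == NULL)
		return (0);
	dr = dc->dc_mru->dr_next;	/* start at the lru end of the ring */
	do {
		count++;
		dr = dr->dr_next;
	} while (dr != dc->dc_mru->dr_next);
	return (count);
}
#endif /* DUPCACHE_USAGE_EXAMPLE */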
2113*7c478bd9Sstevel@tonic-gate 
2114*7c478bd9Sstevel@tonic-gate /*
2115*7c478bd9Sstevel@tonic-gate  * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
2116*7c478bd9Sstevel@tonic-gate  *	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
2117*7c478bd9Sstevel@tonic-gate  * Build a new duprequest entry and insert it into the cache.
2118*7c478bd9Sstevel@tonic-gate  */
2119*7c478bd9Sstevel@tonic-gate static int
2120*7c478bd9Sstevel@tonic-gate __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
2121*7c478bd9Sstevel@tonic-gate 	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
2122*7c478bd9Sstevel@tonic-gate {
2123*7c478bd9Sstevel@tonic-gate 	dr->dr_xid = drxid;
2124*7c478bd9Sstevel@tonic-gate 	dr->dr_prog = req->rq_prog;
2125*7c478bd9Sstevel@tonic-gate 	dr->dr_vers = req->rq_vers;
2126*7c478bd9Sstevel@tonic-gate 	dr->dr_proc = req->rq_proc;
2127*7c478bd9Sstevel@tonic-gate 	dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
2128*7c478bd9Sstevel@tonic-gate 	dr->dr_addr.len = dr->dr_addr.maxlen;
2129*7c478bd9Sstevel@tonic-gate 	if ((dr->dr_addr.buf = (caddr_t)mem_alloc(dr->dr_addr.maxlen))
2130*7c478bd9Sstevel@tonic-gate 				== NULL) {
2131*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "__svc_dupcache_enter: mem_alloc failed");
2132*7c478bd9Sstevel@tonic-gate 		mem_free(dr, sizeof (struct dupreq));
2133*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
2134*7c478bd9Sstevel@tonic-gate 	}
2135*7c478bd9Sstevel@tonic-gate 	memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
2136*7c478bd9Sstevel@tonic-gate 	memcpy((caddr_t)dr->dr_addr.buf,
2137*7c478bd9Sstevel@tonic-gate 		(caddr_t)req->rq_xprt->xp_rtaddr.buf, dr->dr_addr.len);
2138*7c478bd9Sstevel@tonic-gate 	dr->dr_resp.buf = NULL;
2139*7c478bd9Sstevel@tonic-gate 	dr->dr_resp.maxlen = 0;
2140*7c478bd9Sstevel@tonic-gate 	dr->dr_resp.len = 0;
2141*7c478bd9Sstevel@tonic-gate 	dr->dr_status = DUP_INPROGRESS;
2142*7c478bd9Sstevel@tonic-gate 	dr->dr_time = timenow;
2143*7c478bd9Sstevel@tonic-gate 	dr->dr_hash = drhash;	/* needed for efficient victim cleanup */
2144*7c478bd9Sstevel@tonic-gate 
2145*7c478bd9Sstevel@tonic-gate 	/* place entry at head of hash table */
2146*7c478bd9Sstevel@tonic-gate 	rw_wrlock(&(dc->dc_lock));
2147*7c478bd9Sstevel@tonic-gate 	dr->dr_chain = dc->dc_hashtbl[drhash];
2148*7c478bd9Sstevel@tonic-gate 	dr->dr_prevchain = NULL;
2149*7c478bd9Sstevel@tonic-gate 	if (dc->dc_hashtbl[drhash] != NULL)
2150*7c478bd9Sstevel@tonic-gate 		dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2151*7c478bd9Sstevel@tonic-gate 	dc->dc_hashtbl[drhash] = dr;
2152*7c478bd9Sstevel@tonic-gate 	rw_unlock(&(dc->dc_lock));
2153*7c478bd9Sstevel@tonic-gate 	return (DUP_NEW);
2154*7c478bd9Sstevel@tonic-gate }
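
/*
 * Illustrative sketch, not part of the library: the bucket index used when
 * entering an entry above must match the one computed independently on the
 * check and dupdone paths (drxid % dc->dc_buckets), otherwise the dr_hash
 * sanity checks in this file would trip.  A minimal helper capturing that
 * shared computation; the name and DUPCACHE_USAGE_EXAMPLE guard are
 * hypothetical.
 */
#ifdef DUPCACHE_USAGE_EXAMPLE
static uint32_t
example_dupcache_bucket(const struct dupcache *dc, uint32_t drxid)
{
	return (drxid % dc->dc_buckets);
}
#endif /* DUPCACHE_USAGE_EXAMPLE */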
2155*7c478bd9Sstevel@tonic-gate 
2156*7c478bd9Sstevel@tonic-gate /*
2157*7c478bd9Sstevel@tonic-gate  * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2158*7c478bd9Sstevel@tonic-gate  *		int status, char *xprt_cache)
2159*7c478bd9Sstevel@tonic-gate  * Marks the request done and stores the response.  The status argument
2160*7c478bd9Sstevel@tonic-gate  * must be DUP_DONE or DUP_DROP.  Moves the entry to the most recently
2161*7c478bd9Sstevel@tonic-gate  * used position on the lru list.  Returns DUP_ERROR or the given status.
2162*7c478bd9Sstevel@tonic-gate  */
2163*7c478bd9Sstevel@tonic-gate int
2164*7c478bd9Sstevel@tonic-gate __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2165*7c478bd9Sstevel@tonic-gate 		int status, char *xprt_cache)
2166*7c478bd9Sstevel@tonic-gate {
2167*7c478bd9Sstevel@tonic-gate 	uint32_t drxid, drhash;
2168*7c478bd9Sstevel@tonic-gate 	int rc;
2169*7c478bd9Sstevel@tonic-gate 
2170*7c478bd9Sstevel@tonic-gate 	/* LINTED pointer alignment */
2171*7c478bd9Sstevel@tonic-gate 	struct dupcache *dc = (struct dupcache *)xprt_cache;
2172*7c478bd9Sstevel@tonic-gate 
2173*7c478bd9Sstevel@tonic-gate 	if (dc == NULL) {
2174*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2175*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
2176*7c478bd9Sstevel@tonic-gate 	}
2177*7c478bd9Sstevel@tonic-gate 
2178*7c478bd9Sstevel@tonic-gate 	if (status != DUP_DONE && status != DUP_DROP) {
2179*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
2180*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "	 must be DUP_DONE or DUP_DROP");
2181*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
2182*7c478bd9Sstevel@tonic-gate 	}
2183*7c478bd9Sstevel@tonic-gate 
2184*7c478bd9Sstevel@tonic-gate 	/* find the xid of the entry in the cache */
2185*7c478bd9Sstevel@tonic-gate 	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
2186*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "__svc_dupdone: xid error");
2187*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
2188*7c478bd9Sstevel@tonic-gate 	}
2189*7c478bd9Sstevel@tonic-gate 	drhash = drxid % dc->dc_buckets;
2190*7c478bd9Sstevel@tonic-gate 
2191*7c478bd9Sstevel@tonic-gate 	/* update the status of the entry and result buffers, if required */
2192*7c478bd9Sstevel@tonic-gate 	if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2193*7c478bd9Sstevel@tonic-gate 			dc, drxid, drhash)) == DUP_ERROR) {
2194*7c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2195*7c478bd9Sstevel@tonic-gate 		return (DUP_ERROR);
2196*7c478bd9Sstevel@tonic-gate 	}
2197*7c478bd9Sstevel@tonic-gate 
2198*7c478bd9Sstevel@tonic-gate 	return (rc);
2199*7c478bd9Sstevel@tonic-gate }
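
/*
 * Illustrative sketch, not part of the library: one way a server dispatch
 * routine might drive the duplicate request cache around a non-idempotent
 * procedure.  The names example_dupcache, example_dispatch, reply_buf and
 * reply_len are hypothetical, the DUPCACHE_USAGE_EXAMPLE guard is never
 * defined, and the cache handle is assumed to have been created earlier
 * with __svc_dupcache_init(); building and sending the reply is elided.
 */
#ifdef DUPCACHE_USAGE_EXAMPLE
static char *example_dupcache;		/* per-transport dup cache handle */

static void
example_dispatch(struct svc_req *rqstp)
{
	caddr_t reply_buf = NULL;
	uint_t reply_len = 0;

	if (example_dupcache == NULL)
		return;		/* no dup cache configured for this xprt */

	switch (__svc_dup(rqstp, &reply_buf, &reply_len, example_dupcache)) {
	case DUP_NEW:
		/*
		 * First time this xid is seen: run the procedure, send the
		 * reply, then record it via __svc_dupdone() so later
		 * retransmissions can be answered from the cache.  Reply
		 * construction is elided here, so only the status (and a
		 * NULL buffer) gets recorded.
		 */
		(void) __svc_dupdone(rqstp, reply_buf, reply_len,
			DUP_DONE, example_dupcache);
		break;
	case DUP_DONE:
		/* retransmission: resend reply_buf if non-NULL, then free */
		break;
	case DUP_INPROGRESS:
	case DUP_DROP:
	case DUP_ERROR:
	default:
		/* in progress elsewhere, deliberately dropped, or error */
		break;
	}
}
#endif /* DUPCACHE_USAGE_EXAMPLE */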
2200*7c478bd9Sstevel@tonic-gate 
2201*7c478bd9Sstevel@tonic-gate /*
2202*7c478bd9Sstevel@tonic-gate  * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
2203*7c478bd9Sstevel@tonic-gate  * 	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
2204*7c478bd9Sstevel@tonic-gate  * 	uint32_t drhash)
2205*7c478bd9Sstevel@tonic-gate  * Check if the entry exists in the dupcache.  If it does, update its status
2206*7c478bd9Sstevel@tonic-gate  * and time, and its buffer, if appropriate.  It's possible, but unlikely,
2207*7c478bd9Sstevel@tonic-gate  * for DONE requests not to exist in the cache.  Return DUP_ERROR or status.
2208*7c478bd9Sstevel@tonic-gate  */
2209*7c478bd9Sstevel@tonic-gate static int
2210*7c478bd9Sstevel@tonic-gate __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2211*7c478bd9Sstevel@tonic-gate 	int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
2212*7c478bd9Sstevel@tonic-gate {
2213*7c478bd9Sstevel@tonic-gate 	struct dupreq *dr = NULL;
2214*7c478bd9Sstevel@tonic-gate 	time_t timenow = time(NULL);
2215*7c478bd9Sstevel@tonic-gate 
2216*7c478bd9Sstevel@tonic-gate 	rw_wrlock(&(dc->dc_lock));
2217*7c478bd9Sstevel@tonic-gate 	dr = dc->dc_hashtbl[drhash];
2218*7c478bd9Sstevel@tonic-gate 	while (dr != NULL) {
2219*7c478bd9Sstevel@tonic-gate 		if (dr->dr_xid == drxid &&
2220*7c478bd9Sstevel@tonic-gate 		    dr->dr_proc == req->rq_proc &&
2221*7c478bd9Sstevel@tonic-gate 		    dr->dr_prog == req->rq_prog &&
2222*7c478bd9Sstevel@tonic-gate 		    dr->dr_vers == req->rq_vers &&
2223*7c478bd9Sstevel@tonic-gate 		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
2224*7c478bd9Sstevel@tonic-gate 		    memcmp((caddr_t)dr->dr_addr.buf,
2225*7c478bd9Sstevel@tonic-gate 				(caddr_t)req->rq_xprt->xp_rtaddr.buf,
2226*7c478bd9Sstevel@tonic-gate 				dr->dr_addr.len) == 0) { /* entry found */
2227*7c478bd9Sstevel@tonic-gate 			if (dr->dr_hash != drhash) {
2228*7c478bd9Sstevel@tonic-gate 				/* sanity check */
2229*7c478bd9Sstevel@tonic-gate 				rw_unlock(&(dc->dc_lock));
2230*7c478bd9Sstevel@tonic-gate 				syslog(LOG_ERR,
2231*7c478bd9Sstevel@tonic-gate 				"\n__svc_dupdone: hashing error");
2232*7c478bd9Sstevel@tonic-gate 				return (DUP_ERROR);
2233*7c478bd9Sstevel@tonic-gate 			}
2234*7c478bd9Sstevel@tonic-gate 
2235*7c478bd9Sstevel@tonic-gate 			/* store the results if the buffer is not NULL */
2236*7c478bd9Sstevel@tonic-gate 			if (resp_buf != NULL) {
2237*7c478bd9Sstevel@tonic-gate 				if ((dr->dr_resp.buf = (caddr_t)
2238*7c478bd9Sstevel@tonic-gate 					mem_alloc(resp_bufsz)) == NULL) {
2239*7c478bd9Sstevel@tonic-gate 					rw_unlock(&(dc->dc_lock));
2240*7c478bd9Sstevel@tonic-gate 					syslog(LOG_ERR,
2241*7c478bd9Sstevel@tonic-gate 					"__svc_dupdone: mem_alloc failed");
2242*7c478bd9Sstevel@tonic-gate 					return (DUP_ERROR);
2243*7c478bd9Sstevel@tonic-gate 				}
2244*7c478bd9Sstevel@tonic-gate 				memset(dr->dr_resp.buf, 0, resp_bufsz);
2245*7c478bd9Sstevel@tonic-gate 				memcpy((caddr_t)dr->dr_resp.buf, resp_buf,
2246*7c478bd9Sstevel@tonic-gate 					(uint_t)resp_bufsz);
2247*7c478bd9Sstevel@tonic-gate 				dr->dr_resp.len = resp_bufsz;
2248*7c478bd9Sstevel@tonic-gate 			}
2249*7c478bd9Sstevel@tonic-gate 
2250*7c478bd9Sstevel@tonic-gate 			/* update status and done time */
2251*7c478bd9Sstevel@tonic-gate 			dr->dr_status = status;
2252*7c478bd9Sstevel@tonic-gate 			dr->dr_time = timenow;
2253*7c478bd9Sstevel@tonic-gate 
2254*7c478bd9Sstevel@tonic-gate 			/* move the entry to the mru position */
2255*7c478bd9Sstevel@tonic-gate 			if (dc->dc_mru == NULL) {
2256*7c478bd9Sstevel@tonic-gate 				dr->dr_next = dr;
2257*7c478bd9Sstevel@tonic-gate 				dr->dr_prev = dr;
2258*7c478bd9Sstevel@tonic-gate 			} else {
2259*7c478bd9Sstevel@tonic-gate 				dr->dr_next = dc->dc_mru->dr_next;
2260*7c478bd9Sstevel@tonic-gate 				dc->dc_mru->dr_next->dr_prev = dr;
2261*7c478bd9Sstevel@tonic-gate 				dr->dr_prev = dc->dc_mru;
2262*7c478bd9Sstevel@tonic-gate 				dc->dc_mru->dr_next = dr;
2263*7c478bd9Sstevel@tonic-gate 			}
2264*7c478bd9Sstevel@tonic-gate 			dc->dc_mru = dr;
2265*7c478bd9Sstevel@tonic-gate 
2266*7c478bd9Sstevel@tonic-gate 			rw_unlock(&(dc->dc_lock));
2267*7c478bd9Sstevel@tonic-gate 			return (status);
2268*7c478bd9Sstevel@tonic-gate 		}
2269*7c478bd9Sstevel@tonic-gate 		dr = dr->dr_chain;
2270*7c478bd9Sstevel@tonic-gate 	}
2271*7c478bd9Sstevel@tonic-gate 	rw_unlock(&(dc->dc_lock));
2272*7c478bd9Sstevel@tonic-gate 	syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
2273*7c478bd9Sstevel@tonic-gate 	return (DUP_ERROR);
2274*7c478bd9Sstevel@tonic-gate }
2275*7c478bd9Sstevel@tonic-gate 
2276*7c478bd9Sstevel@tonic-gate #ifdef DUP_DEBUG
2277*7c478bd9Sstevel@tonic-gate /*
2278*7c478bd9Sstevel@tonic-gate  * __svc_dupcache_debug(struct dupcache *dc)
2279*7c478bd9Sstevel@tonic-gate  * print out the contents of the hash table and lru list
2280*7c478bd9Sstevel@tonic-gate  *
2281*7c478bd9Sstevel@tonic-gate  * This function requires the caller to hold the reader
2282*7c478bd9Sstevel@tonic-gate  * or writer version of the duplicate request cache lock (dc_lock).
2283*7c478bd9Sstevel@tonic-gate  */
2284*7c478bd9Sstevel@tonic-gate static void
2285*7c478bd9Sstevel@tonic-gate __svc_dupcache_debug(struct dupcache *dc)
2286*7c478bd9Sstevel@tonic-gate {
2287*7c478bd9Sstevel@tonic-gate 	struct dupreq *dr = NULL;
2288*7c478bd9Sstevel@tonic-gate 	int i;
2289*7c478bd9Sstevel@tonic-gate 	bool_t bval;
2290*7c478bd9Sstevel@tonic-gate 
2291*7c478bd9Sstevel@tonic-gate 	fprintf(stderr, "   HASHTABLE\n");
2292*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < dc->dc_buckets; i++) {
2293*7c478bd9Sstevel@tonic-gate 		bval = FALSE;
2294*7c478bd9Sstevel@tonic-gate 		dr = dc->dc_hashtbl[i];
2295*7c478bd9Sstevel@tonic-gate 		while (dr != NULL) {
2296*7c478bd9Sstevel@tonic-gate 			if (!bval) {	/* ensures bucket printed only once */
2297*7c478bd9Sstevel@tonic-gate 				fprintf(stderr, "    bucket : %d\n", i);
2298*7c478bd9Sstevel@tonic-gate 				bval = TRUE;
2299*7c478bd9Sstevel@tonic-gate 			}
2300*7c478bd9Sstevel@tonic-gate 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2301*7c478bd9Sstevel@tonic-gate 				dr->dr_xid, dr->dr_status, dr->dr_time);
2302*7c478bd9Sstevel@tonic-gate 			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
2303*7c478bd9Sstevel@tonic-gate 				dr, dr->dr_chain, dr->dr_prevchain);
2304*7c478bd9Sstevel@tonic-gate 			dr = dr->dr_chain;
2305*7c478bd9Sstevel@tonic-gate 		}
2306*7c478bd9Sstevel@tonic-gate 	}
2307*7c478bd9Sstevel@tonic-gate 
2308*7c478bd9Sstevel@tonic-gate 	fprintf(stderr, "   LRU\n");
2309*7c478bd9Sstevel@tonic-gate 	if (dc->dc_mru) {
2310*7c478bd9Sstevel@tonic-gate 		dr = dc->dc_mru->dr_next;	/* lru */
2311*7c478bd9Sstevel@tonic-gate 		while (dr != dc->dc_mru) {
2312*7c478bd9Sstevel@tonic-gate 			fprintf(stderr, "\txid: %u status: %d time: %ld",
2313*7c478bd9Sstevel@tonic-gate 				dr->dr_xid, dr->dr_status, dr->dr_time);
2314*7c478bd9Sstevel@tonic-gate 			fprintf(stderr, " dr: %p next: %p prev: %p\n",
2315*7c478bd9Sstevel@tonic-gate 				dr, dr->dr_next, dr->dr_prev);
2316*7c478bd9Sstevel@tonic-gate 			dr = dr->dr_next;
2317*7c478bd9Sstevel@tonic-gate 		}
2318*7c478bd9Sstevel@tonic-gate 		fprintf(stderr, "\txid: %u status: %d time: %ld",
2319*7c478bd9Sstevel@tonic-gate 			dr->dr_xid, dr->dr_status, dr->dr_time);
2320*7c478bd9Sstevel@tonic-gate 		fprintf(stderr, " dr: %p next: %p prev: %p\n", dr,
2321*7c478bd9Sstevel@tonic-gate 			dr->dr_next, dr->dr_prev);
2322*7c478bd9Sstevel@tonic-gate 	}
2323*7c478bd9Sstevel@tonic-gate }
2324*7c478bd9Sstevel@tonic-gate #endif /* DUP_DEBUG */
2325