10d63ce2bSvenki /*
20d63ce2bSvenki  * CDDL HEADER START
30d63ce2bSvenki  *
40d63ce2bSvenki  * The contents of this file are subject to the terms of the
50d63ce2bSvenki  * Common Development and Distribution License (the "License").
60d63ce2bSvenki  * You may not use this file except in compliance with the License.
70d63ce2bSvenki  *
80d63ce2bSvenki  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90d63ce2bSvenki  * or http://www.opensolaris.org/os/licensing.
100d63ce2bSvenki  * See the License for the specific language governing permissions
110d63ce2bSvenki  * and limitations under the License.
120d63ce2bSvenki  *
130d63ce2bSvenki  * When distributing Covered Code, include this CDDL HEADER in each
140d63ce2bSvenki  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150d63ce2bSvenki  * If applicable, add the following below this CDDL HEADER, with the
160d63ce2bSvenki  * fields enclosed by brackets "[]" replaced with your own identifying
170d63ce2bSvenki  * information: Portions Copyright [yyyy] [name of copyright owner]
180d63ce2bSvenki  *
190d63ce2bSvenki  * CDDL HEADER END
200d63ce2bSvenki  */
210d63ce2bSvenki 
220d63ce2bSvenki /*
23a1c54725Sfw  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
240d63ce2bSvenki  * Use is subject to license terms.
25*0409f346SPeter Tribble  * Copyright 2019 Peter Tribble.
260d63ce2bSvenki  */
270d63ce2bSvenki 
280d63ce2bSvenki /*
290d63ce2bSvenki  * The snmp library helps to prepare the PDUs and communicate with
300d63ce2bSvenki  * the snmp agent on the SP side via the ds_snmp driver.
310d63ce2bSvenki  */
320d63ce2bSvenki 
330d63ce2bSvenki #include <stdio.h>
340d63ce2bSvenki #include <stdlib.h>
350d63ce2bSvenki #include <string.h>
360d63ce2bSvenki #include <unistd.h>
370d63ce2bSvenki #include <thread.h>
380d63ce2bSvenki #include <synch.h>
390d63ce2bSvenki #include <errno.h>
400d63ce2bSvenki #include <sys/time.h>
410d63ce2bSvenki #include <sys/types.h>
420d63ce2bSvenki #include <sys/stat.h>
430d63ce2bSvenki #include <fcntl.h>
440d63ce2bSvenki #include <libnvpair.h>
450d63ce2bSvenki #include <sys/ds_snmp.h>
460d63ce2bSvenki 
470d63ce2bSvenki #include "libpiclsnmp.h"
480d63ce2bSvenki #include "snmplib.h"
490d63ce2bSvenki #include "asn1.h"
500d63ce2bSvenki #include "pdu.h"
510d63ce2bSvenki 
520d63ce2bSvenki #pragma init(libpiclsnmp_init)		/* need this in .init */
530d63ce2bSvenki 
540d63ce2bSvenki /*
550d63ce2bSvenki  * Data from the MIB is fetched based on the hints about object
560d63ce2bSvenki  * groups received from (possibly many threads in) the application.
570d63ce2bSvenki  * However, the fetched data is kept in a common cache for use across
580d63ce2bSvenki  * all threads, so even a GETBULK is issued only when absolutely
590d63ce2bSvenki  * necessary.
600d63ce2bSvenki  *
610d63ce2bSvenki  * Note that locking is not fine grained (there's no locking per row)
620d63ce2bSvenki  * since we don't expect too many MT consumers right away.
630d63ce2bSvenki  *
640d63ce2bSvenki  */
650d63ce2bSvenki static mutex_t	mibcache_lock;
660d63ce2bSvenki static nvlist_t	**mibcache = NULL;
670d63ce2bSvenki static uint_t	n_mibcache_rows = 0;
680d63ce2bSvenki 
690d63ce2bSvenki static mutex_t snmp_reqid_lock;
700d63ce2bSvenki static int snmp_reqid = 1;
710d63ce2bSvenki 
720d63ce2bSvenki #ifdef USE_SOCKETS
730d63ce2bSvenki #define	SNMP_DEFAULT_PORT	161
740d63ce2bSvenki #define	SNMP_MAX_RECV_PKTSZ	(64 * 1024)
750d63ce2bSvenki #endif
760d63ce2bSvenki 
77817697f4SKelly Moyer /*
78817697f4SKelly Moyer  * We need a reliably monotonic and stable source of time values to age
79817697f4SKelly Moyer  * entries in the mibcache toward expiration.  The code originally used
80817697f4SKelly Moyer  * gettimeofday(), but since that is subject to time-of-day changes made by
81817697f4SKelly Moyer  * the administrator, the values it returns do not satisfy our needs.
82817697f4SKelly Moyer  * Instead, we use gethrtime(), which is immune to time-of-day changes.
83817697f4SKelly Moyer  * However, since gethrtime() returns a signed 64-bit value in units of
84817697f4SKelly Moyer  * nanoseconds and we are using signed 32-bit timestamps, we always divide
85817697f4SKelly Moyer  * the result by (HRTIME_SCALE * NANOSEC) to scale it down into units of 10
86817697f4SKelly Moyer  * seconds.
87817697f4SKelly Moyer  *
88817697f4SKelly Moyer  * Note that the scaling factor means that the value of MAX_INCACHE_TIME
89817697f4SKelly Moyer  * from snmplib.h should also be in units of 10 seconds.
90817697f4SKelly Moyer  */
91817697f4SKelly Moyer #define	GET_SCALED_HRTIME()	(int)(gethrtime() / (HRTIME_SCALE * NANOSEC))
92817697f4SKelly Moyer 
93817697f4SKelly Moyer /*
94817697f4SKelly Moyer  * The mibcache code originally cached values for 300 seconds after fetching
95817697f4SKelly Moyer  * data via SNMP.  Subsequent reads within that 300 second window would come
96817697f4SKelly Moyer  * from the cache - which is quite a bit faster than an SNMP query - but the
97817697f4SKelly Moyer  * first request that came in more than 300 seconds after the previous SNMP
98817697f4SKelly Moyer  * query would trigger a new SNMP query.  This worked well as an
99817697f4SKelly Moyer  * optimization for frequent queries, but when data was only queried less
100817697f4SKelly Moyer  * frequently than every 300 seconds (as proved to be the case at multiple
101817697f4SKelly Moyer  * customer sites), the cache didn't help at all.
102817697f4SKelly Moyer  *
103817697f4SKelly Moyer  * To improve the performance of infrequent queries, code was added to the
104817697f4SKelly Moyer  * library to allow a client (i.e. a thread in the picl plugin) to proactively
105817697f4SKelly Moyer  * refresh cache entries without waiting for them to expire, thereby ensuring
106817697f4SKelly Moyer  * that all volatile entries in the cache at any given time are less than 300
107817697f4SKelly Moyer  * seconds old.  Whenever an SNMP query is generated to retrieve volatile data
108817697f4SKelly Moyer  * that will be cached, an entry is added in a refresh queue that tracks the
109817697f4SKelly Moyer  * parameters of the query and the time that it was made.  A client can query
110817697f4SKelly Moyer  * the age of the oldest item in the refresh queue and - at its discretion - can
111817697f4SKelly Moyer  * then force that query to be repeated in a manner that will update the
112817697f4SKelly Moyer  * mibcache entry even though it hasn't expired.
113817697f4SKelly Moyer  */
114817697f4SKelly Moyer typedef struct {
115817697f4SKelly Moyer 	struct picl_snmphdl	*smd;
116817697f4SKelly Moyer 	char			*oidstrs;
117817697f4SKelly Moyer 	int			n_oids;
118817697f4SKelly Moyer 	int			row;
119817697f4SKelly Moyer 	int			last_fetch_time;	/* in scaled hrtime */
120817697f4SKelly Moyer } refreshq_job_t;
121817697f4SKelly Moyer 
122817697f4SKelly Moyer static mutex_t		refreshq_lock;
123817697f4SKelly Moyer static refreshq_job_t	*refreshq = NULL;
124817697f4SKelly Moyer static uint_t		n_refreshq_slots = 0;	/* # of alloc'ed job slots */
125817697f4SKelly Moyer static uint_t		n_refreshq_jobs = 0;	/* # of unprocessed jobs */
126817697f4SKelly Moyer static uint_t		refreshq_next_job = 0;	/* oldest unprocessed job */
127817697f4SKelly Moyer static uint_t		refreshq_next_slot = 0;	/* next available job slot */
128817697f4SKelly Moyer 
129817697f4SKelly Moyer 
1300d63ce2bSvenki /*
1310d63ce2bSvenki  * Static function declarations
1320d63ce2bSvenki  */
1330d63ce2bSvenki static void	libpiclsnmp_init(void);
1340d63ce2bSvenki 
1350d63ce2bSvenki static int	lookup_int(char *, int, int *, int);
1360d63ce2bSvenki static int	lookup_str(char *, int, char **, int);
1370d63ce2bSvenki static int	lookup_bitstr(char *, int, uchar_t **, uint_t *, int);
1380d63ce2bSvenki 
1390d63ce2bSvenki static oidgroup_t *locate_oid_group(struct picl_snmphdl *, char *);
1400d63ce2bSvenki static int	search_oid_in_group(char *, char *, int);
1410d63ce2bSvenki 
1420d63ce2bSvenki static snmp_pdu_t *fetch_single(struct picl_snmphdl *, char *, int, int *);
1430d63ce2bSvenki static snmp_pdu_t *fetch_next(struct picl_snmphdl *, char *, int, int *);
1440d63ce2bSvenki static void	fetch_bulk(struct picl_snmphdl *, char *, int, int, int, int *);
1450d63ce2bSvenki static int	fetch_single_str(struct picl_snmphdl *, char *, int,
1460d63ce2bSvenki 		    char **, int *);
1470d63ce2bSvenki static int	fetch_single_int(struct picl_snmphdl *, char *, int,
1480d63ce2bSvenki 		    int *, int *);
1490d63ce2bSvenki static int	fetch_single_bitstr(struct picl_snmphdl *, char *, int,
1500d63ce2bSvenki 		    uchar_t **, uint_t *, int *);
1510d63ce2bSvenki 
1520d63ce2bSvenki static int	snmp_send_request(struct picl_snmphdl *, snmp_pdu_t *, int *);
1530d63ce2bSvenki static int	snmp_recv_reply(struct picl_snmphdl *, snmp_pdu_t *, int *);
1540d63ce2bSvenki 
1550d63ce2bSvenki static int	mibcache_realloc(int);
1560d63ce2bSvenki static void	mibcache_populate(snmp_pdu_t *, int);
1570d63ce2bSvenki static char	*oid_to_oidstr(oid *, size_t);
1580d63ce2bSvenki 
159817697f4SKelly Moyer static int	refreshq_realloc(int);
160817697f4SKelly Moyer static int	refreshq_add_job(struct picl_snmphdl *, char *, int, int);
161817697f4SKelly Moyer 
1620d63ce2bSvenki 
1630d63ce2bSvenki static void
libpiclsnmp_init(void)1640d63ce2bSvenki libpiclsnmp_init(void)
1650d63ce2bSvenki {
1660d63ce2bSvenki 	(void) mutex_init(&mibcache_lock, USYNC_THREAD, NULL);
1670d63ce2bSvenki 	if (mibcache_realloc(0) < 0)
1680d63ce2bSvenki 		(void) mutex_destroy(&mibcache_lock);
1690d63ce2bSvenki 
170817697f4SKelly Moyer 	(void) mutex_init(&refreshq_lock, USYNC_THREAD, NULL);
1710d63ce2bSvenki 	(void) mutex_init(&snmp_reqid_lock, USYNC_THREAD, NULL);
1720d63ce2bSvenki }
1730d63ce2bSvenki 
1740d63ce2bSvenki picl_snmphdl_t
snmp_init()1750d63ce2bSvenki snmp_init()
1760d63ce2bSvenki {
1770d63ce2bSvenki 	struct picl_snmphdl	*smd;
1780d63ce2bSvenki #ifdef USE_SOCKETS
1790d63ce2bSvenki 	int	sbuf = (1 << 15);	/* 16K */
1800d63ce2bSvenki 	int	rbuf = (1 << 17);	/* 64K */
1810d63ce2bSvenki 	char	*snmp_agent_addr;
1820d63ce2bSvenki #endif
1830d63ce2bSvenki 
1840d63ce2bSvenki 	smd = (struct picl_snmphdl *)calloc(1, sizeof (struct picl_snmphdl));
1850d63ce2bSvenki 	if (smd == NULL)
1860d63ce2bSvenki 		return (NULL);
1870d63ce2bSvenki 
1880d63ce2bSvenki #ifdef USE_SOCKETS
1890d63ce2bSvenki 	if ((snmp_agent_addr = getenv("SNMP_AGENT_IPADDR")) == NULL)
1900d63ce2bSvenki 		return (NULL);
1910d63ce2bSvenki 
1920d63ce2bSvenki 	if ((smd->fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0)
1930d63ce2bSvenki 		return (NULL);
1940d63ce2bSvenki 
1950d63ce2bSvenki 	(void) setsockopt(smd->fd, SOL_SOCKET, SO_SNDBUF, &sbuf, sizeof (int));
1960d63ce2bSvenki 	(void) setsockopt(smd->fd, SOL_SOCKET, SO_RCVBUF, &rbuf, sizeof (int));
1970d63ce2bSvenki 
1980d63ce2bSvenki 	memset(&smd->agent_addr, 0, sizeof (struct sockaddr_in));
1990d63ce2bSvenki 	smd->agent_addr.sin_family = AF_INET;
2000d63ce2bSvenki 	smd->agent_addr.sin_port = htons(SNMP_DEFAULT_PORT);
2010d63ce2bSvenki 	smd->agent_addr.sin_addr.s_addr = inet_addr(snmp_agent_addr);
2020d63ce2bSvenki #else
2030d63ce2bSvenki 	smd->fd = open(DS_SNMP_DRIVER, O_RDWR);
2040d63ce2bSvenki 	if (smd->fd < 0) {
2050d63ce2bSvenki 		free(smd);
2060d63ce2bSvenki 		return (NULL);
2070d63ce2bSvenki 	}
2080d63ce2bSvenki #endif
2090d63ce2bSvenki 
2100d63ce2bSvenki 	return ((picl_snmphdl_t)smd);
2110d63ce2bSvenki }
2120d63ce2bSvenki 
2130d63ce2bSvenki void
snmp_fini(picl_snmphdl_t hdl)2140d63ce2bSvenki snmp_fini(picl_snmphdl_t hdl)
2150d63ce2bSvenki {
2160d63ce2bSvenki 	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;
2170d63ce2bSvenki 
2180d63ce2bSvenki 	if (smd) {
2190d63ce2bSvenki 		if (smd->fd >= 0) {
2200d63ce2bSvenki 			(void) close(smd->fd);
2210d63ce2bSvenki 		}
2220d63ce2bSvenki 		free(smd);
2230d63ce2bSvenki 	}
2240d63ce2bSvenki }
2250d63ce2bSvenki 
2260d63ce2bSvenki int
snmp_reinit(picl_snmphdl_t hdl,int clr_linkreset)2270d63ce2bSvenki snmp_reinit(picl_snmphdl_t hdl, int clr_linkreset)
2280d63ce2bSvenki {
2290d63ce2bSvenki 	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
2300d63ce2bSvenki 	nvlist_t *nvl;
2310d63ce2bSvenki 	int i;
2320d63ce2bSvenki 
2330d63ce2bSvenki 	(void) mutex_lock(&mibcache_lock);
2340d63ce2bSvenki 
2350d63ce2bSvenki 	for (i = 0; i < n_mibcache_rows; i++) {
2360d63ce2bSvenki 		if ((nvl = mibcache[i]) != NULL)
2370d63ce2bSvenki 			nvlist_free(nvl);
2380d63ce2bSvenki 	}
2390d63ce2bSvenki 
2400d63ce2bSvenki 	n_mibcache_rows = 0;
2410d63ce2bSvenki 	if (mibcache) {
2420d63ce2bSvenki 		free(mibcache);
2430d63ce2bSvenki 		mibcache = NULL;
2440d63ce2bSvenki 	}
2450d63ce2bSvenki 
2460d63ce2bSvenki 	(void) mutex_unlock(&mibcache_lock);
2470d63ce2bSvenki 
2480d63ce2bSvenki 	if (clr_linkreset) {
2490d63ce2bSvenki 		if (smd == NULL || smd->fd < 0)
2500d63ce2bSvenki 			return (-1);
2510d63ce2bSvenki 		else
2520d63ce2bSvenki 			return (ioctl(smd->fd, DSSNMP_CLRLNKRESET, NULL));
2530d63ce2bSvenki 	}
2540d63ce2bSvenki 
2550d63ce2bSvenki 	return (0);
2560d63ce2bSvenki }
2570d63ce2bSvenki 
2580d63ce2bSvenki void
snmp_register_group(picl_snmphdl_t hdl,char * oidstrs,int n_oids,int is_vol)2590d63ce2bSvenki snmp_register_group(picl_snmphdl_t hdl, char *oidstrs, int n_oids, int is_vol)
2600d63ce2bSvenki {
2610d63ce2bSvenki 	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
2620d63ce2bSvenki 	oidgroup_t	*oidg;
2630d63ce2bSvenki 	oidgroup_t	*curr, *prev;
2640d63ce2bSvenki 	char		*p;
2650d63ce2bSvenki 	int		i, sz;
2660d63ce2bSvenki 
2670d63ce2bSvenki 	/*
2680d63ce2bSvenki 	 * Allocate a new oidgroup_t
2690d63ce2bSvenki 	 */
2700d63ce2bSvenki 	oidg = (oidgroup_t *)calloc(1, sizeof (struct oidgroup));
2710d63ce2bSvenki 	if (oidg == NULL)
2720d63ce2bSvenki 		return;
2730d63ce2bSvenki 
2740d63ce2bSvenki 	/*
2750d63ce2bSvenki 	 * Determine how much space is required to register this group
2760d63ce2bSvenki 	 */
2770d63ce2bSvenki 	sz = 0;
2780d63ce2bSvenki 	p = oidstrs;
2790d63ce2bSvenki 	for (i = 0; i < n_oids; i++) {
2800d63ce2bSvenki 		sz += strlen(p) + 1;
2810d63ce2bSvenki 		p = oidstrs + sz;
2820d63ce2bSvenki 	}
2830d63ce2bSvenki 
2840d63ce2bSvenki 	/*
2850d63ce2bSvenki 	 * Create this oid group
2860d63ce2bSvenki 	 */
2870d63ce2bSvenki 	if ((p = (char *)malloc(sz)) == NULL) {
2880d63ce2bSvenki 		free((void *) oidg);
2890d63ce2bSvenki 		return;
2900d63ce2bSvenki 	}
2910d63ce2bSvenki 
2920d63ce2bSvenki 	(void) memcpy(p, oidstrs, sz);
2930d63ce2bSvenki 
2940d63ce2bSvenki 	oidg->next = NULL;
2950d63ce2bSvenki 	oidg->oidstrs = p;
2960d63ce2bSvenki 	oidg->n_oids = n_oids;
2970d63ce2bSvenki 	oidg->is_volatile = is_vol;
2980d63ce2bSvenki 
2990d63ce2bSvenki 	/*
3000d63ce2bSvenki 	 * Link it to the tail of the list of oid groups
3010d63ce2bSvenki 	 */
3020d63ce2bSvenki 	for (prev = NULL, curr = smd->group; curr; curr = curr->next)
3030d63ce2bSvenki 		prev = curr;
3040d63ce2bSvenki 
3050d63ce2bSvenki 	if (prev == NULL)
3060d63ce2bSvenki 		smd->group = oidg;
3070d63ce2bSvenki 	else
3080d63ce2bSvenki 		prev->next = oidg;
3090d63ce2bSvenki }
3100d63ce2bSvenki 
3110d63ce2bSvenki /*
3120d63ce2bSvenki  * snmp_get_int() takes in an OID and returns the integer value
3130d63ce2bSvenki  * of the object referenced in the passed arg. It returns 0 on
3140d63ce2bSvenki  * success and -1 on failure.
3150d63ce2bSvenki  */
3160d63ce2bSvenki int
snmp_get_int(picl_snmphdl_t hdl,char * prefix,int row,int * val,int * snmp_syserr)3170d63ce2bSvenki snmp_get_int(picl_snmphdl_t hdl, char *prefix, int row, int *val,
3180d63ce2bSvenki     int *snmp_syserr)
3190d63ce2bSvenki {
3200d63ce2bSvenki 	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
3210d63ce2bSvenki 	oidgroup_t	*grp;
3220d63ce2bSvenki 	int	ret;
3230d63ce2bSvenki 	int	err = 0;
3240d63ce2bSvenki 
3250d63ce2bSvenki 	if (smd == NULL || prefix == NULL || val == NULL)
3260d63ce2bSvenki 		return (-1);
3270d63ce2bSvenki 
3280d63ce2bSvenki 	/*
3290d63ce2bSvenki 	 * If this item should not be cached, fetch it directly from
3300d63ce2bSvenki 	 * the agent using fetch_single_xxx()
3310d63ce2bSvenki 	 */
3320d63ce2bSvenki 	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
3330d63ce2bSvenki 		ret = fetch_single_int(smd, prefix, row, val, &err);
3340d63ce2bSvenki 
3350d63ce2bSvenki 		if (snmp_syserr)
3360d63ce2bSvenki 			*snmp_syserr = err;
3370d63ce2bSvenki 
3380d63ce2bSvenki 		return (ret);
3390d63ce2bSvenki 	}
3400d63ce2bSvenki 
3410d63ce2bSvenki 	/*
3420d63ce2bSvenki 	 * is it in the cache ?
3430d63ce2bSvenki 	 */
3440d63ce2bSvenki 	if (lookup_int(prefix, row, val, grp->is_volatile) == 0)
3450d63ce2bSvenki 		return (0);
3460d63ce2bSvenki 
3470d63ce2bSvenki 	/*
3480d63ce2bSvenki 	 * fetch it from the agent and populate the cache
3490d63ce2bSvenki 	 */
3500d63ce2bSvenki 	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
3510d63ce2bSvenki 	if (snmp_syserr)
3520d63ce2bSvenki 		*snmp_syserr = err;
3530d63ce2bSvenki 
3540d63ce2bSvenki 	/*
3550d63ce2bSvenki 	 * look it up again and return it
3560d63ce2bSvenki 	 */
3570d63ce2bSvenki 	if (lookup_int(prefix, row, val, grp->is_volatile) < 0)
3580d63ce2bSvenki 		return (-1);
3590d63ce2bSvenki 
3600d63ce2bSvenki 	return (0);
3610d63ce2bSvenki }
3620d63ce2bSvenki 
3630d63ce2bSvenki /*
3640d63ce2bSvenki  * snmp_get_str() takes in an OID and returns the string value
3650d63ce2bSvenki  * of the object referenced in the passed arg. Memory for the string
3660d63ce2bSvenki  * is allocated within snmp_get_str() and is expected to be freed by
3670d63ce2bSvenki  * the caller when it is no longer needed. The function returns 0
3680d63ce2bSvenki  * on success and -1 on failure.
3690d63ce2bSvenki  */
3700d63ce2bSvenki int
snmp_get_str(picl_snmphdl_t hdl,char * prefix,int row,char ** strp,int * snmp_syserr)3710d63ce2bSvenki snmp_get_str(picl_snmphdl_t hdl, char *prefix, int row, char **strp,
3720d63ce2bSvenki     int *snmp_syserr)
3730d63ce2bSvenki {
3740d63ce2bSvenki 	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
3750d63ce2bSvenki 	oidgroup_t	*grp;
3760d63ce2bSvenki 	char	*val;
3770d63ce2bSvenki 	int	ret;
3780d63ce2bSvenki 	int	err = 0;
3790d63ce2bSvenki 
3800d63ce2bSvenki 	if (smd == NULL || prefix == NULL || strp == NULL)
3810d63ce2bSvenki 		return (-1);
3820d63ce2bSvenki 
383d19c75f6Sjfrank 	*strp = NULL;
3840d63ce2bSvenki 	/*
3850d63ce2bSvenki 	 * Check if this item is cacheable or not. If not, call
3860d63ce2bSvenki 	 * fetch_single_* to get it directly from the agent
3870d63ce2bSvenki 	 */
3880d63ce2bSvenki 	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
3890d63ce2bSvenki 		ret = fetch_single_str(smd, prefix, row, strp, &err);
3900d63ce2bSvenki 
3910d63ce2bSvenki 		if (snmp_syserr)
3920d63ce2bSvenki 			*snmp_syserr = err;
3930d63ce2bSvenki 
3940d63ce2bSvenki 		return (ret);
3950d63ce2bSvenki 	}
3960d63ce2bSvenki 
3970d63ce2bSvenki 	/*
3980d63ce2bSvenki 	 * See if it's in the cache already
3990d63ce2bSvenki 	 */
4000d63ce2bSvenki 	if (lookup_str(prefix, row, &val, grp->is_volatile) == 0) {
4010d63ce2bSvenki 		if ((*strp = strdup(val)) == NULL)
4020d63ce2bSvenki 			return (-1);
4030d63ce2bSvenki 		else
4040d63ce2bSvenki 			return (0);
4050d63ce2bSvenki 	}
4060d63ce2bSvenki 
4070d63ce2bSvenki 	/*
4080d63ce2bSvenki 	 * Fetch it from the agent and populate cache
4090d63ce2bSvenki 	 */
4100d63ce2bSvenki 	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
4110d63ce2bSvenki 	if (snmp_syserr)
4120d63ce2bSvenki 		*snmp_syserr = err;
4130d63ce2bSvenki 
4140d63ce2bSvenki 	/*
4150d63ce2bSvenki 	 * Retry lookup
4160d63ce2bSvenki 	 */
4170d63ce2bSvenki 	if (lookup_str(prefix, row, &val, grp->is_volatile) < 0)
4180d63ce2bSvenki 		return (-1);
4190d63ce2bSvenki 
4200d63ce2bSvenki 
4210d63ce2bSvenki 	if ((*strp = strdup(val)) == NULL)
4220d63ce2bSvenki 		return (-1);
4230d63ce2bSvenki 	else
4240d63ce2bSvenki 		return (0);
4250d63ce2bSvenki }
4260d63ce2bSvenki 
4270d63ce2bSvenki /*
4280d63ce2bSvenki  * snmp_get_bitstr() takes in an OID and returns the bit string value
4290d63ce2bSvenki  * of the object referenced in the passed args. Memory for the bitstring
4300d63ce2bSvenki  * is allocated within the function and is expected to be freed by
4310d63ce2bSvenki  * the caller when it is no longer needed. The function returns 0
4320d63ce2bSvenki  * on success and -1 on failure.
4330d63ce2bSvenki  */
4340d63ce2bSvenki int
snmp_get_bitstr(picl_snmphdl_t hdl,char * prefix,int row,uchar_t ** bitstrp,uint_t * nbytes,int * snmp_syserr)4350d63ce2bSvenki snmp_get_bitstr(picl_snmphdl_t hdl, char *prefix, int row, uchar_t **bitstrp,
4360d63ce2bSvenki     uint_t *nbytes, int *snmp_syserr)
4370d63ce2bSvenki {
4380d63ce2bSvenki 	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
4390d63ce2bSvenki 	oidgroup_t	*grp;
4400d63ce2bSvenki 	uchar_t	*val;
4410d63ce2bSvenki 	int	ret;
4420d63ce2bSvenki 	int	err = 0;
4430d63ce2bSvenki 
4440d63ce2bSvenki 	if (smd == NULL || prefix == NULL || bitstrp == NULL || nbytes == NULL)
4450d63ce2bSvenki 		return (-1);
4460d63ce2bSvenki 
447d19c75f6Sjfrank 	*bitstrp = NULL;
4480d63ce2bSvenki 	/*
4490d63ce2bSvenki 	 * Check if this item is cacheable or not. If not, call
4500d63ce2bSvenki 	 * fetch_single_* to get it directly from the agent
4510d63ce2bSvenki 	 */
4520d63ce2bSvenki 	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
4530d63ce2bSvenki 		ret = fetch_single_bitstr(smd, prefix, row, bitstrp,
4540d63ce2bSvenki 		    nbytes, &err);
4550d63ce2bSvenki 
4560d63ce2bSvenki 		if (snmp_syserr)
4570d63ce2bSvenki 			*snmp_syserr = err;
4580d63ce2bSvenki 
4590d63ce2bSvenki 		return (ret);
4600d63ce2bSvenki 	}
4610d63ce2bSvenki 
4620d63ce2bSvenki 	/*
4630d63ce2bSvenki 	 * See if it's in the cache already
4640d63ce2bSvenki 	 */
4650d63ce2bSvenki 	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) == 0) {
4660d63ce2bSvenki 		if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
4670d63ce2bSvenki 			return (-1);
4680d63ce2bSvenki 		(void) memcpy(*bitstrp, (const void *)val, *nbytes);
4690d63ce2bSvenki 		return (0);
4700d63ce2bSvenki 	}
4710d63ce2bSvenki 
4720d63ce2bSvenki 	/*
4730d63ce2bSvenki 	 * Fetch it from the agent and populate cache
4740d63ce2bSvenki 	 */
4750d63ce2bSvenki 	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
4760d63ce2bSvenki 	if (snmp_syserr)
4770d63ce2bSvenki 		*snmp_syserr = err;
4780d63ce2bSvenki 
4790d63ce2bSvenki 	/*
4800d63ce2bSvenki 	 * Retry lookup
4810d63ce2bSvenki 	 */
4820d63ce2bSvenki 	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) < 0)
4830d63ce2bSvenki 		return (-1);
4840d63ce2bSvenki 
4850d63ce2bSvenki 	if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
4860d63ce2bSvenki 		return (-1);
4870d63ce2bSvenki 	(void) memcpy(*bitstrp, (const void *)val, *nbytes);
4880d63ce2bSvenki 
4890d63ce2bSvenki 	return (0);
4900d63ce2bSvenki }
4910d63ce2bSvenki 
/*
 * snmp_get_nextrow() is similar in operation to SNMP_GETNEXT, but
 * only just. In particular, this is only expected to return the next
 * valid row number for the same object, not its value. Since we don't
 * have any other means, we use this to determine the number of rows
 * in the table (and the valid ones). This function returns 0 on success
 * and -1 on failure.
 *
 * On failure, if snmp_syserr is non-NULL, it is set to an errno-style
 * code describing the cause: EINVAL for bad arguments, ENOSPC when the
 * end of the MIB view is reached, ENOMEM if the reply OID could not be
 * converted to a string, ENOENT when the next object belongs to a
 * different table (i.e. there are no more rows for this prefix), or
 * whatever error fetch_next() reported.
 */
int
snmp_get_nextrow(picl_snmphdl_t hdl, char *prefix, int row, int *nextrow,
    int *snmp_syserr)
{
	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
	snmp_pdu_t *reply_pdu;
	pdu_varlist_t *vp;
	char	*nxt_oidstr;
	int	err = 0;

	if (smd == NULL || prefix == NULL || nextrow == NULL) {
		if (snmp_syserr)
			*snmp_syserr = EINVAL;
		return (-1);
	}

	/*
	 * The get_nextrow results should *never* go into any cache,
	 * since these relationships are dynamically discovered each time.
	 */
	if ((reply_pdu = fetch_next(smd, prefix, row, &err)) == NULL) {
		if (snmp_syserr)
			*snmp_syserr = err;
		return (-1);
	}

	/*
	 * We are not concerned about the "value" of the lexicographically
	 * next object; we only care about the name of that object and
	 * its row number (and whether such an object exists or not).
	 */
	vp = reply_pdu->vars;

	/*
	 * This indicates that we're at the end of the MIB view.
	 */
	if (vp == NULL || vp->name == NULL || vp->type == SNMP_NOSUCHOBJECT ||
	    vp->type == SNMP_NOSUCHINSTANCE || vp->type == SNMP_ENDOFMIBVIEW) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOSPC;
		return (-1);
	}

	/*
	 * need to be able to convert the OID
	 */
	if ((nxt_oidstr = oid_to_oidstr(vp->name, vp->name_len - 1)) == NULL) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOMEM;
		return (-1);
	}

	/*
	 * We're on to the next table.
	 */
	if (strcmp(nxt_oidstr, prefix) != 0) {
		free(nxt_oidstr);
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOENT;
		return (-1);
	}

	/*
	 * Ok, so we've got an oid that's simply the next valid row of the
	 * passed on object, return this row number.  The row number is the
	 * last sub-identifier of the returned OID.
	 */
	*nextrow = (vp->name)[vp->name_len-1];

	free(nxt_oidstr);
	snmp_free_pdu(reply_pdu);

	return (0);
}
5760d63ce2bSvenki 
5770d63ce2bSvenki /*
5780d63ce2bSvenki  * Request ids for snmp messages to the agent are sequenced here.
5790d63ce2bSvenki  */
5800d63ce2bSvenki int
snmp_get_reqid(void)5810d63ce2bSvenki snmp_get_reqid(void)
5820d63ce2bSvenki {
5830d63ce2bSvenki 	int	ret;
5840d63ce2bSvenki 
5850d63ce2bSvenki 	(void) mutex_lock(&snmp_reqid_lock);
5860d63ce2bSvenki 
5870d63ce2bSvenki 	ret = snmp_reqid++;
5880d63ce2bSvenki 
5890d63ce2bSvenki 	(void) mutex_unlock(&snmp_reqid_lock);
5900d63ce2bSvenki 
5910d63ce2bSvenki 	return (ret);
5920d63ce2bSvenki }
5930d63ce2bSvenki 
5940d63ce2bSvenki static int
lookup_int(char * prefix,int row,int * valp,int is_vol)5950d63ce2bSvenki lookup_int(char *prefix, int row, int *valp, int is_vol)
5960d63ce2bSvenki {
5970d63ce2bSvenki 	int32_t	*val_arr;
5980d63ce2bSvenki 	uint_t	nelem;
599817697f4SKelly Moyer 	int	now;
6000d63ce2bSvenki 	int	elapsed;
6010d63ce2bSvenki 
6020d63ce2bSvenki 	(void) mutex_lock(&mibcache_lock);
6030d63ce2bSvenki 
6040d63ce2bSvenki 	if (row >= n_mibcache_rows) {
6050d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
6060d63ce2bSvenki 		return (-1);
6070d63ce2bSvenki 	}
6080d63ce2bSvenki 
6090d63ce2bSvenki 	if (mibcache[row] == NULL) {
6100d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
6110d63ce2bSvenki 		return (-1);
6120d63ce2bSvenki 	}
6130d63ce2bSvenki 
6140d63ce2bSvenki 	/*
6150d63ce2bSvenki 	 * If this is a volatile property, we should be searching
6160d63ce2bSvenki 	 * for an integer-timestamp pair
6170d63ce2bSvenki 	 */
6180d63ce2bSvenki 	if (is_vol) {
6190d63ce2bSvenki 		if (nvlist_lookup_int32_array(mibcache[row], prefix,
6200d63ce2bSvenki 		    &val_arr, &nelem) != 0) {
6210d63ce2bSvenki 			(void) mutex_unlock(&mibcache_lock);
6220d63ce2bSvenki 			return (-1);
6230d63ce2bSvenki 		}
6240d63ce2bSvenki 		if (nelem != 2 || val_arr[1] < 0) {
6250d63ce2bSvenki 			(void) mutex_unlock(&mibcache_lock);
6260d63ce2bSvenki 			return (-1);
6270d63ce2bSvenki 		}
628817697f4SKelly Moyer 		now = GET_SCALED_HRTIME();
629817697f4SKelly Moyer 		elapsed = now - val_arr[1];
6300d63ce2bSvenki 		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
6310d63ce2bSvenki 			(void) mutex_unlock(&mibcache_lock);
6320d63ce2bSvenki 			return (-1);
6330d63ce2bSvenki 		}
6340d63ce2bSvenki 
6350d63ce2bSvenki 		*valp = (int)val_arr[0];
6360d63ce2bSvenki 	} else {
6370d63ce2bSvenki 		if (nvlist_lookup_int32(mibcache[row], prefix, valp) != 0) {
6380d63ce2bSvenki 			(void) mutex_unlock(&mibcache_lock);
6390d63ce2bSvenki 			return (-1);
6400d63ce2bSvenki 		}
6410d63ce2bSvenki 	}
6420d63ce2bSvenki 
6430d63ce2bSvenki 	(void) mutex_unlock(&mibcache_lock);
6440d63ce2bSvenki 
6450d63ce2bSvenki 	return (0);
6460d63ce2bSvenki }
6470d63ce2bSvenki 
6480d63ce2bSvenki static int
lookup_str(char * prefix,int row,char ** valp,int is_vol)6490d63ce2bSvenki lookup_str(char *prefix, int row, char **valp, int is_vol)
6500d63ce2bSvenki {
6510d63ce2bSvenki 	char	**val_arr;
6520d63ce2bSvenki 	uint_t	nelem;
653817697f4SKelly Moyer 	int	now;
6540d63ce2bSvenki 	int	elapsed;
6550d63ce2bSvenki 
6560d63ce2bSvenki 	(void) mutex_lock(&mibcache_lock);
6570d63ce2bSvenki 
6580d63ce2bSvenki 	if (row >= n_mibcache_rows) {
6590d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
6600d63ce2bSvenki 		return (-1);
6610d63ce2bSvenki 	}
6620d63ce2bSvenki 
6630d63ce2bSvenki 	if (mibcache[row] == NULL) {
6640d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
6650d63ce2bSvenki 		return (-1);
6660d63ce2bSvenki 	}
6670d63ce2bSvenki 
6680d63ce2bSvenki 	/*
6690d63ce2bSvenki 	 * If this is a volatile property, we should be searching
6700d63ce2bSvenki 	 * for a string-timestamp pair
6710d63ce2bSvenki 	 */
6720d63ce2bSvenki 	if (is_vol) {
6730d63ce2bSvenki 		if (nvlist_lookup_string_array(mibcache[row], prefix,
6740d63ce2bSvenki 		    &val_arr, &nelem) != 0) {
6750d63ce2bSvenki 			(void) mutex_unlock(&mibcache_lock);
6760d63ce2bSvenki 			return (-1);
6770d63ce2bSvenki 		}
6780d63ce2bSvenki 		if (nelem != 2 || atoi(val_arr[1]) <= 0) {
6790d63ce2bSvenki 			(void) mutex_unlock(&mibcache_lock);
6800d63ce2bSvenki 			return (-1);
6810d63ce2bSvenki 		}
682817697f4SKelly Moyer 		now = GET_SCALED_HRTIME();
683817697f4SKelly Moyer 		elapsed = now - atoi(val_arr[1]);
6840d63ce2bSvenki 		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
6850d63ce2bSvenki 			(void) mutex_unlock(&mibcache_lock);
6860d63ce2bSvenki 			return (-1);
6870d63ce2bSvenki 		}
6880d63ce2bSvenki 
6890d63ce2bSvenki 		*valp = val_arr[0];
6900d63ce2bSvenki 	} else {
691a1c54725Sfw 		if (nvlist_lookup_string(mibcache[row], prefix, valp) != 0) {
692a1c54725Sfw 			(void) mutex_unlock(&mibcache_lock);
693a1c54725Sfw 			return (-1);
694a1c54725Sfw 		}
6950d63ce2bSvenki 	}
6960d63ce2bSvenki 
6970d63ce2bSvenki 	(void) mutex_unlock(&mibcache_lock);
6980d63ce2bSvenki 
6990d63ce2bSvenki 	return (0);
7000d63ce2bSvenki }
7010d63ce2bSvenki 
7020d63ce2bSvenki static int
lookup_bitstr(char * prefix,int row,uchar_t ** valp,uint_t * nelem,int is_vol)7030d63ce2bSvenki lookup_bitstr(char *prefix, int row, uchar_t **valp, uint_t *nelem, int is_vol)
7040d63ce2bSvenki {
7050d63ce2bSvenki 	(void) mutex_lock(&mibcache_lock);
7060d63ce2bSvenki 
7070d63ce2bSvenki 	if (row >= n_mibcache_rows) {
7080d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
7090d63ce2bSvenki 		return (-1);
7100d63ce2bSvenki 	}
7110d63ce2bSvenki 
7120d63ce2bSvenki 	if (mibcache[row] == NULL) {
7130d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
7140d63ce2bSvenki 		return (-1);
7150d63ce2bSvenki 	}
7160d63ce2bSvenki 
7170d63ce2bSvenki 	/*
7180d63ce2bSvenki 	 * We don't support volatile bit string values yet. The nvlist
7190d63ce2bSvenki 	 * functions don't support bitstring arrays like they do charstring
7200d63ce2bSvenki 	 * arrays, so we would need to do things in a convoluted way,
7210d63ce2bSvenki 	 * probably by attaching the timestamp as part of the byte array
7220d63ce2bSvenki 	 * itself. However, the need for volatile bitstrings isn't there
7230d63ce2bSvenki 	 * yet, to justify the effort.
7240d63ce2bSvenki 	 */
7250d63ce2bSvenki 	if (is_vol) {
7260d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
7270d63ce2bSvenki 		return (-1);
7280d63ce2bSvenki 	}
7290d63ce2bSvenki 
7300d63ce2bSvenki 	if (nvlist_lookup_byte_array(mibcache[row], prefix, valp, nelem) != 0) {
7310d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
7320d63ce2bSvenki 		return (-1);
7330d63ce2bSvenki 	}
7340d63ce2bSvenki 
7350d63ce2bSvenki 	(void) mutex_unlock(&mibcache_lock);
7360d63ce2bSvenki 
7370d63ce2bSvenki 	return (0);
7380d63ce2bSvenki }
7390d63ce2bSvenki 
static int
search_oid_in_group(char *prefix, char *oidstrs, int n_oids)
{
	char	*cur;
	int	idx;

	/*
	 * oidstrs is a sequence of n_oids NUL-terminated strings laid
	 * out back to back; walk them looking for an exact match with
	 * prefix.  Returns 0 if found, -1 otherwise.
	 */
	for (idx = 0, cur = oidstrs; idx < n_oids;
	    idx++, cur += strlen(cur) + 1) {
		if (strcmp(cur, prefix) == 0)
			return (0);
	}

	return (-1);
}
7560d63ce2bSvenki 
7570d63ce2bSvenki static oidgroup_t *
locate_oid_group(struct picl_snmphdl * smd,char * prefix)7580d63ce2bSvenki locate_oid_group(struct picl_snmphdl *smd, char *prefix)
7590d63ce2bSvenki {
7600d63ce2bSvenki 	oidgroup_t	*grp;
7610d63ce2bSvenki 
7620d63ce2bSvenki 	if (smd == NULL)
7630d63ce2bSvenki 		return (NULL);
7640d63ce2bSvenki 
7650d63ce2bSvenki 	if (smd->group == NULL)
7660d63ce2bSvenki 		return (NULL);
7670d63ce2bSvenki 
7680d63ce2bSvenki 	for (grp = smd->group; grp; grp = grp->next) {
7690d63ce2bSvenki 		if (search_oid_in_group(prefix, grp->oidstrs,
7700d63ce2bSvenki 		    grp->n_oids) == 0) {
7710d63ce2bSvenki 			return (grp);
7720d63ce2bSvenki 		}
7730d63ce2bSvenki 	}
7740d63ce2bSvenki 
7750d63ce2bSvenki 	return (NULL);
7760d63ce2bSvenki }
7770d63ce2bSvenki 
7780d63ce2bSvenki static int
fetch_single_int(struct picl_snmphdl * smd,char * prefix,int row,int * ival,int * snmp_syserr)7790d63ce2bSvenki fetch_single_int(struct picl_snmphdl *smd, char *prefix, int row, int *ival,
7800d63ce2bSvenki     int *snmp_syserr)
7810d63ce2bSvenki {
7820d63ce2bSvenki 	snmp_pdu_t *reply_pdu;
7830d63ce2bSvenki 	pdu_varlist_t *vp;
7840d63ce2bSvenki 
7850d63ce2bSvenki 	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
7860d63ce2bSvenki 		return (-1);
7870d63ce2bSvenki 
7880d63ce2bSvenki 	/*
7890d63ce2bSvenki 	 * Note that we don't make any distinction between unsigned int
7900d63ce2bSvenki 	 * value and signed int value at this point, since we provide
7910d63ce2bSvenki 	 * only snmp_get_int() at the higher level. While it is possible
7920d63ce2bSvenki 	 * to provide an entirely separate interface such as snmp_get_uint(),
7930d63ce2bSvenki 	 * that's quite unnecessary, because we don't do any interpretation
7940d63ce2bSvenki 	 * of the received value. Besides, the sizes of int and uint are
7950d63ce2bSvenki 	 * the same and the sizes of all pointers are the same (so val.iptr
7960d63ce2bSvenki 	 * would be the same as val.uiptr in pdu_varlist_t). If/when we
7970d63ce2bSvenki 	 * violate any of these assumptions, it will be time to add
7980d63ce2bSvenki 	 * snmp_get_uint().
7990d63ce2bSvenki 	 */
8000d63ce2bSvenki 	vp = reply_pdu->vars;
8010d63ce2bSvenki 	if (vp == NULL || vp->val.iptr == NULL) {
8020d63ce2bSvenki 		snmp_free_pdu(reply_pdu);
8030d63ce2bSvenki 		return (-1);
8040d63ce2bSvenki 	}
8050d63ce2bSvenki 
8060d63ce2bSvenki 	*ival = *(vp->val.iptr);
8070d63ce2bSvenki 
8080d63ce2bSvenki 	snmp_free_pdu(reply_pdu);
8090d63ce2bSvenki 
8100d63ce2bSvenki 	return (0);
8110d63ce2bSvenki }
8120d63ce2bSvenki 
8130d63ce2bSvenki static int
fetch_single_str(struct picl_snmphdl * smd,char * prefix,int row,char ** valp,int * snmp_syserr)8140d63ce2bSvenki fetch_single_str(struct picl_snmphdl *smd, char *prefix, int row, char **valp,
8150d63ce2bSvenki     int *snmp_syserr)
8160d63ce2bSvenki {
8170d63ce2bSvenki 	snmp_pdu_t *reply_pdu;
8180d63ce2bSvenki 	pdu_varlist_t *vp;
8190d63ce2bSvenki 
8200d63ce2bSvenki 	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
8210d63ce2bSvenki 		return (-1);
8220d63ce2bSvenki 
8230d63ce2bSvenki 	vp = reply_pdu->vars;
8240d63ce2bSvenki 	if (vp == NULL || vp->val.str == NULL) {
8250d63ce2bSvenki 		snmp_free_pdu(reply_pdu);
8260d63ce2bSvenki 		return (-1);
8270d63ce2bSvenki 	}
8280d63ce2bSvenki 
8290d63ce2bSvenki 	*valp = strdup((const char *)(vp->val.str));
8300d63ce2bSvenki 
8310d63ce2bSvenki 	snmp_free_pdu(reply_pdu);
8320d63ce2bSvenki 
8330d63ce2bSvenki 	return (0);
8340d63ce2bSvenki }
8350d63ce2bSvenki 
8360d63ce2bSvenki static int
fetch_single_bitstr(struct picl_snmphdl * smd,char * prefix,int row,uchar_t ** valp,uint_t * nelem,int * snmp_syserr)8370d63ce2bSvenki fetch_single_bitstr(struct picl_snmphdl *smd, char *prefix, int row,
8380d63ce2bSvenki     uchar_t **valp, uint_t *nelem, int *snmp_syserr)
8390d63ce2bSvenki {
8400d63ce2bSvenki 	snmp_pdu_t *reply_pdu;
8410d63ce2bSvenki 	pdu_varlist_t *vp;
8420d63ce2bSvenki 
8430d63ce2bSvenki 	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
8440d63ce2bSvenki 		return (-1);
8450d63ce2bSvenki 
8460d63ce2bSvenki 	vp = reply_pdu->vars;
8470d63ce2bSvenki 	if (vp == NULL || vp->val.str == NULL) {
8480d63ce2bSvenki 		snmp_free_pdu(reply_pdu);
8490d63ce2bSvenki 		return (-1);
8500d63ce2bSvenki 	}
8510d63ce2bSvenki 
8520d63ce2bSvenki 	if ((*valp = (uchar_t *)calloc(vp->val_len, 1)) == NULL) {
8530d63ce2bSvenki 		snmp_free_pdu(reply_pdu);
8540d63ce2bSvenki 		return (-1);
8550d63ce2bSvenki 	}
8560d63ce2bSvenki 
8570d63ce2bSvenki 	*nelem = vp->val_len;
8580d63ce2bSvenki 	(void) memcpy(*valp, (const void *)(vp->val.str),
8590d63ce2bSvenki 	    (size_t)(vp->val_len));
8600d63ce2bSvenki 
8610d63ce2bSvenki 	snmp_free_pdu(reply_pdu);
8620d63ce2bSvenki 
8630d63ce2bSvenki 	return (0);
8640d63ce2bSvenki }
8650d63ce2bSvenki 
8660d63ce2bSvenki static snmp_pdu_t *
fetch_single(struct picl_snmphdl * smd,char * prefix,int row,int * snmp_syserr)8670d63ce2bSvenki fetch_single(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
8680d63ce2bSvenki {
8690d63ce2bSvenki 	snmp_pdu_t	*pdu, *reply_pdu;
8700d63ce2bSvenki 
8710d63ce2bSvenki 	if ((pdu = snmp_create_pdu(SNMP_MSG_GET, 0, prefix, 1, row)) == NULL)
8720d63ce2bSvenki 		return (NULL);
8730d63ce2bSvenki 
8740d63ce2bSvenki 	if (snmp_make_packet(pdu) < 0) {
8750d63ce2bSvenki 		snmp_free_pdu(pdu);
8760d63ce2bSvenki 		return (NULL);
8770d63ce2bSvenki 	}
8780d63ce2bSvenki 
8790d63ce2bSvenki 	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
8800d63ce2bSvenki 		snmp_free_pdu(pdu);
8810d63ce2bSvenki 		return (NULL);
8820d63ce2bSvenki 	}
8830d63ce2bSvenki 
8840d63ce2bSvenki 	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
8850d63ce2bSvenki 		snmp_free_pdu(pdu);
8860d63ce2bSvenki 		return (NULL);
8870d63ce2bSvenki 	}
8880d63ce2bSvenki 
8890d63ce2bSvenki 	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
8900d63ce2bSvenki 	    pdu->reply_pktsz);
8910d63ce2bSvenki 
8920d63ce2bSvenki 	snmp_free_pdu(pdu);
8930d63ce2bSvenki 
8940d63ce2bSvenki 	return (reply_pdu);
8950d63ce2bSvenki }
8960d63ce2bSvenki 
/*
 * Fetch a group of objects with a single GETBULK request and populate
 * the mibcache with whatever comes back.  This is best-effort: all
 * failures are silent (void return), with the optional snmp_syserr as
 * the only error feedback.  For volatile (is_vol) groups, a refresh
 * job is also queued so the cached values can be refetched before
 * they expire.
 */
static void
fetch_bulk(struct picl_snmphdl *smd, char *oidstrs, int n_oids,
    int row, int is_vol, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;
	int		max_reps;

	/*
	 * If we're fetching volatile properties using BULKGET, don't
	 * venture to get multiple rows (passing max_reps=0 will make
	 * snmp_create_pdu() fetch SNMP_DEF_MAX_REPETITIONS rows)
	 */
	max_reps = is_vol ? 1 : 0;

	pdu = snmp_create_pdu(SNMP_MSG_GETBULK, max_reps, oidstrs, n_oids, row);
	if (pdu == NULL)
		return;

	/*
	 * Make an ASN.1 encoded packet from the PDU information
	 */
	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Send the request packet to the agent
	 */
	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Receive response from the agent into the reply packet buffer
	 * in the request PDU
	 */
	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Parse the reply, validate the response and create a
	 * reply-PDU out of the information. Populate the mibcache
	 * with the received values.
	 */
	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);
	if (reply_pdu) {
		if (reply_pdu->errstat == SNMP_ERR_NOERROR) {
			if (is_vol) {
				/* Add a job to the cache refresh work queue */
				(void) refreshq_add_job(smd, oidstrs, n_oids,
				    row);
			}

			mibcache_populate(reply_pdu, is_vol);
		}

		snmp_free_pdu(reply_pdu);
	}

	snmp_free_pdu(pdu);
}
9630d63ce2bSvenki 
9640d63ce2bSvenki static snmp_pdu_t *
fetch_next(struct picl_snmphdl * smd,char * prefix,int row,int * snmp_syserr)9650d63ce2bSvenki fetch_next(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
9660d63ce2bSvenki {
9670d63ce2bSvenki 	snmp_pdu_t	*pdu, *reply_pdu;
9680d63ce2bSvenki 
9690d63ce2bSvenki 	pdu = snmp_create_pdu(SNMP_MSG_GETNEXT, 0, prefix, 1, row);
9700d63ce2bSvenki 	if (pdu == NULL)
9710d63ce2bSvenki 		return (NULL);
9720d63ce2bSvenki 
9730d63ce2bSvenki 	if (snmp_make_packet(pdu) < 0) {
9740d63ce2bSvenki 		snmp_free_pdu(pdu);
9750d63ce2bSvenki 		return (NULL);
9760d63ce2bSvenki 	}
9770d63ce2bSvenki 
9780d63ce2bSvenki 	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
9790d63ce2bSvenki 		snmp_free_pdu(pdu);
9800d63ce2bSvenki 		return (NULL);
9810d63ce2bSvenki 	}
9820d63ce2bSvenki 
9830d63ce2bSvenki 	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
9840d63ce2bSvenki 		snmp_free_pdu(pdu);
9850d63ce2bSvenki 		return (NULL);
9860d63ce2bSvenki 	}
9870d63ce2bSvenki 
9880d63ce2bSvenki 	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
9890d63ce2bSvenki 	    pdu->reply_pktsz);
9900d63ce2bSvenki 
9910d63ce2bSvenki 	snmp_free_pdu(pdu);
9920d63ce2bSvenki 
9930d63ce2bSvenki 	return (reply_pdu);
9940d63ce2bSvenki }
9950d63ce2bSvenki 
9960d63ce2bSvenki static int
snmp_send_request(struct picl_snmphdl * smd,snmp_pdu_t * pdu,int * snmp_syserr)9970d63ce2bSvenki snmp_send_request(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
9980d63ce2bSvenki {
9990d63ce2bSvenki 	extern int	errno;
10000d63ce2bSvenki #ifdef USE_SOCKETS
10010d63ce2bSvenki 	int		ret;
10020d63ce2bSvenki #endif
10030d63ce2bSvenki 
10040d63ce2bSvenki 	if (smd->fd < 0)
10050d63ce2bSvenki 		return (-1);
10060d63ce2bSvenki 
10070d63ce2bSvenki 	if (pdu == NULL || pdu->req_pkt == NULL)
10080d63ce2bSvenki 		return (-1);
10090d63ce2bSvenki 
10100d63ce2bSvenki #ifdef USE_SOCKETS
10110d63ce2bSvenki 	ret = -1;
10120d63ce2bSvenki 	while (ret < 0) {
10130d63ce2bSvenki 		ret = sendto(smd->fd, pdu->req_pkt, pdu->req_pktsz, 0,
10140d63ce2bSvenki 		    (struct sockaddr *)&smd->agent_addr,
10150d63ce2bSvenki 		    sizeof (struct sockaddr));
10160d63ce2bSvenki 		if (ret < 0 && errno != EINTR) {
10170d63ce2bSvenki 			return (-1);
10180d63ce2bSvenki 		}
10190d63ce2bSvenki 	}
10200d63ce2bSvenki #else
10210d63ce2bSvenki 	if (write(smd->fd, pdu->req_pkt, pdu->req_pktsz) < 0) {
10220d63ce2bSvenki 		if (snmp_syserr)
10230d63ce2bSvenki 			*snmp_syserr = errno;
10240d63ce2bSvenki 		return (-1);
10250d63ce2bSvenki 	}
10260d63ce2bSvenki #endif
10270d63ce2bSvenki 
10280d63ce2bSvenki 	return (0);
10290d63ce2bSvenki }
10300d63ce2bSvenki 
10310d63ce2bSvenki static int
snmp_recv_reply(struct picl_snmphdl * smd,snmp_pdu_t * pdu,int * snmp_syserr)10320d63ce2bSvenki snmp_recv_reply(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
10330d63ce2bSvenki {
10340d63ce2bSvenki 	struct dssnmp_info	snmp_info;
10350d63ce2bSvenki 	size_t	pktsz;
10360d63ce2bSvenki 	uchar_t	*pkt;
10370d63ce2bSvenki 	extern int errno;
10380d63ce2bSvenki #ifdef USE_SOCKETS
1039*0409f346SPeter Tribble 	struct sockaddr_in	from;
10400d63ce2bSvenki 	int	fromlen;
10410d63ce2bSvenki 	ssize_t	msgsz;
10420d63ce2bSvenki #endif
10430d63ce2bSvenki 
10440d63ce2bSvenki 	if (smd->fd < 0 || pdu == NULL)
10450d63ce2bSvenki 		return (-1);
10460d63ce2bSvenki 
10470d63ce2bSvenki #ifdef USE_SOCKETS
10480d63ce2bSvenki 	if ((pkt = (uchar_t *)calloc(1, SNMP_MAX_RECV_PKTSZ)) == NULL)
10490d63ce2bSvenki 		return (-1);
10500d63ce2bSvenki 
10510d63ce2bSvenki 	fromlen = sizeof (struct sockaddr_in);
10520d63ce2bSvenki 
10530d63ce2bSvenki 	msgsz = recvfrom(smd->fd, pkt, SNMP_MAX_RECV_PKTSZ, 0,
10540d63ce2bSvenki 	    (struct sockaddr *)&from, &fromlen);
10550d63ce2bSvenki 	if (msgsz  < 0 || msgsz >= SNMP_MAX_RECV_PKTSZ) {
10560d63ce2bSvenki 		free(pkt);
10570d63ce2bSvenki 		return (-1);
10580d63ce2bSvenki 	}
10590d63ce2bSvenki 
10600d63ce2bSvenki 	pktsz = (size_t)msgsz;
10610d63ce2bSvenki #else
10620d63ce2bSvenki 	/*
10630d63ce2bSvenki 	 * The ioctl will block until we have snmp data available
10640d63ce2bSvenki 	 */
10650d63ce2bSvenki 	if (ioctl(smd->fd, DSSNMP_GETINFO, &snmp_info) < 0) {
10660d63ce2bSvenki 		if (snmp_syserr)
10670d63ce2bSvenki 			*snmp_syserr = errno;
10680d63ce2bSvenki 		return (-1);
10690d63ce2bSvenki 	}
10700d63ce2bSvenki 
10710d63ce2bSvenki 	pktsz = snmp_info.size;
10720d63ce2bSvenki 	if ((pkt = (uchar_t *)calloc(1, pktsz)) == NULL)
10730d63ce2bSvenki 		return (-1);
10740d63ce2bSvenki 
10750d63ce2bSvenki 	if (read(smd->fd, pkt, pktsz) < 0) {
10760d63ce2bSvenki 		free(pkt);
10770d63ce2bSvenki 		if (snmp_syserr)
10780d63ce2bSvenki 			*snmp_syserr = errno;
10790d63ce2bSvenki 		return (-1);
10800d63ce2bSvenki 	}
10810d63ce2bSvenki #endif
10820d63ce2bSvenki 
10830d63ce2bSvenki 	pdu->reply_pkt = pkt;
10840d63ce2bSvenki 	pdu->reply_pktsz = pktsz;
10850d63ce2bSvenki 
10860d63ce2bSvenki 	return (0);
10870d63ce2bSvenki }
10880d63ce2bSvenki 
10890d63ce2bSvenki static int
mibcache_realloc(int hint)10900d63ce2bSvenki mibcache_realloc(int hint)
10910d63ce2bSvenki {
10920d63ce2bSvenki 	uint_t		count = (uint_t)hint;
10930d63ce2bSvenki 	nvlist_t	**p;
10940d63ce2bSvenki 
10950d63ce2bSvenki 	if (hint < 0)
10960d63ce2bSvenki 		return (-1);
10970d63ce2bSvenki 
10980d63ce2bSvenki 	(void) mutex_lock(&mibcache_lock);
10990d63ce2bSvenki 
11000d63ce2bSvenki 	if (hint < n_mibcache_rows) {
11010d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
11020d63ce2bSvenki 		return (0);
11030d63ce2bSvenki 	}
11040d63ce2bSvenki 
11050d63ce2bSvenki 	count =  ((count >> MIBCACHE_BLK_SHIFT) + 1) << MIBCACHE_BLK_SHIFT;
11060d63ce2bSvenki 
11070d63ce2bSvenki 	p = (nvlist_t **)calloc(count, sizeof (nvlist_t *));
11080d63ce2bSvenki 	if (p == NULL) {
11090d63ce2bSvenki 		(void) mutex_unlock(&mibcache_lock);
11100d63ce2bSvenki 		return (-1);
11110d63ce2bSvenki 	}
11120d63ce2bSvenki 
11130d63ce2bSvenki 	if (mibcache) {
11140d63ce2bSvenki 		(void) memcpy((void *) p, (void *) mibcache,
11150d63ce2bSvenki 		    n_mibcache_rows * sizeof (nvlist_t *));
11160d63ce2bSvenki 		free((void *) mibcache);
11170d63ce2bSvenki 	}
11180d63ce2bSvenki 
11190d63ce2bSvenki 	mibcache = p;
11200d63ce2bSvenki 	n_mibcache_rows = count;
11210d63ce2bSvenki 
11220d63ce2bSvenki 	(void) mutex_unlock(&mibcache_lock);
11230d63ce2bSvenki 
11240d63ce2bSvenki 	return (0);
11250d63ce2bSvenki }
11260d63ce2bSvenki 
11270d63ce2bSvenki 
/*
 * Scan each variable in the returned PDU's bindings and populate
 * the cache appropriately.  Only INTEGER, OCTET STRING and BIT STRING
 * varbinds are cached.  The last subid of each varbind's OID selects
 * the mibcache row; the remaining OID prefix, rendered as a dotted
 * string, becomes the nvlist key within that row.  Failures to cache
 * an individual varbind are silently skipped.
 */
static void
mibcache_populate(snmp_pdu_t *pdu, int is_vol)
{
	pdu_varlist_t	*vp;
	int		row, ret;
	char		*oidstr;
	int		tod;	/* in secs */
	char		tod_str[MAX_INT_LEN];
	int		ival_arr[2];
	char		*sval_arr[2];

	/*
	 * If we're populating volatile properties, we also store a
	 * timestamp with each property value. When we lookup, we check the
	 * current time against this timestamp to determine if we need to
	 * refetch the value or not (refetch if it has been in for far too
	 * long).
	 */

	if (is_vol) {
		tod = GET_SCALED_HRTIME();

		tod_str[0] = 0;
		(void) snprintf(tod_str, MAX_INT_LEN, "%d", tod);

		/* Element [1] of each cached pair holds the timestamp */
		ival_arr[1] = tod;
		sval_arr[1] = (char *)tod_str;
	}

	for (vp = pdu->vars; vp; vp = vp->nextvar) {
		if (vp->type != ASN_INTEGER && vp->type != ASN_OCTET_STR &&
		    vp->type != ASN_BIT_STR) {
			continue;
		}

		if (vp->name == NULL || vp->val.str == NULL)
			continue;

		/* The leaf subid is the row index within the cache */
		row = (vp->name)[vp->name_len-1];

		(void) mutex_lock(&mibcache_lock);

		/*
		 * Grow the cache if this row doesn't fit yet.  The lock
		 * must be dropped around mibcache_realloc(), which takes
		 * it itself, and is then reacquired.
		 */
		if (row >= n_mibcache_rows) {
			(void) mutex_unlock(&mibcache_lock);
			if (mibcache_realloc(row) < 0)
				continue;
			(void) mutex_lock(&mibcache_lock);
		}
		ret = 0;
		if (mibcache[row] == NULL)
			ret = nvlist_alloc(&mibcache[row], NV_UNIQUE_NAME, 0);

		(void) mutex_unlock(&mibcache_lock);

		if (ret != 0)
			continue;

		/*
		 * Convert the standard OID form into an oid string that
		 * we can use as the key to lookup. Since we only search
		 * by the prefix (mibcache is really an array of nvlist_t
		 * pointers), ignore the leaf subid.
		 */
		oidstr = oid_to_oidstr(vp->name, vp->name_len - 1);
		if (oidstr == NULL)
			continue;

		(void) mutex_lock(&mibcache_lock);

		if (vp->type == ASN_INTEGER) {
			if (is_vol) {
				/* Volatile: store a (value, timestamp) pair */
				ival_arr[0] = *(vp->val.iptr);
				(void) nvlist_add_int32_array(mibcache[row],
				    oidstr, ival_arr, 2);
			} else {
				(void) nvlist_add_int32(mibcache[row],
				    oidstr, *(vp->val.iptr));
			}

		} else if (vp->type == ASN_OCTET_STR) {
			if (is_vol) {
				/* Volatile: store a (value, timestamp) pair */
				sval_arr[0] = (char *)vp->val.str;
				(void) nvlist_add_string_array(mibcache[row],
				    oidstr, sval_arr, 2);
			} else {
				(void) nvlist_add_string(mibcache[row],
				    oidstr, (const char *)(vp->val.str));
			}
		} else if (vp->type == ASN_BIT_STR) {
			/*
			 * We don't support yet bit string objects that are
			 * volatile values.
			 */
			if (!is_vol) {
				(void) nvlist_add_byte_array(mibcache[row],
				    oidstr, (uchar_t *)(vp->val.str),
				    (uint_t)vp->val_len);
			}
		}
		(void) mutex_unlock(&mibcache_lock);

		free(oidstr);
	}
}
12360d63ce2bSvenki 
12370d63ce2bSvenki static char *
oid_to_oidstr(oid * objid,size_t n_subids)12380d63ce2bSvenki oid_to_oidstr(oid *objid, size_t n_subids)
12390d63ce2bSvenki {
12400d63ce2bSvenki 	char	*oidstr;
12410d63ce2bSvenki 	char	subid_str[MAX_INT_LEN];
12420d63ce2bSvenki 	int	i, isize;
12434c5e0fdeSvivek 	size_t	oidstr_sz;
12440d63ce2bSvenki 
12450d63ce2bSvenki 	/*
12460d63ce2bSvenki 	 * ugly, but for now this will have to do.
12470d63ce2bSvenki 	 */
12484c5e0fdeSvivek 	oidstr_sz = sizeof (subid_str) * n_subids;
12494c5e0fdeSvivek 	oidstr = calloc(1, oidstr_sz);
12500d63ce2bSvenki 
12510d63ce2bSvenki 	for (i = 0; i < n_subids; i++) {
12524c5e0fdeSvivek 		(void) memset(subid_str, 0, sizeof (subid_str));
12534c5e0fdeSvivek 		isize = snprintf(subid_str, sizeof (subid_str), "%d",
1254a1c54725Sfw 		    objid[i]);
12554c5e0fdeSvivek 		if (isize >= sizeof (subid_str))
12560d63ce2bSvenki 			return (NULL);
12570d63ce2bSvenki 
12584c5e0fdeSvivek 		(void) strlcat(oidstr, subid_str, oidstr_sz);
12590d63ce2bSvenki 		if (i < (n_subids - 1))
12604c5e0fdeSvivek 			(void) strlcat(oidstr, ".", oidstr_sz);
12610d63ce2bSvenki 	}
12620d63ce2bSvenki 
12630d63ce2bSvenki 	return (oidstr);
12640d63ce2bSvenki }
1265817697f4SKelly Moyer 
1266817697f4SKelly Moyer /*
1267817697f4SKelly Moyer  * Expand the refreshq to hold more cache refresh jobs.  Caller must already
1268817697f4SKelly Moyer  * hold refreshq_lock mutex.  Every expansion of the refreshq will add
1269817697f4SKelly Moyer  * REFRESH_BLK_SZ job slots, rather than expanding by one slot every time more
1270817697f4SKelly Moyer  * space is needed.
1271817697f4SKelly Moyer  */
1272817697f4SKelly Moyer static int
refreshq_realloc(int hint)1273817697f4SKelly Moyer refreshq_realloc(int hint)
1274817697f4SKelly Moyer {
1275817697f4SKelly Moyer 	uint_t		count = (uint_t)hint;
1276817697f4SKelly Moyer 	refreshq_job_t	*p;
1277817697f4SKelly Moyer 
1278817697f4SKelly Moyer 	if (hint < 0)
1279817697f4SKelly Moyer 		return (-1);
1280817697f4SKelly Moyer 
1281817697f4SKelly Moyer 	if (hint < n_refreshq_slots) {
1282817697f4SKelly Moyer 		return (0);
1283817697f4SKelly Moyer 	}
1284817697f4SKelly Moyer 
1285817697f4SKelly Moyer 	/* Round count up to next multiple of REFRESHQ_BLK_SHIFT */
1286817697f4SKelly Moyer 	count =  ((count >> REFRESHQ_BLK_SHIFT) + 1) << REFRESHQ_BLK_SHIFT;
1287817697f4SKelly Moyer 
1288817697f4SKelly Moyer 	p = (refreshq_job_t *)calloc(count, sizeof (refreshq_job_t));
1289817697f4SKelly Moyer 	if (p == NULL) {
1290817697f4SKelly Moyer 		return (-1);
1291817697f4SKelly Moyer 	}
1292817697f4SKelly Moyer 
1293817697f4SKelly Moyer 	if (refreshq) {
1294817697f4SKelly Moyer 		if (n_refreshq_jobs == 0) {
1295817697f4SKelly Moyer 			/* Simple case, nothing to copy */
1296817697f4SKelly Moyer 			refreshq_next_job = 0;
1297817697f4SKelly Moyer 			refreshq_next_slot = 0;
1298817697f4SKelly Moyer 		} else if (refreshq_next_slot > refreshq_next_job) {
1299817697f4SKelly Moyer 			/* Simple case, single copy preserves everything */
1300817697f4SKelly Moyer 			(void) memcpy((void *) p,
1301817697f4SKelly Moyer 			    (void *) &(refreshq[refreshq_next_job]),
1302817697f4SKelly Moyer 			    n_refreshq_jobs * sizeof (refreshq_job_t));
1303817697f4SKelly Moyer 		} else {
1304817697f4SKelly Moyer 			/*
1305817697f4SKelly Moyer 			 * Complex case.  The jobs in the refresh queue wrap
1306817697f4SKelly Moyer 			 * around the end of the array in which they are stored.
1307817697f4SKelly Moyer 			 * To preserve chronological order in the new allocated
1308817697f4SKelly Moyer 			 * array, we need to copy the jobs at the end of the old
1309817697f4SKelly Moyer 			 * array to the beginning of the new one and place the
1310817697f4SKelly Moyer 			 * jobs from the beginning of the old array after them.
1311817697f4SKelly Moyer 			 */
1312817697f4SKelly Moyer 			uint_t tail_jobs, head_jobs;
1313817697f4SKelly Moyer 
1314817697f4SKelly Moyer 			tail_jobs = n_refreshq_slots - refreshq_next_job;
1315817697f4SKelly Moyer 			head_jobs = n_refreshq_jobs - tail_jobs;
1316817697f4SKelly Moyer 
1317817697f4SKelly Moyer 			/* Copy the jobs from the end of the old array */
1318817697f4SKelly Moyer 			(void) memcpy((void *) p,
1319817697f4SKelly Moyer 			    (void *) &(refreshq[refreshq_next_job]),
1320817697f4SKelly Moyer 			    tail_jobs * sizeof (refreshq_job_t));
1321817697f4SKelly Moyer 
1322817697f4SKelly Moyer 			/* Copy the jobs from the beginning of the old array */
1323817697f4SKelly Moyer 			(void) memcpy((void *) &(p[tail_jobs]),
1324c8268b2cSKelly Moyer 			    (void *) &(refreshq[0]),
1325817697f4SKelly Moyer 			    head_jobs * sizeof (refreshq_job_t));
1326817697f4SKelly Moyer 
1327817697f4SKelly Moyer 			/* update the job and slot indices to match */
1328817697f4SKelly Moyer 			refreshq_next_job = 0;
1329817697f4SKelly Moyer 			refreshq_next_slot = n_refreshq_jobs;
1330817697f4SKelly Moyer 		}
1331817697f4SKelly Moyer 		free((void *) refreshq);
1332817697f4SKelly Moyer 	} else {
1333817697f4SKelly Moyer 		/* First initialization */
1334817697f4SKelly Moyer 		refreshq_next_job = 0;
1335817697f4SKelly Moyer 		refreshq_next_slot = 0;
1336817697f4SKelly Moyer 		n_refreshq_jobs = 0;
1337817697f4SKelly Moyer 	}
1338817697f4SKelly Moyer 
1339817697f4SKelly Moyer 	refreshq = p;
1340817697f4SKelly Moyer 	n_refreshq_slots = count;
1341817697f4SKelly Moyer 
1342817697f4SKelly Moyer 	return (0);
1343817697f4SKelly Moyer }
1344817697f4SKelly Moyer 
/*
 * Add a new job to the refreshq.  If there aren't any open slots, attempt to
 * expand the queue first.  Return -1 if unable to add the job to the work
 * queue, or 0 if the job was added OR if an existing job with the same
 * parameters is already pending.
 *
 * The job stores the oidstrs pointer itself (no copy is made), and
 * duplicate detection compares that pointer along with row/n_oids, so
 * callers must pass the group's long-lived oidstrs storage.
 */
static int
refreshq_add_job(struct picl_snmphdl *smd, char *oidstrs, int n_oids, int row)
{
	int	i;
	int	job;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * Can't do anything without a queue.  Either the client never
	 * initialized the refresh queue or the initial memory allocation
	 * failed.
	 */
	if (refreshq == NULL) {
		(void) mutex_unlock(&refreshq_lock);
		return (-1);
	}

	/*
	 * If there is already a job pending with the same parameters as the job
	 * we have been asked to add, we apparently let an entry expire and it
	 * is now being reloaded.  Rather than add another job for the same
	 * entry, we skip adding the new job and let the existing job address
	 * it.
	 */
	for (i = 0, job = refreshq_next_job; i < n_refreshq_jobs; i++,
	    job = (job + 1) % n_refreshq_slots) {
		if ((refreshq[job].row == row) &&
		    (refreshq[job].n_oids == n_oids) &&
		    (refreshq[job].oidstrs == oidstrs)) {
			(void) mutex_unlock(&refreshq_lock);
			return (0);
		}
	}


	/*
	 * If the queue is full, we need to expand it
	 */
	if (n_refreshq_jobs == n_refreshq_slots) {
		if (refreshq_realloc(n_refreshq_slots + 1) < 0) {
			/*
			 * Can't expand the job queue, so we drop this job on
			 * the floor.  No data is lost... we just allow some
			 * data in the mibcache to expire.
			 */
			(void) mutex_unlock(&refreshq_lock);
			return (-1);
		}
	}

	/*
	 * There is room in the queue, so add the new job.  We are actually
	 * taking a timestamp for this job that is slightly earlier than when
	 * the mibcache entry will be updated, but since we're trying to update
	 * the mibcache entry before it expires anyway, the earlier timestamp
	 * here is acceptable.
	 */
	refreshq[refreshq_next_slot].smd = smd;
	refreshq[refreshq_next_slot].oidstrs = oidstrs;
	refreshq[refreshq_next_slot].n_oids = n_oids;
	refreshq[refreshq_next_slot].row = row;
	refreshq[refreshq_next_slot].last_fetch_time = GET_SCALED_HRTIME();

	/*
	 * Update queue management variables
	 */
	n_refreshq_jobs += 1;
	refreshq_next_slot = (refreshq_next_slot + 1) % n_refreshq_slots;

	(void) mutex_unlock(&refreshq_lock);

	return (0);
}
1425817697f4SKelly Moyer 
1426817697f4SKelly Moyer /*
1427817697f4SKelly Moyer  * Almost all of the refresh code remains dormant unless specifically
1428817697f4SKelly Moyer  * initialized by a client (the exception being that fetch_bulk() will still
1429817697f4SKelly Moyer  * call refreshq_add_job(), but the latter will return without doing anything).
1430817697f4SKelly Moyer  */
1431817697f4SKelly Moyer int
snmp_refresh_init(void)1432817697f4SKelly Moyer snmp_refresh_init(void)
1433817697f4SKelly Moyer {
1434817697f4SKelly Moyer 	int ret;
1435817697f4SKelly Moyer 
1436817697f4SKelly Moyer 	(void) mutex_lock(&refreshq_lock);
1437817697f4SKelly Moyer 
1438817697f4SKelly Moyer 	ret = refreshq_realloc(0);
1439817697f4SKelly Moyer 
1440817697f4SKelly Moyer 	(void) mutex_unlock(&refreshq_lock);
1441817697f4SKelly Moyer 
1442817697f4SKelly Moyer 	return (ret);
1443817697f4SKelly Moyer }
1444817697f4SKelly Moyer 
1445817697f4SKelly Moyer /*
1446817697f4SKelly Moyer  * If the client is going away, we don't want to keep doing refresh work, so
1447817697f4SKelly Moyer  * clean everything up.
1448817697f4SKelly Moyer  */
1449817697f4SKelly Moyer void
snmp_refresh_fini(void)1450817697f4SKelly Moyer snmp_refresh_fini(void)
1451817697f4SKelly Moyer {
1452817697f4SKelly Moyer 	(void) mutex_lock(&refreshq_lock);
1453817697f4SKelly Moyer 
1454817697f4SKelly Moyer 	n_refreshq_jobs = 0;
1455817697f4SKelly Moyer 	n_refreshq_slots = 0;
1456817697f4SKelly Moyer 	refreshq_next_job = 0;
1457817697f4SKelly Moyer 	refreshq_next_slot = 0;
1458817697f4SKelly Moyer 	free(refreshq);
1459817697f4SKelly Moyer 	refreshq = NULL;
1460817697f4SKelly Moyer 
1461817697f4SKelly Moyer 	(void) mutex_unlock(&refreshq_lock);
1462817697f4SKelly Moyer }
1463817697f4SKelly Moyer 
1464817697f4SKelly Moyer /*
1465817697f4SKelly Moyer  * Return the number of seconds remaining before the mibcache entry associated
1466817697f4SKelly Moyer  * with the next job in the queue will expire.  Note that this requires
1467817697f4SKelly Moyer  * reversing the scaling normally done on hrtime values.  (The need for scaling
1468817697f4SKelly Moyer  * is purely internal, and should be hidden from clients.)  If there are no jobs
1469817697f4SKelly Moyer  * in the queue, return -1.  If the next job has already expired, return 0.
1470817697f4SKelly Moyer  */
1471817697f4SKelly Moyer int
snmp_refresh_get_next_expiration(void)1472817697f4SKelly Moyer snmp_refresh_get_next_expiration(void)
1473817697f4SKelly Moyer {
1474817697f4SKelly Moyer 	int ret;
1475817697f4SKelly Moyer 	int elapsed;
1476817697f4SKelly Moyer 
1477817697f4SKelly Moyer 	(void) mutex_lock(&refreshq_lock);
1478817697f4SKelly Moyer 
1479817697f4SKelly Moyer 	if (n_refreshq_jobs == 0) {
1480817697f4SKelly Moyer 		ret = -1;
1481817697f4SKelly Moyer 	} else {
1482817697f4SKelly Moyer 		elapsed = GET_SCALED_HRTIME() -
1483817697f4SKelly Moyer 		    refreshq[refreshq_next_job].last_fetch_time;
1484817697f4SKelly Moyer 
1485817697f4SKelly Moyer 		if (elapsed >= MAX_INCACHE_TIME) {
1486817697f4SKelly Moyer 			ret = 0;
1487817697f4SKelly Moyer 		} else {
1488817697f4SKelly Moyer 			ret = (MAX_INCACHE_TIME - elapsed) * HRTIME_SCALE;
1489817697f4SKelly Moyer 		}
1490817697f4SKelly Moyer 	}
1491817697f4SKelly Moyer 
1492817697f4SKelly Moyer 	(void) mutex_unlock(&refreshq_lock);
1493817697f4SKelly Moyer 
1494817697f4SKelly Moyer 	return (ret);
1495817697f4SKelly Moyer }
1496817697f4SKelly Moyer 
1497817697f4SKelly Moyer /*
 * Given the number of seconds the client wants to spend on each cycle of
1499817697f4SKelly Moyer  * processing jobs and then sleeping, return a suggestion for the number of jobs
1500817697f4SKelly Moyer  * the client should process, calculated by dividing the client's cycle duration
1501817697f4SKelly Moyer  * by MAX_INCACHE_TIME and multiplying the result by the total number of jobs in
1502817697f4SKelly Moyer  * the queue.  (Note that the actual implementation of that calculation is done
1503817697f4SKelly Moyer  * in a different order to avoid losing fractional values during integer
1504817697f4SKelly Moyer  * arithmetic.)
1505817697f4SKelly Moyer  */
1506817697f4SKelly Moyer int
snmp_refresh_get_cycle_hint(int secs)1507817697f4SKelly Moyer snmp_refresh_get_cycle_hint(int secs)
1508817697f4SKelly Moyer {
1509817697f4SKelly Moyer 	int	jobs;
1510817697f4SKelly Moyer 
1511817697f4SKelly Moyer 	(void) mutex_lock(&refreshq_lock);
1512817697f4SKelly Moyer 
1513817697f4SKelly Moyer 	/*
1514817697f4SKelly Moyer 	 * First, we need to scale the client's cycle time to get it into the
1515817697f4SKelly Moyer 	 * same units we use internally (i.e. tens of seconds).  We round up, as
1516817697f4SKelly Moyer 	 * it makes more sense for the client to process extra jobs than
1517817697f4SKelly Moyer 	 * insufficient jobs.  If the client's desired cycle time is greater
1518817697f4SKelly Moyer 	 * than MAX_INCACHE_TIME, we just return the current total number of
1519817697f4SKelly Moyer 	 * jobs.
1520817697f4SKelly Moyer 	 */
1521817697f4SKelly Moyer 	secs = (secs + HRTIME_SCALE - 1) / HRTIME_SCALE;
1522817697f4SKelly Moyer 
1523817697f4SKelly Moyer 	jobs = (n_refreshq_jobs * secs) / MAX_INCACHE_TIME;
1524817697f4SKelly Moyer 	if (jobs > n_refreshq_jobs) {
1525817697f4SKelly Moyer 		jobs = n_refreshq_jobs;
1526817697f4SKelly Moyer 	}
1527817697f4SKelly Moyer 
1528817697f4SKelly Moyer 	(void) mutex_unlock(&refreshq_lock);
1529817697f4SKelly Moyer 
1530817697f4SKelly Moyer 	return (jobs);
1531817697f4SKelly Moyer }
1532817697f4SKelly Moyer 
1533817697f4SKelly Moyer /*
1534817697f4SKelly Moyer  * Process the next job on the refresh queue by invoking fetch_bulk() with the
1535817697f4SKelly Moyer  * recorded parameters.  Return -1 if no job was processed (e.g. because there
1536817697f4SKelly Moyer  * aren't any available), or 0 if a job was processed.  We don't actually care
1537817697f4SKelly Moyer  * if fetch_bulk() fails, since we're just working on cache entry refreshing and
1538817697f4SKelly Moyer  * the worst case result of failing here is a longer delay getting that data the
1539817697f4SKelly Moyer  * next time it is requested.
1540817697f4SKelly Moyer  */
1541817697f4SKelly Moyer int
snmp_refresh_process_job(void)1542817697f4SKelly Moyer snmp_refresh_process_job(void)
1543817697f4SKelly Moyer {
1544817697f4SKelly Moyer 	struct picl_snmphdl	*smd;
1545817697f4SKelly Moyer 	char			*oidstrs;
1546817697f4SKelly Moyer 	int			n_oids;
1547817697f4SKelly Moyer 	int			row;
1548817697f4SKelly Moyer 	int			err;
1549817697f4SKelly Moyer 
1550817697f4SKelly Moyer 	(void) mutex_lock(&refreshq_lock);
1551817697f4SKelly Moyer 
1552817697f4SKelly Moyer 	if (n_refreshq_jobs == 0) {
1553817697f4SKelly Moyer 		(void) mutex_unlock(&refreshq_lock);
1554817697f4SKelly Moyer 
1555817697f4SKelly Moyer 		return (-1);
1556817697f4SKelly Moyer 	}
1557817697f4SKelly Moyer 
1558817697f4SKelly Moyer 	smd = refreshq[refreshq_next_job].smd;
1559817697f4SKelly Moyer 	oidstrs = refreshq[refreshq_next_job].oidstrs;
1560817697f4SKelly Moyer 	n_oids = refreshq[refreshq_next_job].n_oids;
1561817697f4SKelly Moyer 	row = refreshq[refreshq_next_job].row;
1562817697f4SKelly Moyer 
1563817697f4SKelly Moyer 	refreshq_next_job = (refreshq_next_job + 1) % n_refreshq_slots;
1564817697f4SKelly Moyer 	n_refreshq_jobs--;
1565817697f4SKelly Moyer 
1566817697f4SKelly Moyer 	(void) mutex_unlock(&refreshq_lock);
1567817697f4SKelly Moyer 
1568817697f4SKelly Moyer 
1569817697f4SKelly Moyer 	/*
1570817697f4SKelly Moyer 	 * fetch_bulk() is going to come right back into the refresh code to add
1571817697f4SKelly Moyer 	 * a new job for the entry we just loaded, which means we have to make
1572817697f4SKelly Moyer 	 * the call without holding the refreshq_lock mutex.
1573817697f4SKelly Moyer 	 */
1574817697f4SKelly Moyer 	fetch_bulk(smd, oidstrs, n_oids, row, 1, &err);
1575817697f4SKelly Moyer 
1576817697f4SKelly Moyer 	return (0);
1577817697f4SKelly Moyer }
1578