/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2019 Peter Tribble.
 */

/*
 * The snmp library helps to prepare the PDUs and communicate with
 * the snmp agent on the SP side via the ds_snmp driver.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <thread.h>
#include <synch.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <sys/ds_snmp.h>

#ifdef USE_SOCKETS
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#endif

#include "libpiclsnmp.h"
#include "snmplib.h"
#include "asn1.h"
#include "pdu.h"

#pragma init(libpiclsnmp_init)		/* need this in .init */

/*
 * Data from the MIB is fetched based on the hints about object
 * groups received from (possibly many threads in) the application.
 * However, the fetched data is kept in a common cache for use across
 * all threads, so even a GETBULK is issued only when absolutely
 * necessary.
 *
 * Note that locking is not fine-grained (there is no per-row lock),
 * since we don't expect too many MT consumers right away.
 */
static mutex_t	mibcache_lock;
static nvlist_t	**mibcache = NULL;
static uint_t	n_mibcache_rows = 0;

static mutex_t snmp_reqid_lock;
static int snmp_reqid = 1;

#ifdef USE_SOCKETS
#define	SNMP_DEFAULT_PORT	161
#define	SNMP_MAX_RECV_PKTSZ	(64 * 1024)
#endif

/*
 * We need a reliably monotonic and stable source of time values to age
 * entries in the mibcache toward expiration.  The code originally used
 * gettimeofday(), but since that is subject to time-of-day changes made by
 * the administrator, the values it returns do not satisfy our needs.
 * Instead, we use gethrtime(), which is immune to time-of-day changes.
 * However, since gethrtime() returns a signed 64-bit value in units of
 * nanoseconds and we are using signed 32-bit timestamps, we always divide
 * the result by (HRTIME_SCALE * NANOSEC) to scale it down into units of 10
 * seconds.
 *
 * Note that the scaling factor means that the value of MAX_INCACHE_TIME
 * from snmplib.h should also be in units of 10 seconds.
 */
#define	GET_SCALED_HRTIME()	(int)(gethrtime() / (HRTIME_SCALE * NANOSEC))
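
/*
 * For example, assuming HRTIME_SCALE is 10 (i.e. timestamps in units of
 * 10 seconds), one hour of uptime scales down as follows:
 *
 *	gethrtime()		== 3600 * NANOSEC
 *	GET_SCALED_HRTIME()	== 3600 * NANOSEC / (10 * NANOSEC) == 360
 *
 * which fits comfortably in the signed 32-bit timestamps we store.
 */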

/*
 * The mibcache code originally cached values for 300 seconds after fetching
 * data via SNMP.  Subsequent reads within that 300 second window would come
 * from the cache - which is quite a bit faster than an SNMP query - but the
 * first request that came in more than 300 seconds after the previous SNMP
 * query would trigger a new SNMP query.  This worked well as an
 * optimization for frequent queries, but when data was only queried less
 * frequently than every 300 seconds (as proved to be the case at multiple
 * customer sites), the cache didn't help at all.
 *
 * To improve the performance of infrequent queries, code was added to the
 * library to allow a client (i.e. a thread in the picl plugin) to proactively
 * refresh cache entries without waiting for them to expire, thereby ensuring
 * that all volatile entries in the cache at any given time are less than 300
 * seconds old.  Whenever an SNMP query is generated to retrieve volatile data
 * that will be cached, an entry is added in a refresh queue that tracks the
 * parameters of the query and the time that it was made.  A client can query
 * the age of the oldest item in the refresh queue and - at its discretion - can
 * then force that query to be repeated in a manner that will update the
 * mibcache entry even though it hasn't expired.
 */
typedef struct {
	struct picl_snmphdl	*smd;
	char			*oidstrs;
	int			n_oids;
	int			row;
	int			last_fetch_time;	/* in scaled hrtime */
} refreshq_job_t;

static mutex_t		refreshq_lock;
static refreshq_job_t	*refreshq = NULL;
static uint_t		n_refreshq_slots = 0;	/* # of alloc'ed job slots */
static uint_t		n_refreshq_jobs = 0;	/* # of unprocessed jobs */
static uint_t		refreshq_next_job = 0;	/* oldest unprocessed job */
static uint_t		refreshq_next_slot = 0;	/* next available job slot */
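
/*
 * A minimal sketch of how a client (e.g. a thread in the picl plugin)
 * might drive this refresh queue; the 30-second cycle and the
 * shutting_down flag are illustrative, not part of this library:
 *
 *	int n;
 *
 *	if (snmp_refresh_init() == 0) {
 *		while (!shutting_down) {
 *			n = snmp_refresh_get_cycle_hint(30);
 *			while (n-- > 0)
 *				(void) snmp_refresh_process_job();
 *			(void) sleep(30);
 *		}
 *		snmp_refresh_fini();
 *	}
 */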


/*
 * Static function declarations
 */
static void	libpiclsnmp_init(void);

static int	lookup_int(char *, int, int *, int);
static int	lookup_str(char *, int, char **, int);
static int	lookup_bitstr(char *, int, uchar_t **, uint_t *, int);

static oidgroup_t *locate_oid_group(struct picl_snmphdl *, char *);
static int	search_oid_in_group(char *, char *, int);

static snmp_pdu_t *fetch_single(struct picl_snmphdl *, char *, int, int *);
static snmp_pdu_t *fetch_next(struct picl_snmphdl *, char *, int, int *);
static void	fetch_bulk(struct picl_snmphdl *, char *, int, int, int, int *);
static int	fetch_single_str(struct picl_snmphdl *, char *, int,
		    char **, int *);
static int	fetch_single_int(struct picl_snmphdl *, char *, int,
		    int *, int *);
static int	fetch_single_bitstr(struct picl_snmphdl *, char *, int,
		    uchar_t **, uint_t *, int *);

static int	snmp_send_request(struct picl_snmphdl *, snmp_pdu_t *, int *);
static int	snmp_recv_reply(struct picl_snmphdl *, snmp_pdu_t *, int *);

static int	mibcache_realloc(int);
static void	mibcache_populate(snmp_pdu_t *, int);
static char	*oid_to_oidstr(oid *, size_t);

static int	refreshq_realloc(int);
static int	refreshq_add_job(struct picl_snmphdl *, char *, int, int);


static void
libpiclsnmp_init(void)
{
	(void) mutex_init(&mibcache_lock, USYNC_THREAD, NULL);
	if (mibcache_realloc(0) < 0)
		(void) mutex_destroy(&mibcache_lock);

	(void) mutex_init(&refreshq_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&snmp_reqid_lock, USYNC_THREAD, NULL);
}

picl_snmphdl_t
snmp_init()
{
	struct picl_snmphdl	*smd;
#ifdef USE_SOCKETS
	int	sbuf = (1 << 15);	/* 32K */
	int	rbuf = (1 << 17);	/* 128K */
	char	*snmp_agent_addr;
#endif

	smd = (struct picl_snmphdl *)calloc(1, sizeof (struct picl_snmphdl));
	if (smd == NULL)
		return (NULL);

#ifdef USE_SOCKETS
	if ((snmp_agent_addr = getenv("SNMP_AGENT_IPADDR")) == NULL) {
		free(smd);
		return (NULL);
	}

	if ((smd->fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0) {
		free(smd);
		return (NULL);
	}

	(void) setsockopt(smd->fd, SOL_SOCKET, SO_SNDBUF, &sbuf, sizeof (int));
	(void) setsockopt(smd->fd, SOL_SOCKET, SO_RCVBUF, &rbuf, sizeof (int));

	memset(&smd->agent_addr, 0, sizeof (struct sockaddr_in));
	smd->agent_addr.sin_family = AF_INET;
	smd->agent_addr.sin_port = htons(SNMP_DEFAULT_PORT);
	smd->agent_addr.sin_addr.s_addr = inet_addr(snmp_agent_addr);
#else
	smd->fd = open(DS_SNMP_DRIVER, O_RDWR);
	if (smd->fd < 0) {
		free(smd);
		return (NULL);
	}
#endif

	return ((picl_snmphdl_t)smd);
}

void
snmp_fini(picl_snmphdl_t hdl)
{
	struct picl_snmphdl	*smd = (struct picl_snmphdl *)hdl;

	if (smd) {
		if (smd->fd >= 0) {
			(void) close(smd->fd);
		}
		free(smd);
	}
}

int
snmp_reinit(picl_snmphdl_t hdl, int clr_linkreset)
{
	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
	nvlist_t *nvl;
	int i;

	(void) mutex_lock(&mibcache_lock);

	for (i = 0; i < n_mibcache_rows; i++) {
		if ((nvl = mibcache[i]) != NULL)
			nvlist_free(nvl);
	}

	n_mibcache_rows = 0;
	if (mibcache) {
		free(mibcache);
		mibcache = NULL;
	}

	(void) mutex_unlock(&mibcache_lock);

	if (clr_linkreset) {
		if (smd == NULL || smd->fd < 0)
			return (-1);
		else
			return (ioctl(smd->fd, DSSNMP_CLRLNKRESET, NULL));
	}

	return (0);
}

void
snmp_register_group(picl_snmphdl_t hdl, char *oidstrs, int n_oids, int is_vol)
{
	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*oidg;
	oidgroup_t	*curr, *prev;
	char		*p;
	int		i, sz;

	/*
	 * Allocate a new oidgroup_t
	 */
	oidg = (oidgroup_t *)calloc(1, sizeof (struct oidgroup));
	if (oidg == NULL)
		return;

	/*
	 * Determine how much space is required to register this group
	 */
	sz = 0;
	p = oidstrs;
	for (i = 0; i < n_oids; i++) {
		sz += strlen(p) + 1;
		p = oidstrs + sz;
	}

	/*
	 * Create this oid group
	 */
	if ((p = (char *)malloc(sz)) == NULL) {
		free((void *) oidg);
		return;
	}

	(void) memcpy(p, oidstrs, sz);

	oidg->next = NULL;
	oidg->oidstrs = p;
	oidg->n_oids = n_oids;
	oidg->is_volatile = is_vol;

	/*
	 * Link it to the tail of the list of oid groups
	 */
	for (prev = NULL, curr = smd->group; curr; curr = curr->next)
		prev = curr;

	if (prev == NULL)
		smd->group = oidg;
	else
		prev->next = oidg;
}
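
/*
 * The oidstrs argument to snmp_register_group() is expected to be a
 * packed sequence of n_oids NUL-terminated OID prefix strings.  A sketch
 * of a registration (the OID values below are purely illustrative):
 *
 *	static const char oids[] =
 *	    "1.3.6.1.4.1.42.2.70.101.1.1.1\0"
 *	    "1.3.6.1.4.1.42.2.70.101.1.1.2";
 *
 *	snmp_register_group(hdl, (char *)oids, 2, 0);
 */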

/*
 * snmp_get_int() takes in an OID and returns the integer value
 * of the object referenced in the passed arg. It returns 0 on
 * success and -1 on failure.
 */
int
snmp_get_int(picl_snmphdl_t hdl, char *prefix, int row, int *val,
    int *snmp_syserr)
{
	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || val == NULL)
		return (-1);

	/*
	 * If this item should not be cached, fetch it directly from
	 * the agent using fetch_single_xxx()
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_int(smd, prefix, row, val, &err);

		if (snmp_syserr)
			*snmp_syserr = err;

		return (ret);
	}

	/*
	 * Is it in the cache?
	 */
	if (lookup_int(prefix, row, val, grp->is_volatile) == 0)
		return (0);

	/*
	 * fetch it from the agent and populate the cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * look it up again and return it
	 */
	if (lookup_int(prefix, row, val, grp->is_volatile) < 0)
		return (-1);

	return (0);
}
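
/*
 * A typical lookup, assuming hdl was obtained from snmp_init(); the OID
 * string and report_error() below are illustrative, not part of this
 * library:
 *
 *	int val, syserr = 0;
 *
 *	if (snmp_get_int(hdl, "1.3.6.1.4.1.42.2.70.101.1.1.2", row,
 *	    &val, &syserr) < 0)
 *		report_error(syserr);
 */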

/*
 * snmp_get_str() takes in an OID and returns the string value
 * of the object referenced in the passed arg. Memory for the string
 * is allocated within snmp_get_str() and is expected to be freed by
 * the caller when it is no longer needed. The function returns 0
 * on success and -1 on failure.
 */
int
snmp_get_str(picl_snmphdl_t hdl, char *prefix, int row, char **strp,
    int *snmp_syserr)
{
	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	char	*val;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || strp == NULL)
		return (-1);

	*strp = NULL;
	/*
	 * Check if this item is cacheable or not. If not, call
	 * fetch_single_* to get it directly from the agent
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_str(smd, prefix, row, strp, &err);

		if (snmp_syserr)
			*snmp_syserr = err;

		return (ret);
	}

	/*
	 * See if it's in the cache already
	 */
	if (lookup_str(prefix, row, &val, grp->is_volatile) == 0) {
		if ((*strp = strdup(val)) == NULL)
			return (-1);
		else
			return (0);
	}

	/*
	 * Fetch it from the agent and populate cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * Retry lookup
	 */
	if (lookup_str(prefix, row, &val, grp->is_volatile) < 0)
		return (-1);

	if ((*strp = strdup(val)) == NULL)
		return (-1);
	else
		return (0);
}
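
/*
 * The caller owns the returned string.  For example (again with an
 * illustrative OID; use_name() is a hypothetical consumer):
 *
 *	char *name = NULL;
 *	int syserr = 0;
 *
 *	if (snmp_get_str(hdl, "1.3.6.1.4.1.42.2.70.101.1.1.3", row,
 *	    &name, &syserr) == 0) {
 *		use_name(name);
 *		free(name);
 *	}
 */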

/*
 * snmp_get_bitstr() takes in an OID and returns the bit string value
 * of the object referenced in the passed args. Memory for the bitstring
 * is allocated within the function and is expected to be freed by
 * the caller when it is no longer needed. The function returns 0
 * on success and -1 on failure.
 */
int
snmp_get_bitstr(picl_snmphdl_t hdl, char *prefix, int row, uchar_t **bitstrp,
    uint_t *nbytes, int *snmp_syserr)
{
	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
	oidgroup_t	*grp;
	uchar_t	*val;
	int	ret;
	int	err = 0;

	if (smd == NULL || prefix == NULL || bitstrp == NULL || nbytes == NULL)
		return (-1);

	*bitstrp = NULL;
	/*
	 * Check if this item is cacheable or not. If not, call
	 * fetch_single_* to get it directly from the agent
	 */
	if ((grp = locate_oid_group(smd, prefix)) == NULL) {
		ret = fetch_single_bitstr(smd, prefix, row, bitstrp,
		    nbytes, &err);

		if (snmp_syserr)
			*snmp_syserr = err;

		return (ret);
	}

	/*
	 * See if it's in the cache already
	 */
	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) == 0) {
		if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
			return (-1);
		(void) memcpy(*bitstrp, (const void *)val, *nbytes);
		return (0);
	}

	/*
	 * Fetch it from the agent and populate cache
	 */
	fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
	if (snmp_syserr)
		*snmp_syserr = err;

	/*
	 * Retry lookup
	 */
	if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) < 0)
		return (-1);

	if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
		return (-1);
	(void) memcpy(*bitstrp, (const void *)val, *nbytes);

	return (0);
}

/*
 * snmp_get_nextrow() is similar in operation to SNMP_GETNEXT, but
 * only just. In particular, this is only expected to return the next
 * valid row number for the same object, not its value. Since we don't
 * have any other means, we use this to determine the number of rows
 * in the table (and the valid ones). This function returns 0 on success
 * and -1 on failure.
 */
int
snmp_get_nextrow(picl_snmphdl_t hdl, char *prefix, int row, int *nextrow,
    int *snmp_syserr)
{
	struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
	snmp_pdu_t *reply_pdu;
	pdu_varlist_t *vp;
	char	*nxt_oidstr;
	int	err = 0;

	if (smd == NULL || prefix == NULL || nextrow == NULL) {
		if (snmp_syserr)
			*snmp_syserr = EINVAL;
		return (-1);
	}

	/*
	 * The get_nextrow results should *never* go into any cache,
	 * since these relationships are dynamically discovered each time.
	 */
	if ((reply_pdu = fetch_next(smd, prefix, row, &err)) == NULL) {
		if (snmp_syserr)
			*snmp_syserr = err;
		return (-1);
	}

	/*
	 * We are not concerned about the "value" of the lexicographically
	 * next object; we only care about the name of that object and
	 * its row number (and whether such an object exists or not).
	 */
	vp = reply_pdu->vars;

	/*
	 * This indicates that we're at the end of the MIB view or that
	 * the object doesn't exist.
	 */
	if (vp == NULL || vp->name == NULL || vp->type == SNMP_NOSUCHOBJECT ||
	    vp->type == SNMP_NOSUCHINSTANCE || vp->type == SNMP_ENDOFMIBVIEW) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOSPC;
		return (-1);
	}

	/*
	 * need to be able to convert the OID
	 */
	if ((nxt_oidstr = oid_to_oidstr(vp->name, vp->name_len - 1)) == NULL) {
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOMEM;
		return (-1);
	}

	/*
	 * We're on to the next table.
	 */
	if (strcmp(nxt_oidstr, prefix) != 0) {
		free(nxt_oidstr);
		snmp_free_pdu(reply_pdu);
		if (snmp_syserr)
			*snmp_syserr = ENOENT;
		return (-1);
	}

	/*
	 * Ok, so we've got an oid that's simply the next valid row of the
	 * passed-in object; return this row number.
	 */
	*nextrow = (vp->name)[vp->name_len-1];

	free(nxt_oidstr);
	snmp_free_pdu(reply_pdu);

	return (0);
}
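
/*
 * Since snmp_get_nextrow() only reveals one valid row at a time, a
 * caller enumerates a table by walking from row to row; a sketch, with
 * error handling elided, table_oidstr assumed to name a registered table
 * object, and process_row() as hypothetical per-row work:
 *
 *	int row, nextrow, syserr = 0;
 *
 *	for (row = 0; snmp_get_nextrow(hdl, table_oidstr, row,
 *	    &nextrow, &syserr) == 0; row = nextrow) {
 *		process_row(nextrow);
 *	}
 */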

/*
 * Request ids for snmp messages to the agent are sequenced here.
 */
int
snmp_get_reqid(void)
{
	int	ret;

	(void) mutex_lock(&snmp_reqid_lock);

	ret = snmp_reqid++;

	(void) mutex_unlock(&snmp_reqid_lock);

	return (ret);
}

static int
lookup_int(char *prefix, int row, int *valp, int is_vol)
{
	int32_t	*val_arr;
	uint_t	nelem;
	int	now;
	int	elapsed;

	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * If this is a volatile property, we should be searching
	 * for an integer-timestamp pair
	 */
	if (is_vol) {
		if (nvlist_lookup_int32_array(mibcache[row], prefix,
		    &val_arr, &nelem) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		if (nelem != 2 || val_arr[1] < 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		now = GET_SCALED_HRTIME();
		elapsed = now - val_arr[1];
		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}

		*valp = (int)val_arr[0];
	} else {
		if (nvlist_lookup_int32(mibcache[row], prefix, valp) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}

static int
lookup_str(char *prefix, int row, char **valp, int is_vol)
{
	char	**val_arr;
	uint_t	nelem;
	int	now;
	int	elapsed;

	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * If this is a volatile property, we should be searching
	 * for a string-timestamp pair
	 */
	if (is_vol) {
		if (nvlist_lookup_string_array(mibcache[row], prefix,
		    &val_arr, &nelem) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		if (nelem != 2 || atoi(val_arr[1]) <= 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
		now = GET_SCALED_HRTIME();
		elapsed = now - atoi(val_arr[1]);
		if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}

		*valp = val_arr[0];
	} else {
		if (nvlist_lookup_string(mibcache[row], prefix, valp) != 0) {
			(void) mutex_unlock(&mibcache_lock);
			return (-1);
		}
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}

static int
lookup_bitstr(char *prefix, int row, uchar_t **valp, uint_t *nelem, int is_vol)
{
	(void) mutex_lock(&mibcache_lock);

	if (row >= n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache[row] == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	/*
	 * We don't support volatile bit string values yet. The nvlist
	 * functions don't support bitstring arrays like they do charstring
	 * arrays, so we would need to do things in a convoluted way,
	 * probably by attaching the timestamp as part of the byte array
	 * itself. However, there isn't yet enough need for volatile
	 * bitstrings to justify the effort.
	 */
	if (is_vol) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (nvlist_lookup_byte_array(mibcache[row], prefix, valp, nelem) != 0) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}

static int
search_oid_in_group(char *prefix, char *oidstrs, int n_oids)
{
	char	*p;
	int	i;

	p = oidstrs;
	for (i = 0; i < n_oids; i++) {
		if (strcmp(p, prefix) == 0)
			return (0);

		p += strlen(p) + 1;
	}

	return (-1);
}

static oidgroup_t *
locate_oid_group(struct picl_snmphdl *smd, char *prefix)
{
	oidgroup_t	*grp;

	if (smd == NULL)
		return (NULL);

	if (smd->group == NULL)
		return (NULL);

	for (grp = smd->group; grp; grp = grp->next) {
		if (search_oid_in_group(prefix, grp->oidstrs,
		    grp->n_oids) == 0) {
			return (grp);
		}
	}

	return (NULL);
}

static int
fetch_single_int(struct picl_snmphdl *smd, char *prefix, int row, int *ival,
    int *snmp_syserr)
{
	snmp_pdu_t *reply_pdu;
	pdu_varlist_t *vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	/*
	 * Note that we don't make any distinction between unsigned int
	 * value and signed int value at this point, since we provide
	 * only snmp_get_int() at the higher level. While it is possible
	 * to provide an entirely separate interface such as snmp_get_uint(),
	 * that's quite unnecessary, because we don't do any interpretation
	 * of the received value. Besides, the sizes of int and uint are
	 * the same and the sizes of all pointers are the same (so val.iptr
	 * would be the same as val.uiptr in pdu_varlist_t). If/when we
	 * violate any of these assumptions, it will be time to add
	 * snmp_get_uint().
	 */
	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.iptr == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*ival = *(vp->val.iptr);

	snmp_free_pdu(reply_pdu);

	return (0);
}

static int
fetch_single_str(struct picl_snmphdl *smd, char *prefix, int row, char **valp,
    int *snmp_syserr)
{
	snmp_pdu_t *reply_pdu;
	pdu_varlist_t *vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.str == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*valp = strdup((const char *)(vp->val.str));

	snmp_free_pdu(reply_pdu);

	return (0);
}

static int
fetch_single_bitstr(struct picl_snmphdl *smd, char *prefix, int row,
    uchar_t **valp, uint_t *nelem, int *snmp_syserr)
{
	snmp_pdu_t *reply_pdu;
	pdu_varlist_t *vp;

	if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
		return (-1);

	vp = reply_pdu->vars;
	if (vp == NULL || vp->val.str == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	if ((*valp = (uchar_t *)calloc(vp->val_len, 1)) == NULL) {
		snmp_free_pdu(reply_pdu);
		return (-1);
	}

	*nelem = vp->val_len;
	(void) memcpy(*valp, (const void *)(vp->val.str),
	    (size_t)(vp->val_len));

	snmp_free_pdu(reply_pdu);

	return (0);
}

static snmp_pdu_t *
fetch_single(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;

	if ((pdu = snmp_create_pdu(SNMP_MSG_GET, 0, prefix, 1, row)) == NULL)
		return (NULL);

	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);

	snmp_free_pdu(pdu);

	return (reply_pdu);
}

static void
fetch_bulk(struct picl_snmphdl *smd, char *oidstrs, int n_oids,
    int row, int is_vol, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;
	int		max_reps;

	/*
	 * If we're fetching volatile properties using GETBULK, don't
	 * venture to get multiple rows (passing max_reps=0 will make
	 * snmp_create_pdu() fetch SNMP_DEF_MAX_REPETITIONS rows)
	 */
	max_reps = is_vol ? 1 : 0;

	pdu = snmp_create_pdu(SNMP_MSG_GETBULK, max_reps, oidstrs, n_oids, row);
	if (pdu == NULL)
		return;

	/*
	 * Make an ASN.1 encoded packet from the PDU information
	 */
	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Send the request packet to the agent
	 */
	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Receive response from the agent into the reply packet buffer
	 * in the request PDU
	 */
	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return;
	}

	/*
	 * Parse the reply, validate the response and create a
	 * reply-PDU out of the information. Populate the mibcache
	 * with the received values.
	 */
	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);
	if (reply_pdu) {
		if (reply_pdu->errstat == SNMP_ERR_NOERROR) {
			if (is_vol) {
				/* Add a job to the cache refresh work queue */
				(void) refreshq_add_job(smd, oidstrs, n_oids,
				    row);
			}

			mibcache_populate(reply_pdu, is_vol);
		}

		snmp_free_pdu(reply_pdu);
	}

	snmp_free_pdu(pdu);
}

static snmp_pdu_t *
fetch_next(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
{
	snmp_pdu_t	*pdu, *reply_pdu;

	pdu = snmp_create_pdu(SNMP_MSG_GETNEXT, 0, prefix, 1, row);
	if (pdu == NULL)
		return (NULL);

	if (snmp_make_packet(pdu) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
		snmp_free_pdu(pdu);
		return (NULL);
	}

	reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
	    pdu->reply_pktsz);

	snmp_free_pdu(pdu);

	return (reply_pdu);
}

static int
snmp_send_request(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
{
	extern int	errno;
#ifdef USE_SOCKETS
	int		ret;
#endif

	if (smd->fd < 0)
		return (-1);

	if (pdu == NULL || pdu->req_pkt == NULL)
		return (-1);

#ifdef USE_SOCKETS
	ret = -1;
	while (ret < 0) {
		ret = sendto(smd->fd, pdu->req_pkt, pdu->req_pktsz, 0,
		    (struct sockaddr *)&smd->agent_addr,
		    sizeof (struct sockaddr));
		if (ret < 0 && errno != EINTR) {
			return (-1);
		}
	}
#else
	if (write(smd->fd, pdu->req_pkt, pdu->req_pktsz) < 0) {
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}
#endif

	return (0);
}

static int
snmp_recv_reply(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
{
	struct dssnmp_info	snmp_info;
	size_t	pktsz;
	uchar_t	*pkt;
	extern int errno;
#ifdef USE_SOCKETS
	struct sockaddr_in	from;
	int	fromlen;
	ssize_t	msgsz;
#endif

	if (smd->fd < 0 || pdu == NULL)
		return (-1);

#ifdef USE_SOCKETS
	if ((pkt = (uchar_t *)calloc(1, SNMP_MAX_RECV_PKTSZ)) == NULL)
		return (-1);

	fromlen = sizeof (struct sockaddr_in);

	msgsz = recvfrom(smd->fd, pkt, SNMP_MAX_RECV_PKTSZ, 0,
	    (struct sockaddr *)&from, &fromlen);
	if (msgsz < 0 || msgsz >= SNMP_MAX_RECV_PKTSZ) {
		free(pkt);
		return (-1);
	}

	pktsz = (size_t)msgsz;
#else
	/*
	 * The ioctl will block until we have snmp data available
	 */
	if (ioctl(smd->fd, DSSNMP_GETINFO, &snmp_info) < 0) {
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}

	pktsz = snmp_info.size;
	if ((pkt = (uchar_t *)calloc(1, pktsz)) == NULL)
		return (-1);

	if (read(smd->fd, pkt, pktsz) < 0) {
		free(pkt);
		if (snmp_syserr)
			*snmp_syserr = errno;
		return (-1);
	}
#endif

	pdu->reply_pkt = pkt;
	pdu->reply_pktsz = pktsz;

	return (0);
}

static int
mibcache_realloc(int hint)
{
	uint_t		count = (uint_t)hint;
	nvlist_t	**p;

	if (hint < 0)
		return (-1);

	(void) mutex_lock(&mibcache_lock);

	if (hint < n_mibcache_rows) {
		(void) mutex_unlock(&mibcache_lock);
		return (0);
	}

	count = ((count >> MIBCACHE_BLK_SHIFT) + 1) << MIBCACHE_BLK_SHIFT;

	p = (nvlist_t **)calloc(count, sizeof (nvlist_t *));
	if (p == NULL) {
		(void) mutex_unlock(&mibcache_lock);
		return (-1);
	}

	if (mibcache) {
		(void) memcpy((void *) p, (void *) mibcache,
		    n_mibcache_rows * sizeof (nvlist_t *));
		free((void *) mibcache);
	}

	mibcache = p;
	n_mibcache_rows = count;

	(void) mutex_unlock(&mibcache_lock);

	return (0);
}


/*
 * Scan each variable in the returned PDU's bindings and populate
 * the cache appropriately
 */
static void
mibcache_populate(snmp_pdu_t *pdu, int is_vol)
{
	pdu_varlist_t	*vp;
	int		row, ret;
	char		*oidstr;
	int		tod;	/* in secs */
	char		tod_str[MAX_INT_LEN];
	int		ival_arr[2];
	char		*sval_arr[2];

	/*
	 * If we're populating volatile properties, we also store a
	 * timestamp with each property value. When we lookup, we check the
	 * current time against this timestamp to determine if we need to
	 * refetch the value or not (refetch if it has been in for far too
	 * long).
	 */

	if (is_vol) {
		tod = GET_SCALED_HRTIME();

		tod_str[0] = 0;
		(void) snprintf(tod_str, MAX_INT_LEN, "%d", tod);

		ival_arr[1] = tod;
		sval_arr[1] = (char *)tod_str;
	}

	for (vp = pdu->vars; vp; vp = vp->nextvar) {
		if (vp->type != ASN_INTEGER && vp->type != ASN_OCTET_STR &&
		    vp->type != ASN_BIT_STR) {
			continue;
		}

		if (vp->name == NULL || vp->val.str == NULL)
			continue;

		row = (vp->name)[vp->name_len-1];

		(void) mutex_lock(&mibcache_lock);

		if (row >= n_mibcache_rows) {
			(void) mutex_unlock(&mibcache_lock);
			if (mibcache_realloc(row) < 0)
				continue;
			(void) mutex_lock(&mibcache_lock);
		}
		ret = 0;
		if (mibcache[row] == NULL)
			ret = nvlist_alloc(&mibcache[row], NV_UNIQUE_NAME, 0);

		(void) mutex_unlock(&mibcache_lock);

		if (ret != 0)
			continue;

		/*
		 * Convert the standard OID form into an oid string that
		 * we can use as the key to lookup. Since we only search
		 * by the prefix (mibcache is really an array of nvlist_t
		 * pointers), ignore the leaf subid.
		 */
		oidstr = oid_to_oidstr(vp->name, vp->name_len - 1);
		if (oidstr == NULL)
			continue;

		(void) mutex_lock(&mibcache_lock);

		if (vp->type == ASN_INTEGER) {
			if (is_vol) {
				ival_arr[0] = *(vp->val.iptr);
				(void) nvlist_add_int32_array(mibcache[row],
				    oidstr, ival_arr, 2);
			} else {
				(void) nvlist_add_int32(mibcache[row],
				    oidstr, *(vp->val.iptr));
			}

		} else if (vp->type == ASN_OCTET_STR) {
			if (is_vol) {
				sval_arr[0] = (char *)vp->val.str;
				(void) nvlist_add_string_array(mibcache[row],
				    oidstr, sval_arr, 2);
			} else {
				(void) nvlist_add_string(mibcache[row],
				    oidstr, (const char *)(vp->val.str));
			}
		} else if (vp->type == ASN_BIT_STR) {
			/*
			 * We don't yet support bit string objects that are
			 * volatile values.
			 */
			if (!is_vol) {
				(void) nvlist_add_byte_array(mibcache[row],
				    oidstr, (uchar_t *)(vp->val.str),
				    (uint_t)vp->val_len);
			}
		}
		(void) mutex_unlock(&mibcache_lock);

		free(oidstr);
	}
}

static char *
oid_to_oidstr(oid *objid, size_t n_subids)
{
	char	*oidstr;
	char	subid_str[MAX_INT_LEN];
	int	i, isize;
	size_t	oidstr_sz;

	/*
	 * ugly, but for now this will have to do.
	 */
	oidstr_sz = sizeof (subid_str) * n_subids;
	if ((oidstr = calloc(1, oidstr_sz)) == NULL)
		return (NULL);

	for (i = 0; i < n_subids; i++) {
		(void) memset(subid_str, 0, sizeof (subid_str));
		isize = snprintf(subid_str, sizeof (subid_str), "%d",
		    objid[i]);
		if (isize >= sizeof (subid_str)) {
			free(oidstr);
			return (NULL);
		}

		(void) strlcat(oidstr, subid_str, oidstr_sz);
		if (i < (n_subids - 1))
			(void) strlcat(oidstr, ".", oidstr_sz);
	}

	return (oidstr);
}
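
/*
 * For example, given objid = { 1, 3, 6, 1, 2 } and n_subids = 4, this
 * returns the string "1.3.6.1"; callers above pass name_len - 1 so that
 * the leaf (row) subid is dropped from the resulting prefix.
 */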

/*
 * Expand the refreshq to hold more cache refresh jobs.  Caller must already
 * hold refreshq_lock mutex.  Every expansion of the refreshq will add a
 * full block of job slots (1 << REFRESHQ_BLK_SHIFT), rather than expanding
 * by one slot every time more space is needed.
 */
static int
refreshq_realloc(int hint)
{
	uint_t		count = (uint_t)hint;
	refreshq_job_t	*p;

	if (hint < 0)
		return (-1);

	if (hint < n_refreshq_slots) {
		return (0);
	}

	/* Round count up to the next multiple of (1 << REFRESHQ_BLK_SHIFT) */
	count = ((count >> REFRESHQ_BLK_SHIFT) + 1) << REFRESHQ_BLK_SHIFT;

	p = (refreshq_job_t *)calloc(count, sizeof (refreshq_job_t));
	if (p == NULL) {
		return (-1);
	}

	if (refreshq) {
		if (n_refreshq_jobs == 0) {
			/* Simple case, nothing to copy */
			refreshq_next_job = 0;
			refreshq_next_slot = 0;
		} else if (refreshq_next_slot > refreshq_next_job) {
			/* Simple case, single copy preserves everything */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    n_refreshq_jobs * sizeof (refreshq_job_t));
		} else {
			/*
			 * Complex case.  The jobs in the refresh queue wrap
			 * around the end of the array in which they are stored.
			 * To preserve chronological order in the new allocated
			 * array, we need to copy the jobs at the end of the old
			 * array to the beginning of the new one and place the
			 * jobs from the beginning of the old array after them.
			 */
			uint_t tail_jobs, head_jobs;

			tail_jobs = n_refreshq_slots - refreshq_next_job;
			head_jobs = n_refreshq_jobs - tail_jobs;

			/* Copy the jobs from the end of the old array */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    tail_jobs * sizeof (refreshq_job_t));

			/* Copy the jobs from the beginning of the old array */
			(void) memcpy((void *) &(p[tail_jobs]),
			    (void *) &(refreshq[0]),
			    head_jobs * sizeof (refreshq_job_t));

			/* update the job and slot indices to match */
			refreshq_next_job = 0;
			refreshq_next_slot = n_refreshq_jobs;
		}
		free((void *) refreshq);
	} else {
		/* First initialization */
		refreshq_next_job = 0;
		refreshq_next_slot = 0;
		n_refreshq_jobs = 0;
	}

	refreshq = p;
	n_refreshq_slots = count;

	return (0);
}

/*
 * Add a new job to the refreshq.  If there aren't any open slots, attempt to
 * expand the queue first.  Return -1 if unable to add the job to the work
 * queue, or 0 if the job was added OR if an existing job with the same
 * parameters is already pending.
 */
static int
refreshq_add_job(struct picl_snmphdl *smd, char *oidstrs, int n_oids, int row)
{
	int	i;
	int	job;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * Can't do anything without a queue.  Either the client never
	 * initialized the refresh queue or the initial memory allocation
	 * failed.
	 */
	if (refreshq == NULL) {
		(void) mutex_unlock(&refreshq_lock);
		return (-1);
	}

	/*
	 * If there is already a job pending with the same parameters as the job
	 * we have been asked to add, we apparently let an entry expire and it
	 * is now being reloaded.  Rather than add another job for the same
	 * entry, we skip adding the new job and let the existing job address
	 * it.
	 */
	for (i = 0, job = refreshq_next_job; i < n_refreshq_jobs; i++,
	    job = (job + 1) % n_refreshq_slots) {
		if ((refreshq[job].row == row) &&
		    (refreshq[job].n_oids == n_oids) &&
		    (refreshq[job].oidstrs == oidstrs)) {
			(void) mutex_unlock(&refreshq_lock);
			return (0);
		}
	}

	/*
	 * If the queue is full, we need to expand it
	 */
	if (n_refreshq_jobs == n_refreshq_slots) {
		if (refreshq_realloc(n_refreshq_slots + 1) < 0) {
			/*
			 * Can't expand the job queue, so we drop this job on
			 * the floor.  No data is lost... we just allow some
			 * data in the mibcache to expire.
			 */
			(void) mutex_unlock(&refreshq_lock);
			return (-1);
		}
	}

	/*
	 * There is room in the queue, so add the new job.  We are actually
	 * taking a timestamp for this job that is slightly earlier than when
	 * the mibcache entry will be updated, but since we're trying to update
	 * the mibcache entry before it expires anyway, the earlier timestamp
	 * here is acceptable.
	 */
	refreshq[refreshq_next_slot].smd = smd;
	refreshq[refreshq_next_slot].oidstrs = oidstrs;
	refreshq[refreshq_next_slot].n_oids = n_oids;
	refreshq[refreshq_next_slot].row = row;
	refreshq[refreshq_next_slot].last_fetch_time = GET_SCALED_HRTIME();

	/*
	 * Update queue management variables
	 */
	n_refreshq_jobs += 1;
	refreshq_next_slot = (refreshq_next_slot + 1) % n_refreshq_slots;

	(void) mutex_unlock(&refreshq_lock);

	return (0);
}

/*
 * Almost all of the refresh code remains dormant unless specifically
 * initialized by a client (the exception being that fetch_bulk() will still
 * call refreshq_add_job(), but the latter will return without doing anything).
 */
int
snmp_refresh_init(void)
{
	int ret;

	(void) mutex_lock(&refreshq_lock);

	ret = refreshq_realloc(0);

	(void) mutex_unlock(&refreshq_lock);

	return (ret);
}

/*
 * If the client is going away, we don't want to keep doing refresh work, so
 * clean everything up.
 */
void
snmp_refresh_fini(void)
{
	(void) mutex_lock(&refreshq_lock);

	n_refreshq_jobs = 0;
	n_refreshq_slots = 0;
	refreshq_next_job = 0;
	refreshq_next_slot = 0;
	free(refreshq);
	refreshq = NULL;

	(void) mutex_unlock(&refreshq_lock);
}

/*
 * Return the number of seconds remaining before the mibcache entry associated
 * with the next job in the queue will expire.  Note that this requires
 * reversing the scaling normally done on hrtime values.  (The need for scaling
 * is purely internal, and should be hidden from clients.)  If there are no jobs
 * in the queue, return -1.  If the next job has already expired, return 0.
 */
int
snmp_refresh_get_next_expiration(void)
{
	int ret;
	int elapsed;

	(void) mutex_lock(&refreshq_lock);

	if (n_refreshq_jobs == 0) {
		ret = -1;
	} else {
		elapsed = GET_SCALED_HRTIME() -
		    refreshq[refreshq_next_job].last_fetch_time;

		if (elapsed >= MAX_INCACHE_TIME) {
			ret = 0;
		} else {
			ret = (MAX_INCACHE_TIME - elapsed) * HRTIME_SCALE;
		}
	}

	(void) mutex_unlock(&refreshq_lock);

	return (ret);
}

/*
 * Given the number of seconds the client wants to spend on each cycle of
 * processing jobs and then sleeping, return a suggestion for the number of jobs
 * the client should process, calculated by dividing the client's cycle duration
 * by MAX_INCACHE_TIME and multiplying the result by the total number of jobs in
 * the queue.  (Note that the actual implementation of that calculation is done
 * in a different order to avoid losing fractional values during integer
 * arithmetic.)
 */
int
snmp_refresh_get_cycle_hint(int secs)
{
	int	jobs;

	(void) mutex_lock(&refreshq_lock);

	/*
	 * First, we need to scale the client's cycle time to get it into the
	 * same units we use internally (i.e. tens of seconds).  We round up, as
	 * it makes more sense for the client to process extra jobs than
	 * insufficient jobs.  If the client's desired cycle time is greater
	 * than MAX_INCACHE_TIME, we just return the current total number of
	 * jobs.
	 */
	secs = (secs + HRTIME_SCALE - 1) / HRTIME_SCALE;

	jobs = (n_refreshq_jobs * secs) / MAX_INCACHE_TIME;
	if (jobs > n_refreshq_jobs) {
		jobs = n_refreshq_jobs;
	}

	(void) mutex_unlock(&refreshq_lock);

	return (jobs);
}
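
/*
 * As a worked example, assume HRTIME_SCALE is 10 and MAX_INCACHE_TIME is
 * 30 (i.e. 300 seconds; see snmplib.h for the real values).  A client
 * cycling every 60 seconds with 100 queued jobs would get:
 *
 *	secs = (60 + 10 - 1) / 10 = 6
 *	jobs = (100 * 6) / 30 = 20
 *
 * i.e. processing 20 jobs per cycle covers all 100 jobs within the
 * 300-second cache lifetime.
 */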

/*
 * Process the next job on the refresh queue by invoking fetch_bulk() with the
 * recorded parameters.  Return -1 if no job was processed (e.g. because there
 * aren't any available), or 0 if a job was processed.  We don't actually care
 * if fetch_bulk() fails, since we're just working on cache entry refreshing and
 * the worst case result of failing here is a longer delay getting that data the
 * next time it is requested.
 */
int
snmp_refresh_process_job(void)
{
	struct picl_snmphdl	*smd;
	char			*oidstrs;
	int			n_oids;
	int			row;
	int			err;

	(void) mutex_lock(&refreshq_lock);

	if (n_refreshq_jobs == 0) {
		(void) mutex_unlock(&refreshq_lock);

		return (-1);
	}

	smd = refreshq[refreshq_next_job].smd;
	oidstrs = refreshq[refreshq_next_job].oidstrs;
	n_oids = refreshq[refreshq_next_job].n_oids;
	row = refreshq[refreshq_next_job].row;

	refreshq_next_job = (refreshq_next_job + 1) % n_refreshq_slots;
	n_refreshq_jobs--;

	(void) mutex_unlock(&refreshq_lock);

	/*
	 * fetch_bulk() is going to come right back into the refresh code to add
	 * a new job for the entry we just loaded, which means we have to make
	 * the call without holding the refreshq_lock mutex.
	 */
	fetch_bulk(smd, oidstrs, n_oids, row, 1, &err);

	return (0);
}