/*
 * Copyright (c) 2005 by Internet Systems Consortium, Inc. ("ISC")
 * Copyright (c) 1997,1999 by Internet Software Consortium.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


/* When this symbol is defined, allocations via memget() are made slightly
   bigger and some debugging info is stuck before and after the region given
   back to the caller. */
/* #define DEBUGGING_MEMCLUSTER */
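
/* When MEMCLUSTER_ATEND is defined, memput() appends freed fragments to the
   tail of their free list instead of pushing them onto the head, so recently
   freed memory is recycled as late as possible. */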
#define MEMCLUSTER_ATEND

#include "port_before.h"

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/param.h>
#include <sys/stat.h>

#include <netinet/in.h>
#include <arpa/inet.h>
#include <arpa/nameser.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <isc/memcluster.h>
#include <isc/assertions.h>

#include "port_after.h"

#ifdef MEMCLUSTER_RECORD
#ifndef DEBUGGING_MEMCLUSTER
#define DEBUGGING_MEMCLUSTER
#endif
#endif

#define DEF_MAX_SIZE		1100
#define DEF_MEM_TARGET		4096

typedef u_int32_t fence_t;

typedef struct {
	void *			next;
#if defined(DEBUGGING_MEMCLUSTER)
#if defined(MEMCLUSTER_RECORD)
	const char *		file;
	int			line;
#endif
	size_t			size;
	fence_t			fencepost;
#endif
} memcluster_element;
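
/*
 * While a fragment sits on a free list, "next" links it to the next free
 * fragment.  In a DEBUGGING_MEMCLUSTER build this header also precedes each
 * allocated region and records its size and front fencepost (plus the
 * requesting file and line when MEMCLUSTER_RECORD is defined, in which case
 * "next" is reused to chain the element onto the active list).
 */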

#define SMALL_SIZE_LIMIT sizeof(memcluster_element)
#define P_SIZE sizeof(void *)
#define FRONT_FENCEPOST 0xfebafeba
#define BACK_FENCEPOST 0xabefabef
#define FENCEPOST_SIZE 4
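
/*
 * In a DEBUGGING_MEMCLUSTER build each allocation is laid out as
 *
 *	[memcluster_element header][caller's "size" bytes][back fencepost]
 *
 * The front fencepost lives in the header and the back fencepost is
 * memcpy()'d just past the caller's region; memput() verifies both.
 */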

#ifndef MEMCLUSTER_LITTLE_MALLOC
#define MEMCLUSTER_BIG_MALLOC 1
#define NUM_BASIC_BLOCKS 64
#endif
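
/*
 * With MEMCLUSTER_BIG_MALLOC, memory is taken from the system in chunks of
 * NUM_BASIC_BLOCKS * mem_target bytes; each chunk is split into
 * mem_target-sized basic blocks, and a basic block is later carved into
 * fragments of a single quantized size.
 */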

struct stats {
	u_long			gets;
	u_long			totalgets;
	u_long			blocks;
	u_long			freefrags;
};
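
/*
 * One stats[] entry per request size, plus entry max_size for oversized
 * requests.  "gets" (currently outstanding) and "totalgets" (lifetime) are
 * recorded against the exact requested size; "blocks" (basic blocks carved)
 * and "freefrags" (fragments on the free list) are recorded against the
 * quantized size used by the free lists.
 */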

#ifdef DO_PTHREADS
#include <pthread.h>
static pthread_mutex_t	memlock = PTHREAD_MUTEX_INITIALIZER;
#define MEMLOCK		(void)pthread_mutex_lock(&memlock)
#define MEMUNLOCK	(void)pthread_mutex_unlock(&memlock)
#else
/*
 * Catch bad lock usage in a non-threaded build.
 */
static unsigned int	memlock = 0;
#define MEMLOCK		do { INSIST(memlock == 0); memlock = 1; } while (0)
#define MEMUNLOCK	do { INSIST(memlock == 1); memlock = 0; } while (0)
#endif  /* DO_PTHREADS */
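
/*
 * Both variants are used the same way:
 *
 *	MEMLOCK;
 *	... touch freelists/stats ...
 *	MEMUNLOCK;
 *
 * In the non-threaded build the INSIST()s fire on unbalanced or re-entrant
 * locking rather than providing mutual exclusion.
 */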

/* Private data. */

static size_t			max_size;
static size_t			mem_target;
#ifndef MEMCLUSTER_BIG_MALLOC
static size_t			mem_target_half;
static size_t			mem_target_fudge;
#endif
static memcluster_element **	freelists;
#ifdef MEMCLUSTER_RECORD
static memcluster_element **	activelists;
#endif
#ifdef MEMCLUSTER_BIG_MALLOC
static memcluster_element *	basic_blocks;
#endif
static struct stats *		stats;

/* Forward. */

static size_t			quantize(size_t);
#if defined(DEBUGGING_MEMCLUSTER)
static void			check(unsigned char *, int, size_t);
#endif

/* Public. */

int
meminit(size_t init_max_size, size_t target_size) {

#if defined(DEBUGGING_MEMCLUSTER)
	INSIST(sizeof(fence_t) == FENCEPOST_SIZE);
#endif
	if (freelists != NULL) {
		errno = EEXIST;
		return (-1);
	}
	if (init_max_size == 0U)
		max_size = DEF_MAX_SIZE;
	else
		max_size = init_max_size;
	if (target_size == 0U)
		mem_target = DEF_MEM_TARGET;
	else
		mem_target = target_size;
#ifndef MEMCLUSTER_BIG_MALLOC
	mem_target_half = mem_target / 2;
	mem_target_fudge = mem_target + mem_target / 4;
#endif
	freelists = malloc(max_size * sizeof (memcluster_element *));
	stats = malloc((max_size+1) * sizeof (struct stats));
	if (freelists == NULL || stats == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	memset(freelists, 0,
	       max_size * sizeof (memcluster_element *));
	memset(stats, 0, (max_size + 1) * sizeof (struct stats));
#ifdef MEMCLUSTER_RECORD
	activelists = malloc((max_size + 1) * sizeof (memcluster_element *));
	if (activelists == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	memset(activelists, 0,
	       (max_size + 1) * sizeof (memcluster_element *));
#endif
#ifdef MEMCLUSTER_BIG_MALLOC
	basic_blocks = NULL;
#endif
	return (0);
}
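
/*
 * Typical use (a sketch; memget()/memput() are normally reached through the
 * wrapper macros declared in <isc/memcluster.h>, and calling meminit() is
 * optional since the first allocation runs meminit(0, 0) implicitly):
 *
 *	if (meminit(0, 0) == -1 && errno != EEXIST)
 *		abort();
 *	p = memget(128);
 *	...
 *	memput(p, 128);		(callers must pass back the original size)
 */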

void *
__memget(size_t size) {
	return (__memget_record(size, NULL, 0));
}

void *
__memget_record(size_t size, const char *file, int line) {
	size_t new_size = quantize(size);
#if defined(DEBUGGING_MEMCLUSTER)
	memcluster_element *e;
	char *p;
	fence_t fp = BACK_FENCEPOST;
#endif
	void *ret;

	MEMLOCK;

#if !defined(MEMCLUSTER_RECORD)
	UNUSED(file);
	UNUSED(line);
#endif
	if (freelists == NULL) {
		if (meminit(0, 0) == -1) {
			MEMUNLOCK;
			return (NULL);
		}
	}
	if (size == 0U) {
		MEMUNLOCK;
		errno = EINVAL;
		return (NULL);
	}
	if (size >= max_size || new_size >= max_size) {
		/* memget() was called on something beyond our upper limit. */
		stats[max_size].gets++;
		stats[max_size].totalgets++;
#if defined(DEBUGGING_MEMCLUSTER)
		e = malloc(new_size);
		if (e == NULL) {
			MEMUNLOCK;
			errno = ENOMEM;
			return (NULL);
		}
		e->next = NULL;
		e->size = size;
#ifdef MEMCLUSTER_RECORD
		e->file = file;
		e->line = line;
		e->next = activelists[max_size];
		activelists[max_size] = e;
#endif
		MEMUNLOCK;
		e->fencepost = FRONT_FENCEPOST;
		p = (char *)e + sizeof *e + size;
		memcpy(p, &fp, sizeof fp);
		return ((char *)e + sizeof *e);
#else
		MEMUNLOCK;
		return (malloc(size));
#endif
	}

	/*
	 * If there are no blocks in the free list for this size, get a chunk
	 * of memory and then break it up into "new_size"-sized blocks, adding
	 * them to the free list.
	 */
	if (freelists[new_size] == NULL) {
		int i, frags;
		size_t total_size;
		void *new;
		char *curr, *next;

#ifdef MEMCLUSTER_BIG_MALLOC
		if (basic_blocks == NULL) {
			new = malloc(NUM_BASIC_BLOCKS * mem_target);
			if (new == NULL) {
				MEMUNLOCK;
				errno = ENOMEM;
				return (NULL);
			}
			curr = new;
			next = curr + mem_target;
			for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
				((memcluster_element *)curr)->next = next;
				curr = next;
				next += mem_target;
			}
			/*
			 * curr is now pointing at the last block in the
			 * array.
			 */
			((memcluster_element *)curr)->next = NULL;
			basic_blocks = new;
		}
		total_size = mem_target;
		new = basic_blocks;
		basic_blocks = basic_blocks->next;
#else
		if (new_size > mem_target_half)
			total_size = mem_target_fudge;
		else
			total_size = mem_target;
		new = malloc(total_size);
		if (new == NULL) {
			MEMUNLOCK;
			errno = ENOMEM;
			return (NULL);
		}
#endif
		frags = total_size / new_size;
		stats[new_size].blocks++;
		stats[new_size].freefrags += frags;
		/* Set up a linked-list of blocks of size "new_size". */
		curr = new;
		next = curr + new_size;
		for (i = 0; i < (frags - 1); i++) {
#if defined (DEBUGGING_MEMCLUSTER)
			memset(curr, 0xa5, new_size);
#endif
			((memcluster_element *)curr)->next = next;
			curr = next;
			next += new_size;
		}
		/* curr is now pointing at the last block in the array. */
#if defined (DEBUGGING_MEMCLUSTER)
		memset(curr, 0xa5, new_size);
#endif
		((memcluster_element *)curr)->next = freelists[new_size];
		freelists[new_size] = new;
	}
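
	/*
	 * Worked example (illustrative numbers): with the default mem_target
	 * of 4096 and a quantized request size of 64, the code above carves
	 * 4096 / 64 = 64 fragments and threads them onto freelists[64].
	 */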

	/* The free list uses the "rounded-up" size "new_size". */
#if defined (DEBUGGING_MEMCLUSTER)
	e = freelists[new_size];
	ret = (char *)e + sizeof *e;
	/*
	 * Check to see if this buffer has been written to while on free list.
	 */
	check(ret, 0xa5, new_size - sizeof *e);
	/*
	 * Mark memory we are returning.
	 */
	memset(ret, 0xe5, size);
#else
	ret = freelists[new_size];
#endif
	freelists[new_size] = freelists[new_size]->next;
#if defined(DEBUGGING_MEMCLUSTER)
	e->next = NULL;
	e->size = size;
	e->fencepost = FRONT_FENCEPOST;
#ifdef MEMCLUSTER_RECORD
	e->file = file;
	e->line = line;
	e->next = activelists[size];
	activelists[size] = e;
#endif
	p = (char *)e + sizeof *e + size;
	memcpy(p, &fp, sizeof fp);
#endif

	/*
	 * The stats[] entry is indexed by the _actual_ "size" requested by
	 * the caller, with the caveat (handled in the code above) that any
	 * "size" >= max_size is recorded against max_size.
	 */
	stats[size].gets++;
	stats[size].totalgets++;
	stats[new_size].freefrags--;
	MEMUNLOCK;
#if defined(DEBUGGING_MEMCLUSTER)
	return ((char *)e + sizeof *e);
#else
	return (ret);
#endif
}

/*%
 * This is a call from an external caller,
 * so we want to count this as a user "put".
 */
void
__memput(void *mem, size_t size) {
	__memput_record(mem, size, NULL, 0);
}

void
__memput_record(void *mem, size_t size, const char *file, int line) {
	size_t new_size = quantize(size);
#if defined (DEBUGGING_MEMCLUSTER)
	memcluster_element *e;
	memcluster_element *el;
#ifdef MEMCLUSTER_RECORD
	memcluster_element *prev;
#endif
	fence_t fp;
	char *p;
#endif

	MEMLOCK;

#if !defined (MEMCLUSTER_RECORD)
	UNUSED(file);
	UNUSED(line);
#endif

	REQUIRE(freelists != NULL);

	if (size == 0U) {
		MEMUNLOCK;
		errno = EINVAL;
		return;
	}

#if defined (DEBUGGING_MEMCLUSTER)
	e = (memcluster_element *) ((char *)mem - sizeof *e);
	INSIST(e->fencepost == FRONT_FENCEPOST);
	INSIST(e->size == size);
	p = (char *)e + sizeof *e + size;
	memcpy(&fp, p, sizeof fp);
	INSIST(fp == BACK_FENCEPOST);
	INSIST(((u_long)mem % 4) == 0);
#ifdef MEMCLUSTER_RECORD
	prev = NULL;
	if (size == max_size || new_size >= max_size)
		el = activelists[max_size];
	else
		el = activelists[size];
	while (el != NULL && el != e) {
		prev = el;
		el = el->next;
	}
	INSIST(el != NULL);	/*%< double free */
	if (prev == NULL) {
		if (size == max_size || new_size >= max_size)
			activelists[max_size] = el->next;
		else
			activelists[size] = el->next;
	} else
		prev->next = el->next;
#endif
#endif

	if (size == max_size || new_size >= max_size) {
		/* memput() called on something beyond our upper limit */
#if defined(DEBUGGING_MEMCLUSTER)
		free(e);
#else
		free(mem);
#endif

		INSIST(stats[max_size].gets != 0U);
		stats[max_size].gets--;
		MEMUNLOCK;
		return;
	}

	/* The free list uses the "rounded-up" size "new_size": */
#if defined(DEBUGGING_MEMCLUSTER)
	memset(mem, 0xa5, new_size - sizeof *e); /*%< catch write after free */
	e->size = 0;	/*%< catch double memput() */
#ifdef MEMCLUSTER_RECORD
	e->file = file;
	e->line = line;
#endif
#ifdef MEMCLUSTER_ATEND
	e->next = NULL;
	el = freelists[new_size];
	while (el != NULL && el->next != NULL)
		el = el->next;
	if (el)
		el->next = e;
	else
		freelists[new_size] = e;
#else
	e->next = freelists[new_size];
	freelists[new_size] = (void *)e;
#endif
#else
	((memcluster_element *)mem)->next = freelists[new_size];
	freelists[new_size] = (memcluster_element *)mem;
#endif

	/*
	 * The stats[] entry is indexed by the _actual_ "size" requested by
	 * the caller, with the caveat (handled in the code above) that any
	 * "size" >= max_size is recorded against max_size.
	 */
	INSIST(stats[size].gets != 0U);
	stats[size].gets--;
	stats[new_size].freefrags++;
	MEMUNLOCK;
}

void *
__memget_debug(size_t size, const char *file, int line) {
	void *ptr;
	ptr = __memget_record(size, file, line);
	fprintf(stderr, "%s:%d: memget(%lu) -> %p\n", file, line,
		(u_long)size, ptr);
	return (ptr);
}

void
__memput_debug(void *ptr, size_t size, const char *file, int line) {
	fprintf(stderr, "%s:%d: memput(%p, %lu)\n", file, line, ptr,
		(u_long)size);
	__memput_record(ptr, size, file, line);
}

/*%
 * Print the stats[] on the stream "out" with suitable formatting.
 */
void
memstats(FILE *out) {
	size_t i;
#ifdef MEMCLUSTER_RECORD
	memcluster_element *e;
#endif

	MEMLOCK;

	if (freelists == NULL) {
		MEMUNLOCK;
		return;
	}
	for (i = 1; i <= max_size; i++) {
		const struct stats *s = &stats[i];

		if (s->totalgets == 0U && s->gets == 0U)
			continue;
		fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
			(i == max_size) ? ">=" : "  ",
			(unsigned long)i, s->totalgets, s->gets);
		if (s->blocks != 0U)
			fprintf(out, " (%lu bl, %lu ff)",
				s->blocks, s->freefrags);
		fputc('\n', out);
	}
#ifdef MEMCLUSTER_RECORD
	fprintf(out, "Active Memory:\n");
	for (i = 1; i <= max_size; i++) {
		if ((e = activelists[i]) != NULL)
			while (e != NULL) {
				fprintf(out, "%s:%d %p:%lu\n",
				        e->file != NULL ? e->file :
						"<UNKNOWN>", e->line,
					(char *)e + sizeof *e,
					(u_long)e->size);
				e = e->next;
			}
	}
#endif
	MEMUNLOCK;
}
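
/*
 * Each stats line looks roughly like this (illustrative values only):
 *
 *	     64:          12 gets,           3 rem (1 bl, 61 ff)
 *	>= 1100:           2 gets,           0 rem
 *
 * i.e. lifetime gets, allocations still outstanding, and (when fragments
 * have been carved for that size) basic blocks and free fragments.
 */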

int
memactive(void) {
	size_t i;

	if (stats == NULL)
		return (0);
	for (i = 1; i <= max_size; i++)
		if (stats[i].gets != 0U)
			return (1);
	return (0);
}

/* Private. */

/*%
 * Round up size to a multiple of sizeof(void *).  This guarantees that a
 * block is at least sizeof (void *) bytes, and that we won't violate
 * alignment restrictions, both of which are needed to make lists of blocks.
 */
static size_t
quantize(size_t size) {
	int remainder;
	/*
	 * If there is no remainder for the integer division of
	 *
	 *	(size / P_SIZE)
	 *
	 * then we already have a good size; if not, then we need
	 * to round up the result in order to get a size big
	 * enough to satisfy the request _and_ aligned on P_SIZE boundaries.
	 */
	remainder = size % P_SIZE;
	if (remainder != 0)
		size += P_SIZE - remainder;
#if defined(DEBUGGING_MEMCLUSTER)
	return (size + SMALL_SIZE_LIMIT + sizeof (int));
#else
	return (size);
#endif
}
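
/*
 * For example, on a platform where sizeof(void *) == 8, quantize(13) yields
 * 16 in a normal build; with DEBUGGING_MEMCLUSTER defined the result also
 * includes room for the element header and the trailing fencepost.
 */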

#if defined(DEBUGGING_MEMCLUSTER)
static void
check(unsigned char *a, int value, size_t len) {
	size_t i;
	for (i = 0; i < len; i++)
		INSIST(a[i] == value);
}
#endif

/*! \file */