/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 *	Memory management: malloc(), realloc(), free().
 *
 *	The following #-parameters may be redefined:
 *	SEGMENTED: if defined, memory requests are assumed to be
 *		non-contiguous across calls to GETCORE.
 *	GETCORE: a function to get more core memory. If not SEGMENTED,
 *		GETCORE(0) is assumed to return the next available
 *		address. Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is (char *)(-1).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE. Default is (1024*ALIGN).
 *
 *	This algorithm is based on a best-fit strategy with lists of
 *	free elts maintained in a self-adjusting binary tree. Each list
 *	contains all elts of the same size. The tree is ordered by size.
 *	For results on self-adjusting trees, see the paper:
 *		Self-Adjusting Binary Search Trees,
 *		D. D. Sleator & R. E. Tarjan, JACM 1985.
 *
 *	The header of a block contains the size of the data part in bytes.
 *	Since the size of a block is always a multiple of 4, the low two
 *	bits of the header are free and used as follows:
 *
 *		BIT0:	1 for busy (block is in use), 0 for free.
 *		BIT1:	if the block is busy, this bit is 1 if the
 *			preceding block in contiguous memory is free.
 *			Otherwise, it is always 0.
 */
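
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * how a block size and the two flag bits share one header word.  It
 * assumes the conventional values BIT0 == 0x1 and BIT1 == 0x2 implied
 * by the "low two bits" layout described above; the real definitions
 * live in mallint.h.
 */
#if 0	/* example only */
#include <assert.h>
#include <stddef.h>

#define	EX_BIT0	0x1UL			/* busy bit */
#define	EX_BIT1	0x2UL			/* "preceding block is free" bit */

static void
header_example(void)
{
	size_t header;

	header = 24;			/* 24-byte block, a multiple of 4 */
	header |= EX_BIT0;		/* mark it busy */

	assert((header & EX_BIT0) != 0);		/* busy? */
	assert((header & ~(EX_BIT0 | EX_BIT1)) == 24);	/* true size */
}
#endif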

#include "lint.h"
#include "mallint.h"
#include "mtlib.h"

/*
 * Some abusers of the system (notably java1.2) acquire __malloc_lock
 * in order to prevent threads from holding it while they are being
 * suspended via thr_suspend() or thr_suspend_allmutators().
 * This never worked when alternate malloc() libraries were used
 * because they don't use __malloc_lock for their locking strategy.
 * We leave __malloc_lock as an external variable to satisfy these
 * old programs, but we define a new lock, private to libc, to do the
 * real locking: libc_malloc_lock.  This puts libc's malloc() package
 * on the same footing as all other malloc packages.
 */
mutex_t __malloc_lock = DEFAULTMUTEX;
mutex_t libc_malloc_lock = DEFAULTMUTEX;

static TREE	*Root,		/* root of the free tree */
		*Bottom,	/* the last free chunk in the arena */
		*_morecore(size_t);	/* function to get more core */

static char	*Baddr;		/* current high address of the arena */
static char	*Lfree;		/* last freed block with data intact */

static void	t_delete(TREE *);
static void	t_splay(TREE *);
static void	realfree(void *);
static void	cleanfree(void *);
static void	*_malloc_unlocked(size_t);

#define	FREESIZE (1<<5) /* size for preserving free blocks until next malloc */
#define	FREEMASK (FREESIZE-1)

static void *flist[FREESIZE];	/* list of blocks to be freed on next malloc */
static int freeidx;		/* index of free blocks in flist % FREESIZE */
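
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * flist is used as a ring buffer.  Because FREESIZE is a power of
 * two, masking with FREEMASK is the same as reducing mod FREESIZE,
 * so stepping forward is (i + 1) & FREEMASK and stepping back is
 * (i + FREESIZE - 1) & FREEMASK, exactly as done in _free_unlocked()
 * and _malloc_unlocked() below.
 */
#if 0	/* example only */
static int
ring_forward(int i)
{
	return ((i + 1) & FREEMASK);		/* 31 wraps to 0 */
}

static int
ring_back(int i)
{
	return ((i + FREESIZE - 1) & FREEMASK);	/* 0 wraps to 31 */
}
#endif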

/*
 * Interfaces used only by atfork_init() functions.
 */
void
malloc_locks(void)
{
	(void) mutex_lock(&libc_malloc_lock);
}

void
malloc_unlocks(void)
{
	(void) mutex_unlock(&libc_malloc_lock);
}

/*
 *	Allocation of small blocks
 */
static TREE	*List[MINSIZE/WORDSIZE-1]; /* lists of small blocks */

static void *
_smalloc(size_t size)
{
	TREE	*tp;
	size_t	i;

	ASSERT(size % WORDSIZE == 0);
	/* want to return a unique pointer on malloc(0) */
	if (size == 0)
		size = WORDSIZE;

	/* list to use */
	i = size / WORDSIZE - 1;

	if (List[i] == NULL) {
		TREE *np;
		int n;
		/* number of blocks to get at one time */
#define	NPS (WORDSIZE*8)
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((List[i] = _malloc_unlocked((size + WORDSIZE) * NPS)) == 0)
			return (0);

		/* make them into a linked list */
		for (n = 0, np = List[i]; n < NPS; ++n) {
			tp = np;
			SIZE(tp) = size;
			np = NEXT(tp);
			AFTER(tp) = np;
		}
		AFTER(tp) = NULL;
	}

	/* allocate from the head of the queue */
	tp = List[i];
	List[i] = AFTER(tp);
	SETBIT0(SIZE(tp));
	return (DATA(tp));
}
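
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * the List[] index mapping used by _smalloc().  Requests are already
 * rounded to a multiple of WORDSIZE, so sizes WORDSIZE, 2*WORDSIZE,
 * ..., MINSIZE-WORDSIZE map to slots 0 .. MINSIZE/WORDSIZE-2, which
 * is exactly the capacity of List[].
 */
#if 0	/* example only */
static size_t
small_list_index(size_t size)
{
	ASSERT(size >= WORDSIZE && size < MINSIZE);
	ASSERT(size % WORDSIZE == 0);
	return (size / WORDSIZE - 1);
}
#endif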

void *
malloc(size_t size)
{
	void *ret;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}
	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);
	ret = _malloc_unlocked(size);
	(void) mutex_unlock(&libc_malloc_lock);
	return (ret);
}

static void *
_malloc_unlocked(size_t size)
{
	size_t	n;
	TREE	*tp, *sp;
	size_t	o_bit1;

	COUNT(nmalloc);
	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* see if the last free block can be used */
	if (Lfree) {
		sp = BLOCK(Lfree);
		n = SIZE(sp);
		CLRBITS01(n);
		if (n == size) {
			/*
			 * exact match, use it as is
			 */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			return (DATA(sp));
		} else if (size >= MINSIZE && n > size) {
			/*
			 * got a big enough piece
			 */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			o_bit1 = SIZE(sp) & BIT1;
			SIZE(sp) = n;
			goto leftover;
		}
	}
	o_bit1 = 0;

	/* perform free's of space since last malloc */
	cleanfree(NULL);

	/* small blocks */
	if (size < MINSIZE)
		return (_smalloc(size));

	/* search for an elt of the right size */
	sp = NULL;
	n  = 0;
	if (Root) {
		tp = Root;
		for (;;) {
			/* branch left */
			if (SIZE(tp) >= size) {
				if (n == 0 || n >= SIZE(tp)) {
					sp = tp;
					n = SIZE(tp);
				}
				if (LEFT(tp))
					tp = LEFT(tp);
				else
					break;
			} else { /* branch right */
				if (RIGHT(tp))
					tp = RIGHT(tp);
				else
					break;
			}
		}

		if (sp) {
			t_delete(sp);
		} else if (tp != Root) {
			/* make the searched-to element the root */
			t_splay(tp);
			Root = tp;
		}
	}

	/* if no fit was found in the tree */
	if (!sp) {
		if (Bottom && size <= SIZE(Bottom)) {
			sp = Bottom;
			CLRBITS01(SIZE(sp));
		} else if ((sp = _morecore(size)) == NULL) /* no more memory */
			return (NULL);
	}

	/* tell the forward neighbor that we're busy */
	CLRBIT1(SIZE(NEXT(sp)));

	ASSERT(ISBIT0(SIZE(NEXT(sp))));

leftover:
	/* if the leftover is enough for a new free piece */
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {
		n -= WORDSIZE;
		SIZE(sp) = size;
		tp = NEXT(sp);
		SIZE(tp) = n|BIT0;
		realfree(DATA(tp));
	} else if (BOTTOM(sp))
		Bottom = NULL;

	/* return the allocated space */
	SIZE(sp) |= BIT0 | o_bit1;
	return (DATA(sp));
}
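
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * the tree walk above is an ordinary best-fit search over a BST keyed
 * by size.  Restated over a hypothetical node type with plain members,
 * the invariant is: remember the smallest node seen so far that still
 * satisfies the request, branch left when the current node is big
 * enough, branch right otherwise.
 */
#if 0	/* example only */
struct ex_node {
	size_t		size;
	struct ex_node	*left, *right;
};

static struct ex_node *
best_fit(struct ex_node *root, size_t want)
{
	struct ex_node	*best = NULL;

	while (root != NULL) {
		if (root->size >= want) {
			/* fits: remember it, then look for a tighter fit */
			if (best == NULL || root->size < best->size)
				best = root;
			root = root->left;
		} else {
			/* too small: only larger nodes are to the right */
			root = root->right;
		}
	}
	return (best);
}
#endif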


/*
 * realloc().
 *
 * If the block size is increasing, we try forward merging first.
 * This is not best-fit but it avoids some data recopying.
 */
void *
realloc(void *old, size_t size)
{
	TREE	*tp, *np;
	size_t	ts;
	char	*new;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}

	assert_no_libc_locks_held();
	COUNT(nrealloc);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* pointer to the block */
	(void) mutex_lock(&libc_malloc_lock);
	if (old == NULL) {
		new = _malloc_unlocked(size);
		(void) mutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/* perform free's of space since last malloc */
	cleanfree(old);

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	tp = BLOCK(old);
	ts = SIZE(tp);

	/* if the block was freed, data has been destroyed. */
	if (!ISBIT0(ts)) {
		(void) mutex_unlock(&libc_malloc_lock);
		return (NULL);
	}

	/* nothing to do */
	CLRBITS01(SIZE(tp));
	if (size == SIZE(tp)) {
		SIZE(tp) = ts;
		(void) mutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
		/* free if size is zero */
		if (size == 0) {
			SETOLD01(SIZE(tp), ts);
			_free_unlocked(old);
			(void) mutex_unlock(&libc_malloc_lock);
			return (NULL);
		} else {
			goto call_malloc;
		}
	}

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {
		np = NEXT(tp);
		if (!ISBIT0(SIZE(np))) {
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;
			if (np != Bottom)
				t_delete(np);
			else
				Bottom = NULL;
			CLRBIT1(SIZE(NEXT(np)));
		}

#ifndef SEGMENTED
		/* not enough & at TRUE end of memory, try extending core */
		if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {
			Bottom = tp;
			if ((tp = _morecore(size)) == NULL) {
				tp = Bottom;
				Bottom = NULL;
			}
		}
#endif
	}

	/* got enough space to use */
	if (size <= SIZE(tp)) {
		size_t n;

chop_big:
		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {
			n -= WORDSIZE;
			SIZE(tp) = size;
			np = NEXT(tp);
			SIZE(np) = n|BIT0;
			realfree(DATA(np));
		} else if (BOTTOM(tp))
			Bottom = NULL;

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		(void) mutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* call malloc to get a new block */
call_malloc:
	SETOLD01(SIZE(tp), ts);
	if ((new = _malloc_unlocked(size)) != NULL) {
		CLRBITS01(ts);
		if (ts > size)
			ts = size;
		MEMCOPY(new, old, ts);
		_free_unlocked(old);
		(void) mutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks. Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */
	CLRBITS01(SIZE(tp));
	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp)) {			/* case 1. */
			SETOLD01(SIZE(tp), ts);
			(void) mutex_unlock(&libc_malloc_lock);
			return (old);
		} else if (size < MINSIZE) {		/* case 2. */
			size = MINSIZE;
			goto call_malloc;
		}
	} else if (size < MINSIZE) {			/* case 3. */
		size = MINSIZE;
		goto chop_big;
	} else if (ISBIT1(ts) &&
	    (SIZE(np = LAST(tp)) + SIZE(tp) + WORDSIZE) >= size) {
		ASSERT(!ISBIT0(SIZE(np)));
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		/*
		 * Since the source and destination regions may
		 * overlap, use memmove() rather than memcpy().
		 */
		(void) memmove(DATA(np), old, SIZE(tp));
		old = DATA(np);
		tp = np;
		CLRBIT1(ts);
		goto chop_big;
	}
	SETOLD01(SIZE(tp), ts);
	(void) mutex_unlock(&libc_malloc_lock);
	return (NULL);
}
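
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * because free() below is delayed until the next malloc()/realloc(),
 * this implementation lets realloc() be applied to a block freed
 * since the last call to malloc() or realloc(); cleanfree() is told
 * to spare that block.  A caller-level view:
 */
#if 0	/* example only */
	char *p = malloc(40);
	free(p);		/* delayed: data still intact on flist */
	p = realloc(p, 80);	/* still works, until the next malloc() */
#endif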


/*
 * realfree().
 *
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying. This strategy does not guarantee the amortized
 * O(n log n) behaviour for the insert/delete/find set of operations
 * on the tree. In practice, however, free is much more infrequent
 * than malloc/realloc and the tree searches performed by these
 * functions adequately keep the tree in balance.
 */
static void
realfree(void *old)
{
	TREE	*tp, *sp, *np;
	size_t	ts, size;

	COUNT(nfree);

	/* pointer to the block */
	tp = BLOCK(old);
	ts = SIZE(tp);
	if (!ISBIT0(ts))
		return;
	CLRBITS01(SIZE(tp));

	/* small block, put it in the right linked list */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		AFTER(tp) = List[ts];
		List[ts] = tp;
		return;
	}

	/* see if coalescing with next block is warranted */
	np = NEXT(tp);
	if (!ISBIT0(SIZE(np))) {
		if (np != Bottom)
			t_delete(np);
		SIZE(tp) += SIZE(np) + WORDSIZE;
	}

	/* the same with the preceding block */
	if (ISBIT1(ts)) {
		np = LAST(tp);
		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		tp = np;
	}

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* the last word of the block contains self's address */
	*(SELFP(tp)) = tp;

	/* set bottom block, or insert in the free tree */
	if (BOTTOM(tp))
		Bottom = tp;
	else {
		/* search for the place to insert */
		if (Root) {
			size = SIZE(tp);
			np = Root;
			for (;;) {
				if (SIZE(np) > size) {
					if (LEFT(np))
						np = LEFT(np);
					else {
						LEFT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else if (SIZE(np) < size) {
					if (RIGHT(np))
						np = RIGHT(np);
					else {
						RIGHT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else {
					if ((sp = PARENT(np)) != NULL) {
						if (np == LEFT(sp))
							LEFT(sp) = tp;
						else
							RIGHT(sp) = tp;
						PARENT(tp) = sp;
					} else
						Root = tp;

					/* insert to head of list */
					if ((sp = LEFT(np)) != NULL)
						PARENT(sp) = tp;
					LEFT(tp) = sp;

					if ((sp = RIGHT(np)) != NULL)
						PARENT(sp) = tp;
					RIGHT(tp) = sp;

					/* doubly-linked list */
					LINKFOR(tp) = np;
					LINKBAK(np) = tp;
					SETNOTREE(np);

					break;
				}
			}
		} else
			Root = tp;
	}

	/* tell next block that this one is free */
	SETBIT1(SIZE(NEXT(tp)));

	ASSERT(ISBIT0(SIZE(NEXT(tp))));
}

/*
 * Get more core. Gaps in memory are noted as busy blocks.
 */
static TREE *
_morecore(size_t size)
{
	TREE	*tp;
	ssize_t	n, offset;
	char	*addr;
	ssize_t	nsize;

	/* compute new amount of memory to get */
	tp = Bottom;
	n = (ssize_t)size + 2 * WORDSIZE;
	addr = GETCORE(0);

	if (addr == ERRCORE)
		return (NULL);

	/* need to pad size out so that addr is aligned */
	if ((((uintptr_t)addr) % ALIGN) != 0)
		offset = ALIGN - (uintptr_t)addr % ALIGN;
	else
		offset = 0;

#ifndef SEGMENTED
	/* if not segmented memory, what we need may be smaller */
	if (addr == Baddr) {
		n -= WORDSIZE;
		if (tp != NULL)
			n -= SIZE(tp);
	}
#endif

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;
	nsize = n + offset;

	/* check if nsize request could overflow in GETCORE */
	if ((size_t)nsize > MAX_MALLOC - (uintptr_t)addr) {
		errno = ENOMEM;
		return (NULL);
	}

	if ((size_t)nsize <= MAX_GETCORE) {
		if (GETCORE(nsize) == ERRCORE)
			return (NULL);
	} else {
		intptr_t	delta;
		/*
		 * The value required is too big for GETCORE() to deal with
		 * in one go, so call GETCORE() at most 2 times instead.
		 * The argument to GETCORE() must be a multiple of ALIGN.
		 * If it is not, GETCORE(-MAX_GETCORE) will not return the
		 * brk point to its previous value, but will be ALIGN more.
		 * This would leave a small hole.
		 */
		delta = MAX_GETCORE;
		while (delta > 0) {
			if (GETCORE(delta) == ERRCORE) {
				if (addr != GETCORE(0))
					(void) GETCORE(-MAX_GETCORE);
				return (NULL);
			}
			nsize -= MAX_GETCORE;
			delta = nsize;
		}
	}

	/* contiguous memory */
	if (addr == Baddr) {
		ASSERT(offset == 0);
		if (tp) {
			addr = (char *)tp;
			n += SIZE(tp) + 2 * WORDSIZE;
		} else {
			addr = Baddr - WORDSIZE;
			n += WORDSIZE;
		}
	} else
		addr += offset;

	/* new bottom address */
	Baddr = addr + n;

	/* new bottom block */
	tp = (TREE *)(uintptr_t)addr;
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
	}

	return (tp);
}
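
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * the CORESIZE rounding above uses the classic round-up-to-a-multiple
 * idiom.  For any n > 0, ((n - 1) / unit + 1) * unit is the smallest
 * multiple of unit that is >= n, e.g. with unit = 4096:
 *	n = 1     -> 4096
 *	n = 4096  -> 4096
 *	n = 4097  -> 8192
 */
#if 0	/* example only */
static ssize_t
round_up(ssize_t n, ssize_t unit)
{
	return (((n - 1) / unit + 1) * unit);
}
#endif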


/*
 * Tree rotation functions (BU: bottom-up, TD: top-down)
 */

#define	LEFT1(x, y)		\
			if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
			if ((PARENT(y) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
				else RIGHT(PARENT(y)) = y;\
			LEFT(y) = x; PARENT(x) = y

#define	RIGHT1(x, y)		\
			if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
			if ((PARENT(y) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
				else RIGHT(PARENT(y)) = y;\
			RIGHT(y) = x; PARENT(x) = y

#define	BULEFT2(x, y, z)	\
			if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
			if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			LEFT(z) = y; PARENT(y) = z; LEFT(y) = x; PARENT(x) = y

#define	BURIGHT2(x, y, z)	\
			if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
			if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			RIGHT(z) = y; PARENT(y) = z; RIGHT(y) = x; PARENT(x) = y

#define	TDLEFT2(x, y, z)	\
			if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			PARENT(x) = z; LEFT(z) = x;

#define	TDRIGHT2(x, y, z)	\
			if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			PARENT(x) = z; RIGHT(z) = x;
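
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * LEFT1(x, y) is a single left rotation with y == RIGHT(x).  Written
 * as a function over a hypothetical node type:
 *
 *	    x                 y
 *	   / \               / \
 *	  a   y     -->     x   c
 *	     / \           / \
 *	    b   c         a   b
 */
#if 0	/* example only */
struct ex_node {
	struct ex_node	*left, *right, *parent;
};

static void
rotate_left(struct ex_node *x)
{
	struct ex_node	*y = x->right;

	/* y's left subtree (b) becomes x's right subtree */
	if ((x->right = y->left) != NULL)
		x->right->parent = x;

	/* y replaces x under x's old parent */
	if ((y->parent = x->parent) != NULL) {
		if (y->parent->left == x)
			y->parent->left = y;
		else
			y->parent->right = y;
	}
	y->left = x;
	x->parent = y;
}
#endif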

/*
 * Delete a tree element
 */
static void
t_delete(TREE *op)
{
	TREE	*tp, *sp, *gp;

	/* if this is a non-tree node */
	if (ISNOTREE(op)) {
		tp = LINKBAK(op);
		if ((sp = LINKFOR(op)) != NULL)
			LINKBAK(sp) = tp;
		LINKFOR(tp) = sp;
		return;
	}

	/* make op the root of the tree */
	if (PARENT(op))
		t_splay(op);

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {
		PARENT(tp) = NULL;
		if ((sp = LEFT(op)) != NULL)
			PARENT(sp) = tp;
		LEFT(tp) = sp;

		if ((sp = RIGHT(op)) != NULL)
			PARENT(sp) = tp;
		RIGHT(tp) = sp;

		Root = tp;
		return;
	}

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {
		PARENT(tp) = NULL;

		if (RIGHT(op)) {
			/* make the right-end of the left subtree its root */
			while ((sp = RIGHT(tp)) != NULL) {
				if ((gp = RIGHT(sp)) != NULL) {
					TDLEFT2(tp, sp, gp);
					tp = gp;
				} else {
					LEFT1(tp, sp);
					tp = sp;
				}
			}

			/* hook the right subtree of op to the above elt */
			RIGHT(tp) = RIGHT(op);
			PARENT(RIGHT(tp)) = tp;
		}
	} else if ((tp = RIGHT(op)) != NULL)	/* no left subtree */
		PARENT(tp) = NULL;

	Root = tp;
}

/*
 * Bottom up splaying (simple version).
 * The basic idea is to roughly cut in half the
 * path from Root to tp and make tp the new root.
 */
static void
t_splay(TREE *tp)
{
	TREE	*pp, *gp;

	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		/* grandparent of tp */
		gp = PARENT(pp);

		/* tp is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);
			} else {
				RIGHT1(pp, tp);
			}
		} else {
			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
				BULEFT2(gp, pp, tp);
			} else {
				LEFT1(pp, tp);
			}
		}
	}
}


/*
 *	free().
 *	Performs a delayed free of the block pointed to
 *	by old. The pointer to old is saved on a list, flist,
 *	until the next malloc or realloc. At that time, all the
 *	blocks pointed to in flist are actually freed via
 *	realfree(). This allows the contents of free blocks to
 *	remain undisturbed until the next malloc or realloc.
 */
void
free(void *old)
{
	if (!primary_link_map) {
		errno = ENOTSUP;
		return;
	}
	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);
	_free_unlocked(old);
	(void) mutex_unlock(&libc_malloc_lock);
}


void
_free_unlocked(void *old)
{
	int	i;

	if (old == NULL)
		return;

	/*
	 * Make sure the same data block is not freed twice.
	 * 3 cases are checked.  It returns immediately if any
	 * one of the conditions is true.
	 *	1. Last freed.
	 *	2. Not in use or freed already.
	 *	3. In the free list.
	 */
	if (old == Lfree)
		return;
	if (!ISBIT0(SIZE(BLOCK(old))))
		return;
	for (i = 0; i < freeidx; i++)
		if (old == flist[i])
			return;

	if (flist[freeidx] != NULL)
		realfree(flist[freeidx]);
	flist[freeidx] = Lfree = old;
	freeidx = (freeidx + 1) & FREEMASK; /* one forward */
}
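
/*
 * Illustrative sketch (editor's addition, not part of the library):
 * what the three checks above catch, from a caller's point of view.
 */
#if 0	/* example only */
	void *p = malloc(16);
	free(p);	/* deferred: p goes on flist and Lfree = p */
	free(p);	/* double free, caught by the "old == Lfree" test */
#endif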

/*
 * cleanfree() frees all the blocks pointed to by flist.
 *
 * realloc() should work if it is called with a pointer
 * to a block that was freed since the last call to malloc() or
 * realloc(). If cleanfree() is called from realloc(), ptr
 * is set to the old block and that block should not be
 * freed since it is actually being reallocated.
 */
static void
cleanfree(void *ptr)
{
	char	**flp;

	flp = (char **)&(flist[freeidx]);
	for (;;) {
		if (flp == (char **)&(flist[0]))
			flp = (char **)&(flist[FREESIZE]);
		if (*--flp == NULL)
			break;
		if (*flp != ptr)
			realfree(*flp);
		*flp = NULL;
	}
	freeidx = 0;
	Lfree = NULL;
}