/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2010 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmbest(){}

#else

#include	"vmhdr.h"

/*	Best-fit allocation method. This is based on a best-fit strategy
**	using a splay tree for storage of lists of free blocks of the same
**	size. Recent free blocks may be cached for fast reuse.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
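
/*	A minimal usage sketch of this method, assuming only the public
**	vmalloc interface (vmopen, vmalloc, vmresize, vmfree, vmclose) and
**	the Vmbest/Vmdcsbrk handles defined at the bottom of this file;
**	illustration only, not part of the method itself:
**
**		Vmalloc_t* vm = vmopen(Vmdcsbrk, Vmbest, 0);
**		Void_t*    p  = vmalloc(vm, 100);
**		p = vmresize(vm, p, 200, VM_RSCOPY|VM_RSMOVE);
**		vmfree(vm, p);
**		vmclose(vm);
**
**	Block bookkeeping used throughout this file: SIZE(b) holds a block's
**	size with its low bits overloaded as flags -- BUSY (allocated), JUNK
**	(freed but not yet coalesced) and PFREE (the adjacent block before b
**	is free). A fully free block also stores a pointer to itself in its
**	last word so that a PFREE-marked successor can find it via LAST/SELF
**	for backward merging.
*/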

#ifdef DEBUG
static int	N_free;		/* # of free calls			*/
static int	N_alloc;	/* # of alloc calls			*/
static int	N_resize;	/* # of resize calls			*/
static int	N_wild;		/* # allocated from the wild block	*/
static int	N_last;		/* # allocated from last free block	*/
static int	N_reclaim;	/* # of bestreclaim calls		*/

#undef	VM_TRUST		/* always check for locking, etc.	*/
#define	VM_TRUST	0
#endif /*DEBUG*/

#if _BLD_posix
#define logmsg(d,a ...)	logsrc(d,__FILE__,__LINE__,a)

extern int	logsrc(int, const char*, int, const char*, ...);
#endif /*_BLD_posix*/

#define COMPACT		8	/* factor to decide when to compact	*/

/* Check to see if a block is in the free tree */
#if __STD_C
static int vmintree(Block_t* node, Block_t* b)
#else
static int vmintree(node,b)
Block_t*	node;
Block_t*	b;
#endif
{	Block_t*	t;

	for(t = node; t; t = LINK(t))
		if(t == b)
			return 1;
	if(LEFT(node) && vmintree(LEFT(node),b))
		return 1;
	if(RIGHT(node) && vmintree(RIGHT(node),b))
		return 1;
	return 0;
}

#if __STD_C
static int vmonlist(Block_t* list, Block_t* b)
#else
static int vmonlist(list,b)
Block_t*	list;
Block_t*	b;
#endif
{
	for(; list; list = LINK(list))
		if(list == b)
			return 1;
	return 0;
}

/* Check to see if a block is known to be free */
#if __STD_C
static int vmisfree(Vmdata_t* vd, Block_t* b)
#else
static int vmisfree(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	if(SIZE(b) & (BUSY|JUNK|PFREE))
		return 0;

	if(b == vd->wild)
		return 1;

	if(SIZE(b) < MAXTINY)
		return vmonlist(TINY(vd)[INDEX(SIZE(b))], b);

	if(vd->root)
		return vmintree(vd->root, b);

	return 0;
}

/* Check to see if a block is known to be junked */
#if __STD_C
static int vmisjunk(Vmdata_t* vd, Block_t* b)
#else
static int vmisjunk(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	Block_t*	t;

	if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0)
		return 0;

	if(b == vd->free) /* recently freed */
		return 1;

	/* check the list that b is supposed to be in */
	for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t))
		if(t == b)
			return 1;

	/* on occasion, b may be put onto the catch-all list */
	if(C_INDEX(SIZE(b)) < S_CACHE)
		for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
			if(t == b)
				return 1;

	return 0;
}

/* check to see if the free tree is in good shape */
#if __STD_C
static int vmchktree(Block_t* node)
#else
static int vmchktree(node)
Block_t*	node;
#endif
{	Block_t*	t;

	if(SIZE(node) & BITS)
		{ /**/ASSERT(0); return -1; }

	for(t = LINK(node); t; t = LINK(t))
		if(SIZE(t) != SIZE(node))
			{ /**/ASSERT(0); return -1; }

	if((t = LEFT(node)) )
	{	if(SIZE(t) >= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}
	if((t = RIGHT(node)) )
	{	if(SIZE(t) <= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}

	return 0;
}

#if __STD_C
int _vmbestcheck(Vmdata_t* vd, Block_t* freeb)
#else
int _vmbestcheck(vd, freeb)
Vmdata_t*	vd;
Block_t*	freeb; /* known to be free but not on any free list */
#endif
{
	reg Seg_t	*seg;
	reg Block_t	*b, *endb, *nextb;
	int		rv = 0;

	if(!CHECK())
		return 0;

	/* make sure the free tree is still in shape */
	if(vd->root && vmchktree(vd->root) < 0 )
		{ rv = -1; /**/ASSERT(0); }

	for(seg = vd->seg; seg && rv == 0; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		for(; b < endb && rv == 0; b = nextb)
		{	nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );

			if(!ISBUSY(SIZE(b)) ) /* a completely free block */
			{	/* there should be no marked bits of any type */
				if(SIZE(b) & (BUSY|JUNK|PFREE) )
					{ rv = -1; /**/ASSERT(0); }

				/* next block must be busy and marked PFREE */
				if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* must have a self-reference pointer */
				if(*SELF(b) != b)
					{ rv = -1; /**/ASSERT(0); }

				/* segment pointer should be well-defined */
				if(!TINIEST(b) && SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* must be on a free list */
				if(b != freeb && !vmisfree(vd, b) )
					{ rv = -1; /**/ASSERT(0); }
			}
			else
			{	/* segment pointer should be well-defined */
				if(SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* next block should not be marked PFREE */
				if(ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if PFREE, last block should be free */
				if(ISPFREE(SIZE(b)) && LAST(b) != freeb &&
				   !vmisfree(vd, LAST(b)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if free but unreclaimed, should be junk */
				if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b))
					{ rv = -1; /**/ASSERT(0); }
			}
		}
	}

	return rv;
}

/* Tree rotation functions */
#define RROTATE(x,y)	(LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
#define LROTATE(x,y)	(RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
#define RLINK(s,x)	((s) = LEFT(s) = (x))
#define LLINK(s,x)	((s) = RIGHT(s) = (x))
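
/*	A note on the macros above, inferred from their use in bestsearch()
**	and bestreclaim() below: RROTATE(x,y) rotates y, known to be
**	LEFT(x), up over x; LROTATE is the mirror image. RLINK/LLINK hang
**	the current node off the growing right/left tree of a standard
**	top-down splay, in which the search path is split into a left tree
**	of nodes smaller than the target size and a right tree of larger
**	nodes, later grafted back together around the node found. For
**	example, ordering nodes by SIZE():
**
**		  (x=20)                    (y=10)
**		  /    \      RROTATE       /    \
**	    (y=10)  (30)    ------->    (5)   (x=20)
**	    /    \                            /    \
**	  (5)    (15)                      (15)    (30)
**
**	The in-order arrangement of sizes is preserved.
*/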

/* Find and delete a suitable element in the free tree. */
#if __STD_C
static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
#else
static Block_t* bestsearch(vd, size, wanted)
Vmdata_t*	vd;
reg size_t	size;
Block_t*	wanted;
#endif
{
	reg size_t	s;
	reg Block_t	*t, *root, *l, *r;
	Block_t		link;

	/* extracting a tiniest block from its list */
	if((root = wanted) && size == TINYSIZE)
	{	reg Seg_t*	seg;

		l = TLEFT(root);
		if((r = LINK(root)) )
			TLEFT(r) = l;
		if(l)
			LINK(l) = r;
		else	TINY(vd)[0] = r;

		seg = vd->seg;
		if(!seg->next)
			SEG(root) = seg;
		else for(;; seg = seg->next)
		{	if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr &&
			   (Vmuchar_t*)root < seg->baddr)
			{	SEG(root) = seg;
				break;
			}
		}

		return root;
	}

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);

	/* find the right one to delete */
	l = r = &link;
	if((root = vd->root) ) do
	{	/**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
		if(size == (s = SIZE(root)) )
			break;
		if(size < s)
		{	if((t = LEFT(root)) )
			{	if(size <= (s = SIZE(t)) )
				{	RROTATE(root,t);
					if(size == s)
						break;
					t = LEFT(root);
				}
				else
				{	LLINK(l,t);
					t = RIGHT(t);
				}
			}
			RLINK(r,root);
		}
		else
		{	if((t = RIGHT(root)) )
			{	if(size >= (s = SIZE(t)) )
				{	LROTATE(root,t);
					if(size == s)
						break;
					t = RIGHT(root);
				}
				else
				{	RLINK(r,t);
					t = LEFT(t);
				}
			}
			LLINK(l,root);
		}
		/**/ ASSERT(root != t);
	} while((root = t) );

	if(root)	/* found it, now isolate it */
	{	RIGHT(l) = LEFT(root);
		LEFT(r) = RIGHT(root);
	}
	else		/* nothing exactly fit	*/
	{	LEFT(r) = NIL(Block_t*);
		RIGHT(l) = NIL(Block_t*);

		/* grab the least one from the right tree */
		if((root = LEFT(&link)) )
		{	while((t = LEFT(root)) )
				RROTATE(root,t);
			LEFT(&link) = RIGHT(root);
		}
	}

	if(root && (r = LINK(root)) )
	{	/* head of a linked list; use the next one for the root */
		LEFT(r) = RIGHT(&link);
		RIGHT(r) = LEFT(&link);
	}
	else if(!(r = LEFT(&link)) )
		r = RIGHT(&link);
	else /* graft left tree to right tree */
	{	while((t = LEFT(r)) )
			RROTATE(r,t);
		LEFT(r) = RIGHT(&link);
	}
	vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r)));

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
	/**/ASSERT(!wanted || wanted == root);

	return root;
}

/* Reclaim all delayed free blocks into the free tree */
#if __STD_C
static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
#else
static int bestreclaim(vd, wanted, c)
reg Vmdata_t*	vd;
Block_t*	wanted;
int		c;
#endif
{
	reg size_t	size, s;
	reg Block_t	*fp, *np, *t, *list;
	reg int		n, saw_wanted;
	reg Seg_t	*seg;

	/**/COUNT(N_reclaim);
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	if((fp = vd->free) )
	{	LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp;
		vd->free = NIL(Block_t*);
	}

	saw_wanted = wanted ? 0 : 1;
	for(n = S_CACHE; n >= c; --n)
	{	list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*);
		while((fp = list) )
		{	/* Note that below here we allow ISJUNK blocks to be
			** forward-merged even though they are not removed from
			** the list immediately. In this way, the list is
			** scanned only once. It works because the LINK and SIZE
			** fields are not destroyed during the merging. This can
			** be seen by observing that a tiniest block has a 2-word
			** header and a 2-word body. Merging a tiniest block
			** (1seg) and the next block (2seg) looks like this:
			**	1seg  size  link  left  2seg size link left ....
			**	1seg  size  link  left  rite xxxx xxxx .... self
			** After the merge, the 2seg word is replaced by the RIGHT
			** pointer of the new block and somewhere beyond the
			** two xxxx fields, the SELF pointer will replace some
			** other word. The important part is that the two xxxx
			** fields are kept intact.
			*/
			list = LINK(list); /**/ASSERT(!vmonlist(list,fp));

			size = SIZE(fp);
			if(!ISJUNK(size))	/* already done */
				continue;

			if(_Vmassert & VM_region)
			{	/* see if this address is from region */
				for(seg = vd->seg; seg; seg = seg->next)
					if(fp >= SEGBLOCK(seg) && fp < (Block_t*)seg->baddr )
						break;
				if(!seg) /* must be a bug in application code! */
				{	/**/ ASSERT(seg != NIL(Seg_t*));
					continue;
				}
			}

			if(ISPFREE(size))	/* backward merge */
			{	fp = LAST(fp);
#if _BLD_posix
				if (fp < (Block_t*)0x00120000)
				{
					logmsg(0, "bestreclaim fp=%p", fp);
					ASSERT(!fp);
				}
#endif
				s = SIZE(fp); /**/ASSERT(!(s&BITS));
				REMOVE(vd,fp,INDEX(s),t,bestsearch);
				size = (size&~BITS) + s + sizeof(Head_t);
			}
			else	size &= ~BITS;

			for(;;)	/* forward merge */
			{	np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t));
#if _BLD_posix
				if (np < (Block_t*)0x00120000)
				{
					logmsg(0, "bestreclaim np=%p", np);
					ASSERT(!np);
				}
#endif
				s = SIZE(np);	/**/ASSERT(s > 0);
				if(!ISBUSY(s))
				{	/**/ASSERT((s&BITS) == 0);
					if(np == vd->wild)
						vd->wild = NIL(Block_t*);
					else	REMOVE(vd,np,INDEX(s),t,bestsearch);
				}
				else if(ISJUNK(s))
				{	/* reclaim any touched junk list */
					if((int)C_INDEX(s) < c)
						c = C_INDEX(s);
					SIZE(np) = 0;
					CLRBITS(s);
				}
				else	break;
				size += s + sizeof(Head_t);
			}
			SIZE(fp) = size;

			/* tell next block that this one is free */
			np = NEXT(fp);	/**/ASSERT(ISBUSY(SIZE(np)));
					/**/ASSERT(!ISJUNK(SIZE(np)));
			SETPFREE(SIZE(np));
			*(SELF(fp)) = fp;

			if(fp == wanted) /* to be consumed soon */
			{	/**/ASSERT(!saw_wanted); /* should be seen just once */
				saw_wanted = 1;
				continue;
			}

			/* wilderness preservation: a free block bordering the
			** end of the heap segment is kept aside as vd->wild
			** instead of entering the free tree, so bestcompact()
			** can later truncate it back to the system.
			*/
			if(np->body.data >= vd->seg->baddr)
			{	vd->wild = fp;
				continue;
			}

			/* tiny block goes to tiny list */
			if(size < MAXTINY)
			{	s = INDEX(size);
				np = LINK(fp) = TINY(vd)[s];
				if(s == 0)	/* TINIEST block */
				{	if(np)
						TLEFT(np) = fp;
					TLEFT(fp) = NIL(Block_t*);
				}
				else
				{	if(np)
						LEFT(np)  = fp;
					LEFT(fp) = NIL(Block_t*);
					SETLINK(fp);
				}
				TINY(vd)[s] = fp;
				continue;
			}

			LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
			if(!(np = vd->root) )	/* inserting into an empty tree	*/
			{	vd->root = fp;
				continue;
			}

			size = SIZE(fp);
			while(1)	/* leaf insertion */
			{	/**/ASSERT(np != fp);
				if((s = SIZE(np)) > size)
				{	if((t = LEFT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	LEFT(np) = fp;
						break;
					}
				}
				else if(s < size)
				{	if((t = RIGHT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	RIGHT(np) = fp;
						break;
					}
				}
				else /* s == size */
				{	if((t = LINK(np)) )
					{	LINK(fp) = t;
						LEFT(t) = fp;
					}
					LINK(np) = fp;
					LEFT(fp) = np;
					SETLINK(fp);
					break;
				}
			}
		}
	}

	/**/ASSERT(!wanted || saw_wanted == 1);
	/**/ASSERT(_vmbestcheck(vd, wanted) == 0);
	return saw_wanted;
}

#if __STD_C
static int bestcompact(Vmalloc_t* vm)
#else
static int bestcompact(vm)
Vmalloc_t*	vm;
#endif
{
	reg Seg_t	*seg, *next;
	reg Block_t	*bp, *t;
	reg size_t	size, segsize, round;
	reg int		local, inuse;
	reg Vmdata_t*	vd = vm->data;

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);
		if(ISLOCK(vd,local))
		{	CLRINUSE(vd, inuse);
			return -1;
		}
		SETLOCK(vd,local);
	}

	bestreclaim(vd,NIL(Block_t*),0);

	for(seg = vd->seg; seg; seg = next)
	{	next = seg->next;

		bp = BLOCK(seg->baddr);
		if(!ISPFREE(SIZE(bp)) )
			continue;

		bp = LAST(bp);	/**/ASSERT(vmisfree(vd,bp));
		size = SIZE(bp);
		if(bp == vd->wild)
		{	/* During large block allocations, _Vmextend may have
			** enlarged the rounding factor. Reducing it a bit here
			** helps avoid acquiring too much raw memory later.
			*/
			if((round = vm->disc->round) == 0)
				round = _Vmpagesize;
			if(size > COMPACT*vd->incr && vd->incr > round)
				vd->incr /= 2;

			/* for the bottom segment, we don't necessarily want
			** to return raw memory too early. vd->pool has an
			** approximation of the average size of recently freed
			** blocks. If this is large, the application is managing
			** large blocks so we throttle back memory chopping
			** to avoid thrashing the underlying memory system.
			*/
			if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool)
				continue;

			vd->wild = NIL(Block_t*);
			vd->pool = 0;
		}
		else	REMOVE(vd,bp,INDEX(size),t,bestsearch);
		CLRPFREE(SIZE(NEXT(bp)));

		if(size < (segsize = seg->size))
			size += sizeof(Head_t);

		if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0)
		{	if(size >= segsize) /* entire segment deleted */
				continue;
			/**/ASSERT(SEG(BLOCK(seg->baddr)) == seg);

			if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0)
				SIZE(bp) = size - sizeof(Head_t);
			else	bp = NIL(Block_t*);
		}

		if(bp)
		{	/**/ ASSERT(SIZE(bp) >= BODYSIZE);
			/**/ ASSERT(SEGWILD(bp));
			/**/ ASSERT(!vd->root || !vmintree(vd->root,bp));
			SIZE(bp) |= BUSY|JUNK;
			LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
			CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);

	CLRLOCK(vd,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	CLRINUSE(vd, inuse);
	return 0;
}

#if __STD_C
static Void_t* bestalloc(Vmalloc_t* vm, reg size_t size )
#else
static Void_t* bestalloc(vm,size)
Vmalloc_t*	vm;	/* region allocating from	*/
reg size_t	size;	/* desired block size		*/
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg size_t	s;
	reg int		n;
	reg Block_t	*tp, *np;
	reg int		local, inuse;
	size_t		orgsize = 0;

	VMOPTIONS();

	/**/COUNT(N_alloc);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST))
	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);
		orgsize = size;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	/**/ ASSERT(HEADSIZE == sizeof(Head_t));
	/**/ ASSERT(BODYSIZE == sizeof(Body_t));
	/**/ ASSERT((ALIGN%(BITS+1)) == 0 );
	/**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
	/**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
	/**/ ASSERT((BODYSIZE%ALIGN) == 0 );
	/**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );

	/* for ANSI requirement that malloc(0) returns non-NULL pointer */
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
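
	/* e.g. (illustration only, with ALIGN == 8 and BODYSIZE == 16 as
	** assumed sample values): requests of 0 or 10 bytes become
	** BODYSIZE == 16, and a request of 17 bytes becomes
	** ROUND(17,8) == 24, so every block can hold a free-block body.
	*/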

	if((tp = vd->free) )	/* reuse last free piece if appropriate */
	{	/**/ASSERT(ISBUSY(SIZE(tp)) );
		/**/ASSERT(ISJUNK(SIZE(tp)) );
		/**/COUNT(N_last);

		vd->free = NIL(Block_t*);
		if((s = SIZE(tp)) >= size && s < (size << 1) )
		{	if(s >= size + (sizeof(Head_t)+BODYSIZE) )
			{	SIZE(tp) = size;
				np = NEXT(tp);
				SEG(np) = SEG(tp);
				SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;
				vd->free = np;
				SIZE(tp) |= s&BITS;
			}
			CLRJUNK(SIZE(tp));
			goto done;
		}

		LINK(tp) = CACHE(vd)[S_CACHE];
		CACHE(vd)[S_CACHE] = tp;
	}

	for(;;)
	{	for(n = S_CACHE; n >= 0; --n)	/* best-fit except for coalescing */
		{	bestreclaim(vd,NIL(Block_t*),n);
			if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
				goto got_block;
		}

		/**/ASSERT(!vd->free);
		if((tp = vd->wild) && SIZE(tp) >= size)
		{	/**/COUNT(N_wild);
			vd->wild = NIL(Block_t*);
			goto got_block;
		}

		KPVCOMPACT(vm,bestcompact);
		if((tp = (*_Vmextend)(vm,size,bestsearch)) )
			goto got_block;
		else if(vd->mode&VM_AGAIN)
			vd->mode &= ~VM_AGAIN;
		else
		{	CLRLOCK(vd,local);
			CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
	}

got_block:
	/**/ ASSERT(!ISBITS(SIZE(tp)));
	/**/ ASSERT(SIZE(tp) >= size);
	/**/ ASSERT((SIZE(tp)%ALIGN) == 0);
	/**/ ASSERT(!vd->free);

	/* tell next block that we are no longer a free block */
	CLRPFREE(SIZE(NEXT(tp)));	/**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));

	if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) )
	{	SIZE(tp) = size;

		np = NEXT(tp);
		SEG(np) = SEG(tp);
		SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;

		if(VMWILD(vd,np))
		{	SIZE(np) &= ~BITS;
			*SELF(np) = np; /**/ASSERT(ISBUSY(SIZE(NEXT(np))));
			SETPFREE(SIZE(NEXT(np)));
			vd->wild = np;
		}
		else	vd->free = np;
	}

	SETBUSY(SIZE(tp));

done:
	if(!local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_ALLOC, DATA(tp), vm->disc);

	CLRINUSE(vd, inuse);
	return DATA(tp);
}

#if __STD_C
static long bestaddr(Vmalloc_t* vm, Void_t* addr )
#else
static long bestaddr(vm, addr)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
#endif
{
	reg Seg_t*	seg;
	reg Block_t	*b, *endb;
	reg long	offset;
	reg Vmdata_t*	vd = vm->data;
	reg int		local, inuse;

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local))
		{	CLRINUSE(vd, inuse);
			return -1L;
		}
		SETLOCK(vd,local);
	}

	offset = -1L; b = endb = NIL(Block_t*);
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr > (Vmuchar_t*)b &&
		   (Vmuchar_t*)addr < (Vmuchar_t*)endb)
			break;
	}

	if(local && !(vd->mode&VM_TRUST) ) /* from bestfree or bestresize */
	{	b = BLOCK(addr);
		if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
			offset = 0;
		if(offset != 0 && vm->disc->exceptf)
			(void)(*vm->disc->exceptf)(vm,VM_BADADDR,addr,vm->disc);
	}
	else if(seg)
	{	while(b < endb)
		{	reg Vmuchar_t*	data = (Vmuchar_t*)DATA(b);
			reg size_t	size = SIZE(b)&~BITS;

			if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size)
			{	if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
					offset = -1L;
				else	offset = (Vmuchar_t*)addr - data;
				goto done;
			}

			b = (Block_t*)((Vmuchar_t*)DATA(b) + size);
		}
	}

done:
	CLRLOCK(vd,local);
	CLRINUSE(vd, inuse);
	return offset;
}

#if __STD_C
static int bestfree(Vmalloc_t* vm, Void_t* data )
#else
static int bestfree(vm, data )
Vmalloc_t*	vm;
Void_t*		data;
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*bp;
	reg size_t	s;
	reg int		local, inuse;

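	/* Descriptive note on the DEBUG hook below: small integral
	** "addresses" (0 through 0xf) are intercepted as a request to run
	** _vmbestcheck() on the whole region, with a nonzero value doubling
	** as temporary _Vmassert flags. It is a debugging aid, not a free.
	*/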
#ifdef DEBUG
	if((local = (int)integralof(data)) >= 0 && local <= 0xf)
	{	int	vmassert = _Vmassert;
		_Vmassert = local ? local : vmassert ? vmassert : (VM_check|VM_abort);
		_vmbestcheck(vd, NIL(Block_t*));
		_Vmassert = local ? local : vmassert;
		return 0;
	}
#endif

	if(!data) /* ANSI-ism */
		return 0;

	/**/COUNT(N_free);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) || KPVADDR(vm,data,bestaddr) != 0 )
		{	CLRINUSE(vd, inuse);
			return -1;
		}
		SETLOCK(vd,local);
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	bp = BLOCK(data); s = SIZE(bp);

	/* Keep an approximate average free block size.
	** This is used in bestcompact() to decide when to release
	** raw memory back to the underlying memory system.
	*/
	vd->pool = (vd->pool + (s&~BITS))/2;
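
	/* e.g. starting from pool == 0, frees of 100, 200 and 400 bytes
	** leave pool at 50, then 125, then 262: a decaying average that
	** weighs the most recent frees most heavily.
	*/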

	if(ISBUSY(s) && !ISJUNK(s))
	{	SETJUNK(SIZE(bp));
		if(s < MAXCACHE)
		{	/**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) );
			LINK(bp) = CACHE(vd)[INDEX(s)];
			CACHE(vd)[INDEX(s)] = bp;
		}
		else if(!vd->free)
			vd->free = bp;
		else
		{	/**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) );
			LINK(bp) = CACHE(vd)[S_CACHE];
			CACHE(vd)[S_CACHE] = bp;
		}

		/* coalesce on freeing large blocks to avoid fragmentation */
		if(SIZE(bp) >= 2*vd->incr)
		{	bestreclaim(vd,NIL(Block_t*),0);
			if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr)
				KPVCOMPACT(vm,bestcompact);
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
		(*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_FREE, data, vm->disc);

	CLRINUSE(vd, inuse);
	return 0;
}

#if __STD_C
static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type)
#else
static Void_t* bestresize(vm,data,size,type)
Vmalloc_t*	vm;		/* region allocating from	*/
Void_t*		data;		/* old block of data		*/
reg size_t	size;		/* new size			*/
int		type;		/* VM_RSMOVE|VM_RSCOPY|VM_RSZERO */
#endif
{
	reg Block_t	*rp, *np, *t;
	int		local, inuse;
	size_t		s, bs, oldsize = 0, orgsize = 0;
	Void_t		*oldd, *orgdata = NIL(Void_t*);
	Vmdata_t	*vd = vm->data;

	/**/COUNT(N_resize);

	SETINUSE(vd, inuse);

	if(!data)
	{	if((data = bestalloc(vm,size)) )
		{	oldsize = 0;
			size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
		}
		goto done;
	}
	if(size == 0)
	{	(void)bestfree(vm,data);
		CLRINUSE(vd, inuse);
		return NIL(Void_t*);
	}

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) || (!local && KPVADDR(vm,data,bestaddr) != 0 ) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);

		orgdata = data;	/* for tracing */
		orgsize = size;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	rp = BLOCK(data);	/**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
	oldsize = SIZE(rp); CLRBITS(oldsize);
	if(oldsize < size)
	{	np = (Block_t*)((Vmuchar_t*)rp + oldsize + sizeof(Head_t));
		do	/* forward merge as much as possible */
		{	s = SIZE(np); /**/ASSERT(!ISPFREE(s));
			if(np == vd->free)
			{	vd->free = NIL(Block_t*);
				CLRBITS(s);
			}
			else if(ISJUNK(s) )
			{
				if(!bestreclaim(vd,np,C_INDEX(s)) )
					/**/ASSERT(0); /* oops: did not see np! */
				s = SIZE(np); /**/ASSERT(s%ALIGN == 0);
			}
			else if(!ISBUSY(s) )
			{	if(np == vd->wild)
					vd->wild = NIL(Block_t*);
				else	REMOVE(vd,np,INDEX(s),t,bestsearch);
			}
			else	break;

			SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0);
			np = (Block_t*)((Vmuchar_t*)np + s);
			CLRPFREE(SIZE(np));
		} while(SIZE(rp) < size);

		if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
		{	reg Seg_t*	seg;

			s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr);
			seg = SEG(rp);
			if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
				      vm->disc) == seg->addr )
			{	SIZE(rp) += s;
				seg->extent += s;
				seg->size += s;
				seg->baddr += s;
				s  = (SIZE(rp)&~BITS) + sizeof(Head_t);
				np = (Block_t*)((Vmuchar_t*)rp + s);
				SEG(np) = seg;
				SIZE(np) = BUSY;
			}
		}
	}

	if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) )
	{	SIZE(rp) = size;
		np = NEXT(rp);
		SEG(np) = SEG(rp);
		SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;
		CPYBITS(SIZE(rp),s);
		rp = np;
		goto do_free;
	}
	else if((bs = s&~BITS) < size)
	{	if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
			data = NIL(Void_t*); /* old data is not moveable */
		else
		{	oldd = data;
			if((data = KPVALLOC(vm,size,bestalloc)) )
			{	if(type&VM_RSCOPY)
					memcpy(data, oldd, bs);

			do_free: /* reclaim these right away */
				SETJUNK(SIZE(rp));
				LINK(rp) = CACHE(vd)[S_CACHE];
				CACHE(vd)[S_CACHE] = rp;
				bestreclaim(vd, NIL(Block_t*), S_CACHE);
			}
		}
	}

	if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);

done:	if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldsize )
		memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize);

	CLRINUSE(vd, inuse);
	return data;
}

#if __STD_C
static long bestsize(Vmalloc_t* vm, Void_t* addr )
#else
static long bestsize(vm, addr)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
#endif
{
	reg Seg_t*	seg;
	reg Block_t	*b, *endb;
	reg long	size;
	reg Vmdata_t*	vd = vm->data;
	reg int		inuse;

	SETINUSE(vd, inuse);

	if(!(vd->mode&VM_TRUST) )
	{	if(ISLOCK(vd,0))
		{	CLRINUSE(vd, inuse);
			return -1L;
		}
		SETLOCK(vd,0);
	}

	size = -1L;
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr <= (Vmuchar_t*)b ||
		   (Vmuchar_t*)addr >= (Vmuchar_t*)endb)
			continue;
		while(b < endb)
		{	if(addr == DATA(b))
			{	if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
					size = -1L;
				else	size = (long)SIZE(b)&~BITS;
				goto done;
			}
			else if((Vmuchar_t*)addr <= (Vmuchar_t*)b)
				break;

			b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
		}
	}

done:
	CLRLOCK(vd,0);
	CLRINUSE(vd, inuse);
	return size;
}

#if __STD_C
static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align)
#else
static Void_t* bestalign(vm, size, align)
Vmalloc_t*	vm;
size_t		size;
size_t		align;
#endif
{
	reg Vmuchar_t	*data;
	reg Block_t	*tp, *np;
	reg Seg_t*	seg;
	reg int		local, inuse;
	reg size_t	s, extra, orgsize = 0, orgalign = 0;
	reg Vmdata_t*	vd = vm->data;

	if(size <= 0 || align <= 0)
		return NIL(Void_t*);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);
		orgsize = size;
		orgalign = align;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	align = MULTIPLE(align,ALIGN);

	/* hack so that dbalign() can store header data */
	if(VMETHOD(vd) != VM_MTDEBUG)
		extra = 0;
	else
	{	extra = DB_HEAD;
		while(align < extra || (align - extra) < sizeof(Block_t))
			align *= 2;
	}

	/* reclaim all free blocks now to avoid fragmentation */
	bestreclaim(vd,NIL(Block_t*),0);

	s = size + 2*(align+sizeof(Head_t)+extra);
	if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) )
		goto done;

	tp = BLOCK(data);
	seg = SEG(tp);

	/* get an aligned address that we can live with */
	if((s = (size_t)((VLONG(data)+extra)%align)) != 0)
		data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0);
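
	/* e.g. (illustration only, with extra == 0): for data == 0x1008
	** and align == 0x10, s == 8, so data moves up by 8 to 0x1010,
	** which is 16-byte aligned. The over-allocation above guarantees
	** room for this shift and for the block splits below.
	*/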

	if((np = BLOCK(data)) != tp ) /* need to free left part */
	{	if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) )
		{	data += align;
			np = BLOCK(data);
		} /**/ASSERT(((VLONG(data)+extra)%align) == 0);

		s  = (Vmuchar_t*)np - (Vmuchar_t*)tp;
		SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
		SEG(np) = seg;

		SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
		/**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
	}

	/* free left-over if too big */
	if((s = SIZE(np) - size) >= sizeof(Block_t))
	{	SIZE(np) = size;

		tp = NEXT(np);
		SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
		SEG(tp) = seg;
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;

		SIZE(np) |= s&BITS;
	}

	bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */

	if(!local && !(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) )
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);

done:
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)data, vm->disc);

	CLRINUSE(vd, inuse);
	return (Void_t*)data;
}


#if _mem_win32
#if _PACKAGE_ast
#include		<ast_windows.h>
#else
#include		<windows.h>
#endif
#endif /* _mem_win32 */

#if _mem_mmap_anon
#include		<sys/mman.h>
#ifndef MAP_ANON
#define	MAP_ANON	MAP_ANONYMOUS
#endif
#endif /* _mem_mmap_anon */

#if _mem_mmap_zero
#include		<sys/fcntl.h>
#include		<sys/mman.h>
typedef struct _mmapdisc_s
{	Vmdisc_t	disc;
	int		fd;
	off_t		offset;
} Mmapdisc_t;

#ifndef OPEN_MAX
#define	OPEN_MAX	64
#endif
#define OPEN_PRIVATE	(3*OPEN_MAX/4)
#endif /* _mem_mmap_zero */

/* failure modes of mmap, sbrk and brk */
#ifndef MAP_FAILED
#define MAP_FAILED	((Void_t*)(-1))
#endif
#define BRK_FAILED	((Void_t*)(-1))

/* make sure that allocated memory is addressable */

#if _PACKAGE_ast
#include	<sig.h>
#else
#include	<signal.h>
typedef void	(*Sig_handler_t)(int);
#endif
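
/*	Descriptive note on the probe below: okaddr() verifies that a newly
**	acquired region is really addressable by touching its first and
**	last bytes with a temporary SIGSEGV handler installed. If either
**	touch faults, the handler sets Gotsegv and okaddr() returns -1 so
**	the caller can release the reservation it just made.
*/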

static int	Gotsegv = 0;

#if __STD_C
static void sigsegv(int sig)
#else
static void sigsegv(sig)
int	sig;
#endif
{
	if(sig == SIGSEGV)
		Gotsegv = 1;
}

#if __STD_C
static int okaddr(Void_t* addr, size_t nsize)
#else
static int okaddr(addr, nsize)
Void_t*	addr;
size_t	nsize;
#endif
{
	Sig_handler_t	segv;
	int		rv;

	Gotsegv = 0; /* catch segmentation faults */
	segv = signal(SIGSEGV, sigsegv);

	if(Gotsegv == 0)
		rv = *((char*)addr);
	if(Gotsegv == 0)
		rv += *(((char*)addr)+nsize-1);
	if(Gotsegv == 0)
		rv = rv == 0 ? 0 : 1;
	else	rv = -1;

	signal(SIGSEGV, segv); /* restore signal catcher */
	Gotsegv = 0;

	return rv;
}
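/*	Discipline contract, as used by the call sites in this file: a
**	memoryf function is invoked as (*memoryf)(vm,caddr,csize,nsize,disc).
**	csize == 0 asks for a fresh region of nsize bytes (return its
**	address); nsize == 0 asks to release the region at caddr (return
**	caddr on success); otherwise it asks to resize caddr from csize to
**	nsize in place (return caddr on success). NIL(Void_t*) signals
**	failure in every case.
*/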
/* A discipline to get raw memory using sbrk/VirtualAlloc/mmap */
#if __STD_C
static Void_t* sbrkmem(Vmalloc_t* vm, Void_t* caddr,
			size_t csize, size_t nsize, Vmdisc_t* disc)
#else
static Void_t* sbrkmem(vm, caddr, csize, nsize, disc)
Vmalloc_t*	vm;	/* region doing allocation from		*/
Void_t*		caddr;	/* current address			*/
size_t		csize;	/* current size				*/
size_t		nsize;	/* new size				*/
Vmdisc_t*	disc;	/* discipline structure			*/
#endif
{
#undef _done_sbrkmem

#if !defined(_done_sbrkmem) && defined(_mem_win32)
#define _done_sbrkmem	1
	NOTUSED(vm);
	NOTUSED(disc);
	if(csize == 0)
		return (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE);
	else if(nsize == 0)
		return VirtualFree((LPVOID)caddr,0,MEM_RELEASE) ? caddr : NIL(Void_t*);
	else	return NIL(Void_t*);
#endif /* _mem_win32 */

#if !defined(_done_sbrkmem) && (_mem_sbrk || _mem_mmap_zero || _mem_mmap_anon)
#define _done_sbrkmem	1
	Vmuchar_t	*addr;
#if _mem_mmap_zero
	Mmapdisc_t	*mmdc = (Mmapdisc_t*)disc;
#else
	NOTUSED(disc);
#endif
	NOTUSED(vm);

	if(csize == 0) /* allocating new memory */
	{

#if _mem_sbrk	/* try using sbrk() and brk() */
		if(!(_Vmassert & VM_mmap))
		{
			addr = (Vmuchar_t*)sbrk(0); /* old break value */
			if(addr && addr != (Vmuchar_t*)BRK_FAILED )
			{
				if((addr+nsize) < addr)
					return NIL(Void_t*);
				if(brk(addr+nsize) == 0 )
				{	if(okaddr(addr,nsize) >= 0)
						return addr;
					(void)brk(addr); /* release reserved address */
				}
			}
		}
#endif /* _mem_sbrk */

#if _mem_mmap_anon /* anonymous mmap */
		addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, -1, 0);
		if(addr && addr != (Vmuchar_t*)MAP_FAILED)
		{	if(okaddr(addr,nsize) >= 0)
				return addr;
			(void)munmap((char*)addr, nsize); /* release reserved address */
		}
#endif /* _mem_mmap_anon */

#if _mem_mmap_zero /* mmap from /dev/zero */
		if(mmdc->fd < 0)
		{	int	fd;
			if(mmdc->fd != -1)
				return NIL(Void_t*);
			if((fd = open("/dev/zero", O_RDONLY)) < 0 )
			{	mmdc->fd = -2;
				return NIL(Void_t*);
			}
			if(fd >= OPEN_PRIVATE || (mmdc->fd = dup2(fd,OPEN_PRIVATE)) < 0 )
				mmdc->fd = fd;
			else	close(fd);
#ifdef FD_CLOEXEC
			fcntl(mmdc->fd, F_SETFD, FD_CLOEXEC);
#endif
		}
		addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
					MAP_PRIVATE, mmdc->fd, mmdc->offset);
		if(addr && addr != (Vmuchar_t*)MAP_FAILED)
		{	if(okaddr(addr, nsize) >= 0)
			{	mmdc->offset += nsize;
				return addr;
			}
			(void)munmap((char*)addr, nsize); /* release reserved address */
		}
#endif /* _mem_mmap_zero */

		return NIL(Void_t*);
	}
	else
	{

#if _mem_sbrk
		addr = (Vmuchar_t*)sbrk(0);
		if(!addr || addr == (Vmuchar_t*)BRK_FAILED)
			addr = caddr;
		else if(((Vmuchar_t*)caddr+csize) == addr) /* in sbrk-space */
		{	if(nsize <= csize)
				addr -= csize-nsize;
			else if((addr += nsize-csize) < (Vmuchar_t*)caddr)
				return NIL(Void_t*); /* wrapped around address */
			else	return brk(addr) == 0 ? caddr : NIL(Void_t*);
		}
#else
		addr = caddr;
#endif /* _mem_sbrk */

#if _mem_mmap_zero || _mem_mmap_anon
		if(((Vmuchar_t*)caddr+csize) > addr) /* in mmap-space */
			if(nsize == 0 && munmap(caddr,csize) == 0)
				return caddr;
#endif /* _mem_mmap_zero || _mem_mmap_anon */

		return NIL(Void_t*);
	}
#endif /*_done_sbrkmem*/

#if !_done_sbrkmem /* use native malloc/free as a last resort */
	/**/ASSERT(_std_malloc); /* _std_malloc should be well-defined */
	NOTUSED(vm);
	NOTUSED(disc);
	if(csize == 0)
		return (Void_t*)malloc(nsize);
	else if(nsize == 0)
	{	free(caddr);
		return caddr;
	}
	else	return NIL(Void_t*);
#endif /* _done_sbrkmem */
}

#if _mem_mmap_zero
static Mmapdisc_t _Vmdcsbrk = { { sbrkmem, NIL(Vmexcept_f), 64*1024 }, -1, 0 };
#else
static Vmdisc_t _Vmdcsbrk = { sbrkmem, NIL(Vmexcept_f), 0 };
#endif

static Vmethod_t _Vmbest =
{
	bestalloc,
	bestresize,
	bestfree,
	bestaddr,
	bestsize,
	bestcompact,
	bestalign,
	VM_MTBEST
};

/* The heap region */
static Vmdata_t	_Vmdata =
{
	VM_MTBEST|VM_TRUST,		/* mode		*/
	0,				/* incr		*/
	0,				/* pool		*/
	NIL(Seg_t*),			/* seg		*/
	NIL(Block_t*),			/* free		*/
	NIL(Block_t*),			/* wild		*/
	NIL(Block_t*),			/* root		*/
};
Vmalloc_t _Vmheap =
{
	{ bestalloc,
	  bestresize,
	  bestfree,
	  bestaddr,
	  bestsize,
	  bestcompact,
	  bestalign,
	  VM_MTBEST
	},
	NIL(char*),			/* file		*/
	0,				/* line		*/
	0,				/* func		*/
	(Vmdisc_t*)(&_Vmdcsbrk),	/* disc		*/
	&_Vmdata,			/* data		*/
	NIL(Vmalloc_t*)			/* next		*/
};

__DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap);
__DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap);
__DEFINE__(Vmethod_t*, Vmbest, &_Vmbest);
__DEFINE__(Vmdisc_t*,  Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsbrk) );

#ifdef NoF
NoF(vmbest)
#endif

#endif