vmbest.c revision 34f9b3eef6fdadbda0a846aa4d68691ac40eace5
/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2009 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmbest(){}

#else

#include	"vmhdr.h"

/*	Best-fit allocation method. This is based on a best-fit strategy
**	using a splay tree for storage of lists of free blocks of the same
**	size. Recent free blocks may be cached for fast reuse.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
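
/*	Illustrative sketch (not part of this file): using this method
**	through the public vmalloc interface. vmopen, vmalloc, vmfree and
**	vmclose are the standard entry points declared in <vmalloc.h>;
**	Vmdcsbrk and Vmbest are the discipline and method handles defined
**	at the bottom of this file.
**
**		#include <vmalloc.h>
**
**		Vmalloc_t* vm = vmopen(Vmdcsbrk, Vmbest, 0);
**		if(vm)
**		{	Void_t* p = vmalloc(vm, 100);
**			if(p)
**				vmfree(vm, p);
**			vmclose(vm);
**		}
*/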

#ifdef DEBUG
static int	N_free;		/* # of free calls			*/
static int	N_alloc;	/* # of alloc calls			*/
static int	N_resize;	/* # of resize calls			*/
static int	N_wild;		/* # allocated from the wild block	*/
static int	N_last;		/* # allocated from last free block	*/
static int	N_reclaim;	/* # of bestreclaim calls		*/

#undef	VM_TRUST		/* always check for locking, etc.	*/
#define	VM_TRUST	0
#endif /*DEBUG*/

#define COMPACT		8	/* factor to decide when to compact	*/

/* Check to see if a block is in the free tree */
#if __STD_C
static int vmintree(Block_t* node, Block_t* b)
#else
static int vmintree(node,b)
Block_t*	node;
Block_t*	b;
#endif
{	Block_t*	t;

	for(t = node; t; t = LINK(t))
		if(t == b)
			return 1;
	if(LEFT(node) && vmintree(LEFT(node),b))
		return 1;
	if(RIGHT(node) && vmintree(RIGHT(node),b))
		return 1;
	return 0;
}

#if __STD_C
static int vmonlist(Block_t* list, Block_t* b)
#else
static int vmonlist(list,b)
Block_t*	list;
Block_t*	b;
#endif
{
	for(; list; list = LINK(list))
		if(list == b)
			return 1;
	return 0;
}

/* Check to see if a block is known to be free */
#if __STD_C
static int vmisfree(Vmdata_t* vd, Block_t* b)
#else
static int vmisfree(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	if(SIZE(b) & (BUSY|JUNK|PFREE))
		return 0;

	if(b == vd->wild)
		return 1;

	if(SIZE(b) < MAXTINY)
		return vmonlist(TINY(vd)[INDEX(SIZE(b))], b);

	if(vd->root)
		return vmintree(vd->root, b);

	return 0;
}

/* Check to see if a block is known to be junked */
#if __STD_C
static int vmisjunk(Vmdata_t* vd, Block_t* b)
#else
static int vmisjunk(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	Block_t*	t;

	if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0)
		return 0;

	if(b == vd->free) /* recently freed */
		return 1;

	/* check the list that b is supposed to be in */
	for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t))
		if(t == b)
			return 1;

	/* on occasion, b may be put onto the catch-all list */
	if(C_INDEX(SIZE(b)) < S_CACHE)
		for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
			if(t == b)
				return 1;

	return 0;
}

/* check to see if the free tree is in good shape */
#if __STD_C
static int vmchktree(Block_t* node)
#else
static int vmchktree(node)
Block_t*	node;
#endif
{	Block_t*	t;

	if(SIZE(node) & BITS)
		{ /**/ASSERT(0); return -1; }

	for(t = LINK(node); t; t = LINK(t))
		if(SIZE(t) != SIZE(node))
			{ /**/ASSERT(0); return -1; }

	if((t = LEFT(node)) )
	{	if(SIZE(t) >= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}
	if((t = RIGHT(node)) )
	{	if(SIZE(t) <= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}

	return 0;
}

#if __STD_C
int _vmbestcheck(Vmdata_t* vd, Block_t* freeb)
#else
int _vmbestcheck(vd, freeb)
Vmdata_t*	vd;
Block_t*	freeb; /* known to be free but not on any free list */
#endif
{
	reg Seg_t	*seg;
	reg Block_t	*b, *endb, *nextb;
	int		rv = 0;

	if(!CHECK())
		return 0;

	/* make sure the free tree is still in shape */
	if(vd->root && vmchktree(vd->root) < 0 )
		{ rv = -1; /**/ASSERT(0); }

	for(seg = vd->seg; seg && rv == 0; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		for(; b < endb && rv == 0; b = nextb)
		{	nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );

			if(!ISBUSY(SIZE(b)) ) /* a completely free block */
			{	/* there should be no marked bits of any type */
				if(SIZE(b) & (BUSY|JUNK|PFREE) )
					{ rv = -1; /**/ASSERT(0); }

				/* next block must be busy and marked PFREE */
				if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* must have a self-reference pointer */
				if(*SELF(b) != b)
					{ rv = -1; /**/ASSERT(0); }

				/* segment pointer should be well-defined */
				if(!TINIEST(b) && SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* must be on a free list */
				if(b != freeb && !vmisfree(vd, b) )
					{ rv = -1; /**/ASSERT(0); }
			}
			else
			{	/* segment pointer should be well-defined */
				if(SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* next block should not be marked PFREE */
				if(ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if PFREE, last block should be free */
				if(ISPFREE(SIZE(b)) && LAST(b) != freeb &&
				   !vmisfree(vd, LAST(b)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if free but unreclaimed, should be junk */
				if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b))
					{ rv = -1; /**/ASSERT(0); }
			}
		}
	}

	return rv;
}

/* Tree rotation functions */
#define RROTATE(x,y)	(LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
#define LROTATE(x,y)	(RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
#define RLINK(s,x)	((s) = LEFT(s) = (x))
#define LLINK(s,x)	((s) = RIGHT(s) = (x))
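
/* Worked example (illustrative): RROTATE(x,y) performs a right rotation
** where y == LEFT(x). Given
**
**		  x                y
**		 / \              / \
**		y   C    ==>     A   x
**	       / \                  / \
**	      A   B                B   C
**
** y's right subtree B becomes x's left subtree and y replaces x as the
** local root; LROTATE is the mirror image. RLINK/LLINK hang the current
** root off the growing right/left subtrees during the top-down splay
** search in bestsearch() below.
*/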

/* Find and delete a suitable element in the free tree. */
#if __STD_C
static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
#else
static Block_t* bestsearch(vd, size, wanted)
Vmdata_t*	vd;
reg size_t	size;
Block_t*	wanted;
#endif
{
	reg size_t	s;
	reg Block_t	*t, *root, *l, *r;
	Block_t		link;

	/* extracting a tiniest block from its list */
	if((root = wanted) && size == TINYSIZE)
	{	reg Seg_t*	seg;

		l = TLEFT(root);
		if((r = LINK(root)) )
			TLEFT(r) = l;
		if(l)
			LINK(l) = r;
		else	TINY(vd)[0] = r;

		seg = vd->seg;
		if(!seg->next)
			SEG(root) = seg;
		else for(;; seg = seg->next)
		{	if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr &&
			   (Vmuchar_t*)root < seg->baddr)
			{	SEG(root) = seg;
				break;
			}
		}

		return root;
	}

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);

	/* find the right one to delete */
	l = r = &link;
	if((root = vd->root) ) do
	{	/**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
		if(size == (s = SIZE(root)) )
			break;
		if(size < s)
		{	if((t = LEFT(root)) )
			{	if(size <= (s = SIZE(t)) )
				{	RROTATE(root,t);
					if(size == s)
						break;
					t = LEFT(root);
				}
				else
				{	LLINK(l,t);
					t = RIGHT(t);
				}
			}
			RLINK(r,root);
		}
		else
		{	if((t = RIGHT(root)) )
			{	if(size >= (s = SIZE(t)) )
				{	LROTATE(root,t);
					if(size == s)
						break;
					t = RIGHT(root);
				}
				else
				{	RLINK(r,t);
					t = LEFT(t);
				}
			}
			LLINK(l,root);
		}
		/**/ ASSERT(root != t);
	} while((root = t) );

	if(root)	/* found it, now isolate it */
	{	RIGHT(l) = LEFT(root);
		LEFT(r) = RIGHT(root);
	}
	else		/* nothing exactly fit	*/
	{	LEFT(r) = NIL(Block_t*);
		RIGHT(l) = NIL(Block_t*);

		/* grab the least one from the right tree */
		if((root = LEFT(&link)) )
		{	while((t = LEFT(root)) )
				RROTATE(root,t);
			LEFT(&link) = RIGHT(root);
		}
	}

	if(root && (r = LINK(root)) )
	{	/* head of a linked list, use next one for root */
		LEFT(r) = RIGHT(&link);
		RIGHT(r) = LEFT(&link);
	}
	else if(!(r = LEFT(&link)) )
		r = RIGHT(&link);
	else /* graft left tree to right tree */
	{	while((t = LEFT(r)) )
			RROTATE(r,t);
		LEFT(r) = RIGHT(&link);
	}
	vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r)));

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
	/**/ASSERT(!wanted || wanted == root);

	return root;
}

/* Reclaim all delayed free blocks into the free tree */
#if __STD_C
static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
#else
static int bestreclaim(vd, wanted, c)
reg Vmdata_t*	vd;
Block_t*	wanted;
int		c;
#endif
{
	reg size_t	size, s;
	reg Block_t	*fp, *np, *t, *list;
	reg int		n, saw_wanted;
	reg Seg_t	*seg;

	/**/COUNT(N_reclaim);
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	if((fp = vd->free) )
	{	LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp;
		vd->free = NIL(Block_t*);
	}

	saw_wanted = wanted ? 0 : 1;
	for(n = S_CACHE; n >= c; --n)
	{	list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*);
		while((fp = list) )
		{	/* Note that below here we allow ISJUNK blocks to be
			** forward-merged even though they are not removed from
			** the list immediately. In this way, the list is
			** scanned only once. It works because the LINK and SIZE
			** fields are not destroyed during the merging. This can
			** be seen by observing that a tiniest block has a 2-word
			** header and a 2-word body. Merging a tiniest block
			** (1seg) and the next block (2seg) looks like this:
			**	1seg  size  link  left  2seg size link left ....
			**	1seg  size  link  left  rite xxxx xxxx .... self
			** After the merge, the 2seg word is replaced by the RIGHT
			** pointer of the new block and somewhere beyond the
			** two xxxx fields, the SELF pointer will replace some
			** other word. The important part is that the two xxxx
			** fields are kept intact.
			*/
			list = LINK(list); /**/ASSERT(!vmonlist(list,fp));

			size = SIZE(fp);
			if(!ISJUNK(size))	/* already done */
				continue;

			if(_Vmassert & VM_region)
			{	/* see if this address is from region */
				for(seg = vd->seg; seg; seg = seg->next)
					if(fp >= SEGBLOCK(seg) && fp < (Block_t*)seg->baddr )
						break;
				if(!seg) /* must be a bug in application code! */
				{	/**/ ASSERT(seg != NIL(Seg_t*));
					continue;
				}
			}

			if(ISPFREE(size))	/* backward merge */
			{	fp = LAST(fp);
				s = SIZE(fp); /**/ASSERT(!(s&BITS));
				REMOVE(vd,fp,INDEX(s),t,bestsearch);
				size = (size&~BITS) + s + sizeof(Head_t);
			}
			else	size &= ~BITS;

			for(;;)	/* forward merge */
			{	np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t));
				s = SIZE(np);	/**/ASSERT(s > 0);
				if(!ISBUSY(s))
				{	/**/ASSERT((s&BITS) == 0);
					if(np == vd->wild)
						vd->wild = NIL(Block_t*);
					else	REMOVE(vd,np,INDEX(s),t,bestsearch);
				}
				else if(ISJUNK(s))
				{	/* reclaim any touched junk list */
					if((int)C_INDEX(s) < c)
						c = C_INDEX(s);
					SIZE(np) = 0;
					CLRBITS(s);
				}
				else	break;
				size += s + sizeof(Head_t);
			}
			SIZE(fp) = size;

			/* tell next block that this one is free */
			np = NEXT(fp);	/**/ASSERT(ISBUSY(SIZE(np)));
					/**/ASSERT(!ISJUNK(SIZE(np)));
			SETPFREE(SIZE(np));
			*(SELF(fp)) = fp;

			if(fp == wanted) /* to be consumed soon */
			{	/**/ASSERT(!saw_wanted); /* should be seen just once */
				saw_wanted = 1;
				continue;
			}

			/* wilderness preservation */
			if(np->body.data >= vd->seg->baddr)
			{	vd->wild = fp;
				continue;
			}

			/* tiny block goes to tiny list */
			if(size < MAXTINY)
			{	s = INDEX(size);
				np = LINK(fp) = TINY(vd)[s];
				if(s == 0)	/* TINIEST block */
				{	if(np)
						TLEFT(np) = fp;
					TLEFT(fp) = NIL(Block_t*);
				}
				else
				{	if(np)
						LEFT(np)  = fp;
					LEFT(fp) = NIL(Block_t*);
					SETLINK(fp);
				}
				TINY(vd)[s] = fp;
				continue;
			}

			LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
			if(!(np = vd->root) )	/* inserting into an empty tree	*/
			{	vd->root = fp;
				continue;
			}

			size = SIZE(fp);
			while(1)	/* leaf insertion */
			{	/**/ASSERT(np != fp);
				if((s = SIZE(np)) > size)
				{	if((t = LEFT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	LEFT(np) = fp;
						break;
					}
				}
				else if(s < size)
				{	if((t = RIGHT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	RIGHT(np) = fp;
						break;
					}
				}
				else /* s == size */
				{	if((t = LINK(np)) )
					{	LINK(fp) = t;
						LEFT(t) = fp;
					}
					LINK(np) = fp;
					LEFT(fp) = np;
					SETLINK(fp);
					break;
				}
			}
		}
	}

	/**/ASSERT(!wanted || saw_wanted == 1);
	/**/ASSERT(_vmbestcheck(vd, wanted) == 0);
	return saw_wanted;
}
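
/* Lifecycle sketch (illustrative): with the default settings, a freed
** block moves through the delayed-free machinery roughly as follows:
**
**	vmfree	   -> bestfree: block marked BUSY|JUNK and pushed onto
**		      vd->free or CACHE(vd)[INDEX(size)] (the S_CACHE
**		      slot is the catch-all for large sizes)
**	next alloc -> bestreclaim: caches drained, neighbors merged via
**		      the PFREE/SELF back-pointers, result filed into a
**		      TINY list, the wild block, or the free tree
**	bestsearch -> best-fit removal from the splay tree
*/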

#if __STD_C
static int bestcompact(Vmalloc_t* vm)
#else
static int bestcompact(vm)
Vmalloc_t*	vm;
#endif
{
	reg Seg_t	*seg, *next;
	reg Block_t	*bp, *t;
	reg size_t	size, segsize, round;
	reg int		local, inuse;
	reg Vmdata_t*	vd = vm->data;

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);
		if(ISLOCK(vd,local))
		{	CLRINUSE(vd, inuse);
			return -1;
		}
		SETLOCK(vd,local);
	}

	bestreclaim(vd,NIL(Block_t*),0);

	for(seg = vd->seg; seg; seg = next)
	{	next = seg->next;

		bp = BLOCK(seg->baddr);
		if(!ISPFREE(SIZE(bp)) )
			continue;

		bp = LAST(bp);	/**/ASSERT(vmisfree(vd,bp));
		size = SIZE(bp);
		if(bp == vd->wild)
		{	/* During large block allocations, _Vmextend may have
			** enlarged the rounding factor. Reducing it a bit
			** helps avoid acquiring overly large chunks of raw
			** memory.
			*/
			if((round = vm->disc->round) == 0)
				round = _Vmpagesize;
			if(size > COMPACT*vd->incr && vd->incr > round)
				vd->incr /= 2;

			/* for the bottom segment, we don't necessarily want
			** to return raw memory too early. vd->pool has an
			** approximation of the average size of recently freed
			** blocks. If this is large, the application is managing
			** large blocks so we throttle back memory chopping
			** to avoid thrashing the underlying memory system.
			*/
			if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool)
				continue;

			vd->wild = NIL(Block_t*);
			vd->pool = 0;
		}
		else	REMOVE(vd,bp,INDEX(size),t,bestsearch);
		CLRPFREE(SIZE(NEXT(bp)));

		if(size < (segsize = seg->size))
			size += sizeof(Head_t);

		if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0)
		{	if(size >= segsize) /* entire segment deleted */
				continue;
			/**/ASSERT(SEG(BLOCK(seg->baddr)) == seg);

			if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0)
				SIZE(bp) = size - sizeof(Head_t);
			else	bp = NIL(Block_t*);
		}

		if(bp)
		{	/**/ ASSERT(SIZE(bp) >= BODYSIZE);
			/**/ ASSERT(SEGWILD(bp));
			/**/ ASSERT(!vd->root || !vmintree(vd->root,bp));
			SIZE(bp) |= BUSY|JUNK;
			LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
			CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);

	CLRLOCK(vd,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	CLRINUSE(vd, inuse);
	return 0;
}

#if __STD_C
static Void_t* bestalloc(Vmalloc_t* vm, reg size_t size )
#else
static Void_t* bestalloc(vm,size)
Vmalloc_t*	vm;	/* region allocating from	*/
reg size_t	size;	/* desired block size		*/
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg size_t	s;
	reg int		n;
	reg Block_t	*tp, *np;
	reg int		local, inuse;
	size_t		orgsize = 0;

	if(!(_Vmassert & VM_init))
	{	char	*chk = getenv("VMCHECK");
		_Vmassert = VM_init;
		if(chk)
		{	int	set = 1;
			for(;; set ? (_Vmassert |= n) : (_Vmassert &= ~n))
			{
				switch (*chk++)
				{
				case 0:
					break;
				case '-':
				case '!':
					set = 0;
					n = 0;
					continue;
				case '+':
					set = 1;
					n = 0;
					continue;
				case '1':
					n = VM_check;
					continue;
				case '2':
					n = VM_abort;
					continue;
				case '3':
					n = VM_check|VM_abort;
					continue;
				case 'a':
				case 'A':
					n = VM_abort;
					continue;
				case 'c':
				case 'C':
					n = VM_check;
					continue;
#if _mem_mmap_anon || _mem_mmap_zero
				case 'm':
				case 'M':
					n = VM_mmap;
					continue;
#endif
				case 'r':
				case 'R':
					n = VM_region;
					continue;
				default:
					n = 0;
					continue;
				}
				break;
			}
		}
	}
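
	/* Examples (illustrative): VMCHECK=3 turns on both VM_check and
	** VM_abort; VMCHECK=c enables consistency checking only; '-' or
	** '!' flips subsequent flags off, so VMCHECK=3!a keeps VM_check
	** but clears VM_abort. Parsing stops at the terminating NUL.
	*/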
	/**/COUNT(N_alloc);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST))
	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);
		orgsize = size;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	/**/ ASSERT(HEADSIZE == sizeof(Head_t));
	/**/ ASSERT(BODYSIZE == sizeof(Body_t));
	/**/ ASSERT((ALIGN%(BITS+1)) == 0 );
	/**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
	/**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
	/**/ ASSERT((BODYSIZE%ALIGN) == 0 );
	/**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );

	/* for the ANSI requirement that malloc(0) returns a non-NULL pointer */
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
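	/* e.g., assuming ALIGN == 16 and BODYSIZE == 16 (actual values are
	** platform-dependent): requests of 0 or 1 byte become 16 bytes,
	** and a 20-byte request rounds up to 32.
	*/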

	if((tp = vd->free) )	/* reuse last free piece if appropriate */
	{	/**/ASSERT(ISBUSY(SIZE(tp)) );
		/**/ASSERT(ISJUNK(SIZE(tp)) );
		/**/COUNT(N_last);

		vd->free = NIL(Block_t*);
		if((s = SIZE(tp)) >= size && s < (size << 1) )
		{	if(s >= size + (sizeof(Head_t)+BODYSIZE) )
			{	SIZE(tp) = size;
				np = NEXT(tp);
				SEG(np) = SEG(tp);
				SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;
				vd->free = np;
				SIZE(tp) |= s&BITS;
			}
			CLRJUNK(SIZE(tp));
			goto done;
		}

		LINK(tp) = CACHE(vd)[S_CACHE];
		CACHE(vd)[S_CACHE] = tp;
	}

	for(;;)
	{	for(n = S_CACHE; n >= 0; --n)	/* best-fit except for coalescing */
		{	bestreclaim(vd,NIL(Block_t*),n);
			if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
				goto got_block;
		}

		/**/ASSERT(!vd->free);
		if((tp = vd->wild) && SIZE(tp) >= size)
		{	/**/COUNT(N_wild);
			vd->wild = NIL(Block_t*);
			goto got_block;
		}

		KPVCOMPACT(vm,bestcompact);
		if((tp = (*_Vmextend)(vm,size,bestsearch)) )
			goto got_block;
		else if(vd->mode&VM_AGAIN)
			vd->mode &= ~VM_AGAIN;
		else
		{	CLRLOCK(vd,local);
			CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
	}

got_block:
	/**/ ASSERT(!ISBITS(SIZE(tp)));
	/**/ ASSERT(SIZE(tp) >= size);
	/**/ ASSERT((SIZE(tp)%ALIGN) == 0);
	/**/ ASSERT(!vd->free);

	/* tell next block that we are no longer a free block */
	CLRPFREE(SIZE(NEXT(tp)));	/**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));

	if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) )
	{	SIZE(tp) = size;

		np = NEXT(tp);
		SEG(np) = SEG(tp);
		SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;

		if(VMWILD(vd,np))
		{	SIZE(np) &= ~BITS;
			*SELF(np) = np; /**/ASSERT(ISBUSY(SIZE(NEXT(np))));
			SETPFREE(SIZE(NEXT(np)));
			vd->wild = np;
		}
		else	vd->free = np;
	}

	SETBUSY(SIZE(tp));

done:
	if(!local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_ALLOC, DATA(tp), vm->disc);

	CLRINUSE(vd, inuse);
	return DATA(tp);
}

#if __STD_C
static long bestaddr(Vmalloc_t* vm, Void_t* addr )
#else
static long bestaddr(vm, addr)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
#endif
{
	reg Seg_t*	seg;
	reg Block_t	*b, *endb;
	reg long	offset;
	reg Vmdata_t*	vd = vm->data;
	reg int		local, inuse;

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local))
		{	CLRINUSE(vd, inuse);
			return -1L;
		}
		SETLOCK(vd,local);
	}

	offset = -1L; b = endb = NIL(Block_t*);
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr > (Vmuchar_t*)b &&
		   (Vmuchar_t*)addr < (Vmuchar_t*)endb)
			break;
	}

	if(local && !(vd->mode&VM_TRUST) ) /* from bestfree or bestresize */
	{	b = BLOCK(addr);
		if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
			offset = 0;
		if(offset != 0 && vm->disc->exceptf)
			(void)(*vm->disc->exceptf)(vm,VM_BADADDR,addr,vm->disc);
	}
	else if(seg)
	{	while(b < endb)
		{	reg Vmuchar_t*	data = (Vmuchar_t*)DATA(b);
			reg size_t	size = SIZE(b)&~BITS;

			if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size)
			{	if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
					offset = -1L;
				else	offset = (Vmuchar_t*)addr - data;
				goto done;
			}

			b = (Block_t*)((Vmuchar_t*)DATA(b) + size);
		}
	}

done:
	CLRLOCK(vd,local);
	CLRINUSE(vd, inuse);
	return offset;
}

#if __STD_C
static int bestfree(Vmalloc_t* vm, Void_t* data )
#else
static int bestfree(vm, data )
Vmalloc_t*	vm;
Void_t*		data;
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*bp;
	reg size_t	s;
	reg int		local, inuse;

#ifdef DEBUG
	if((local = (int)integralof(data)) >= 0 && local <= 0xf)
	{	int	vmassert = _Vmassert;
		_Vmassert = local ? local : vmassert ? vmassert : (VM_check|VM_abort);
		_vmbestcheck(vd, NIL(Block_t*));
		_Vmassert = local ? local : vmassert;
		return 0;
	}
#endif

	if(!data) /* ANSI-ism */
		return 0;

	/**/COUNT(N_free);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) || KPVADDR(vm,data,bestaddr) != 0 )
		{	CLRINUSE(vd, inuse);
			return -1;
		}
		SETLOCK(vd,local);
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	bp = BLOCK(data); s = SIZE(bp);

	/* Keep an approximate average free block size.
	** This is used in bestcompact() to decide when to release
	** raw memory back to the underlying memory system.
	*/
	vd->pool = (vd->pool + (s&~BITS))/2;

	if(ISBUSY(s) && !ISJUNK(s))
	{	SETJUNK(SIZE(bp));
		if(s < MAXCACHE)
		{	/**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) );
			LINK(bp) = CACHE(vd)[INDEX(s)];
			CACHE(vd)[INDEX(s)] = bp;
		}
		else if(!vd->free)
			vd->free = bp;
		else
		{	/**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) );
			LINK(bp) = CACHE(vd)[S_CACHE];
			CACHE(vd)[S_CACHE] = bp;
		}

		/* coalesce on freeing large blocks to avoid fragmentation */
		if(SIZE(bp) >= 2*vd->incr)
		{	bestreclaim(vd,NIL(Block_t*),0);
			if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr)
				KPVCOMPACT(vm,bestcompact);
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
		(*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_FREE, data, vm->disc);

	CLRINUSE(vd, inuse);
	return 0;
}

#if __STD_C
static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type)
#else
static Void_t* bestresize(vm,data,size,type)
Vmalloc_t*	vm;		/* region allocating from	*/
Void_t*		data;		/* old block of data		*/
reg size_t	size;		/* new size			*/
int		type;		/* VM_RSMOVE|VM_RSCOPY|VM_RSZERO flags	*/
#endif
{
	reg Block_t	*rp, *np, *t;
	int		local, inuse;
	size_t		s, bs, oldsize = 0, orgsize = 0;
	Void_t		*oldd, *orgdata = NIL(Void_t*);
	Vmdata_t	*vd = vm->data;

	/**/COUNT(N_resize);

	SETINUSE(vd, inuse);

	if(!data)
	{	if((data = bestalloc(vm,size)) )
		{	oldsize = 0;
			size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
		}
		goto done;
	}
	if(size == 0)
	{	(void)bestfree(vm,data);
		CLRINUSE(vd, inuse);
		return NIL(Void_t*);
	}

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) || (!local && KPVADDR(vm,data,bestaddr) != 0 ) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);

		orgdata = data;	/* for tracing */
		orgsize = size;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	rp = BLOCK(data);	/**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
	oldsize = SIZE(rp); CLRBITS(oldsize);
	if(oldsize < size)
	{	np = (Block_t*)((Vmuchar_t*)rp + oldsize + sizeof(Head_t));
		do	/* forward merge as much as possible */
		{	s = SIZE(np); /**/ASSERT(!ISPFREE(s));
			if(np == vd->free)
			{	vd->free = NIL(Block_t*);
				CLRBITS(s);
			}
			else if(ISJUNK(s) )
			{	if(!bestreclaim(vd,np,C_INDEX(s)) )
					/**/ASSERT(0); /* oops: did not see np! */
				s = SIZE(np); /**/ASSERT(s%ALIGN == 0);
			}
			else if(!ISBUSY(s) )
			{	if(np == vd->wild)
					vd->wild = NIL(Block_t*);
				else	REMOVE(vd,np,INDEX(s),t,bestsearch);
			}
			else	break;

			SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0);
			np = (Block_t*)((Vmuchar_t*)np + s);
			CLRPFREE(SIZE(np));
		} while(SIZE(rp) < size);

		if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
		{	reg Seg_t*	seg;

			s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr);
			seg = SEG(rp);
			if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
				      vm->disc) == seg->addr )
			{	SIZE(rp) += s;
				seg->extent += s;
				seg->size += s;
				seg->baddr += s;
				s  = (SIZE(rp)&~BITS) + sizeof(Head_t);
				np = (Block_t*)((Vmuchar_t*)rp + s);
				SEG(np) = seg;
				SIZE(np) = BUSY;
			}
		}
	}

	if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) )
	{	SIZE(rp) = size;
		np = NEXT(rp);
		SEG(np) = SEG(rp);
		SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;
		CPYBITS(SIZE(rp),s);
		rp = np;
		goto do_free;
	}
	else if((bs = s&~BITS) < size)
	{	if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
			data = NIL(Void_t*); /* old data is not moveable */
		else
		{	oldd = data;
			if((data = KPVALLOC(vm,size,bestalloc)) )
			{	if(type&VM_RSCOPY)
					memcpy(data, oldd, bs);

			do_free: /* reclaim these right away */
				SETJUNK(SIZE(rp));
				LINK(rp) = CACHE(vd)[S_CACHE];
				CACHE(vd)[S_CACHE] = rp;
				bestreclaim(vd, NIL(Block_t*), S_CACHE);
			}
		}
	}

	if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);

done:	if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldsize )
		memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize);

	CLRINUSE(vd, inuse);
	return data;
}

#if __STD_C
static long bestsize(Vmalloc_t* vm, Void_t* addr )
#else
static long bestsize(vm, addr)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
#endif
{
	reg Seg_t*	seg;
	reg Block_t	*b, *endb;
	reg long	size;
	reg Vmdata_t*	vd = vm->data;
	reg int		inuse;

	SETINUSE(vd, inuse);

	if(!(vd->mode&VM_TRUST) )
	{	if(ISLOCK(vd,0))
		{	CLRINUSE(vd, inuse);
			return -1L;
		}
		SETLOCK(vd,0);
	}

	size = -1L;
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr <= (Vmuchar_t*)b ||
		   (Vmuchar_t*)addr >= (Vmuchar_t*)endb)
			continue;
		while(b < endb)
		{	if(addr == DATA(b))
			{	if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
					size = -1L;
				else	size = (long)SIZE(b)&~BITS;
				goto done;
			}
			else if((Vmuchar_t*)addr <= (Vmuchar_t*)b)
				break;

			b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
		}
	}

done:
	CLRLOCK(vd,0);
	CLRINUSE(vd, inuse);
	return size;
}

#if __STD_C
static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align)
#else
static Void_t* bestalign(vm, size, align)
Vmalloc_t*	vm;
size_t		size;
size_t		align;
#endif
{
	reg Vmuchar_t	*data;
	reg Block_t	*tp, *np;
	reg Seg_t*	seg;
	reg int		local, inuse;
	reg size_t	s, extra, orgsize = 0, orgalign = 0;
	reg Vmdata_t*	vd = vm->data;

	if(size <= 0 || align <= 0)
		return NIL(Void_t*);

	SETINUSE(vd, inuse);

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
		if(ISLOCK(vd,local) )
		{	CLRINUSE(vd, inuse);
			return NIL(Void_t*);
		}
		SETLOCK(vd,local);
		orgsize = size;
		orgalign = align;
	}

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	align = MULTIPLE(align,ALIGN);

	/* hack so that dbalign() can store header data */
	if(VMETHOD(vd) != VM_MTDEBUG)
		extra = 0;
	else
	{	extra = DB_HEAD;
		while(align < extra || (align - extra) < sizeof(Block_t))
			align *= 2;
	}

	/* reclaim all free blocks now to avoid fragmentation */
	bestreclaim(vd,NIL(Block_t*),0);

	s = size + 2*(align+sizeof(Head_t)+extra);
	if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) )
		goto done;

	tp = BLOCK(data);
	seg = SEG(tp);

	/* get an aligned address that we can live with */
	if((s = (size_t)((VLONG(data)+extra)%align)) != 0)
		data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0);

	if((np = BLOCK(data)) != tp ) /* need to free left part */
	{	if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) )
		{	data += align;
			np = BLOCK(data);
		} /**/ASSERT(((VLONG(data)+extra)%align) == 0);

		s  = (Vmuchar_t*)np - (Vmuchar_t*)tp;
		SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
		SEG(np) = seg;

		SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
		/**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
	}

	/* free left-over if too big */
	if((s = SIZE(np) - size) >= sizeof(Block_t))
	{	SIZE(np) = size;

		tp = NEXT(np);
		SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
		SEG(tp) = seg;
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;

		SIZE(np) |= s&BITS;
	}

	bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */

	if(!local && !(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) )
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);

done:
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)data, vm->disc);

	CLRINUSE(vd, inuse);
	return (Void_t*)data;
}
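
/* Illustrative sketch (not part of this file): aligned allocation via
** the public interface, which dispatches to bestalign() for regions
** using this method. vmalign() is the standard vmalloc entry point;
** the page size of 4096 is only an assumption for the example.
**
**		Void_t* p = vmalign(vm, 8192, 4096);
**		if(p)
**			vmfree(vm, p);
*/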


#if _mem_win32
#if _PACKAGE_ast
#include		<ast_windows.h>
#else
#include		<windows.h>
#endif
#endif /* _mem_win32 */

#if _mem_mmap_anon
#include		<sys/mman.h>
#ifndef MAP_ANON
#define	MAP_ANON	MAP_ANONYMOUS
#endif
#endif /* _mem_mmap_anon */

#if _mem_mmap_zero
#include		<sys/fcntl.h>
#include		<sys/mman.h>
typedef struct _mmapdisc_s
{	Vmdisc_t	disc;
	int		fd;
	off_t		offset;
} Mmapdisc_t;

#ifndef OPEN_MAX
#define	OPEN_MAX	64
#endif
#define OPEN_PRIVATE	(3*OPEN_MAX/4)
#endif /* _mem_mmap_zero */

/* failure mode of mmap, sbrk and brk */
#ifndef MAP_FAILED
#define MAP_FAILED	((Void_t*)(-1))
#endif
#define BRK_FAILED	((Void_t*)(-1))

/* make sure that allocated memory is addressable */

#if _PACKAGE_ast
#include	<sig.h>
#else
#include	<signal.h>
typedef void	(*Sig_handler_t)(int);
#endif

static int	Gotsegv = 0;

#if __STD_C
static void sigsegv(int sig)
#else
static void sigsegv(sig)
int	sig;
#endif
{
	if(sig == SIGSEGV)
		Gotsegv = 1;
}

#if __STD_C
static int okaddr(Void_t* addr, size_t nsize)
#else
static int okaddr(addr, nsize)
Void_t*	addr;
size_t	nsize;
#endif
{
	Sig_handler_t	segv;
	int		rv;

	Gotsegv = 0; /* catch segmentation faults */
	segv = signal(SIGSEGV, sigsegv);

	if(Gotsegv == 0)
		rv = *((char*)addr);
	if(Gotsegv == 0)
		rv += *(((char*)addr)+nsize-1);
	if(Gotsegv == 0)
		rv = rv == 0 ? 0 : 1;
	else	rv = -1;

	signal(SIGSEGV, segv); /* restore signal catcher */
	Gotsegv = 0;

	return rv;
}
1331
1332/* A discipline to get raw memory using sbrk/VirtualAlloc/mmap */
1333#if __STD_C
1334static Void_t* sbrkmem(Vmalloc_t* vm, Void_t* caddr,
1335			size_t csize, size_t nsize, Vmdisc_t* disc)
1336#else
1337static Void_t* sbrkmem(vm, caddr, csize, nsize, disc)
1338Vmalloc_t*	vm;	/* region doing allocation from		*/
1339Void_t*		caddr;	/* current address			*/
1340size_t		csize;	/* current size				*/
1341size_t		nsize;	/* new size				*/
1342Vmdisc_t*	disc;	/* discipline structure			*/
1343#endif
1344{
1345#undef _done_sbrkmem
1346
1347#if !defined(_done_sbrkmem) && defined(_mem_win32)
1348#define _done_sbrkmem	1
1349	NOTUSED(vm);
1350	NOTUSED(disc);
1351	if(csize == 0)
1352		return (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE);
1353	else if(nsize == 0)
1354		return VirtualFree((LPVOID)caddr,0,MEM_RELEASE) ? caddr : NIL(Void_t*);
1355	else	return NIL(Void_t*);
1356#endif /* MUST_WIN32 */
1357
1358#if !defined(_done_sbrkmem) && (_mem_sbrk || _mem_mmap_zero || _mem_mmap_anon)
1359#define _done_sbrkmem	1
1360	Vmuchar_t	*addr;
1361#if _mem_mmap_zero
1362	Mmapdisc_t	*mmdc = (Mmapdisc_t*)disc;
1363#else
1364	NOTUSED(disc);
1365#endif
1366	NOTUSED(vm);
1367
1368	if(csize == 0) /* allocating new memory */
1369	{
1370
1371#if _mem_sbrk	/* try using sbrk() and brk() */
1372		if(!(_Vmassert & VM_mmap))
1373		{
1374			addr = (Vmuchar_t*)sbrk(0); /* old break value */
1375			if(addr && addr != (Vmuchar_t*)BRK_FAILED )
1376			{
1377				if((addr+nsize) < addr)
1378					return NIL(Void_t*);
1379				if(brk(addr+nsize) == 0 )
1380				{	if(okaddr(addr,nsize) >= 0)
1381						return addr;
1382					(void)brk(addr); /* release reserved address */
1383				}
1384			}
1385		}
1386#endif /* _mem_sbrk */
1387
1388#if _mem_mmap_anon /* anonymous mmap */
1389		{
1390			addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
1391                                        	MAP_ANON|MAP_PRIVATE, -1, 0);
1392			if(addr && addr != (Vmuchar_t*)MAP_FAILED)
1393			{	if(okaddr(addr,nsize) >= 0)
1394					return addr;
1395				(void)munmap(addr, nsize); /* release reserved address */
1396			}
1397		}
1398#endif /* _mem_mmap_anon */
1399
1400#if _mem_mmap_zero /* mmap from /dev/zero */
1401		{
1402			if(mmdc->fd < 0)
1403			{	int	fd;
1404				if(mmdc->fd != -1)
1405					return NIL(Void_t*);
1406				if((fd = open("/dev/zero", O_RDONLY)) < 0 )
1407				{	mmdc->fd = -2;
1408					return NIL(Void_t*);
1409				}
1410				if(fd >= OPEN_PRIVATE || (mmdc->fd = dup2(fd,OPEN_PRIVATE)) < 0 )
1411					mmdc->fd = fd;
1412				else	close(fd);
1413#ifdef FD_CLOEXEC
1414				fcntl(mmdc->fd, F_SETFD, FD_CLOEXEC);
1415#endif
1416			}
1417			addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
1418						MAP_PRIVATE, mmdc->fd, mmdc->offset);
1419			if(addr && addr != (Vmuchar_t*)MAP_FAILED)
1420			{	if(okaddr(addr, nsize) >= 0)
1421				{	mmdc->offset += nsize;
1422					return addr;
1423				}
1424				(void)munmap(addr, nsize); /* release reserved address */
1425			}
1426		}
1427#endif /* _mem_mmap_zero */
1428
1429		return NIL(Void_t*);
1430	}
1431	else
1432	{	addr = caddr; /* in case !_mem_sbrk */
1433
1434#if _mem_sbrk
1435		{
1436			addr = (Vmuchar_t*)sbrk(0);
1437			if(!addr || addr == (Vmuchar_t*)BRK_FAILED)
1438				addr = caddr;
1439			else if(((Vmuchar_t*)caddr+csize) == addr) /* in sbrk-space */
1440			{	if(nsize <= csize)
1441					addr -= csize-nsize;
1442				else if((addr += nsize-csize) < (Vmuchar_t*)caddr)
1443					return NIL(Void_t*); /* wrapped around address */
1444				else	return brk(addr) == 0 ? caddr : NIL(Void_t*);
1445			}
1446		}
1447#endif /* _mem_sbrk */
1448
1449#if _mem_mmap_zero || _mem_mmap_anon
1450		{
1451			if(((Vmuchar_t*)caddr+csize) > addr) /* in mmap-space */
1452				if(nsize == 0 && munmap(caddr,csize) == 0)
1453					return caddr;
1454		}
1455#endif /* _mem_mmap_zero || _mem_mmap_anon */
1456
1457		return NIL(Void_t*);
1458	}
1459#endif /*_done_sbrkmem*/
1460
1461#if !_done_sbrkmem /* use native malloc/free as a last resort */
1462	/**/ASSERT(_std_malloc); /* _std_malloc should be well-defined */
1463	NOTUSED(vm);
1464	NOTUSED(disc);
1465	if(csize == 0)
1466		return (Void_t*)malloc(nsize);
1467	else if(nsize == 0)
1468	{	free(caddr);
1469		return caddr;
1470	}
1471	else	return NIL(Void_t*);
1472#endif /* _done_sbrkmem */
1473}
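
/* Illustrative sketch (not part of this file): a user-supplied
** discipline has the same shape as sbrkmem above. The memoryf contract
** is: csize == 0 requests nsize fresh bytes, nsize == 0 releases the
** range, anything else asks for an in-place resize; return the address
** on success, NIL on failure. The names mymemf and mydisc are
** hypothetical.
**
**		static Void_t* mymemf(Vmalloc_t* vm, Void_t* caddr,
**				size_t csize, size_t nsize, Vmdisc_t* disc)
**		{	if(csize == 0)
**				return (Void_t*)malloc(nsize);
**			else if(nsize == 0)
**			{	free(caddr);
**				return caddr;
**			}
**			else	return NIL(Void_t*);
**		}
**		static Vmdisc_t mydisc = { mymemf, NIL(Vmexcept_f), 0 };
**
**		Vmalloc_t* vm = vmopen(&mydisc, Vmbest, 0);
*/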

#if _mem_mmap_zero
static Mmapdisc_t _Vmdcsbrk = { { sbrkmem, NIL(Vmexcept_f), 64*1024 }, -1, 0 };
#else
static Vmdisc_t _Vmdcsbrk = { sbrkmem, NIL(Vmexcept_f), 0 };
#endif

static Vmethod_t _Vmbest =
{
	bestalloc,
	bestresize,
	bestfree,
	bestaddr,
	bestsize,
	bestcompact,
	bestalign,
	VM_MTBEST
};

/* The heap region */
static Vmdata_t	_Vmdata =
{
	VM_MTBEST|VM_TRUST,		/* mode		*/
	0,				/* incr		*/
	0,				/* pool		*/
	NIL(Seg_t*),			/* seg		*/
	NIL(Block_t*),			/* free		*/
	NIL(Block_t*),			/* wild		*/
	NIL(Block_t*),			/* root		*/
};
Vmalloc_t _Vmheap =
{
	{ bestalloc,
	  bestresize,
	  bestfree,
	  bestaddr,
	  bestsize,
	  bestcompact,
	  bestalign,
	  VM_MTBEST
	},
	NIL(char*),			/* file		*/
	0,				/* line		*/
	0,				/* func		*/
	(Vmdisc_t*)(&_Vmdcsbrk),	/* disc		*/
	&_Vmdata,			/* data		*/
	NIL(Vmalloc_t*)			/* next		*/
};

__DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap);
__DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap);
__DEFINE__(Vmethod_t*, Vmbest, &_Vmbest);
__DEFINE__(Vmdisc_t*,  Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsbrk) );
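
/* Usage note (illustrative): Vmheap is the predefined best-fit region,
** so clients may allocate from it directly without vmopen(). vmresize()
** flags follow the VM_RS* conventions used by bestresize() above.
**
**		Void_t* p = vmalloc(Vmheap, 128);
**		p = vmresize(Vmheap, p, 256, VM_RSCOPY|VM_RSMOVE);
**		if(p)
**			vmfree(Vmheap, p);
*/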

#ifdef NoF
NoF(vmbest)
#endif

#endif
1533