1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * The structure of the sbrk backend:
29  *
30  * +-----------+
31  * | sbrk_top  |
32  * +-----------+
33  *      | (vmem_sbrk_alloc(), vmem_free())
34  *      |
35  * +-----------+
36  * | sbrk_heap |
37  * +-----------+
38  *   | | ... |  (vmem_alloc(), vmem_free())
39  * <other arenas>
40  *
41  * The sbrk_top arena holds all controlled memory.  vmem_sbrk_alloc() handles
42  * allocations from it, including growing the heap when we run low.
43  *
44  * Growing the heap is complicated by the fact that we have to extend the
45  * sbrk_top arena (using _vmem_extend_alloc()), and that can fail.  Since
46  * other threads may be actively allocating, we can't return the memory.
47  *
48  * Instead, we put it on a doubly-linked list, sbrk_fails, which we search
49  * before calling sbrk().
50  */
51 
52 #include <errno.h>
53 #include <limits.h>
54 #include <sys/sysmacros.h>
55 #include <sys/mman.h>
56 #include <unistd.h>
57 
58 #include "vmem_base.h"
59 
60 #include "misc.h"
61 
size_t vmem_sbrk_pagesize = 0; /* the preferred page size of the heap */

#define	VMEM_SBRK_MINALLOC	(64 * 1024)
size_t vmem_sbrk_minalloc = VMEM_SBRK_MINALLOC; /* minimum allocation */

static size_t real_pagesize;	/* system page size, set in vmem_sbrk_arena() */
static vmem_t *sbrk_heap;	/* the arena handed out by vmem_sbrk_arena() */

/*
 * Record of a span that _vmem_extend_alloc() refused.  The record is
 * stored in the first bytes of the failed span itself, so sf_base is
 * always the address of the record (asserted in vmem_sbrk_tryfail()).
 */
typedef struct sbrk_fail {
	struct sbrk_fail *sf_next;
	struct sbrk_fail *sf_prev;
	void *sf_base;			/* == the sbrk_fail's address */
	size_t sf_size;			/* the size of this buffer */
} sbrk_fail_t;

/* head of the circular doubly-linked list of failed spans (see file comment) */
static sbrk_fail_t sbrk_fails = {
	&sbrk_fails,
	&sbrk_fails,
	NULL,
	0
};

/* protects the sbrk_fails list */
static mutex_t sbrk_faillock = DEFAULTMUTEX;
85 
86 /*
87  * Try to extend src with [pos, pos + size).
88  *
89  * If it fails, add the block to the sbrk_fails list.
90  */
91 static void *
vmem_sbrk_extend_alloc(vmem_t * src,void * pos,size_t size,size_t alloc,int vmflags)92 vmem_sbrk_extend_alloc(vmem_t *src, void *pos, size_t size, size_t alloc,
93     int vmflags)
94 {
95 	sbrk_fail_t *fnext, *fprev, *fp;
96 	void *ret;
97 
98 	ret = _vmem_extend_alloc(src, pos, size, alloc, vmflags);
99 	if (ret != NULL)
100 		return (ret);
101 
102 	fp = (sbrk_fail_t *)pos;
103 
104 	ASSERT(sizeof (sbrk_fail_t) <= size);
105 
106 	fp->sf_base = pos;
107 	fp->sf_size = size;
108 
109 	(void) mutex_lock(&sbrk_faillock);
110 	fp->sf_next = fnext = &sbrk_fails;
111 	fp->sf_prev = fprev = sbrk_fails.sf_prev;
112 	fnext->sf_prev = fp;
113 	fprev->sf_next = fp;
114 	(void) mutex_unlock(&sbrk_faillock);
115 
116 	return (NULL);
117 }
118 
119 /*
120  * Try to add at least size bytes to src, using the sbrk_fails list
121  */
122 static void *
vmem_sbrk_tryfail(vmem_t * src,size_t size,int vmflags)123 vmem_sbrk_tryfail(vmem_t *src, size_t size, int vmflags)
124 {
125 	sbrk_fail_t *fp;
126 
127 	(void) mutex_lock(&sbrk_faillock);
128 	for (fp = sbrk_fails.sf_next; fp != &sbrk_fails; fp = fp->sf_next) {
129 		if (fp->sf_size >= size) {
130 			fp->sf_next->sf_prev = fp->sf_prev;
131 			fp->sf_prev->sf_next = fp->sf_next;
132 			fp->sf_next = fp->sf_prev = NULL;
133 			break;
134 		}
135 	}
136 	(void) mutex_unlock(&sbrk_faillock);
137 
138 	if (fp != &sbrk_fails) {
139 		ASSERT(fp->sf_base == (void *)fp);
140 		return (vmem_sbrk_extend_alloc(src, fp, fp->sf_size, size,
141 		    vmflags));
142 	}
143 	/*
144 	 * nothing of the right size on the freelist
145 	 */
146 	return (NULL);
147 }
148 
149 static void *
vmem_sbrk_alloc(vmem_t * src,size_t size,int vmflags)150 vmem_sbrk_alloc(vmem_t *src, size_t size, int vmflags)
151 {
152 	extern void *_sbrk_grow_aligned(size_t min_size, size_t low_align,
153 	    size_t high_align, size_t *actual_size);
154 
155 	void *ret;
156 	void *buf;
157 	size_t buf_size;
158 
159 	int old_errno = errno;
160 
161 	ret = vmem_alloc(src, size, VM_NOSLEEP);
162 	if (ret != NULL) {
163 		errno = old_errno;
164 		return (ret);
165 	}
166 
167 	/*
168 	 * The allocation failed.  We need to grow the heap.
169 	 *
170 	 * First, try to use any buffers which failed earlier.
171 	 */
172 	if (sbrk_fails.sf_next != &sbrk_fails &&
173 	    (ret = vmem_sbrk_tryfail(src, size, vmflags)) != NULL)
174 		return (ret);
175 
176 	buf_size = MAX(size, vmem_sbrk_minalloc);
177 
178 	/*
179 	 * buf_size gets overwritten with the actual allocated size
180 	 */
181 	buf = _sbrk_grow_aligned(buf_size, real_pagesize, vmem_sbrk_pagesize,
182 	    &buf_size);
183 
184 	if (buf != MAP_FAILED) {
185 		ret = vmem_sbrk_extend_alloc(src, buf, buf_size, size, vmflags);
186 		if (ret != NULL) {
187 			errno = old_errno;
188 			return (ret);
189 		}
190 	}
191 
192 	/*
193 	 * Growing the heap failed. The vmem_alloc() above called umem_reap().
194 	 */
195 	ASSERT((vmflags & VM_NOSLEEP) == VM_NOSLEEP);
196 
197 	errno = old_errno;
198 	return (NULL);
199 }
200 
201 /*
202  * fork1() support
203  */
204 void
vmem_sbrk_lockup(void)205 vmem_sbrk_lockup(void)
206 {
207 	(void) mutex_lock(&sbrk_faillock);
208 }
209 
/* fork1() support: release the lock acquired by vmem_sbrk_lockup() */
void
vmem_sbrk_release(void)
{
	(void) mutex_unlock(&sbrk_faillock);
}
215 
216 vmem_t *
vmem_sbrk_arena(vmem_alloc_t ** a_out,vmem_free_t ** f_out)217 vmem_sbrk_arena(vmem_alloc_t **a_out, vmem_free_t **f_out)
218 {
219 	if (sbrk_heap == NULL) {
220 		size_t heap_size;
221 
222 		real_pagesize = sysconf(_SC_PAGESIZE);
223 
224 		heap_size = vmem_sbrk_pagesize;
225 
226 		if (issetugid()) {
227 			heap_size = 0;
228 		} else if (heap_size != 0 && !ISP2(heap_size)) {
229 			heap_size = 0;
230 			log_message("ignoring bad pagesize: 0x%p\n", heap_size);
231 		}
232 		if (heap_size <= real_pagesize) {
233 			heap_size = real_pagesize;
234 		} else {
235 			struct memcntl_mha mha;
236 			mha.mha_cmd = MHA_MAPSIZE_BSSBRK;
237 			mha.mha_flags = 0;
238 			mha.mha_pagesize = heap_size;
239 
240 			if (memcntl(NULL, 0, MC_HAT_ADVISE, (char *)&mha, 0, 0)
241 			    == -1) {
242 				log_message("unable to set MAPSIZE_BSSBRK to "
243 				    "0x%p\n", heap_size);
244 				heap_size = real_pagesize;
245 			}
246 		}
247 		vmem_sbrk_pagesize = heap_size;
248 
249 		/* validate vmem_sbrk_minalloc */
250 		if (vmem_sbrk_minalloc < VMEM_SBRK_MINALLOC)
251 			vmem_sbrk_minalloc = VMEM_SBRK_MINALLOC;
252 		vmem_sbrk_minalloc = P2ROUNDUP(vmem_sbrk_minalloc, heap_size);
253 
254 		sbrk_heap = vmem_init("sbrk_top", real_pagesize,
255 		    vmem_sbrk_alloc, vmem_free,
256 		    "sbrk_heap", NULL, 0, real_pagesize,
257 		    vmem_alloc, vmem_free);
258 	}
259 
260 	if (a_out != NULL)
261 		*a_out = vmem_alloc;
262 	if (f_out != NULL)
263 		*f_out = vmem_free;
264 
265 	return (sbrk_heap);
266 }
267