1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 * Copyright (c) 2019 Joyent, Inc.
29 * Copyright (c) 2015 by Delphix. All rights reserved.
30 */
31
32/*
33 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
34 *
35 * The slab allocator, as described in the following two papers:
36 *
37 *	Jeff Bonwick,
38 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator.
39 *	Proceedings of the Summer 1994 Usenix Conference.
40 *	Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
41 *
42 *	Jeff Bonwick and Jonathan Adams,
43 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
44 *	Arbitrary Resources.
45 *	Proceedings of the 2001 Usenix Conference.
46 *	Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
47 *
48 * 1. Overview
49 * -----------
50 * umem is very close to kmem in implementation.  There are seven major
51 * areas of divergence:
52 *
53 *	* Initialization
54 *
55 *	* CPU handling
56 *
57 *	* umem_update()
58 *
 *	* KM_SLEEP vs. UMEM_NOFAIL
60 *
61 *	* lock ordering
62 *
63 *	* changing UMEM_MAXBUF
64 *
65 *	* Per-thread caching for malloc/free
66 *
67 * 2. Initialization
68 * -----------------
69 * kmem is initialized early on in boot, and knows that no one will call
70 * into it before it is ready.  umem does not have these luxuries. Instead,
71 * initialization is divided into two phases:
72 *
73 *	* library initialization, and
74 *
75 *	* first use
76 *
77 * umem's full initialization happens at the time of the first allocation
78 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
79 * or the first call to umem_cache_create().
80 *
 * umem_free() and umem_cache_alloc() do not require special handling,
82 * since the only way to get valid arguments for them is to successfully
83 * call a function from the first group.
84 *
85 * 2.1. Library Initialization: umem_startup()
86 * -------------------------------------------
87 * umem_startup() is libumem.so's .init section.  It calls pthread_atfork()
88 * to install the handlers necessary for umem's Fork1-Safety.  Because of
89 * race condition issues, all other pre-umem_init() initialization is done
90 * statically (i.e. by the dynamic linker).
91 *
92 * For standalone use, umem_startup() returns everything to its initial
93 * state.
94 *
95 * 2.2. First use: umem_init()
96 * ------------------------------
97 * The first time any memory allocation function is used, we have to
98 * create the backing caches and vmem arenas which are needed for it.
99 * umem_init() is the central point for that task.  When it completes,
100 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
101 * to initialize, probably due to lack of memory).
102 *
103 * There are four different paths from which umem_init() is called:
104 *
105 *	* from umem_alloc() or umem_zalloc(), with 0 < size < UMEM_MAXBUF,
106 *
107 *	* from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
108 *
109 *	* from umem_cache_create(), and
110 *
111 *	* from memalign(), with align > UMEM_ALIGN.
112 *
113 * The last three just check if umem is initialized, and call umem_init()
114 * if it is not.  For performance reasons, the first case is more complicated.
115 *
116 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size < UMEM_MAXBUF
117 * -----------------------------------------------------------------
118 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
 * There is special case code which causes any allocation on
 * &umem_null_cache to fail by returning NULL, regardless of the
 * flags argument.
122 *
123 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
124 * umem_alloc_retry().  umem_alloc_retry() sees that the allocation
 * was against &umem_null_cache, and calls umem_init().
126 *
 * If initialization is successful, umem_alloc_retry() returns 1, which
 * causes umem_alloc()/umem_zalloc() to start over and load the (now valid)
 * cache pointer from umem_alloc_table.
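 *
 * A rough sketch of that retry pattern, using names from this file (the
 * real _umem_alloc() appears later in this file; the table-index
 * computation shown is illustrative):
 *
 *	retry:
 *		cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
 *		buf = _umem_cache_alloc(cp, umflag);
 *		if (buf == NULL && umem_alloc_retry(cp, umflag))
 *			goto retry;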
130 *
131 * 2.2.2. Dealing with race conditions
132 * -----------------------------------
 * There are a couple of race conditions resulting from the initialization
134 * code that we have to guard against:
135 *
136 *	* In umem_cache_create(), there is a special UMC_INTERNAL cflag
137 *	that is passed for caches created during initialization.  It
138 *	is illegal for a user to try to create a UMC_INTERNAL cache.
139 *	This allows initialization to proceed, but any other
140 *	umem_cache_create()s will block by calling umem_init().
141 *
 *	* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
 *	is always zero.  umem_cache_alloc() uses cp->cache_cpu_mask to
144 *	mask the cpu number.  This prevents a race between grabbing a
145 *	cache pointer out of umem_alloc_table and growing the cpu array.
146 *
147 *
148 * 3. CPU handling
149 * ---------------
150 * kmem uses the CPU's sequence number to determine which "cpu cache" to
151 * use for an allocation.  Currently, there is no way to get the sequence
152 * number in userspace.
153 *
154 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
 * umem_cpu_t structures.  CPUHINT() is a "hint" function, whose result we mask
 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
 * The mechanics of this are all in the CPU(mask) macro.
158 *
159 * Currently, umem uses _lwp_self() as its hint.
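 *
 * For example, selecting the per-CPU cache for a cache cp might look like
 * the following sketch (CPU() and CPUHINT() are defined later in this file;
 * the field names shown are illustrative):
 *
 *	umem_cpu_cache_t *ccp =
 *	    &cp->cache_cpu[CPU(cp->cache_cpu_mask)->cpu_number];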
160 *
161 *
162 * 4. The update thread
163 * --------------------
164 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
165 * every kmem cache.  vmem has a periodic timeout for hash table resizing.
 * The kmem_taskq also provides a separate context for kmem_cache_reap() work
 * to be done in, keeping it out of the context of kmem_reap() callers.
168 *
169 * Instead, umem has the concept of "updates", which are asynchronous requests
170 * for work attached to single caches.  All caches with pending work are
171 * on a doubly linked list rooted at the umem_null_cache.  All update state
172 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
173 * for notification between threads.
174 *
175 * 4.1. Cache states with regards to updates
176 * -----------------------------------------
177 * A given cache is in one of three states:
178 *
179 * Inactive		cache_uflags is zero, cache_u{next,prev} are NULL
180 *
181 * Work Requested	cache_uflags is non-zero (but UMU_ACTIVE is not set),
182 *			cache_u{next,prev} link the cache onto the global
183 *			update list
184 *
185 * Active		cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
186 *			are NULL, and either umem_update_thr or
187 *			umem_st_update_thr are actively doing work on the
188 *			cache.
189 *
190 * An update can be added to any cache in any state -- if the cache is
191 * Inactive, it transitions to being Work Requested.  If the cache is
192 * Active, the worker will notice the new update and act on it before
193 * transitioning the cache to the Inactive state.
194 *
195 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
196 * the worker to broadcast the umem_update_cv when it has finished.
197 *
198 * 4.2. Update interface
199 * ---------------------
200 * umem_add_update() adds an update to a particular cache.
201 * umem_updateall() adds an update to all caches.
202 * umem_remove_updates() returns a cache to the Inactive state.
203 *
 * umem_process_updates() processes all caches in the Work Requested state.
205 *
206 * 4.3. Reaping
207 * ------------
 * When umem_reap() is called (at the time of heap growth), it schedules
 * UMU_REAP updates on every cache.  It then checks to see if the update
 * thread exists (umem_update_thr != 0).  If it does, it broadcasts
 * the umem_update_cv to wake the update thread up, and returns.
212 *
213 * If the update thread does not exist (umem_update_thr == 0), and the
214 * program currently has multiple threads, umem_reap() attempts to create
215 * a new update thread.
216 *
217 * If the process is not multithreaded, or the creation fails, umem_reap()
218 * calls umem_st_update() to do an inline update.
219 *
220 * 4.4. The update thread
221 * ----------------------
222 * The update thread spends most of its time in cond_timedwait() on the
223 * umem_update_cv.  It wakes up under two conditions:
224 *
225 *	* The timedwait times out, in which case it needs to run a global
226 *	update, or
227 *
228 *	* someone cond_broadcast(3THR)s the umem_update_cv, in which case
229 *	it needs to check if there are any caches in the Work Requested
230 *	state.
231 *
232 * When it is time for another global update, umem calls umem_cache_update()
233 * on every cache, then calls vmem_update(), which tunes the vmem structures.
234 * umem_cache_update() can request further work using umem_add_update().
235 *
236 * After any work from the global update completes, the update timer is
237 * reset to umem_reap_interval seconds in the future.  This makes the
238 * updates self-throttling.
239 *
240 * Reaps are similarly self-throttling.  After a UMU_REAP update has
241 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
242 * update thread.  The update thread notices the flag, and resets the
243 * reap state.
244 *
245 * 4.5. Inline updates
246 * -------------------
247 * If the update thread is not running, umem_st_update() is used instead.  It
248 * immediately does a global update (as above), then calls
249 * umem_process_updates() to process both the reaps that umem_reap() added and
250 * any work generated by the global update.  Afterwards, it resets the reap
251 * state.
252 *
 * While umem_st_update() is running, umem_st_update_thr holds the thread
254 * id of the thread performing the update.
255 *
256 * 4.6. Updates and fork1()
257 * ------------------------
258 * umem has fork1() pre- and post-handlers which lock up (and release) every
259 * mutex in every cache.  They also lock up the umem_update_lock.  Since
260 * fork1() only copies over a single lwp, other threads (including the update
261 * thread) could have been actively using a cache in the parent.  This
262 * can lead to inconsistencies in the child process.
263 *
 * Because we locked all of the mutexes, the only possible inconsistencies are:
265 *
266 *	* a umem_cache_alloc() could leak its buffer.
267 *
268 *	* a caller of umem_depot_alloc() could leak a magazine, and all the
269 *	buffers contained in it.
270 *
271 *	* a cache could be in the Active update state.  In the child, there
272 *	would be no thread actually working on it.
273 *
274 *	* a umem_hash_rescale() could leak the new hash table.
275 *
276 *	* a umem_magazine_resize() could be in progress.
277 *
278 *	* a umem_reap() could be in progress.
279 *
 * The memory leaks we can't do anything about.  umem_release_child() resets
 * the update state and moves any caches in the Active state to the Work
 * Requested state.  This might cause some updates to be re-run, but UMU_REAP and
283 * UMU_HASH_RESCALE are effectively idempotent, and the worst that can
284 * happen from umem_magazine_resize() is resizing the magazine twice in close
285 * succession.
286 *
287 * Much of the cleanup in umem_release_child() is skipped if
288 * umem_st_update_thr == thr_self().  This is so that applications which call
 * fork1() from a cache callback do not break.  Needless to say, any such
290 * application is tremendously broken.
291 *
292 *
 * 5. KM_SLEEP vs. UMEM_NOFAIL
294 * ----------------------------
295 * Allocations against kmem and vmem have two basic modes:  SLEEP and
 * NOSLEEP.  A sleeping allocation will go to sleep (waiting for
 * more memory) instead of failing (returning NULL).
298 *
299 * SLEEP allocations presume an extremely multithreaded model, with
300 * a lot of allocation and deallocation activity.  umem cannot presume
301 * that its clients have any particular type of behavior.  Instead,
302 * it provides two types of allocations:
303 *
304 *	* UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
305 *	failure)
306 *
307 *	* UMEM_NOFAIL, which, on failure, calls an optional callback
308 *	(registered with umem_nofail_callback()).
309 *
310 * The callback is invoked with no locks held, and can do an arbitrary
311 * amount of work.  It then has a choice between:
312 *
313 *	* Returning UMEM_CALLBACK_RETRY, which will cause the allocation
314 *	to be restarted.
315 *
316 *	* Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
317 *	to be invoked with status.  If multiple threads attempt to do
318 *	this simultaneously, only one will call exit(2).
319 *
320 *	* Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
321 *	etc.)
322 *
323 * The default callback returns UMEM_CALLBACK_EXIT(255).
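 *
 * For illustration, an application might register a callback and use
 * UMEM_NOFAIL like this (a minimal sketch using the public umem interfaces;
 * error reporting is up to the application):
 *
 *	static int
 *	my_nofail(void)
 *	{
 *		(void) fprintf(stderr, "out of memory; retrying\n");
 *		return (UMEM_CALLBACK_RETRY);
 *	}
 *	...
 *	umem_nofail_callback(my_nofail);
 *	buf = umem_alloc(size, UMEM_NOFAIL);	(never returns NULL)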
324 *
325 * To have these callbacks without risk of state corruption (in the case of
326 * a non-local exit), we have to ensure that the callbacks get invoked
327 * close to the original allocation, with no inconsistent state or held
328 * locks.  The following steps are taken:
329 *
330 *	* All invocations of vmem are VM_NOSLEEP.
331 *
 *	* All constructor callbacks (which can themselves do allocations)
 *	are passed UMEM_DEFAULT as their required allocation argument.  This
 *	way, the constructor will fail, allowing the highest-level allocation
 *	to invoke the nofail callback.
336 *
337 *	If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
338 *	the nofail callback does a non-local exit, we will leak the
339 *	partially-constructed buffer.
340 *
341 *
342 * 6. Lock Ordering
343 * ----------------
344 * umem has a few more locks than kmem does, mostly in the update path.  The
345 * overall lock ordering (earlier locks must be acquired first) is:
346 *
347 *	umem_init_lock
348 *
349 *	vmem_list_lock
350 *	vmem_nosleep_lock.vmpl_mutex
351 *	vmem_t's:
352 *		vm_lock
353 *	sbrk_lock
354 *
355 *	umem_cache_lock
356 *	umem_update_lock
357 *	umem_flags_lock
358 *	umem_cache_t's:
359 *		cache_cpu[*].cc_lock
360 *		cache_depot_lock
361 *		cache_lock
362 *	umem_log_header_t's:
363 *		lh_cpu[*].clh_lock
364 *		lh_lock
365 *
366 * 7. Changing UMEM_MAXBUF
367 * -----------------------
368 *
 * When changing UMEM_MAXBUF, extra care has to be taken. It is not sufficient
 * to simply increase this number. First, one must update the umem_alloc_table
 * to have the appropriate number of entries based upon the new size. If this
 * is not done, libumem will blow an assertion.
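 *
 * For example: umem_alloc_table is declared below with
 * UMEM_MAXBUF >> UMEM_ALIGN_SHIFT entries; with the current 128K (131072
 * byte) UMEM_MAXBUF and the usual 8-byte UMEM_ALIGN, that works out to
 * 131072 / 8 = 16384 entries, i.e. the sixteen ALLOC_TABLE_1024 blocks in
 * its initializer.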
373 *
 * The second place to update, which is optional, is the umem_alloc_sizes
 * array.  Its entries determine the default cache sizes that we're going to
 * support.
376 *
377 * 8. Per-thread caching for malloc/free
378 * -------------------------------------
379 *
380 * "Time is an illusion. Lunchtime doubly so." -- Douglas Adams
381 *
382 * Time may be an illusion, but CPU cycles aren't.  While libumem is designed
383 * to be a highly scalable allocator, that scalability comes with a fixed cycle
 * penalty even in the absence of contention: libumem must acquire (and release)
 * a per-CPU lock for each allocation.  When contention is low and malloc(3C)
 * frequency is high, this overhead can dominate execution time.  To alleviate
 * this, we allow for per-thread caching, a lock-free means of caching recent
 * deallocations on a per-thread basis for use in satisfying subsequent calls to
 * malloc(3C).
389 *
390 * In addition to improving performance, we also want to:
391 *	* Minimize fragmentation
392 *	* Not add additional memory overhead (no larger malloc tags)
393 *
 * In the ulwp_t of each thread there is a private data structure called a
 * tmem_t that looks like:
396 *
397 * typedef struct {
398 *	size_t	tm_size;
399 *	void	*tm_roots[NTMEMBASE];  (Currently 16)
400 * } tmem_t;
401 *
402 * Each of the roots is treated as the head of a linked list. Each entry in the
403 * list can be thought of as a void ** which points to the next entry, until one
404 * of them points to NULL. If the head points to NULL, the list is empty.
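 *
 * In other words, popping and pushing a buffer on root i amounts to (sketch):
 *
 *	buf = tm->tm_roots[i];			(pop, if non-NULL)
 *	tm->tm_roots[i] = *(void **)buf;
 *
 *	*(void **)buf = tm->tm_roots[i];	(push)
 *	tm->tm_roots[i] = buf;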
405 *
406 * Each head corresponds to a umem_cache. Currently there is a linear mapping
407 * where the first root corresponds to the first cache, second root to the
408 * second cache, etc. This works because every allocation that malloc makes to
 * umem_alloc that can be satisfied by a umem_cache will actually return a
 * number of bytes equal to the size of that cache. Because of this property and
 * a one-to-one mapping between caches and roots, we can guarantee that every
412 * entry in a given root's list will be able to satisfy the same requests as the
413 * corresponding cache.
414 *
415 * The choice of sixteen roots is based on where we believe we get the biggest
 * bang for our buck. The per-thread caches will cache up to 256-byte and
 * 448-byte allocations on ILP32 and LP64 respectively. Generally, applications
 * plan their larger allocations more carefully than their smaller ones.
 * Therefore sixteen roots is a reasonable compromise between the amount of
 * additional overhead per thread and the likelihood of a program benefiting
 * from it.
421 *
422 * The maximum amount of memory that can be cached in each thread is determined
423 * by the perthread_cache UMEM_OPTION. It corresponds to the umem_ptc_size
 * value. The default value for this is currently 1 MB. Once umem_init() has
 * finished, this cannot be tuned without directly modifying the
 * instruction text. If, upon calling free(3C), the amount cached would exceed
 * this maximum, we return the buffer to the umem_cache rather than holding
 * onto it in the thread.
429 *
430 * When a thread calls malloc(3C) it first determines which umem_cache it
431 * would be serviced by. If the allocation is not covered by ptcumem it goes to
 * the normal malloc instead.  Next, it checks whether the corresponding
 * tm_root's list is empty or not.  If it is empty, we allocate the memory from
 * umem_alloc.  If it is not empty, we remove the head of the list, set the
435 * appropriate malloc tags, and return that buffer.
436 *
 * When a thread calls free(3C) it first looks at the malloc tag; if the tag is
 * invalid or the allocation exceeds the largest cache in ptcumem, it sends the
 * buffer off to the original free() to handle and clean up appropriately. Next,
 * it checks if the allocation size is covered by one of the per-thread roots
 * and, if it isn't, passes the buffer off to the original free() to be
 * released. Finally,
442 * before it inserts this buffer as the head, it checks if adding this buffer
443 * would put the thread over its maximum cache size. If it would, it frees the
 * buffer back to the umem_cache. Otherwise it increments the thread's total
445 * cached amount and makes the buffer the new head of the appropriate tm_root.
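 *
 * Putting the pieces together, the free(3C) fast path is roughly the
 * following sketch (the generated assembly versions live in the
 * umem_genasm.c files; tm points to the thread's tmem_t):
 *
 *	size = size decoded from the malloc tag;
 *	if (the tag is invalid || size is not covered by a ptc root)
 *		jump to the original free();
 *	else if (tm->tm_size + size > umem_ptc_size)
 *		free the buffer back to its umem_cache;
 *	else {
 *		tm->tm_size += size;
 *		*(void **)buf = tm->tm_roots[root index for size];
 *		tm->tm_roots[root index for size] = buf;
 *	}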
446 *
447 * When a thread exits, all of the buffers that it has in its per-thread cache
448 * will be passed to umem_free() and returned to the appropriate umem_cache.
449 *
450 * 8.1 Handling addition and removal of umem_caches
451 * ------------------------------------------------
452 *
453 * The set of umem_caches that are used to back calls to umem_alloc() and
454 * ultimately malloc() are determined at program execution time. The default set
455 * of caches is defined below in umem_alloc_sizes[]. Various umem_options exist
456 * that modify the set of caches: size_add, size_clear, and size_remove. Because
457 * the set of caches can only be determined once umem_init() has been called and
458 * we have the additional goals of minimizing additional fragmentation and
459 * metadata space overhead in the malloc tags, this forces our hand to go down a
 * slightly different path: the one trodden by fasttrap and trapstat.
461 *
462 * During umem_init we're going to dynamically construct a new version of
463 * malloc(3C) and free(3C) that utilizes the known cache sizes and then ensure
464 * that ptcmalloc and ptcfree replace malloc and free as entries in the plt. If
465 * ptcmalloc and ptcfree cannot handle a request, they simply jump to the
466 * original libumem implementations.
467 *
468 * After creating all of the umem_caches, but before making them visible,
469 * umem_cache_init checks that umem_genasm_supported is non-zero. This value is
470 * set by each architecture in $ARCH/umem_genasm.c to indicate whether or not
471 * they support this. If the value is zero, then this process is skipped.
472 * Similarly, if the cache size has been tuned to zero by UMEM_OPTIONS, then
473 * this is also skipped.
474 *
475 * In umem_genasm.c, each architecture's implementation implements a single
476 * function called umem_genasm() that is responsible for generating the
477 * appropriate versions of ptcmalloc() and ptcfree(), placing them in the
478 * appropriate memory location, and finally doing the switch from malloc() and
479 * free() to ptcmalloc() and ptcfree().  Once the change has been made, there is
480 * no way to switch back, short of restarting the program or modifying program
481 * text with mdb.
482 *
483 * 8.2 Modifying the Procedure Linkage Table (PLT)
484 * -----------------------------------------------
485 *
486 * The last piece of this puzzle is how we actually jam ptcmalloc() into the
 * PLT.  To handle this, we have defined two functions, _malloc and _free, and
 * used a special mapfile directive to place them into a readable,
489 * writeable, and executable segment.  Next we use a standard #pragma weak for
490 * malloc and free and direct them to those symbols. By default, those symbols
491 * have text defined as nops for our generated functions and when they're
492 * invoked, they jump to the default malloc and free functions.
493 *
494 * When umem_genasm() is called, it goes through and generates new malloc() and
495 * free() functions in the text provided for by _malloc and _free just after the
496 * jump. Once both have been successfully generated, umem_genasm() nops over the
497 * original jump so that we now call into the genasm versions of these
498 * functions.
499 *
500 * 8.3 umem_genasm()
501 * -----------------
502 *
503 * umem_genasm() is currently implemented for i386 and amd64. This section
504 * describes the theory behind the construction. For specific byte code to
505 * assembly instructions and niceish C and asm versions of ptcmalloc and
506 * ptcfree, see the individual umem_genasm.c files. The layout consists of the
507 * following sections:
508 *
 *	o. function-specific prologue
510 *	o. function-generic cache-selecting elements
511 *	o. function-specific epilogue
512 *
513 * There are three different generic cache elements that exist:
514 *
515 *	o. the last or only cache
516 *	o. the intermediary caches if more than two
517 *	o. the first one if more than one cache
518 *
519 * The malloc and free prologues and epilogues mimic the necessary portions of
520 * libumem's malloc and free. This includes things like checking for size
 * overflow, and setting and verifying the malloc tags.
522 *
523 * It is an important constraint that these functions do not make use of the
524 * call instruction. The only jmp outside of the individual functions is to the
525 * original libumem malloc and free respectively. Because doing things like
526 * setting errno or raising an internal umem error on improper malloc tags would
527 * require using calls into the PLT, whenever we encounter one of those cases we
 * just jump to the original malloc and free functions, reusing the same stack
 * frame.
530 *
531 * Each of the above sections, the three caches, and the malloc and free
532 * prologue and epilogue are implemented as blocks of machine code with the
533 * corresponding assembly in comments. There are known offsets into each block
 * that correspond to locations of data and addresses that we only know at run
535 * time. These blocks are copied as necessary and the blanks filled in
536 * appropriately.
537 *
538 * As mentioned in section 8.2, the trampoline library uses specifically named
539 * variables to communicate the buffers and size to use. These variables are:
540 *
541 *	o. umem_genasm_mptr: The buffer for ptcmalloc
542 *	o. umem_genasm_msize: The size in bytes of the above buffer
543 *	o. umem_genasm_fptr: The buffer for ptcfree
544 *	o. umem_genasm_fsize: The size in bytes of the above buffer
545 *
546 * Finally, to enable the generated assembly we need to remove the previous jump
547 * to the actual malloc that exists at the start of these buffers. On x86, this
 * is a five byte region. We could zero out the jump offset to be a jmp +0, but
 * using nops can be faster, so we specifically use a single five-byte nop on
 * x86. When porting ptcumem to other architectures, the various
 * opcode changes and options should be analyzed.
552 *
553 * 8.4 Interface with libc.so
554 * --------------------------
555 *
 * The tmem_t structure, as described at the beginning of section 8, is part of
 * a private interface with libc. There are three functions that exist to cover
558 * this. They are not documented in man pages or header files. They are in the
559 * SUNWprivate part of libc's mapfile.
560 *
561 *	o. _tmem_get_base(void)
562 *
563 *	Returns the offset from the ulwp_t (curthread) to the tmem_t structure.
 *	This is a constant for all threads and is effectively a way to do
565 *	::offsetof ulwp_t ul_tmem without having to know the specifics of the
566 *	structure outside of libc.
567 *
568 *	o. _tmem_get_nentries(void)
569 *
570 *	Returns the number of roots that exist in the tmem_t. This is one part
571 *	of the cap on the number of umem_caches that we can back with tmem.
572 *
573 *	o. _tmem_set_cleanup(void (*)(void *, int))
574 *
 *	This sets a cleanup handler that gets called back when a thread exits.
576 *	There is one call per buffer, the void * is a pointer to the buffer on
577 *	the list, the int is the index into the roots array for this buffer.
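 *
 * A sketch of how libumem might consume this interface at startup (the
 * cleanup handler name here is illustrative; umem_tmem_off is declared
 * later in this file):
 *
 *	umem_tmem_off = _tmem_get_base();
 *	nroots = MIN(_tmem_get_nentries(), NTMEMBASE);
 *	_tmem_set_cleanup(umem_ptc_cleanup);
 *
 * after which a thread's tmem_t lives at that offset from its ulwp_t
 * (curthread).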
578 *
579 * 8.5 Tuning and disabling per-thread caching
580 * -------------------------------------------
581 *
582 * There is only one tunable for per-thread caching:  the amount of memory each
583 * thread should be able to cache.  This is specified via the perthread_cache
 * UMEM_OPTION option.  No attempt is made to sanity check the specified
585 * value; the limit is simply the maximum value of a size_t.
586 *
587 * If the perthread_cache UMEM_OPTION is set to zero, nomagazines was requested,
 * or UMEM_DEBUG has been turned on, then we will never call into umem_genasm;
589 * however, the trampoline audit library and jump will still be in place.
590 *
591 * 8.6 Observing efficacy of per-thread caching
592 * --------------------------------------------
593 *
594 * To understand the efficacy of per-thread caching, use the ::umastat dcmd
595 * to see the percentage of capacity consumed on a per-thread basis, the
596 * degree to which each umem cache contributes to per-thread cache consumption,
597 * and the number of buffers in per-thread caches on a per-umem cache basis.
598 * If more detail is required, the specific buffers in a per-thread cache can
599 * be iterated over with the umem_ptc_* walkers. (These walkers allow an
600 * optional ulwp_t to be specified to iterate only over a particular thread's
601 * cache.)
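 *
 * For example, from mdb attached to the target process (the walker name
 * follows the umem_ptc_* convention and is illustrative):
 *
 *	$ mdb -p `pgrep myapp`
 *	> ::umastat
 *	> ::walk umem_ptc_96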
602 */
603
604#include <umem_impl.h>
605#include <sys/vmem_impl_user.h>
606#include "umem_base.h"
607#include "vmem_base.h"
608
609#include <sys/processor.h>
610#include <sys/sysmacros.h>
611
612#include <alloca.h>
613#include <errno.h>
614#include <limits.h>
615#include <stdio.h>
616#include <stdlib.h>
617#include <string.h>
618#include <strings.h>
619#include <signal.h>
620#include <unistd.h>
621#include <atomic.h>
622
623#include "misc.h"
624
625#define	UMEM_VMFLAGS(umflag)	(VM_NOSLEEP)
626
627size_t pagesize;
628
629/*
630 * The default set of caches to back umem_alloc().
631 * These sizes should be reevaluated periodically.
632 *
633 * We want allocations that are multiples of the coherency granularity
634 * (64 bytes) to be satisfied from a cache which is a multiple of 64
635 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
636 * the next kmem_cache_size greater than or equal to it must be a
637 * multiple of 64.
638 *
 * This table must be in sorted order, from smallest to largest.  The
640 * highest slot must be UMEM_MAXBUF, and every slot afterwards must be
641 * zero.
642 */
643static int umem_alloc_sizes[] = {
644#ifdef _LP64
645	1 * 8,
646	1 * 16,
647	2 * 16,
648	3 * 16,
649#else
650	1 * 8,
651	2 * 8,
652	3 * 8,
653	4 * 8,		5 * 8,		6 * 8,		7 * 8,
654#endif
655	4 * 16,		5 * 16,		6 * 16,		7 * 16,
656	4 * 32,		5 * 32,		6 * 32,		7 * 32,
657	4 * 64,		5 * 64,		6 * 64,		7 * 64,
658	4 * 128,	5 * 128,	6 * 128,	7 * 128,
659	P2ALIGN(8192 / 7, 64),
660	P2ALIGN(8192 / 6, 64),
661	P2ALIGN(8192 / 5, 64),
662	P2ALIGN(8192 / 4, 64), 2304,
663	P2ALIGN(8192 / 3, 64),
664	P2ALIGN(8192 / 2, 64), 4544,
665	P2ALIGN(8192 / 1, 64), 9216,
666	4096 * 3,
667	8192 * 2,				/* = 8192 * 2 */
668	24576, 32768, 40960, 49152, 57344, 65536, 73728, 81920,
669	90112, 98304, 106496, 114688, 122880, UMEM_MAXBUF, /* 128k */
670	/* 24 slots for user expansion */
671	0, 0, 0, 0, 0, 0, 0, 0,
672	0, 0, 0, 0, 0, 0, 0, 0,
673	0, 0, 0, 0, 0, 0, 0, 0,
674};
675#define	NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))
676
677static umem_magtype_t umem_magtype[] = {
678	{ 1,	8,	3200,	65536	},
679	{ 3,	16,	256,	32768	},
680	{ 7,	32,	64,	16384	},
681	{ 15,	64,	0,	8192	},
682	{ 31,	64,	0,	4096	},
683	{ 47,	64,	0,	2048	},
684	{ 63,	64,	0,	1024	},
685	{ 95,	64,	0,	512	},
686	{ 143,	64,	0,	0	},
687};
688
689/*
690 * umem tunables
691 */
692uint32_t umem_max_ncpus;	/* # of CPU caches. */
693
694uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */
695uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
696uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
697uint_t umem_abort = 1;		/* whether to abort on error */
698uint_t umem_output = 0;		/* whether to write to standard error */
699uint_t umem_logging = 0;	/* umem_log_enter() override */
700uint32_t umem_mtbf = 0;		/* mean time between failures [default: off] */
701size_t umem_transaction_log_size; /* size of transaction log */
702size_t umem_content_log_size;	/* size of content log */
703size_t umem_failure_log_size;	/* failure log [4 pages per CPU] */
704size_t umem_slab_log_size;	/* slab create log [4 pages per CPU] */
705size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
706size_t umem_lite_minsize = 0;	/* minimum buffer size for UMF_LITE */
707size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
708size_t umem_maxverify;		/* maximum bytes to inspect in debug routines */
709size_t umem_minfirewall;	/* hardware-enforced redzone threshold */
710size_t umem_ptc_size = 1048576;	/* size of per-thread cache (in bytes) */
711
712uint_t umem_flags = 0;
713uintptr_t umem_tmem_off;
714
715mutex_t			umem_init_lock;		/* locks initialization */
716cond_t			umem_init_cv;		/* initialization CV */
717thread_t		umem_init_thr;		/* thread initializing */
718int			umem_init_env_ready;	/* environ pre-initted */
719int			umem_ready = UMEM_READY_STARTUP;
720
721int			umem_ptc_enabled;	/* per-thread caching enabled */
722
723static umem_nofail_callback_t *nofail_callback;
724static mutex_t		umem_nofail_exit_lock;
725static thread_t		umem_nofail_exit_thr;
726
727static umem_cache_t	*umem_slab_cache;
728static umem_cache_t	*umem_bufctl_cache;
729static umem_cache_t	*umem_bufctl_audit_cache;
730
731mutex_t			umem_flags_lock;
732
733static vmem_t		*heap_arena;
734static vmem_alloc_t	*heap_alloc;
735static vmem_free_t	*heap_free;
736
737static vmem_t		*umem_internal_arena;
738static vmem_t		*umem_cache_arena;
739static vmem_t		*umem_hash_arena;
740static vmem_t		*umem_log_arena;
741static vmem_t		*umem_oversize_arena;
742static vmem_t		*umem_va_arena;
743static vmem_t		*umem_default_arena;
744static vmem_t		*umem_firewall_va_arena;
745static vmem_t		*umem_firewall_arena;
746
747vmem_t			*umem_memalign_arena;
748
749umem_log_header_t *umem_transaction_log;
750umem_log_header_t *umem_content_log;
751umem_log_header_t *umem_failure_log;
752umem_log_header_t *umem_slab_log;
753
754#define	CPUHINT()		(thr_self())
755#define	CPUHINT_MAX()		INT_MAX
756
757#define	CPU(mask)		(umem_cpus + (CPUHINT() & (mask)))
758static umem_cpu_t umem_startup_cpu = {	/* initial, single, cpu */
759	UMEM_CACHE_SIZE(0),
760	0
761};
762
763static uint32_t umem_cpu_mask = 0;			/* global cpu mask */
764static umem_cpu_t *umem_cpus = &umem_startup_cpu;	/* cpu list */
765
766volatile uint32_t umem_reaping;
767
768thread_t		umem_update_thr;
769struct timeval		umem_update_next;	/* timeofday of next update */
770volatile thread_t	umem_st_update_thr;	/* only used when single-thd */
771
772#define	IN_UPDATE()	(thr_self() == umem_update_thr || \
773			    thr_self() == umem_st_update_thr)
774#define	IN_REAP()	IN_UPDATE()
775
776mutex_t			umem_update_lock;	/* cache_u{next,prev,flags} */
777cond_t			umem_update_cv;
778
779volatile hrtime_t umem_reap_next;	/* min hrtime of next reap */
780
781mutex_t			umem_cache_lock;	/* inter-cache linkage only */
782
783#ifdef UMEM_STANDALONE
784umem_cache_t		umem_null_cache;
785static const umem_cache_t umem_null_cache_template = {
786#else
787umem_cache_t		umem_null_cache = {
788#endif
789	0, 0, 0, 0, 0,
790	0, 0,
791	0, 0,
792	0, 0,
793	"invalid_cache",
794	0, 0,
795	NULL, NULL, NULL, NULL,
796	NULL,
797	0, 0, 0, 0,
798	&umem_null_cache, &umem_null_cache,
799	&umem_null_cache, &umem_null_cache,
800	0,
801	DEFAULTMUTEX,				/* start of slab layer */
802	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
803	&umem_null_cache.cache_nullslab,
804	{
805		&umem_null_cache,
806		NULL,
807		&umem_null_cache.cache_nullslab,
808		&umem_null_cache.cache_nullslab,
809		NULL,
810		-1,
811		0
812	},
813	NULL,
814	NULL,
815	DEFAULTMUTEX,				/* start of depot layer */
816	NULL, {
817		NULL, 0, 0, 0, 0
818	}, {
819		NULL, 0, 0, 0, 0
820	}, {
821		{
822			DEFAULTMUTEX,		/* start of CPU cache */
823			0, 0, NULL, NULL, -1, -1, 0
824		}
825	}
826};
827
828#define	ALLOC_TABLE_4 \
829	&umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache
830
831#define	ALLOC_TABLE_64 \
832	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
833	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
834	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
835	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4
836
837#define	ALLOC_TABLE_1024 \
838	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
839	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
840	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
841	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64
842
843static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
844	ALLOC_TABLE_1024,
845	ALLOC_TABLE_1024,
846	ALLOC_TABLE_1024,
847	ALLOC_TABLE_1024,
848	ALLOC_TABLE_1024,
849	ALLOC_TABLE_1024,
850	ALLOC_TABLE_1024,
851	ALLOC_TABLE_1024,
852	ALLOC_TABLE_1024,
853	ALLOC_TABLE_1024,
854	ALLOC_TABLE_1024,
855	ALLOC_TABLE_1024,
856	ALLOC_TABLE_1024,
857	ALLOC_TABLE_1024,
858	ALLOC_TABLE_1024,
859	ALLOC_TABLE_1024
860};
861
862
863/* Used to constrain audit-log stack traces */
864caddr_t			umem_min_stack;
865caddr_t			umem_max_stack;
866
867
868#define	UMERR_MODIFIED	0	/* buffer modified while on freelist */
869#define	UMERR_REDZONE	1	/* redzone violation (write past end of buf) */
870#define	UMERR_DUPFREE	2	/* freed a buffer twice */
871#define	UMERR_BADADDR	3	/* freed a bad (unallocated) address */
872#define	UMERR_BADBUFTAG	4	/* buftag corrupted */
873#define	UMERR_BADBUFCTL	5	/* bufctl corrupted */
874#define	UMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
875#define	UMERR_BADSIZE	7	/* alloc size != free size */
876#define	UMERR_BADBASE	8	/* buffer base address wrong */
877
878struct {
879	hrtime_t	ump_timestamp;	/* timestamp of error */
880	int		ump_error;	/* type of umem error (UMERR_*) */
881	void		*ump_buffer;	/* buffer that induced abort */
882	void		*ump_realbuf;	/* real start address for buffer */
883	umem_cache_t	*ump_cache;	/* buffer's cache according to client */
884	umem_cache_t	*ump_realcache;	/* actual cache containing buffer */
	umem_slab_t	*ump_slab;	/* slab according to umem_findslab() */
886	umem_bufctl_t	*ump_bufctl;	/* bufctl */
887} umem_abort_info;
888
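/*
 * Pattern helpers used by the debugging support below: copy_pattern() fills
 * a buffer with a 64-bit pattern, verify_pattern() returns the address of
 * the first word that does not match the pattern (or NULL if all match), and
 * verify_and_copy_pattern() verifies the old pattern while replacing it with
 * a new one, restoring the old pattern over the already-rewritten prefix if
 * a mismatch is found.
 */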
889static void
890copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
891{
892	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
893	uint64_t *buf = buf_arg;
894
895	while (buf < bufend)
896		*buf++ = pattern;
897}
898
899static void *
900verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
901{
902	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
903	uint64_t *buf;
904
905	for (buf = buf_arg; buf < bufend; buf++)
906		if (*buf != pattern)
907			return (buf);
908	return (NULL);
909}
910
911static void *
912verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
913{
914	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
915	uint64_t *buf;
916
917	for (buf = buf_arg; buf < bufend; buf++) {
918		if (*buf != old) {
919			copy_pattern(old, buf_arg,
920			    (char *)buf - (char *)buf_arg);
921			return (buf);
922		}
923		*buf = new;
924	}
925
926	return (NULL);
927}
928
929void
930umem_cache_applyall(void (*func)(umem_cache_t *))
931{
932	umem_cache_t *cp;
933
934	(void) mutex_lock(&umem_cache_lock);
935	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
936	    cp = cp->cache_next)
937		func(cp);
938	(void) mutex_unlock(&umem_cache_lock);
939}
940
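/*
 * Record pending update work (flags) for a cache.  If the cache is currently
 * Inactive, link it onto the global update list rooted at umem_null_cache.
 * The caller must hold umem_update_lock.
 */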
941static void
942umem_add_update_unlocked(umem_cache_t *cp, int flags)
943{
944	umem_cache_t *cnext, *cprev;
945
946	flags &= ~UMU_ACTIVE;
947
948	if (!flags)
949		return;
950
951	if (cp->cache_uflags & UMU_ACTIVE) {
952		cp->cache_uflags |= flags;
953	} else {
954		if (cp->cache_unext != NULL) {
955			ASSERT(cp->cache_uflags != 0);
956			cp->cache_uflags |= flags;
957		} else {
958			ASSERT(cp->cache_uflags == 0);
959			cp->cache_uflags = flags;
960			cp->cache_unext = cnext = &umem_null_cache;
961			cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
962			cnext->cache_uprev = cp;
963			cprev->cache_unext = cp;
964		}
965	}
966}
967
968static void
969umem_add_update(umem_cache_t *cp, int flags)
970{
971	(void) mutex_lock(&umem_update_lock);
972
973	umem_add_update_unlocked(cp, flags);
974
975	if (!IN_UPDATE())
976		(void) cond_broadcast(&umem_update_cv);
977
978	(void) mutex_unlock(&umem_update_lock);
979}
980
981/*
982 * Remove a cache from the update list, waiting for any in-progress work to
983 * complete first.
984 */
985static void
986umem_remove_updates(umem_cache_t *cp)
987{
988	(void) mutex_lock(&umem_update_lock);
989
990	/*
991	 * Get it out of the active state
992	 */
993	while (cp->cache_uflags & UMU_ACTIVE) {
994		int cancel_state;
995
996		ASSERT(cp->cache_unext == NULL);
997
998		cp->cache_uflags |= UMU_NOTIFY;
999
1000		/*
1001		 * Make sure the update state is sane, before we wait
1002		 */
1003		ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0);
1004		ASSERT(umem_update_thr != thr_self() &&
1005		    umem_st_update_thr != thr_self());
1006
1007		(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
1008		    &cancel_state);
1009		(void) cond_wait(&umem_update_cv, &umem_update_lock);
1010		(void) pthread_setcancelstate(cancel_state, NULL);
1011	}
1012	/*
1013	 * Get it out of the Work Requested state
1014	 */
1015	if (cp->cache_unext != NULL) {
1016		cp->cache_uprev->cache_unext = cp->cache_unext;
1017		cp->cache_unext->cache_uprev = cp->cache_uprev;
1018		cp->cache_uprev = cp->cache_unext = NULL;
1019		cp->cache_uflags = 0;
1020	}
1021	/*
1022	 * Make sure it is in the Inactive state
1023	 */
1024	ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
1025	(void) mutex_unlock(&umem_update_lock);
1026}
1027
1028static void
1029umem_updateall(int flags)
1030{
1031	umem_cache_t *cp;
1032
1033	/*
1034	 * NOTE:  To prevent deadlock, umem_cache_lock is always acquired first.
1035	 *
1036	 * (umem_add_update is called from things run via umem_cache_applyall)
1037	 */
1038	(void) mutex_lock(&umem_cache_lock);
1039	(void) mutex_lock(&umem_update_lock);
1040
1041	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
1042	    cp = cp->cache_next)
1043		umem_add_update_unlocked(cp, flags);
1044
1045	if (!IN_UPDATE())
1046		(void) cond_broadcast(&umem_update_cv);
1047
1048	(void) mutex_unlock(&umem_update_lock);
1049	(void) mutex_unlock(&umem_cache_lock);
1050}
1051
1052/*
1053 * Debugging support.  Given a buffer address, find its slab.
1054 */
1055static umem_slab_t *
1056umem_findslab(umem_cache_t *cp, void *buf)
1057{
1058	umem_slab_t *sp;
1059
1060	(void) mutex_lock(&cp->cache_lock);
1061	for (sp = cp->cache_nullslab.slab_next;
1062	    sp != &cp->cache_nullslab; sp = sp->slab_next) {
1063		if (UMEM_SLAB_MEMBER(sp, buf)) {
1064			(void) mutex_unlock(&cp->cache_lock);
1065			return (sp);
1066		}
1067	}
1068	(void) mutex_unlock(&cp->cache_lock);
1069
1070	return (NULL);
1071}
1072
1073static void
1074umem_error(int error, umem_cache_t *cparg, void *bufarg)
1075{
1076	umem_buftag_t *btp = NULL;
1077	umem_bufctl_t *bcp = NULL;
1078	umem_cache_t *cp = cparg;
1079	umem_slab_t *sp;
1080	uint64_t *off;
1081	void *buf = bufarg;
1082
1083	int old_logging = umem_logging;
1084
1085	umem_logging = 0;	/* stop logging when a bad thing happens */
1086
1087	umem_abort_info.ump_timestamp = gethrtime();
1088
1089	sp = umem_findslab(cp, buf);
1090	if (sp == NULL) {
1091		for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
1092		    cp = cp->cache_prev) {
1093			if ((sp = umem_findslab(cp, buf)) != NULL)
1094				break;
1095		}
1096	}
1097
1098	if (sp == NULL) {
1099		cp = NULL;
1100		error = UMERR_BADADDR;
1101	} else {
1102		if (cp != cparg)
1103			error = UMERR_BADCACHE;
1104		else
1105			buf = (char *)bufarg - ((uintptr_t)bufarg -
1106			    (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1107		if (buf != bufarg)
1108			error = UMERR_BADBASE;
1109		if (cp->cache_flags & UMF_BUFTAG)
1110			btp = UMEM_BUFTAG(cp, buf);
1111		if (cp->cache_flags & UMF_HASH) {
1112			(void) mutex_lock(&cp->cache_lock);
1113			for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1114				if (bcp->bc_addr == buf)
1115					break;
1116			(void) mutex_unlock(&cp->cache_lock);
1117			if (bcp == NULL && btp != NULL)
1118				bcp = btp->bt_bufctl;
1119			if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
1120			    NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) ||
1121			    bcp->bc_addr != buf) {
1122				error = UMERR_BADBUFCTL;
1123				bcp = NULL;
1124			}
1125		}
1126	}
1127
1128	umem_abort_info.ump_error = error;
1129	umem_abort_info.ump_buffer = bufarg;
1130	umem_abort_info.ump_realbuf = buf;
1131	umem_abort_info.ump_cache = cparg;
1132	umem_abort_info.ump_realcache = cp;
1133	umem_abort_info.ump_slab = sp;
1134	umem_abort_info.ump_bufctl = bcp;
1135
1136	umem_printf("umem allocator: ");
1137
1138	switch (error) {
1139
1140	case UMERR_MODIFIED:
1141		umem_printf("buffer modified after being freed\n");
1142		off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1143		if (off == NULL)	/* shouldn't happen */
1144			off = buf;
1145		umem_printf("modification occurred at offset 0x%lx "
1146		    "(0x%llx replaced by 0x%llx)\n",
1147		    (uintptr_t)off - (uintptr_t)buf,
1148		    (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off);
1149		break;
1150
1151	case UMERR_REDZONE:
1152		umem_printf("redzone violation: write past end of buffer\n");
1153		break;
1154
1155	case UMERR_BADADDR:
1156		umem_printf("invalid free: buffer not in cache\n");
1157		break;
1158
1159	case UMERR_DUPFREE:
1160		umem_printf("duplicate free: buffer freed twice\n");
1161		break;
1162
1163	case UMERR_BADBUFTAG:
1164		umem_printf("boundary tag corrupted\n");
1165		umem_printf("bcp ^ bxstat = %lx, should be %lx\n",
1166		    (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1167		    UMEM_BUFTAG_FREE);
1168		break;
1169
1170	case UMERR_BADBUFCTL:
1171		umem_printf("bufctl corrupted\n");
1172		break;
1173
1174	case UMERR_BADCACHE:
1175		umem_printf("buffer freed to wrong cache\n");
1176		umem_printf("buffer was allocated from %s,\n", cp->cache_name);
1177		umem_printf("caller attempting free to %s.\n",
1178		    cparg->cache_name);
1179		break;
1180
1181	case UMERR_BADSIZE:
1182		umem_printf("bad free: free size (%u) != alloc size (%u)\n",
1183		    UMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1184		    UMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1185		break;
1186
1187	case UMERR_BADBASE:
1188		umem_printf("bad free: free address (%p) != alloc address "
1189		    "(%p)\n", bufarg, buf);
1190		break;
1191	}
1192
1193	umem_printf("buffer=%p  bufctl=%p  cache: %s\n",
1194	    bufarg, (void *)bcp, cparg->cache_name);
1195
1196	if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
1197	    error != UMERR_BADBUFCTL) {
1198		int d;
1199		timespec_t ts;
1200		hrtime_t diff;
1201		umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp;
1202
1203		diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp;
1204		ts.tv_sec = diff / NANOSEC;
1205		ts.tv_nsec = diff % NANOSEC;
1206
1207		umem_printf("previous transaction on buffer %p:\n", buf);
1208		umem_printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
1209		    (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1210		    (void *)sp, cp->cache_name);
1211		for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) {
1212			(void) print_sym((void *)bcap->bc_stack[d]);
1213			umem_printf("\n");
1214		}
1215	}
1216
1217	umem_err_recoverable("umem: heap corruption detected");
1218
1219	umem_logging = old_logging;	/* resume logging */
1220}
1221
1222void
1223umem_nofail_callback(umem_nofail_callback_t *cb)
1224{
1225	nofail_callback = cb;
1226}
1227
1228static int
1229umem_alloc_retry(umem_cache_t *cp, int umflag)
1230{
1231	if (cp == &umem_null_cache) {
1232		if (umem_init())
1233			return (1);				/* retry */
1234		/*
1235		 * Initialization failed.  Do normal failure processing.
1236		 */
1237	}
1238	if (umem_flags & UMF_CHECKNULL) {
1239		umem_err_recoverable("umem: out of heap space");
1240	}
1241	if (umflag & UMEM_NOFAIL) {
1242		int def_result = UMEM_CALLBACK_EXIT(255);
1243		int result = def_result;
1244		umem_nofail_callback_t *callback = nofail_callback;
1245
1246		if (callback != NULL)
1247			result = callback();
1248
1249		if (result == UMEM_CALLBACK_RETRY)
1250			return (1);
1251
1252		if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) {
1253			log_message("nofail callback returned %x\n", result);
1254			result = def_result;
1255		}
1256
1257		/*
1258		 * only one thread will call exit
1259		 */
1260		if (umem_nofail_exit_thr == thr_self())
1261			umem_panic("recursive UMEM_CALLBACK_EXIT()\n");
1262
1263		(void) mutex_lock(&umem_nofail_exit_lock);
1264		umem_nofail_exit_thr = thr_self();
1265		exit(result & 0xFF);
1266		/*NOTREACHED*/
1267	}
1268	return (0);
1269}
1270
1271static umem_log_header_t *
1272umem_log_init(size_t logsize)
1273{
1274	umem_log_header_t *lhp;
1275	int nchunks = 4 * umem_max_ncpus;
1276	size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]);
1277	int i;
1278
1279	if (logsize == 0)
1280		return (NULL);
1281
1282	/*
1283	 * Make sure that lhp->lh_cpu[] is nicely aligned
1284	 * to prevent false sharing of cache lines.
1285	 */
1286	lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN);
1287	lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1288	    NULL, NULL, VM_NOSLEEP);
1289	if (lhp == NULL)
1290		goto fail;
1291
1292	bzero(lhp, lhsize);
1293
1294	(void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL);
1295	lhp->lh_nchunks = nchunks;
1296	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE);
1297	if (lhp->lh_chunksize == 0)
1298		lhp->lh_chunksize = PAGESIZE;
1299
1300	lhp->lh_base = vmem_alloc(umem_log_arena,
1301	    lhp->lh_chunksize * nchunks, VM_NOSLEEP);
1302	if (lhp->lh_base == NULL)
1303		goto fail;
1304
1305	lhp->lh_free = vmem_alloc(umem_log_arena,
1306	    nchunks * sizeof (int), VM_NOSLEEP);
1307	if (lhp->lh_free == NULL)
1308		goto fail;
1309
1310	bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1311
1312	for (i = 0; i < umem_max_ncpus; i++) {
1313		umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1314		(void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL);
1315		clhp->clh_chunk = i;
1316	}
1317
1318	for (i = umem_max_ncpus; i < nchunks; i++)
1319		lhp->lh_free[i] = i;
1320
1321	lhp->lh_head = umem_max_ncpus;
1322	lhp->lh_tail = 0;
1323
1324	return (lhp);
1325
1326fail:
1327	if (lhp != NULL) {
1328		if (lhp->lh_base != NULL)
1329			vmem_free(umem_log_arena, lhp->lh_base,
1330			    lhp->lh_chunksize * nchunks);
1331
1332		vmem_xfree(umem_log_arena, lhp, lhsize);
1333	}
1334	return (NULL);
1335}
1336
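/*
 * Copy size bytes of data into lhp's log chunk for the current CPU, switching
 * that CPU to a fresh chunk when the current one does not have enough room.
 * Returns the location written to, or NULL if logging is disabled.
 */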
1337static void *
1338umem_log_enter(umem_log_header_t *lhp, void *data, size_t size)
1339{
1340	void *logspace;
1341	umem_cpu_log_header_t *clhp;
1342
1343	if (lhp == NULL || umem_logging == 0)
1344		return (NULL);
1345
1346	clhp = &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];
1347
1348	(void) mutex_lock(&clhp->clh_lock);
1349	clhp->clh_hits++;
1350	if (size > clhp->clh_avail) {
1351		(void) mutex_lock(&lhp->lh_lock);
1352		lhp->lh_hits++;
1353		lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1354		lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1355		clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1356		lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1357		clhp->clh_current = lhp->lh_base +
1358		    clhp->clh_chunk * lhp->lh_chunksize;
1359		clhp->clh_avail = lhp->lh_chunksize;
1360		if (size > lhp->lh_chunksize)
1361			size = lhp->lh_chunksize;
1362		(void) mutex_unlock(&lhp->lh_lock);
1363	}
1364	logspace = clhp->clh_current;
1365	clhp->clh_current += size;
1366	clhp->clh_avail -= size;
1367	bcopy(data, logspace, size);
1368	(void) mutex_unlock(&clhp->clh_lock);
1369	return (logspace);
1370}
1371
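/*
 * Fill in an audit bufctl: timestamp, calling thread, and stack trace, and
 * record it in the given log (saving a pointer to the log entry in
 * bc_lastlog).
 */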
1372#define	UMEM_AUDIT(lp, cp, bcp)						\
1373{									\
1374	umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp);	\
1375	_bcp->bc_timestamp = gethrtime();				\
1376	_bcp->bc_thread = thr_self();					\
1377	_bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth,	\
1378	    (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL));	\
1379	_bcp->bc_lastlog = umem_log_enter((lp), _bcp,			\
1380	    UMEM_BUFCTL_AUDIT_SIZE);					\
1381}
1382
1383static void
1384umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
1385    umem_slab_t *sp, void *addr)
1386{
1387	umem_bufctl_audit_t *bcp;
1388	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
1389
1390	bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE);
1391	bcp->bc_addr = addr;
1392	bcp->bc_slab = sp;
1393	bcp->bc_cache = cp;
1394	UMEM_AUDIT(lp, cp, bcp);
1395}
1396
1397/*
1398 * Create a new slab for cache cp.
1399 */
1400static umem_slab_t *
1401umem_slab_create(umem_cache_t *cp, int umflag)
1402{
1403	size_t slabsize = cp->cache_slabsize;
1404	size_t chunksize = cp->cache_chunksize;
1405	int cache_flags = cp->cache_flags;
1406	size_t color, chunks;
1407	char *buf, *slab;
1408	umem_slab_t *sp;
1409	umem_bufctl_t *bcp;
1410	vmem_t *vmp = cp->cache_arena;
1411
1412	color = cp->cache_color + cp->cache_align;
1413	if (color > cp->cache_maxcolor)
1414		color = cp->cache_mincolor;
1415	cp->cache_color = color;
1416
1417	slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag));
1418
1419	if (slab == NULL)
1420		goto vmem_alloc_failure;
1421
1422	ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1423
1424	if (!(cp->cache_cflags & UMC_NOTOUCH) &&
1425	    (cp->cache_flags & UMF_DEADBEEF))
1426		copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1427
1428	if (cache_flags & UMF_HASH) {
1429		if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL)
1430			goto slab_alloc_failure;
1431		chunks = (slabsize - color) / chunksize;
1432	} else {
1433		sp = UMEM_SLAB(cp, slab);
1434		chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize;
1435	}
1436
1437	sp->slab_cache	= cp;
1438	sp->slab_head	= NULL;
1439	sp->slab_refcnt	= 0;
1440	sp->slab_base	= buf = slab + color;
1441	sp->slab_chunks	= chunks;
1442
1443	ASSERT(chunks > 0);
1444	while (chunks-- != 0) {
1445		if (cache_flags & UMF_HASH) {
1446			bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
1447			if (bcp == NULL)
1448				goto bufctl_alloc_failure;
1449			if (cache_flags & UMF_AUDIT) {
1450				umem_bufctl_audit_t *bcap =
1451				    (umem_bufctl_audit_t *)bcp;
1452				bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE);
1453				bcap->bc_cache = cp;
1454			}
1455			bcp->bc_addr = buf;
1456			bcp->bc_slab = sp;
1457		} else {
1458			bcp = UMEM_BUFCTL(cp, buf);
1459		}
1460		if (cache_flags & UMF_BUFTAG) {
1461			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1462			btp->bt_redzone = UMEM_REDZONE_PATTERN;
1463			btp->bt_bufctl = bcp;
1464			btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1465			if (cache_flags & UMF_DEADBEEF) {
1466				copy_pattern(UMEM_FREE_PATTERN, buf,
1467				    cp->cache_verify);
1468			}
1469		}
1470		bcp->bc_next = sp->slab_head;
1471		sp->slab_head = bcp;
1472		buf += chunksize;
1473	}
1474
1475	umem_log_event(umem_slab_log, cp, sp, slab);
1476
1477	return (sp);
1478
1479bufctl_alloc_failure:
1480
1481	while ((bcp = sp->slab_head) != NULL) {
1482		sp->slab_head = bcp->bc_next;
1483		_umem_cache_free(cp->cache_bufctl_cache, bcp);
1484	}
1485	_umem_cache_free(umem_slab_cache, sp);
1486
1487slab_alloc_failure:
1488
1489	vmem_free(vmp, slab, slabsize);
1490
1491vmem_alloc_failure:
1492
1493	umem_log_event(umem_failure_log, cp, NULL, NULL);
1494	atomic_add_64(&cp->cache_alloc_fail, 1);
1495
1496	return (NULL);
1497}
1498
1499/*
1500 * Destroy a slab.
1501 */
1502static void
1503umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
1504{
1505	vmem_t *vmp = cp->cache_arena;
1506	void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1507
1508	if (cp->cache_flags & UMF_HASH) {
1509		umem_bufctl_t *bcp;
1510		while ((bcp = sp->slab_head) != NULL) {
1511			sp->slab_head = bcp->bc_next;
1512			_umem_cache_free(cp->cache_bufctl_cache, bcp);
1513		}
1514		_umem_cache_free(umem_slab_cache, sp);
1515	}
1516	vmem_free(vmp, slab, cp->cache_slabsize);
1517}
1518
1519/*
1520 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1521 */
1522static void *
1523umem_slab_alloc(umem_cache_t *cp, int umflag)
1524{
1525	umem_bufctl_t *bcp, **hash_bucket;
1526	umem_slab_t *sp;
1527	void *buf;
1528
1529	(void) mutex_lock(&cp->cache_lock);
1530	cp->cache_slab_alloc++;
1531	sp = cp->cache_freelist;
1532	ASSERT(sp->slab_cache == cp);
1533	if (sp->slab_head == NULL) {
1534		/*
1535		 * The freelist is empty.  Create a new slab.
1536		 */
1537		(void) mutex_unlock(&cp->cache_lock);
1538		if (cp == &umem_null_cache)
1539			return (NULL);
1540		if ((sp = umem_slab_create(cp, umflag)) == NULL)
1541			return (NULL);
1542		(void) mutex_lock(&cp->cache_lock);
1543		cp->cache_slab_create++;
1544		if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1545			cp->cache_bufmax = cp->cache_buftotal;
1546		sp->slab_next = cp->cache_freelist;
1547		sp->slab_prev = cp->cache_freelist->slab_prev;
1548		sp->slab_next->slab_prev = sp;
1549		sp->slab_prev->slab_next = sp;
1550		cp->cache_freelist = sp;
1551	}
1552
1553	sp->slab_refcnt++;
1554	ASSERT(sp->slab_refcnt <= sp->slab_chunks);
1555
1556	/*
1557	 * If we're taking the last buffer in the slab,
1558	 * remove the slab from the cache's freelist.
1559	 */
1560	bcp = sp->slab_head;
1561	if ((sp->slab_head = bcp->bc_next) == NULL) {
1562		cp->cache_freelist = sp->slab_next;
1563		ASSERT(sp->slab_refcnt == sp->slab_chunks);
1564	}
1565
1566	if (cp->cache_flags & UMF_HASH) {
1567		/*
1568		 * Add buffer to allocated-address hash table.
1569		 */
1570		buf = bcp->bc_addr;
1571		hash_bucket = UMEM_HASH(cp, buf);
1572		bcp->bc_next = *hash_bucket;
1573		*hash_bucket = bcp;
1574		if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1575			UMEM_AUDIT(umem_transaction_log, cp, bcp);
1576		}
1577	} else {
1578		buf = UMEM_BUF(cp, bcp);
1579	}
1580
1581	ASSERT(UMEM_SLAB_MEMBER(sp, buf));
1582
1583	(void) mutex_unlock(&cp->cache_lock);
1584
1585	return (buf);
1586}
1587
1588/*
1589 * Free a raw (unconstructed) buffer to cp's slab layer.
1590 */
1591static void
1592umem_slab_free(umem_cache_t *cp, void *buf)
1593{
1594	umem_slab_t *sp;
1595	umem_bufctl_t *bcp, **prev_bcpp;
1596
1597	ASSERT(buf != NULL);
1598
1599	(void) mutex_lock(&cp->cache_lock);
1600	cp->cache_slab_free++;
1601
1602	if (cp->cache_flags & UMF_HASH) {
1603		/*
1604		 * Look up buffer in allocated-address hash table.
1605		 */
1606		prev_bcpp = UMEM_HASH(cp, buf);
1607		while ((bcp = *prev_bcpp) != NULL) {
1608			if (bcp->bc_addr == buf) {
1609				*prev_bcpp = bcp->bc_next;
1610				sp = bcp->bc_slab;
1611				break;
1612			}
1613			cp->cache_lookup_depth++;
1614			prev_bcpp = &bcp->bc_next;
1615		}
1616	} else {
1617		bcp = UMEM_BUFCTL(cp, buf);
1618		sp = UMEM_SLAB(cp, buf);
1619	}
1620
1621	if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
1622		(void) mutex_unlock(&cp->cache_lock);
1623		umem_error(UMERR_BADADDR, cp, buf);
1624		return;
1625	}
1626
1627	if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1628		if (cp->cache_flags & UMF_CONTENTS)
1629			((umem_bufctl_audit_t *)bcp)->bc_contents =
1630			    umem_log_enter(umem_content_log, buf,
1631			    cp->cache_contents);
1632		UMEM_AUDIT(umem_transaction_log, cp, bcp);
1633	}
1634
1635	/*
1636	 * If this slab isn't currently on the freelist, put it there.
1637	 */
1638	if (sp->slab_head == NULL) {
1639		ASSERT(sp->slab_refcnt == sp->slab_chunks);
1640		ASSERT(cp->cache_freelist != sp);
1641		sp->slab_next->slab_prev = sp->slab_prev;
1642		sp->slab_prev->slab_next = sp->slab_next;
1643		sp->slab_next = cp->cache_freelist;
1644		sp->slab_prev = cp->cache_freelist->slab_prev;
1645		sp->slab_next->slab_prev = sp;
1646		sp->slab_prev->slab_next = sp;
1647		cp->cache_freelist = sp;
1648	}
1649
1650	bcp->bc_next = sp->slab_head;
1651	sp->slab_head = bcp;
1652
1653	ASSERT(sp->slab_refcnt >= 1);
1654	if (--sp->slab_refcnt == 0) {
1655		/*
1656		 * There are no outstanding allocations from this slab,
1657		 * so we can reclaim the memory.
1658		 */
1659		sp->slab_next->slab_prev = sp->slab_prev;
1660		sp->slab_prev->slab_next = sp->slab_next;
1661		if (sp == cp->cache_freelist)
1662			cp->cache_freelist = sp->slab_next;
1663		cp->cache_slab_destroy++;
1664		cp->cache_buftotal -= sp->slab_chunks;
1665		(void) mutex_unlock(&cp->cache_lock);
1666		umem_slab_destroy(cp, sp);
1667		return;
1668	}
1669	(void) mutex_unlock(&cp->cache_lock);
1670}
1671
1672static int
1673umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
1674{
1675	umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1676	umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
1677	uint32_t mtbf;
1678	int flags_nfatal;
1679
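	/*
	 * The buftag stores the bufctl pointer XORed with a state constant
	 * (UMEM_BUFTAG_FREE or UMEM_BUFTAG_ALLOC).  A genuinely free buffer
	 * therefore satisfies
	 *
	 *	btp->bt_bxstat ^ (intptr_t)bcp == UMEM_BUFTAG_FREE
	 *
	 * so a stale state, a trashed bt_bufctl, or a trashed bt_bxstat all
	 * show up here as a bad buftag.
	 */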
1680	if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
1681		umem_error(UMERR_BADBUFTAG, cp, buf);
1682		return (-1);
1683	}
1684
1685	btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC;
1686
1687	if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1688		umem_error(UMERR_BADBUFCTL, cp, buf);
1689		return (-1);
1690	}
1691
1692	btp->bt_redzone = UMEM_REDZONE_PATTERN;
1693
1694	if (cp->cache_flags & UMF_DEADBEEF) {
1695		if (verify_and_copy_pattern(UMEM_FREE_PATTERN,
1696		    UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
1697			umem_error(UMERR_MODIFIED, cp, buf);
1698			return (-1);
1699		}
1700	}
1701
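	/*
	 * Fault injection: when a per-cache or global mtbf ("mean time
	 * between failures") is set, roughly one in every mtbf attempts
	 * (the high-resolution timestamp modulo mtbf) is forced to fail
	 * here, exercising callers' failure paths.  Callers passing fatal
	 * flags (UMEM_FATAL_FLAGS, e.g. UMEM_NOFAIL) are exempt.
	 */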
1702	if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
1703	    gethrtime() % mtbf == 0 &&
1704	    (umflag & (UMEM_FATAL_FLAGS)) == 0) {
1705		umem_log_event(umem_failure_log, cp, NULL, NULL);
1706	} else {
1707		mtbf = 0;
1708	}
1709
1710	/*
1711	 * We do not pass fatal flags on to the constructor.  This prevents
1712	 * leaking buffers in the event of a subordinate constructor failing.
1713	 */
1714	flags_nfatal = UMEM_DEFAULT;
1715	if (mtbf || (cp->cache_constructor != NULL &&
1716	    cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
1717		atomic_add_64(&cp->cache_alloc_fail, 1);
1718		btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1719		copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1720		umem_slab_free(cp, buf);
1721		return (-1);
1722	}
1723
1724	if (cp->cache_flags & UMF_AUDIT) {
1725		UMEM_AUDIT(umem_transaction_log, cp, bcp);
1726	}
1727
1728	return (0);
1729}
1730
1731static int
1732umem_cache_free_debug(umem_cache_t *cp, void *buf)
1733{
1734	umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1735	umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
1736	umem_slab_t *sp;
1737
1738	if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) {
1739		if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
1740			umem_error(UMERR_DUPFREE, cp, buf);
1741			return (-1);
1742		}
1743		sp = umem_findslab(cp, buf);
1744		if (sp == NULL || sp->slab_cache != cp)
1745			umem_error(UMERR_BADADDR, cp, buf);
1746		else
1747			umem_error(UMERR_REDZONE, cp, buf);
1748		return (-1);
1749	}
1750
1751	btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1752
1753	if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1754		umem_error(UMERR_BADBUFCTL, cp, buf);
1755		return (-1);
1756	}
1757
1758	if (btp->bt_redzone != UMEM_REDZONE_PATTERN) {
1759		umem_error(UMERR_REDZONE, cp, buf);
1760		return (-1);
1761	}
1762
1763	if (cp->cache_flags & UMF_AUDIT) {
1764		if (cp->cache_flags & UMF_CONTENTS)
1765			bcp->bc_contents = umem_log_enter(umem_content_log,
1766			    buf, cp->cache_contents);
1767		UMEM_AUDIT(umem_transaction_log, cp, bcp);
1768	}
1769
1770	if (cp->cache_destructor != NULL)
1771		cp->cache_destructor(buf, cp->cache_private);
1772
1773	if (cp->cache_flags & UMF_DEADBEEF)
1774		copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1775
1776	return (0);
1777}
1778
1779/*
1780 * Free each object in magazine mp to cp's slab layer, and free mp itself.
1781 */
1782static void
1783umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
1784{
1785	int round;
1786
1787	ASSERT(cp->cache_next == NULL || IN_UPDATE());
1788
1789	for (round = 0; round < nrounds; round++) {
1790		void *buf = mp->mag_round[round];
1791
1792		if ((cp->cache_flags & UMF_DEADBEEF) &&
1793		    verify_pattern(UMEM_FREE_PATTERN, buf,
1794		    cp->cache_verify) != NULL) {
1795			umem_error(UMERR_MODIFIED, cp, buf);
1796			continue;
1797		}
1798
1799		if (!(cp->cache_flags & UMF_BUFTAG) &&
1800		    cp->cache_destructor != NULL)
1801			cp->cache_destructor(buf, cp->cache_private);
1802
1803		umem_slab_free(cp, buf);
1804	}
1805	ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1806	_umem_cache_free(cp->cache_magtype->mt_cache, mp);
1807}
1808
1809/*
1810 * Allocate a magazine from the depot.
1811 */
1812static umem_magazine_t *
1813umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
1814{
1815	umem_magazine_t *mp;
1816
1817	/*
1818	 * If we can't get the depot lock without contention,
1819	 * update our contention count.  We use the depot
1820	 * contention rate to determine whether we need to
1821	 * increase the magazine size for better scalability.
1822	 */
1823	if (mutex_trylock(&cp->cache_depot_lock) != 0) {
1824		(void) mutex_lock(&cp->cache_depot_lock);
1825		cp->cache_depot_contention++;
1826	}
1827
1828	if ((mp = mlp->ml_list) != NULL) {
1829		ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1830		mlp->ml_list = mp->mag_next;
1831		if (--mlp->ml_total < mlp->ml_min)
1832			mlp->ml_min = mlp->ml_total;
1833		mlp->ml_alloc++;
1834	}
1835
1836	(void) mutex_unlock(&cp->cache_depot_lock);
1837
1838	return (mp);
1839}
1840
1841/*
1842 * Free a magazine to the depot.
1843 */
1844static void
1845umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
1846{
1847	(void) mutex_lock(&cp->cache_depot_lock);
1848	ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1849	mp->mag_next = mlp->ml_list;
1850	mlp->ml_list = mp;
1851	mlp->ml_total++;
1852	(void) mutex_unlock(&cp->cache_depot_lock);
1853}
1854
1855/*
1856 * Update the working set statistics for cp's depot.
1857 */
1858static void
1859umem_depot_ws_update(umem_cache_t *cp)
1860{
1861	(void) mutex_lock(&cp->cache_depot_lock);
1862	cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
1863	cp->cache_full.ml_min = cp->cache_full.ml_total;
1864	cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
1865	cp->cache_empty.ml_min = cp->cache_empty.ml_total;
1866	(void) mutex_unlock(&cp->cache_depot_lock);
1867}
1868
1869/*
1870 * Reap all magazines that have fallen out of the depot's working set.
1871 */
1872static void
1873umem_depot_ws_reap(umem_cache_t *cp)
1874{
1875	long reap;
1876	umem_magazine_t *mp;
1877
1878	ASSERT(cp->cache_next == NULL || IN_REAP());
1879
1880	reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
1881	while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
1882		umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
1883
1884	reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
1885	while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
1886		umem_magazine_destroy(cp, mp, 0);
1887}
1888
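/*
 * Install magazine mp (holding 'rounds' rounds) as the CPU's loaded magazine,
 * demoting the current one to "previously loaded".  Keeping both a loaded and
 * a previously loaded magazine lets an alloc/free stream that straddles a
 * magazine boundary be satisfied by simply swapping the two, rather than
 * going to the depot on every boundary crossing.
 */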
1889static void
1890umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds)
1891{
1892	ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
1893	    (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
1894	ASSERT(ccp->cc_magsize > 0);
1895
1896	ccp->cc_ploaded = ccp->cc_loaded;
1897	ccp->cc_prounds = ccp->cc_rounds;
1898	ccp->cc_loaded = mp;
1899	ccp->cc_rounds = rounds;
1900}
1901
1902/*
1903 * Allocate a constructed object from cache cp.
1904 */
1905#pragma weak umem_cache_alloc = _umem_cache_alloc
1906void *
1907_umem_cache_alloc(umem_cache_t *cp, int umflag)
1908{
1909	umem_cpu_cache_t *ccp;
1910	umem_magazine_t *fmp;
1911	void *buf;
1912	int flags_nfatal;
1913
1914retry:
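	/*
	 * Pick this thread's CPU cache: CPU() maps the calling thread onto
	 * one of the power-of-two umem CPU slots via a cheap hint masked
	 * with cache_cpu_mask, and UMEM_CPU_CACHE() locates that slot's
	 * per-CPU cache within cp.
	 */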
1915	ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
1916	(void) mutex_lock(&ccp->cc_lock);
1917	for (;;) {
1918		/*
1919		 * If there's an object available in the current CPU's
1920		 * loaded magazine, just take it and return.
1921		 */
1922		if (ccp->cc_rounds > 0) {
1923			buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
1924			ccp->cc_alloc++;
1925			(void) mutex_unlock(&ccp->cc_lock);
1926			if ((ccp->cc_flags & UMF_BUFTAG) &&
1927			    umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1928				if (umem_alloc_retry(cp, umflag)) {
1929					goto retry;
1930				}
1931
1932				return (NULL);
1933			}
1934			return (buf);
1935		}
1936
1937		/*
1938		 * The loaded magazine is empty.  If the previously loaded
1939		 * magazine was full, exchange them and try again.
1940		 */
1941		if (ccp->cc_prounds > 0) {
1942			umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
1943			continue;
1944		}
1945
1946		/*
1947		 * If the magazine layer is disabled, break out now.
1948		 */
1949		if (ccp->cc_magsize == 0)
1950			break;
1951
1952		/*
1953		 * Try to get a full magazine from the depot.
1954		 */
1955		fmp = umem_depot_alloc(cp, &cp->cache_full);
1956		if (fmp != NULL) {
1957			if (ccp->cc_ploaded != NULL)
1958				umem_depot_free(cp, &cp->cache_empty,
1959				    ccp->cc_ploaded);
1960			umem_cpu_reload(ccp, fmp, ccp->cc_magsize);
1961			continue;
1962		}
1963
1964		/*
1965		 * There are no full magazines in the depot,
1966		 * so fall through to the slab layer.
1967		 */
1968		break;
1969	}
1970	(void) mutex_unlock(&ccp->cc_lock);
1971
1972	/*
1973	 * We couldn't allocate a constructed object from the magazine layer,
1974	 * so get a raw buffer from the slab layer and apply its constructor.
1975	 */
1976	buf = umem_slab_alloc(cp, umflag);
1977
1978	if (buf == NULL) {
1979		if (cp == &umem_null_cache)
1980			return (NULL);
1981		if (umem_alloc_retry(cp, umflag)) {
1982			goto retry;
1983		}
1984
1985		return (NULL);
1986	}
1987
1988	if (cp->cache_flags & UMF_BUFTAG) {
1989		/*
1990		 * Let umem_cache_alloc_debug() apply the constructor for us.
1991		 */
1992		if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1993			if (umem_alloc_retry(cp, umflag)) {
1994				goto retry;
1995			}
1996			return (NULL);
1997		}
1998		return (buf);
1999	}
2000
2001	/*
2002	 * We do not pass fatal flags on to the constructor.  This prevents
2003	 * leaking buffers in the event of a subordinate constructor failing.
2004	 */
2005	flags_nfatal = UMEM_DEFAULT;
2006	if (cp->cache_constructor != NULL &&
2007	    cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
2008		atomic_add_64(&cp->cache_alloc_fail, 1);
2009		umem_slab_free(cp, buf);
2010
2011		if (umem_alloc_retry(cp, umflag)) {
2012			goto retry;
2013		}
2014		return (NULL);
2015	}
2016
2017	return (buf);
2018}
2019
2020/*
2021 * Free a constructed object to cache cp.
2022 */
2023#pragma weak umem_cache_free = _umem_cache_free
2024void
2025_umem_cache_free(umem_cache_t *cp, void *buf)
2026{
2027	umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
2028	umem_magazine_t *emp;
2029	umem_magtype_t *mtp;
2030
2031	if (ccp->cc_flags & UMF_BUFTAG)
2032		if (umem_cache_free_debug(cp, buf) == -1)
2033			return;
2034
2035	(void) mutex_lock(&ccp->cc_lock);
2036	for (;;) {
2037		/*
2038		 * If there's a slot available in the current CPU's
2039		 * loaded magazine, just put the object there and return.
2040		 */
2041		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2042			ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2043			ccp->cc_free++;
2044			(void) mutex_unlock(&ccp->cc_lock);
2045			return;
2046		}
2047
2048		/*
2049		 * The loaded magazine is full.  If the previously loaded
2050		 * magazine was empty, exchange them and try again.
2051		 */
2052		if (ccp->cc_prounds == 0) {
2053			umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2054			continue;
2055		}
2056
2057		/*
2058		 * If the magazine layer is disabled, break out now.
2059		 */
2060		if (ccp->cc_magsize == 0)
2061			break;
2062
2063		/*
2064		 * Try to get an empty magazine from the depot.
2065		 */
2066		emp = umem_depot_alloc(cp, &cp->cache_empty);
2067		if (emp != NULL) {
2068			if (ccp->cc_ploaded != NULL)
2069				umem_depot_free(cp, &cp->cache_full,
2070				    ccp->cc_ploaded);
2071			umem_cpu_reload(ccp, emp, 0);
2072			continue;
2073		}
2074
2075		/*
2076		 * There are no empty magazines in the depot,
2077		 * so try to allocate a new one.  We must drop all locks
2078		 * across umem_cache_alloc() because lower layers may
2079		 * attempt to allocate from this cache.
2080		 */
2081		mtp = cp->cache_magtype;
2082		(void) mutex_unlock(&ccp->cc_lock);
2083		emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT);
2084		(void) mutex_lock(&ccp->cc_lock);
2085
2086		if (emp != NULL) {
2087			/*
2088			 * We successfully allocated an empty magazine.
2089			 * However, we had to drop ccp->cc_lock to do it,
2090			 * so the cache's magazine size may have changed.
2091			 * If so, free the magazine and try again.
2092			 */
2093			if (ccp->cc_magsize != mtp->mt_magsize) {
2094				(void) mutex_unlock(&ccp->cc_lock);
2095				_umem_cache_free(mtp->mt_cache, emp);
2096				(void) mutex_lock(&ccp->cc_lock);
2097				continue;
2098			}
2099
2100			/*
2101			 * We got a magazine of the right size.  Add it to
2102			 * the depot and try the whole dance again.
2103			 */
2104			umem_depot_free(cp, &cp->cache_empty, emp);
2105			continue;
2106		}
2107
2108		/*
2109		 * We couldn't allocate an empty magazine,
2110		 * so fall through to the slab layer.
2111		 */
2112		break;
2113	}
2114	(void) mutex_unlock(&ccp->cc_lock);
2115
2116	/*
2117	 * We couldn't free our constructed object to the magazine layer,
2118	 * so apply its destructor and free it to the slab layer.
2119	 * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug()
2120	 * will have already applied the destructor.
2121	 */
2122	if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
2123		cp->cache_destructor(buf, cp->cache_private);
2124
2125	umem_slab_free(cp, buf);
2126}
2127
2128#pragma weak umem_zalloc = _umem_zalloc
2129void *
2130_umem_zalloc(size_t size, int umflag)
2131{
2132	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
2133	void *buf;
2134
2135retry:
2136	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
2137		umem_cache_t *cp = umem_alloc_table[index];
2138		buf = _umem_cache_alloc(cp, umflag);
2139		if (buf != NULL) {
2140			if (cp->cache_flags & UMF_BUFTAG) {
2141				umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2142				((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
2143				((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
2144			}
2145			bzero(buf, size);
2146		} else if (umem_alloc_retry(cp, umflag))
2147			goto retry;
2148	} else {
2149		buf = _umem_alloc(size, umflag);	/* handles failure */
2150		if (buf != NULL)
2151			bzero(buf, size);
2152	}
2153	return (buf);
2154}
2155
2156#pragma weak umem_alloc = _umem_alloc
2157void *
2158_umem_alloc(size_t size, int umflag)
2159{
2160	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
2161	void *buf;
2162umem_alloc_retry:
2163	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
2164		umem_cache_t *cp = umem_alloc_table[index];
2165		buf = _umem_cache_alloc(cp, umflag);
2166		if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
2167			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2168			((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
2169			((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
2170		}
2171		if (buf == NULL && umem_alloc_retry(cp, umflag))
2172			goto umem_alloc_retry;
2173		return (buf);
2174	}
2175	if (size == 0)
2176		return (NULL);
2177	if (umem_oversize_arena == NULL) {
2178		if (umem_init())
2179			ASSERT(umem_oversize_arena != NULL);
2180		else
2181			return (NULL);
2182	}
2183	buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag));
2184	if (buf == NULL) {
2185		umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
2186		if (umem_alloc_retry(NULL, umflag))
2187			goto umem_alloc_retry;
2188	}
2189	return (buf);
2190}
2191
2192#pragma weak umem_alloc_align = _umem_alloc_align
2193void *
2194_umem_alloc_align(size_t size, size_t align, int umflag)
2195{
2196	void *buf;
2197
2198	if (size == 0)
2199		return (NULL);
2200	if ((align & (align - 1)) != 0)
2201		return (NULL);
2202	if (align < UMEM_ALIGN)
2203		align = UMEM_ALIGN;
2204
2205umem_alloc_align_retry:
2206	if (umem_memalign_arena == NULL) {
2207		if (umem_init())
2208			ASSERT(umem_memalign_arena != NULL);
2209		else
2210			return (NULL);
2211	}
2212	buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL,
2213	    UMEM_VMFLAGS(umflag));
2214	if (buf == NULL) {
2215		umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
2216		if (umem_alloc_retry(NULL, umflag))
2217			goto umem_alloc_align_retry;
2218	}
2219	return (buf);
2220}
2221
2222#pragma weak umem_free = _umem_free
2223void
2224_umem_free(void *buf, size_t size)
2225{
2226	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
2227
2228	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
2229		umem_cache_t *cp = umem_alloc_table[index];
2230		if (cp->cache_flags & UMF_BUFTAG) {
2231			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2232			uint32_t *ip = (uint32_t *)btp;
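			/*
			 * umem_alloc() recorded UMEM_SIZE_ENCODE(size) in the
			 * second 32-bit word of the buftag and wrote
			 * UMEM_REDZONE_BYTE just past the caller's size.  The
			 * checks below therefore distinguish a duplicate free
			 * (the buffer already holds the free pattern), a free
			 * with the wrong size (the stored encoding is still
			 * valid), and a trampled buftag (redzone error).
			 */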
2233			if (ip[1] != UMEM_SIZE_ENCODE(size)) {
2234				if (*(uint64_t *)buf == UMEM_FREE_PATTERN) {
2235					umem_error(UMERR_DUPFREE, cp, buf);
2236					return;
2237				}
2238				if (UMEM_SIZE_VALID(ip[1])) {
2239					ip[0] = UMEM_SIZE_ENCODE(size);
2240					umem_error(UMERR_BADSIZE, cp, buf);
2241				} else {
2242					umem_error(UMERR_REDZONE, cp, buf);
2243				}
2244				return;
2245			}
2246			if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) {
2247				umem_error(UMERR_REDZONE, cp, buf);
2248				return;
2249			}
2250			btp->bt_redzone = UMEM_REDZONE_PATTERN;
2251		}
2252		_umem_cache_free(cp, buf);
2253	} else {
2254		if (buf == NULL && size == 0)
2255			return;
2256		vmem_free(umem_oversize_arena, buf, size);
2257	}
2258}
2259
2260#pragma weak umem_free_align = _umem_free_align
2261void
2262_umem_free_align(void *buf, size_t size)
2263{
2264	if (buf == NULL && size == 0)
2265		return;
2266	vmem_xfree(umem_memalign_arena, buf, size);
2267}
2268
2269static void *
2270umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2271{
2272	size_t realsize = size + vmp->vm_quantum;
2273
2274	/*
2275	 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2276	 * vm_quantum will cause integer wraparound.  Check for this, and
2277	 * blow off the firewall page in this case.  Note that such a
2278	 * giant allocation (the entire address space) can never be
2279	 * satisfied, so it will either fail immediately (VM_NOSLEEP)
2280	 * or sleep forever (VM_SLEEP).  Thus, there is no need for a
2281	 * corresponding check in umem_firewall_va_free().
2282	 */
2283	if (realsize < size)
2284		realsize = size;
2285
2286	return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT));
2287}
2288
2289static void
2290umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
2291{
2292	vmem_free(vmp, addr, size + vmp->vm_quantum);
2293}
2294
2295/*
2296 * Reclaim all unused memory from a cache.
2297 */
2298static void
2299umem_cache_reap(umem_cache_t *cp)
2300{
2301	/*
2302	 * Ask the cache's owner to free some memory if possible.
2303	 * The idea is to handle things like the inode cache, which
2304	 * typically sits on a bunch of memory that it doesn't truly
2305	 * *need*.  Reclaim policy is entirely up to the owner; this
2306	 * callback is just an advisory plea for help.
2307	 */
2308	if (cp->cache_reclaim != NULL)
2309		cp->cache_reclaim(cp->cache_private);
2310
2311	umem_depot_ws_reap(cp);
2312}
2313
2314/*
2315 * Purge all magazines from a cache and set its magazine limit to zero.
2316 * All calls are serialized by being done by the update thread, except for
2317 * the final call from umem_cache_destroy().
2318 */
2319static void
2320umem_cache_magazine_purge(umem_cache_t *cp)
2321{
2322	umem_cpu_cache_t *ccp;
2323	umem_magazine_t *mp, *pmp;
2324	int rounds, prounds, cpu_seqid;
2325
2326	ASSERT(cp->cache_next == NULL || IN_UPDATE());
2327
2328	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2329		ccp = &cp->cache_cpu[cpu_seqid];
2330
2331		(void) mutex_lock(&ccp->cc_lock);
2332		mp = ccp->cc_loaded;
2333		pmp = ccp->cc_ploaded;
2334		rounds = ccp->cc_rounds;
2335		prounds = ccp->cc_prounds;
2336		ccp->cc_loaded = NULL;
2337		ccp->cc_ploaded = NULL;
2338		ccp->cc_rounds = -1;
2339		ccp->cc_prounds = -1;
2340		ccp->cc_magsize = 0;
2341		(void) mutex_unlock(&ccp->cc_lock);
2342
2343		if (mp)
2344			umem_magazine_destroy(cp, mp, rounds);
2345		if (pmp)
2346			umem_magazine_destroy(cp, pmp, prounds);
2347	}
2348
2349	/*
2350	 * Updating the working set statistics twice in a row has the
2351	 * effect of setting the working set size to zero, so everything
2352	 * is eligible for reaping.
2353	 */
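	/*
	 * Concretely: each call copies ml_min into ml_reaplimit and resets
	 * ml_min to ml_total, so after two back-to-back calls ml_reaplimit
	 * equals ml_total and the reap below frees every depot magazine.
	 */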
2354	umem_depot_ws_update(cp);
2355	umem_depot_ws_update(cp);
2356
2357	umem_depot_ws_reap(cp);
2358}
2359
2360/*
2361 * Enable per-cpu magazines on a cache.
2362 */
2363static void
2364umem_cache_magazine_enable(umem_cache_t *cp)
2365{
2366	int cpu_seqid;
2367
2368	if (cp->cache_flags & UMF_NOMAGAZINE)
2369		return;
2370
2371	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2372		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2373		(void) mutex_lock(&ccp->cc_lock);
2374		ccp->cc_magsize = cp->cache_magtype->mt_magsize;
2375		(void) mutex_unlock(&ccp->cc_lock);
2376	}
2377
2378}
2379
2380/*
2381 * Recompute a cache's magazine size.  The trade-off is that larger magazines
2382 * provide a higher transfer rate with the depot, while smaller magazines
2383 * reduce memory consumption.  Magazine resizing is an expensive operation;
2384 * it should not be done frequently.
2385 *
2386 * Changes to the magazine size are serialized by having only one thread
2387 * (the update thread) perform the updates.
2388 *
2389 * Note: at present this only grows the magazine size.  It might be useful
2390 * to allow shrinkage too.
2391 */
2392static void
2393umem_cache_magazine_resize(umem_cache_t *cp)
2394{
2395	umem_magtype_t *mtp = cp->cache_magtype;
2396
2397	ASSERT(IN_UPDATE());
2398
2399	if (cp->cache_chunksize < mtp->mt_maxbuf) {
2400		umem_cache_magazine_purge(cp);
2401		(void) mutex_lock(&cp->cache_depot_lock);
2402		cp->cache_magtype = ++mtp;
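		/*
		 * Bias the contention snapshot by INT_MAX so that the next
		 * pass of umem_cache_update() computes a negative contention
		 * delta and does not request another resize before the larger
		 * magazines have had a chance to reduce depot traffic.
		 */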
2403		cp->cache_depot_contention_prev =
2404		    cp->cache_depot_contention + INT_MAX;
2405		(void) mutex_unlock(&cp->cache_depot_lock);
2406		umem_cache_magazine_enable(cp);
2407	}
2408}
2409
2410/*
2411 * Rescale a cache's hash table so that the number of hash buckets roughly
2412 * matches the number of allocated buffers, keeping lookup chains short.
2413 */
2414static void
2415umem_hash_rescale(umem_cache_t *cp)
2416{
2417	umem_bufctl_t **old_table, **new_table, *bcp;
2418	size_t old_size, new_size, h;
2419
2420	ASSERT(IN_UPDATE());
2421
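	/*
	 * Size the new table to roughly one bucket per allocated buffer,
	 * rounded to a power of two: e.g. a cache holding about 1000 buffers
	 * gets a 1024-bucket table, keeping the average chain length near 1.
	 */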
2422	new_size = MAX(UMEM_HASH_INITIAL,
2423	    1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
2424	old_size = cp->cache_hash_mask + 1;
2425
2426	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
2427		return;
2428
2429	new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *),
2430	    VM_NOSLEEP);
2431	if (new_table == NULL)
2432		return;
2433	bzero(new_table, new_size * sizeof (void *));
2434
2435	(void) mutex_lock(&cp->cache_lock);
2436
2437	old_size = cp->cache_hash_mask + 1;
2438	old_table = cp->cache_hash_table;
2439
2440	cp->cache_hash_mask = new_size - 1;
2441	cp->cache_hash_table = new_table;
2442	cp->cache_rescale++;
2443
2444	for (h = 0; h < old_size; h++) {
2445		bcp = old_table[h];
2446		while (bcp != NULL) {
2447			void *addr = bcp->bc_addr;
2448			umem_bufctl_t *next_bcp = bcp->bc_next;
2449			umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
2450			bcp->bc_next = *hash_bucket;
2451			*hash_bucket = bcp;
2452			bcp = next_bcp;
2453		}
2454	}
2455
2456	(void) mutex_unlock(&cp->cache_lock);
2457
2458	vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *));
2459}
2460
2461/*
2462 * Perform periodic maintenance on a cache: hash rescaling,
2463 * depot working-set update, and magazine resizing.
2464 */
2465void
2466umem_cache_update(umem_cache_t *cp)
2467{
2468	int update_flags = 0;
2469
2470	ASSERT(MUTEX_HELD(&umem_cache_lock));
2471
2472	/*
2473	 * If the cache has become much larger or smaller than its hash table,
2474	 * fire off a request to rescale the hash table.
2475	 */
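	/*
	 * For example, a 64-bucket table (cache_hash_mask == 63) is flagged
	 * for a rescale once it holds more than 126 buffers.  The shrink case
	 * only applies once the table has grown beyond its initial size.
	 */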
2476	(void) mutex_lock(&cp->cache_lock);
2477
2478	if ((cp->cache_flags & UMF_HASH) &&
2479	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
2480	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
2481	    cp->cache_hash_mask > UMEM_HASH_INITIAL)))
2482		update_flags |= UMU_HASH_RESCALE;
2483
2484	(void) mutex_unlock(&cp->cache_lock);
2485
2486	/*
2487	 * Update the depot working set statistics.
2488	 */
2489	umem_depot_ws_update(cp);
2490
2491	/*
2492	 * If there's a lot of contention in the depot,
2493	 * increase the magazine size.
2494	 */
2495	(void) mutex_lock(&cp->cache_depot_lock);
2496
2497	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
2498	    (int)(cp->cache_depot_contention -
2499	    cp->cache_depot_contention_prev) > umem_depot_contention)
2500		update_flags |= UMU_MAGAZINE_RESIZE;
2501
2502	cp->cache_depot_contention_prev = cp->cache_depot_contention;
2503
2504	(void) mutex_unlock(&cp->cache_depot_lock);
2505
2506	if (update_flags)
2507		umem_add_update(cp, update_flags);
2508}
2509
2510/*
2511 * Runs all pending updates.
2512 *
2513 * The update lock must be held on entrance, and will be held on exit.
2514 */
2515void
2516umem_process_updates(void)
2517{
2518	ASSERT(MUTEX_HELD(&umem_update_lock));
2519
2520	while (umem_null_cache.cache_unext != &umem_null_cache) {
2521		int notify = 0;
2522		umem_cache_t *cp = umem_null_cache.cache_unext;
2523
2524		cp->cache_uprev->cache_unext = cp->cache_unext;
2525		cp->cache_unext->cache_uprev = cp->cache_uprev;
2526		cp->cache_uprev = cp->cache_unext = NULL;
2527
2528		ASSERT(!(cp->cache_uflags & UMU_ACTIVE));
2529
2530		while (cp->cache_uflags) {
2531			int uflags = (cp->cache_uflags |= UMU_ACTIVE);
2532			(void) mutex_unlock(&umem_update_lock);
2533
2534			/*
2535			 * The order here is important.  Each step can speed up
2536			 * later steps.
2537			 */
2538
2539			if (uflags & UMU_HASH_RESCALE)
2540				umem_hash_rescale(cp);
2541
2542			if (uflags & UMU_MAGAZINE_RESIZE)
2543				umem_cache_magazine_resize(cp);
2544
2545			if (uflags & UMU_REAP)
2546				umem_cache_reap(cp);
2547
2548			(void) mutex_lock(&umem_update_lock);
2549
2550			/*
2551			 * check if anyone has requested notification
2552			 */
2553			if (cp->cache_uflags & UMU_NOTIFY) {
2554				uflags |= UMU_NOTIFY;
2555				notify = 1;
2556			}
2557			cp->cache_uflags &= ~uflags;
2558		}
2559		if (notify)
2560			(void) cond_broadcast(&umem_update_cv);
2561	}
2562}
2563
2564#ifndef UMEM_STANDALONE
2565static void
2566umem_st_update(void)
2567{
2568	ASSERT(MUTEX_HELD(&umem_update_lock));
2569	ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0);
2570
2571	umem_st_update_thr = thr_self();
2572
2573	(void) mutex_unlock(&umem_update_lock);
2574
2575	vmem_update(NULL);
2576	umem_cache_applyall(umem_cache_update);
2577
2578	(void) mutex_lock(&umem_update_lock);
2579
2580	umem_process_updates();	/* does all of the requested work */
2581
2582	umem_reap_next = gethrtime() +
2583	    (hrtime_t)umem_reap_interval * NANOSEC;
2584
2585	umem_reaping = UMEM_REAP_DONE;
2586
2587	umem_st_update_thr = 0;
2588}
2589#endif
2590
2591/*
2592 * Reclaim all unused memory from all caches.  Called from vmem when memory
2593 * gets tight.  Must be called with no locks held.
2594 *
2595 * This just requests a reap on all caches, and notifies the update thread.
2596 */
2597void
2598umem_reap(void)
2599{
2600#ifndef UMEM_STANDALONE
2601	extern int __nthreads(void);
2602#endif
2603
2604	if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE ||
2605	    gethrtime() < umem_reap_next)
2606		return;
2607
2608	(void) mutex_lock(&umem_update_lock);
2609
2610	if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) {
2611		(void) mutex_unlock(&umem_update_lock);
2612		return;
2613	}
2614	umem_reaping = UMEM_REAP_ADDING;	/* lock out other reaps */
2615
2616	(void) mutex_unlock(&umem_update_lock);
2617
2618	umem_updateall(UMU_REAP);
2619
2620	(void) mutex_lock(&umem_update_lock);
2621
2622	umem_reaping = UMEM_REAP_ACTIVE;
2623
2624	/* Standalone is single-threaded */
2625#ifndef UMEM_STANDALONE
2626	if (umem_update_thr == 0) {
2627		/*
2628		 * The update thread does not exist.  If the process is
2629		 * multi-threaded, create it.  If not, or the creation fails,
2630		 * do the update processing inline.
2631		 */
2632		ASSERT(umem_st_update_thr == 0);
2633
2634		if (__nthreads() <= 1 || umem_create_update_thread() == 0)
2635			umem_st_update();
2636	}
2637
2638	(void) cond_broadcast(&umem_update_cv);	/* wake up the update thread */
2639#endif
2640
2641	(void) mutex_unlock(&umem_update_lock);
2642}
2643
2644umem_cache_t *
2645umem_cache_create(
2646	char *name,		/* descriptive name for this cache */
2647	size_t bufsize,		/* size of the objects it manages */
2648	size_t align,		/* required object alignment */
2649	umem_constructor_t *constructor, /* object constructor */
2650	umem_destructor_t *destructor, /* object destructor */
2651	umem_reclaim_t *reclaim, /* memory reclaim callback */
2652	void *private,		/* pass-thru arg for constr/destr/reclaim */
2653	vmem_t *vmp,		/* vmem source for slab allocation */
2654	int cflags)		/* cache creation flags */
2655{
2656	int cpu_seqid;
2657	size_t chunksize;
2658	umem_cache_t *cp, *cnext, *cprev;
2659	umem_magtype_t *mtp;
2660	size_t csize;
2661	size_t phase;
2662
2663	/*
2664	 * The init thread is allowed to create internal and quantum caches.
2665	 *
2666 * Other threads must wait until initialization is complete.
2667	 */
2668	if (umem_init_thr == thr_self())
2669		ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0);
2670	else {
2671		ASSERT(!(cflags & UMC_INTERNAL));
2672		if (umem_ready != UMEM_READY && umem_init() == 0) {
2673			errno = EAGAIN;
2674			return (NULL);
2675		}
2676	}
2677
2678	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
2679	phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE);
2680
2681	if (vmp == NULL)
2682		vmp = umem_default_arena;
2683
2684	ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0);
2685
2686	/*
2687	 * Check that the arguments are reasonable
2688	 */
2689	if ((align & (align - 1)) != 0 || align > vmp->vm_quantum ||
2690	    ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) ||
2691	    name == NULL || bufsize == 0) {
2692		errno = EINVAL;
2693		return (NULL);
2694	}
2695
2696	/*
2697	 * If align == 0, we set it to the minimum required alignment.
2698	 *
2699	 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless
2700	 * UMC_NOTOUCH was passed.
2701	 */
2702	if (align == 0) {
2703		if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN)
2704			align = UMEM_SECOND_ALIGN;
2705		else
2706			align = UMEM_ALIGN;
2707	} else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0)
2708		align = UMEM_ALIGN;
2709
2710
2711	/*
2712	 * Get a umem_cache structure.  We arrange that cp->cache_cpu[]
2713	 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent
2714	 * false sharing of per-CPU data.
2715	 */
2716	cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
2717	    0, NULL, NULL, VM_NOSLEEP);
2718
2719	if (cp == NULL) {
2720		errno = EAGAIN;
2721		return (NULL);
2722	}
2723
2724	bzero(cp, csize);
2725
2726	(void) mutex_lock(&umem_flags_lock);
2727	if (umem_flags & UMF_RANDOMIZE)
2728		umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) |
2729		    UMF_RANDOMIZE;
2730	cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
2731	(void) mutex_unlock(&umem_flags_lock);
2732
2733	/*
2734	 * Make sure all the various flags are reasonable.
2735	 */
2736	if (cp->cache_flags & UMF_LITE) {
2737		if (bufsize >= umem_lite_minsize &&
2738		    align <= umem_lite_maxalign &&
2739		    P2PHASE(bufsize, umem_lite_maxalign) != 0) {
2740			cp->cache_flags |= UMF_BUFTAG;
2741			cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2742		} else {
2743			cp->cache_flags &= ~UMF_DEBUG;
2744		}
2745	}
2746
2747	if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
2748		cp->cache_flags |= UMF_NOMAGAZINE;
2749
2750	if (cflags & UMC_NODEBUG)
2751		cp->cache_flags &= ~UMF_DEBUG;
2752
2753	if (cflags & UMC_NOTOUCH)
2754		cp->cache_flags &= ~UMF_TOUCH;
2755
2756	if (cflags & UMC_NOHASH)
2757		cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2758
2759	if (cflags & UMC_NOMAGAZINE)
2760		cp->cache_flags |= UMF_NOMAGAZINE;
2761
2762	if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
2763		cp->cache_flags |= UMF_REDZONE;
2764
2765	if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
2766	    !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
2767		cp->cache_flags |= UMF_FIREWALL;
2768
2769	if (vmp != umem_default_arena || umem_firewall_arena == NULL)
2770		cp->cache_flags &= ~UMF_FIREWALL;
2771
2772	if (cp->cache_flags & UMF_FIREWALL) {
2773		cp->cache_flags &= ~UMF_BUFTAG;
2774		cp->cache_flags |= UMF_NOMAGAZINE;
2775		ASSERT(vmp == umem_default_arena);
2776		vmp = umem_firewall_arena;
2777	}
2778
2779	/*
2780	 * Set cache properties.
2781	 */
2782	(void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
2783	cp->cache_bufsize = bufsize;
2784	cp->cache_align = align;
2785	cp->cache_constructor = constructor;
2786	cp->cache_destructor = destructor;
2787	cp->cache_reclaim = reclaim;
2788	cp->cache_private = private;
2789	cp->cache_arena = vmp;
2790	cp->cache_cflags = cflags;
2791	cp->cache_cpu_mask = umem_cpu_mask;
2792
2793	/*
2794	 * Determine the chunk size.
2795	 */
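	/*
	 * The chunk is the buffer plus any debug metadata.  For example, a
	 * 20-byte buffer with the default 8-byte alignment becomes a 24-byte
	 * chunk whose last 8 bytes double as the inline bufctl while the
	 * buffer is free (cache_bufctl == 16); if UMF_BUFTAG is set, the
	 * buftag is instead appended at offset 24 and the chunk grows by
	 * sizeof (umem_buftag_t) before the final round-up to the alignment.
	 */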
2796	chunksize = bufsize;
2797
2798	if (align >= UMEM_ALIGN) {
2799		chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN);
2800		cp->cache_bufctl = chunksize - UMEM_ALIGN;
2801	}
2802
2803	if (cp->cache_flags & UMF_BUFTAG) {
2804		cp->cache_bufctl = chunksize;
2805		cp->cache_buftag = chunksize;
2806		chunksize += sizeof (umem_buftag_t);
2807	}
2808
2809	if (cp->cache_flags & UMF_DEADBEEF) {
2810		cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
2811		if (cp->cache_flags & UMF_LITE)
2812			cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
2813	}
2814
2815	cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);
2816
2817	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
2818
2819	if (chunksize < bufsize) {
2820		errno = ENOMEM;
2821		goto fail;
2822	}
2823
2824	/*
2825	 * Now that we know the chunk size, determine the optimal slab size.
2826	 */
2827	if (vmp == umem_firewall_arena) {
2828		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
2829		cp->cache_mincolor = cp->cache_slabsize - chunksize;
2830		cp->cache_maxcolor = cp->cache_mincolor;
2831		cp->cache_flags |= UMF_HASH;
2832		ASSERT(!(cp->cache_flags & UMF_BUFTAG));
2833	} else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) &&
2834	    !(cp->cache_flags & UMF_AUDIT) &&
2835	    chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) {
2836		cp->cache_slabsize = vmp->vm_quantum;
2837		cp->cache_mincolor = 0;
2838		cp->cache_maxcolor =
2839		    (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;
2840
2841		if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
2842			errno = EINVAL;
2843			goto fail;
2844		}
2845		ASSERT(!(cp->cache_flags & UMF_AUDIT));
2846	} else {
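		/*
		 * Search for the slab size (a multiple of the arena quantum)
		 * that minimizes per-chunk waste.  For example, with a
		 * 4096-byte quantum and a 3000-byte chunk, a one-page slab
		 * wastes 1096 bytes per chunk, while a three-page (12288-byte)
		 * slab holds four chunks with only 288 bytes (72 per chunk)
		 * left over; 12288 wins, and the leftover becomes coloring.
		 */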
2847		size_t chunks, waste, slabsize;
2848		size_t minwaste = LONG_MAX;
2849		size_t bestfit = SIZE_MAX;
2850
2851		for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) {
2852			slabsize = P2ROUNDUP(chunksize * chunks,
2853			    vmp->vm_quantum);
2854			/*
2855			 * check for overflow
2856			 */
2857			if ((slabsize / chunks) < chunksize) {
2858				errno = ENOMEM;
2859				goto fail;
2860			}
2861			chunks = slabsize / chunksize;
2862			waste = (slabsize % chunksize) / chunks;
2863			if (waste < minwaste) {
2864				minwaste = waste;
2865				bestfit = slabsize;
2866			}
2867		}
2868		if (cflags & UMC_QCACHE)
2869			bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64);
2870		if (bestfit == SIZE_MAX) {
2871			errno = ENOMEM;
2872			goto fail;
2873		}
2874		cp->cache_slabsize = bestfit;
2875		cp->cache_mincolor = 0;
2876		cp->cache_maxcolor = bestfit % chunksize;
2877		cp->cache_flags |= UMF_HASH;
2878	}
2879
2880	if (cp->cache_flags & UMF_HASH) {
2881		ASSERT(!(cflags & UMC_NOHASH));
2882		cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
2883		    umem_bufctl_audit_cache : umem_bufctl_cache;
2884	}
2885
2886	if (cp->cache_maxcolor >= vmp->vm_quantum)
2887		cp->cache_maxcolor = vmp->vm_quantum - 1;
2888
2889	cp->cache_color = cp->cache_mincolor;
2890
2891	/*
2892	 * Initialize the rest of the slab layer.
2893	 */
2894	(void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);
2895
2896	cp->cache_freelist = &cp->cache_nullslab;
2897	cp->cache_nullslab.slab_cache = cp;
2898	cp->cache_nullslab.slab_refcnt = -1;
2899	cp->cache_nullslab.slab_next = &cp->cache_nullslab;
2900	cp->cache_nullslab.slab_prev = &cp->cache_nullslab;
2901
2902	if (cp->cache_flags & UMF_HASH) {
2903		cp->cache_hash_table = vmem_alloc(umem_hash_arena,
2904		    UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
2905		if (cp->cache_hash_table == NULL) {
2906			errno = EAGAIN;
2907			goto fail_lock;
2908		}
2909		bzero(cp->cache_hash_table,
2910		    UMEM_HASH_INITIAL * sizeof (void *));
2911		cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
2912		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
2913	}
2914
2915	/*
2916	 * Initialize the depot.
2917	 */
2918	(void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);
2919
2920	for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
2921		continue;
2922
2923	cp->cache_magtype = mtp;
2924
2925	/*
2926	 * Initialize the CPU layer.
2927	 */
2928	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2929		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2930		(void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL);
2931		ccp->cc_flags = cp->cache_flags;
2932		ccp->cc_rounds = -1;
2933		ccp->cc_prounds = -1;
2934	}
2935
2936	/*
2937	 * Add the cache to the global list.  This makes it visible
2938	 * to umem_update(), so the cache must be ready for business.
2939	 */
2940	(void) mutex_lock(&umem_cache_lock);
2941	cp->cache_next = cnext = &umem_null_cache;
2942	cp->cache_prev = cprev = umem_null_cache.cache_prev;
2943	cnext->cache_prev = cp;
2944	cprev->cache_next = cp;
2945	(void) mutex_unlock(&umem_cache_lock);
2946
2947	if (umem_ready == UMEM_READY)
2948		umem_cache_magazine_enable(cp);
2949
2950	return (cp);
2951
2952fail_lock:
2953	(void) mutex_destroy(&cp->cache_lock);
2954fail:
2955	vmem_xfree(umem_cache_arena, cp, csize);
2956	return (NULL);
2957}
2958
2959void
2960umem_cache_destroy(umem_cache_t *cp)
2961{
2962	int cpu_seqid;
2963
2964	/*
2965	 * Remove the cache from the global cache list so that no new updates
2966	 * will be scheduled on its behalf, wait for any pending tasks to
2967	 * complete, purge the cache, and then destroy it.
2968	 */
2969	(void) mutex_lock(&umem_cache_lock);
2970	cp->cache_prev->cache_next = cp->cache_next;
2971	cp->cache_next->cache_prev = cp->cache_prev;
2972	cp->cache_prev = cp->cache_next = NULL;
2973	(void) mutex_unlock(&umem_cache_lock);
2974
2975	umem_remove_updates(cp);
2976
2977	umem_cache_magazine_purge(cp);
2978
2979	(void) mutex_lock(&cp->cache_lock);
2980	if (cp->cache_buftotal != 0)
2981		log_message("umem_cache_destroy: '%s' (%p) not empty\n",
2982		    cp->cache_name, (void *)cp);
2983	cp->cache_reclaim = NULL;
2984	/*
2985	 * The cache is now dead.  There should be no further activity.
2986	 * We enforce this by setting land mines in the constructor and
2987	 * destructor routines that induce a segmentation fault if invoked.
2988	 */
2989	cp->cache_constructor = (umem_constructor_t *)1;
2990	cp->cache_destructor = (umem_destructor_t *)2;
2991	(void) mutex_unlock(&cp->cache_lock);
2992
2993	if (cp->cache_hash_table != NULL)
2994		vmem_free(umem_hash_arena, cp->cache_hash_table,
2995		    (cp->cache_hash_mask + 1) * sizeof (void *));
2996
2997	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++)
2998		(void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
2999
3000	(void) mutex_destroy(&cp->cache_depot_lock);
3001	(void) mutex_destroy(&cp->cache_lock);
3002
3003	vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
3004}
3005
3006void
3007umem_alloc_sizes_clear(void)
3008{
3009	int i;
3010
3011	umem_alloc_sizes[0] = UMEM_MAXBUF;
3012	for (i = 1; i < NUM_ALLOC_SIZES; i++)
3013		umem_alloc_sizes[i] = 0;
3014}
3015
3016void
3017umem_alloc_sizes_add(size_t size_arg)
3018{
3019	int i, j;
3020	size_t size = size_arg;
3021
3022	if (size == 0) {
3023		log_message("size_add: cannot add zero-sized cache\n");
3025		return;
3026	}
3027
3028	if (size > UMEM_MAXBUF) {
3029		log_message("size_add: %ld > %d, cannot add\n", size,
3030		    UMEM_MAXBUF);
3031		return;
3032	}
3033
3034	if (umem_alloc_sizes[NUM_ALLOC_SIZES - 1] != 0) {
3035		log_message("size_add: no space in alloc_table for %ld\n",
3036		    size);
3037		return;
3038	}
3039
3040	if (P2PHASE(size, UMEM_ALIGN) != 0) {
3041		size = P2ROUNDUP(size, UMEM_ALIGN);
3042		log_message("size_add: rounding %ld up to %ld\n", size_arg,
3043		    size);
3044	}
3045
3046	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3047		int cur = umem_alloc_sizes[i];
3048		if (cur == size) {
3049			log_message("size_add: %ld already in table\n",
3050			    size);
3051			return;
3052		}
3053		if (cur > size)
3054			break;
3055	}
3056
3057	for (j = NUM_ALLOC_SIZES - 1; j > i; j--)
3058		umem_alloc_sizes[j] = umem_alloc_sizes[j-1];
3059	umem_alloc_sizes[i] = size;
3060}
3061
3062void
3063umem_alloc_sizes_remove(size_t size)
3064{
3065	int i;
3066
3067	if (size == UMEM_MAXBUF) {
3068		log_message("size_remove: cannot remove %ld\n", size);
3069		return;
3070	}
3071
3072	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3073		int cur = umem_alloc_sizes[i];
3074		if (cur == size)
3075			break;
3076		else if (cur > size || cur == 0) {
3077			log_message("size_remove: %ld not found in table\n",
3078			    size);
3079			return;
3080		}
3081	}
3082
3083	for (; i + 1 < NUM_ALLOC_SIZES; i++)
3084		umem_alloc_sizes[i] = umem_alloc_sizes[i+1];
3085	umem_alloc_sizes[i] = 0;
3086}
3087
3088/*
3089 * We've been called back from libc to indicate that a thread is terminating
3090 * and needs to release its per-thread memory.  We are told which entry in
3091 * the thread's tmem array the allocation came from.  Currently these entries
3092 * map directly onto the first n umem_caches, which makes this a pretty
3093 * simple indexing job.
3094 */
3095static void
3096umem_cache_tmem_cleanup(void *buf, int entry)
3097{
3098	size_t size;
3099	umem_cache_t *cp;
3100
3101	size = umem_alloc_sizes[entry];
3102	cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
3103	_umem_cache_free(cp, buf);
3104}
3105
3106static int
3107umem_cache_init(void)
3108{
3109	int i;
3110	size_t size, max_size;
3111	umem_cache_t *cp;
3112	umem_magtype_t *mtp;
3113	char name[UMEM_CACHE_NAMELEN + 1];
3114	umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES];
3115
3116	for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
3117		mtp = &umem_magtype[i];
3118		(void) snprintf(name, sizeof (name), "umem_magazine_%d",
3119		    mtp->mt_magsize);
3120		mtp->mt_cache = umem_cache_create(name,
3121		    (mtp->mt_magsize + 1) * sizeof (void *),
3122		    mtp->mt_align, NULL, NULL, NULL, NULL,
3123		    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
3124		if (mtp->mt_cache == NULL)
3125			return (0);
3126	}
3127
3128	umem_slab_cache = umem_cache_create("umem_slab_cache",
3129	    sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL,
3130	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
3131
3132	if (umem_slab_cache == NULL)
3133		return (0);
3134
3135	umem_bufctl_cache = umem_cache_create("umem_bufctl_cache",
3136	    sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL,
3137	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
3138
3139	if (umem_bufctl_cache == NULL)
3140		return (0);
3141
3142	/*
3143	 * The size of the umem_bufctl_audit structure depends upon
3144 * umem_stack_depth.  See umem_impl.h for details on the size
3145	 * restrictions.
3146	 */
3147
3148	size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
3149	max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE;
3150
3151	if (size > max_size) {			/* too large -- truncate */
3152		int max_frames = UMEM_MAX_STACK_DEPTH;
3153
3154		ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size);
3155
3156		umem_stack_depth = max_frames;
3157		size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
3158	}
3159
3160	umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache",
3161	    size, 0, NULL, NULL, NULL, NULL, umem_internal_arena,
3162	    UMC_NOHASH | UMC_INTERNAL);
3163
3164	if (umem_bufctl_audit_cache == NULL)
3165		return (0);
3166
3167	if (vmem_backend & VMEM_BACKEND_MMAP)
3168		umem_va_arena = vmem_create("umem_va",
3169		    NULL, 0, pagesize,
3170		    vmem_alloc, vmem_free, heap_arena,
3171		    8 * pagesize, VM_NOSLEEP);
3172	else
3173		umem_va_arena = heap_arena;
3174
3175	if (umem_va_arena == NULL)
3176		return (0);
3177
3178	umem_default_arena = vmem_create("umem_default",
3179	    NULL, 0, pagesize,
3180	    heap_alloc, heap_free, umem_va_arena,
3181	    0, VM_NOSLEEP);
3182
3183	if (umem_default_arena == NULL)
3184		return (0);
3185
3186	/*
3187	 * make sure the umem_alloc table initializer is correct
3188	 */
3189	i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
3190	ASSERT(umem_alloc_table[i - 1] == &umem_null_cache);
3191
3192	/*
3193	 * Create the default caches to back umem_alloc()
3194	 */
3195	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3196		size_t cache_size = umem_alloc_sizes[i];
3197		size_t align = 0;
3198
3199		if (cache_size == 0)
3200			break;		/* 0 terminates the list */
3201
3202		/*
3203		 * If they allocate a multiple of the coherency granularity,
3204		 * they get a coherency-granularity-aligned address.
3205		 */
3206		if (IS_P2ALIGNED(cache_size, 64))
3207			align = 64;
3208		if (IS_P2ALIGNED(cache_size, pagesize))
3209			align = pagesize;
3210		(void) snprintf(name, sizeof (name), "umem_alloc_%lu",
3211		    (long)cache_size);
3212
3213		cp = umem_cache_create(name, cache_size, align,
3214		    NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL);
3215		if (cp == NULL)
3216			return (0);
3217
3218		umem_alloc_caches[i] = cp;
3219	}
3220
3221	umem_tmem_off = _tmem_get_base();
3222	_tmem_set_cleanup(umem_cache_tmem_cleanup);
3223
3224#ifndef	UMEM_STANDALONE
3225	if (umem_genasm_supported && !(umem_flags & UMF_DEBUG) &&
3226	    !(umem_flags & UMF_NOMAGAZINE) &&
3227	    umem_ptc_size > 0) {
3228		umem_ptc_enabled = umem_genasm(umem_alloc_sizes,
3229		    umem_alloc_caches, i) ? 1 : 0;
3230	}
3231#else
3232	umem_ptc_enabled = 0;
3233#endif
3234
3235	/*
3236	 * Initialization cannot fail at this point.  Make the caches
3237	 * visible to umem_alloc() and friends.
3238	 */
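	/*
	 * Every UMEM_ALIGN-sized (8-byte) step from 8 up to UMEM_MAXBUF is
	 * pointed at the smallest cache whose bufsize covers it.  For example,
	 * umem_alloc(20) computes index (20 - 1) >> UMEM_ALIGN_SHIFT == 2 and,
	 * with the default size table, lands in the 24-byte cache.
	 */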
3239	size = UMEM_ALIGN;
3240	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
3241		size_t cache_size = umem_alloc_sizes[i];
3242
3243		if (cache_size == 0)
3244			break;		/* 0 terminates the list */
3245
3246		cp = umem_alloc_caches[i];
3247
3248		while (size <= cache_size) {
3249			umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
3250			size += UMEM_ALIGN;
3251		}
3252	}
3253	ASSERT(size - UMEM_ALIGN == UMEM_MAXBUF);
3254	return (1);
3255}
3256
3257/*
3258 * umem_startup() is called early on, and must be called explicitly if we're
3259 * the standalone version.
3260 */
3261#ifdef UMEM_STANDALONE
3262void
3263#else
3264#pragma init(umem_startup)
3265static void
3266#endif
3267umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack,
3268    caddr_t maxstack)
3269{
3270#ifdef UMEM_STANDALONE
3271	int idx;
3272	/* Standalone doesn't fork */
3273#else
3274	umem_forkhandler_init(); /* register the fork handler */
3275#endif
3276
3277#ifdef __lint
3278	/* make lint happy */
3279	minstack = maxstack;
3280#endif
3281
3282#ifdef UMEM_STANDALONE
3283	umem_ready = UMEM_READY_STARTUP;
3284	umem_init_env_ready = 0;
3285
3286	umem_min_stack = minstack;
3287	umem_max_stack = maxstack;
3288
3289	nofail_callback = NULL;
3290	umem_slab_cache = NULL;
3291	umem_bufctl_cache = NULL;
3292	umem_bufctl_audit_cache = NULL;
3293	heap_arena = NULL;
3294	heap_alloc = NULL;
3295	heap_free = NULL;
3296	umem_internal_arena = NULL;
3297	umem_cache_arena = NULL;
3298	umem_hash_arena = NULL;
3299	umem_log_arena = NULL;
3300	umem_oversize_arena = NULL;
3301	umem_va_arena = NULL;
3302	umem_default_arena = NULL;
3303	umem_firewall_va_arena = NULL;
3304	umem_firewall_arena = NULL;
3305	umem_memalign_arena = NULL;
3306	umem_transaction_log = NULL;
3307	umem_content_log = NULL;
3308	umem_failure_log = NULL;
3309	umem_slab_log = NULL;
3310	umem_cpu_mask = 0;
3311
3312	umem_cpus = &umem_startup_cpu;
3313	umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0);
3314	umem_startup_cpu.cpu_number = 0;
3315
3316	bcopy(&umem_null_cache_template, &umem_null_cache,
3317	    sizeof (umem_cache_t));
3318
3319	for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++)
3320		umem_alloc_table[idx] = &umem_null_cache;
3321#endif
3322
3323	/*
3324	 * Perform initialization specific to the way we've been compiled
3325	 * (library or standalone)
3326	 */
3327	umem_type_init(start, len, pagesize);
3328
3329	vmem_startup();
3330}
3331
3332int
3333umem_init(void)
3334{
3335	size_t maxverify, minfirewall;
3336	size_t size;
3337	int idx;
3338	umem_cpu_t *new_cpus;
3339
3340	vmem_t *memalign_arena, *oversize_arena;
3341
3342	if (thr_self() != umem_init_thr) {
3343		/*
3344		 * The usual case -- non-recursive invocation of umem_init().
3345		 */
3346		(void) mutex_lock(&umem_init_lock);
3347		if (umem_ready != UMEM_READY_STARTUP) {
3348			/*
3349			 * someone else beat us to initializing umem.  Wait
3350			 * for them to complete, then return.
3351			 */
3352			while (umem_ready == UMEM_READY_INITING) {
3353				int cancel_state;
3354
3355				(void) pthread_setcancelstate(
3356				    PTHREAD_CANCEL_DISABLE, &cancel_state);
3357				(void) cond_wait(&umem_init_cv,
3358				    &umem_init_lock);
3359				(void) pthread_setcancelstate(
3360				    cancel_state, NULL);
3361			}
3362			ASSERT(umem_ready == UMEM_READY ||
3363			    umem_ready == UMEM_READY_INIT_FAILED);
3364			(void) mutex_unlock(&umem_init_lock);
3365			return (umem_ready == UMEM_READY);
3366		}
3367
3368		ASSERT(umem_ready == UMEM_READY_STARTUP);
3369		ASSERT(umem_init_env_ready == 0);
3370
3371		umem_ready = UMEM_READY_INITING;
3372		umem_init_thr = thr_self();
3373
3374		(void) mutex_unlock(&umem_init_lock);
3375		umem_setup_envvars(0);		/* can recurse -- see below */
3376		if (umem_init_env_ready) {
3377			/*
3378			 * initialization was completed already
3379			 */
3380			ASSERT(umem_ready == UMEM_READY ||
3381			    umem_ready == UMEM_READY_INIT_FAILED);
3382			ASSERT(umem_init_thr == 0);
3383			return (umem_ready == UMEM_READY);
3384		}
3385	} else if (!umem_init_env_ready) {
3386		/*
3387		 * The umem_setup_envvars() call (above) makes calls into
3388		 * the dynamic linker and directly into user-supplied code.
3389		 * Since we cannot know what that code will do, we could be
3390		 * recursively invoked (by, say, a malloc() call in the code
3391		 * itself, or in a (C++) _init section it causes to be fired).
3392		 *
3393		 * This code is where we end up if such recursion occurs.  We
3394		 * first clean up any partial results in the envvar code, then
3395		 * proceed to finish initialization processing in the recursive
3396		 * call.  The original call will notice this, and return
3397		 * immediately.
3398		 */
3399		umem_setup_envvars(1);		/* clean up any partial state */
3400	} else {
3401		umem_panic(
3402		    "recursive allocation while initializing umem\n");
3403	}
3404	umem_init_env_ready = 1;
3405
3406	/*
3407	 * From this point until we finish, recursion into umem_init() will
3408	 * cause a umem_panic().
3409	 */
3410	maxverify = minfirewall = ULONG_MAX;
3411
3412	/* LINTED constant condition */
3413	if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
3414		umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
3415		    sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
3416	}
3417
3418	umem_max_ncpus = umem_get_max_ncpus();
3419
3420	/*
3421	 * load tunables from environment
3422	 */
3423	umem_process_envvars();
3424
3425	if (issetugid())
3426		umem_mtbf = 0;
3427
3428	/*
3429	 * set up vmem
3430	 */
3431	if (!(umem_flags & UMF_AUDIT))
3432		vmem_no_debug();
3433
3434	heap_arena = vmem_heap_arena(&heap_alloc, &heap_free);
3435
3436	pagesize = heap_arena->vm_quantum;
3437
3438	umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize,
3439	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3440
3441	umem_default_arena = umem_internal_arena;
3442
3443	if (umem_internal_arena == NULL)
3444		goto fail;
3445
3446	umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN,
3447	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3448
3449	umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN,
3450	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3451
3452	umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN,
3453	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3454
3455	umem_firewall_va_arena = vmem_create("umem_firewall_va",
3456	    NULL, 0, pagesize,
3457	    umem_firewall_va_alloc, umem_firewall_va_free, heap_arena,
3458	    0, VM_NOSLEEP);
3459
3460	if (umem_cache_arena == NULL || umem_hash_arena == NULL ||
3461	    umem_log_arena == NULL || umem_firewall_va_arena == NULL)
3462		goto fail;
3463
3464	umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize,
3465	    heap_alloc, heap_free, umem_firewall_va_arena, 0,
3466	    VM_NOSLEEP);
3467
3468	if (umem_firewall_arena == NULL)
3469		goto fail;
3470
3471	oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize,
3472	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3473	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3474
3475	memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN,
3476	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3477	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3478
3479	if (oversize_arena == NULL || memalign_arena == NULL)
3480		goto fail;
3481
3482	if (umem_max_ncpus > CPUHINT_MAX())
3483		umem_max_ncpus = CPUHINT_MAX();
3484
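	/*
	 * Round umem_max_ncpus up to a power of two so that umem_cpu_mask
	 * (set to umem_max_ncpus - 1 below) can select a CPU cache with a
	 * simple mask: e.g. 6 CPUs become 8 slots and a mask of 0x7.
	 */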
3485	while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0)
3486		umem_max_ncpus++;
3487
3488	if (umem_max_ncpus == 0)
3489		umem_max_ncpus = 1;
3490
3491	size = umem_max_ncpus * sizeof (umem_cpu_t);
3492	new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP);
3493	if (new_cpus == NULL)
3494		goto fail;
3495
3496	bzero(new_cpus, size);
3497	for (idx = 0; idx < umem_max_ncpus; idx++) {
3498		new_cpus[idx].cpu_number = idx;
3499		new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx);
3500	}
3501	umem_cpus = new_cpus;
3502	umem_cpu_mask = (umem_max_ncpus - 1);
3503
3504	if (umem_maxverify == 0)
3505		umem_maxverify = maxverify;
3506
3507	if (umem_minfirewall == 0)
3508		umem_minfirewall = minfirewall;
3509
3510	/*
3511	 * Set up updating and reaping
3512	 */
3513	umem_reap_next = gethrtime() + NANOSEC;
3514
3515#ifndef UMEM_STANDALONE
3516	(void) gettimeofday(&umem_update_next, NULL);
3517#endif
3518
3519	/*
3520	 * Set up logging -- failure here is okay, since it will just disable
3521	 * the logs
3522	 */
3523	if (umem_logging) {
3524		umem_transaction_log = umem_log_init(umem_transaction_log_size);
3525		umem_content_log = umem_log_init(umem_content_log_size);
3526		umem_failure_log = umem_log_init(umem_failure_log_size);
3527		umem_slab_log = umem_log_init(umem_slab_log_size);
3528	}
3529
3530	/*
3531	 * Set up caches -- if successful, initialization cannot fail, since
3532	 * allocations from other threads can now succeed.
3533	 */
3534	if (umem_cache_init() == 0) {
3535		log_message("unable to create initial caches\n");
3536		goto fail;
3537	}
3538	umem_oversize_arena = oversize_arena;
3539	umem_memalign_arena = memalign_arena;
3540
3541	umem_cache_applyall(umem_cache_magazine_enable);
3542
3543	/*
3544	 * initialization done, ready to go
3545	 */
3546	(void) mutex_lock(&umem_init_lock);
3547	umem_ready = UMEM_READY;
3548	umem_init_thr = 0;
3549	(void) cond_broadcast(&umem_init_cv);
3550	(void) mutex_unlock(&umem_init_lock);
3551	return (1);
3552
3553fail:
3554	log_message("umem initialization failed\n");
3555
3556	(void) mutex_lock(&umem_init_lock);
3557	umem_ready = UMEM_READY_INIT_FAILED;
3558	umem_init_thr = 0;
3559	(void) cond_broadcast(&umem_init_cv);
3560	(void) mutex_unlock(&umem_init_lock);
3561	return (0);
3562}
3563
3564void
3565umem_setmtbf(uint32_t mtbf)
3566{
3567	umem_mtbf = mtbf;
3568}
3569