xref: /illumos-gate/usr/src/lib/libumem/common/umem.c (revision 7c478bd9)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
 *
 * The slab allocator, as described in the following two papers:
 *
 *	Jeff Bonwick,
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 *	Proceedings of the Summer 1994 Usenix Conference.
 *	Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 *	Jeff Bonwick and Jonathan Adams,
 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *	Arbitrary Resources.
 *	Proceedings of the 2001 Usenix Conference.
 *	Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * 1. Overview
 * -----------
 * umem is very close to kmem in implementation.  There are four major
 * areas of divergence:
 *
 *	* Initialization
 *
 *	* CPU handling
 *
 *	* umem_update()
 *
 *	* KM_SLEEP vs. UMEM_NOFAIL
 *
 *
 * 2. Initialization
 * -----------------
 * kmem is initialized early on in boot, and knows that no one will call
 * into it before it is ready.  umem does not have these luxuries.  Instead,
 * initialization is divided into two phases:
 *
 *	* library initialization, and
 *
 *	* first use
 *
 * umem's full initialization happens at the time of the first allocation
 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
 * or the first call to umem_cache_create().
 *
 * umem_free() and umem_cache_alloc() do not require special handling,
 * since the only way to get valid arguments for them is to successfully
 * call a function from the first group.
 *
 * 2.1. Library Initialization: umem_startup()
 * -------------------------------------------
 * umem_startup() is called from libumem.so's .init section.  It calls
 * pthread_atfork() to install the handlers necessary for umem's
 * Fork1-Safety.  Because of race condition issues, all other
 * pre-umem_init() initialization is done statically (i.e. by the
 * dynamic linker).
 *
 * For standalone use, umem_startup() returns everything to its initial
 * state.
 *
 * 2.2. First use: umem_init()
 * ------------------------------
 * The first time any memory allocation function is used, we have to
 * create the backing caches and vmem arenas which are needed for it.
 * umem_init() is the central point for that task.  When it completes,
 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
 * to initialize, probably due to lack of memory).
 *
 * There are four different paths from which umem_init() is called:
 *
 *	* from umem_alloc() or umem_zalloc(), with 0 < size < UMEM_MAXBUF,
 *
 *	* from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
 *
 *	* from umem_cache_create(), and
 *
 *	* from memalign(), with align > UMEM_ALIGN.
 *
 * The last three just check if umem is initialized, and call umem_init()
 * if it is not.  For performance reasons, the first case is more complicated.
 *
 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size < UMEM_MAXBUF
 * -----------------------------------------------------------------
 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
 * There is special-case code which causes any allocation against
 * &umem_null_cache to fail by returning NULL, regardless of the
 * flags argument.
 *
 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
 * umem_alloc_retry().  umem_alloc_retry() sees that the allocation
 * was against &umem_null_cache, and calls umem_init().
 *
 * If initialization is successful, umem_alloc_retry() returns 1, which
 * causes umem_alloc()/umem_zalloc() to start over, which causes it to load
 * the (now valid) cache pointer from umem_alloc_table.
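 *
 * In sketch form, the resulting retry loop looks like this (the actual
 * fast path in umem_alloc(), later in this file, differs in detail):
 *
 *	void *buf;
 * retry:
 *	cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
 *	buf = _umem_cache_alloc(cp, umflag);
 *	if (buf == NULL && umem_alloc_retry(cp, umflag))
 *		goto retry;
 *	return (buf);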
 *
 * 2.2.2. Dealing with race conditions
 * -----------------------------------
 * There are a couple of race conditions resulting from the initialization
 * code that we have to guard against:
 *
 *	* In umem_cache_create(), there is a special UMC_INTERNAL cflag
 *	that is passed for caches created during initialization.  It
 *	is illegal for a user to try to create a UMC_INTERNAL cache.
 *	This allows initialization to proceed, but any other
 *	umem_cache_create()s will block by calling umem_init().
 *
 *	* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
 *	is always zero.  umem_cache_alloc uses cp->cache_cpu_mask to
 *	mask the cpu number.  This prevents a race between grabbing a
 *	cache pointer out of umem_alloc_table and growing the cpu array.
 *
 *
 * 3. CPU handling
 * ---------------
 * kmem uses the CPU's sequence number to determine which "cpu cache" to
 * use for an allocation.  Currently, there is no way to get the sequence
 * number in userspace.
 *
 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
 * umem_cpu_t structures.  CPUHINT() is a "hint" function, which we then mask
 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
 * The mechanics of this are all in the CPU(mask) macro.
 *
 * Currently, umem uses _lwp_self() as its hint.
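 *
 * Concretely, the lookup reduces to (see the CPUHINT() and CPU() macro
 * definitions below):
 *
 *	CPU(cp->cache_cpu_mask)
 *	    == umem_cpus + (CPUHINT() & cp->cache_cpu_mask)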
 *
 *
 * 4. The update thread
 * --------------------
 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
 * every kmem cache.  vmem has a periodic timeout for hash table resizing.
 * The kmem_taskq also provides a separate context for kmem_cache_reap()s
 * to be done in, avoiding issues with the context of kmem_reap() callers.
 *
 * Instead, umem has the concept of "updates", which are asynchronous requests
 * for work attached to single caches.  All caches with pending work are
 * on a doubly linked list rooted at the umem_null_cache.  All update state
 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
 * for notification between threads.
 *
 * 4.1. Cache states with regard to updates
 * ----------------------------------------
 * A given cache is in one of three states:
 *
 * Inactive		cache_uflags is zero, cache_u{next,prev} are NULL
 *
 * Work Requested	cache_uflags is non-zero (but UMU_ACTIVE is not set),
 *			cache_u{next,prev} link the cache onto the global
 *			update list
 *
 * Active		cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
 *			are NULL, and either umem_update_thr or
 *			umem_st_update_thr are actively doing work on the
 *			cache.
 *
 * An update can be added to any cache in any state -- if the cache is
 * Inactive, it transitions to being Work Requested.  If the cache is
 * Active, the worker will notice the new update and act on it before
 * transitioning the cache to the Inactive state.
 *
 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
 * the worker to broadcast the umem_update_cv when it has finished.
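 *
 * The transitions, in diagram form:
 *
 *	Inactive --(umem_add_update())--> Work Requested
 *	Work Requested --(worker picks up the cache)--> Active
 *	Active --(work complete, no new flags)--> Inactive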
 *
 * 4.2. Update interface
 * ---------------------
 * umem_add_update() adds an update to a particular cache.
 * umem_updateall() adds an update to all caches.
 * umem_remove_updates() returns a cache to the Inactive state.
 *
 * umem_process_updates() processes all caches in the Work Requested state.
 *
 * 4.3. Reaping
 * ------------
 * When umem_reap() is called (at the time of heap growth), it schedules
 * UMU_REAP updates on every cache.  It then checks to see if the update
 * thread exists (umem_update_thr != 0).  If it does, it broadcasts
 * the umem_update_cv to wake the update thread up, and returns.
 *
 * If the update thread does not exist (umem_update_thr == 0), and the
 * program currently has multiple threads, umem_reap() attempts to create
 * a new update thread.
 *
 * If the process is not multithreaded, or the creation fails, umem_reap()
 * calls umem_st_update() to do an inline update.
 *
 * 4.4. The update thread
 * ----------------------
 * The update thread spends most of its time in cond_timedwait() on the
 * umem_update_cv.  It wakes up under two conditions:
 *
 *	* The timedwait times out, in which case it needs to run a global
 *	update, or
 *
 *	* someone cond_broadcast(3THR)s the umem_update_cv, in which case
 *	it needs to check if there are any caches in the Work Requested
 *	state.
 *
 * When it is time for another global update, umem calls umem_cache_update()
 * on every cache, then calls vmem_update(), which tunes the vmem structures.
 * umem_cache_update() can request further work using umem_add_update().
 *
 * After any work from the global update completes, the update timer is
 * reset to umem_reap_interval seconds in the future.  This makes the
 * updates self-throttling.
 *
 * Reaps are similarly self-throttling.  After a UMU_REAP update has
 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
 * update thread.  The update thread notices the flag, and resets the
 * reap state.
 *
 * 4.5. Inline updates
 * -------------------
 * If the update thread is not running, umem_st_update() is used instead.  It
 * immediately does a global update (as above), then calls
 * umem_process_updates() to process both the reaps that umem_reap() added and
 * any work generated by the global update.  Afterwards, it resets the reap
 * state.
 *
 * While umem_st_update() is running, umem_st_update_thr holds the thread
 * id of the thread performing the update.
 *
 * 4.6. Updates and fork1()
 * ------------------------
 * umem has fork1() pre- and post-handlers which lock up (and release) every
 * mutex in every cache.  They also lock up the umem_update_lock.  Since
 * fork1() only copies over a single lwp, other threads (including the update
 * thread) could have been actively using a cache in the parent.  This
 * can lead to inconsistencies in the child process.
 *
 * Because we locked all of the mutexes, the only possible inconsistencies are:
 *
 *	* a umem_cache_alloc() could leak its buffer.
 *
 *	* a caller of umem_depot_alloc() could leak a magazine, and all the
 *	buffers contained in it.
 *
 *	* a cache could be in the Active update state.  In the child, there
 *	would be no thread actually working on it.
 *
 *	* a umem_hash_rescale() could leak the new hash table.
 *
 *	* a umem_magazine_resize() could be in progress.
 *
 *	* a umem_reap() could be in progress.
 *
 * The memory leaks we can't do anything about.  umem_release_child() resets
 * the update state and moves any caches in the Active state to the Work
 * Requested state.  This might cause some updates to be re-run, but UMU_REAP
 * and UMU_HASH_RESCALE are effectively idempotent, and the worst that can
 * happen from umem_magazine_resize() is resizing the magazine twice in close
 * succession.
 *
 * Much of the cleanup in umem_release_child() is skipped if
 * umem_st_update_thr == thr_self().  This is so that applications which call
 * fork1() from a cache callback do not break.  Needless to say, any such
 * application is tremendously broken.
 *
 *
 * 5. KM_SLEEP vs. UMEM_NOFAIL
 * ---------------------------
 * Allocations against kmem and vmem have two basic modes:  SLEEP and
 * NOSLEEP.  A sleeping allocation will go to sleep (waiting for
 * more memory) instead of failing (returning NULL).
 *
 * SLEEP allocations presume an extremely multithreaded model, with
 * a lot of allocation and deallocation activity.  umem cannot presume
 * that its clients have any particular type of behavior.  Instead,
 * it provides two types of allocations:
 *
 *	* UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
 *	failure)
 *
 *	* UMEM_NOFAIL, which, on failure, calls an optional callback
 *	(registered with umem_nofail_callback()).
 *
 * The callback is invoked with no locks held, and can do an arbitrary
 * amount of work.  It then has a choice between:
 *
 *	* Returning UMEM_CALLBACK_RETRY, which will cause the allocation
 *	to be restarted.
 *
 *	* Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
 *	to be invoked with status.  If multiple threads attempt to do
 *	this simultaneously, only one will call exit(2).
 *
 *	* Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
 *	etc.)
 *
 * The default callback returns UMEM_CALLBACK_EXIT(255).
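 *
 * As an illustrative (hypothetical) client, a program that first tries to
 * release its own cached state and otherwise exits cleanly might register:
 *
 *	static int
 *	my_nofail_cb(void)
 *	{
 *		if (my_reclaim())	(application-specific hook)
 *			return (UMEM_CALLBACK_RETRY);
 *		return (UMEM_CALLBACK_EXIT(1));
 *	}
 *	...
 *	umem_nofail_callback(my_nofail_cb);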
 *
 * To have these callbacks without risk of state corruption (in the case of
 * a non-local exit), we have to ensure that the callbacks get invoked
 * close to the original allocation, with no inconsistent state or held
 * locks.  The following steps are taken:
 *
 *	* All invocations of vmem are VM_NOSLEEP.
 *
 *	* All constructor callbacks (which can themselves do allocations)
 *	are passed UMEM_DEFAULT as their required allocation argument.  This
 *	way, the constructor will fail, allowing the highest-level allocation
 *	to invoke the nofail callback.
 *
 *	If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
 *	the nofail callback does a non-local exit, we will leak the
 *	partially-constructed buffer.
 */

#include "mtlib.h"
#include <umem_impl.h>
#include <sys/vmem_impl_user.h>
#include "umem_base.h"
#include "vmem_base.h"

#include <sys/processor.h>
#include <sys/sysmacros.h>

#include <alloca.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <atomic.h>

#include "misc.h"

#define	UMEM_VMFLAGS(umflag)	(VM_NOSLEEP)

size_t pagesize;

/*
 * The default set of caches to back umem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next cache size greater than or equal to it must be a
 * multiple of 64.
 */
static const int umem_alloc_sizes[] = {
#ifdef _LP64
	1 * 8,
	1 * 16,
	2 * 16,
	3 * 16,
#else
	1 * 8,
	2 * 8,
	3 * 8,
	4 * 8,		5 * 8,		6 * 8,		7 * 8,
#endif
	4 * 16,		5 * 16,		6 * 16,		7 * 16,
	4 * 32,		5 * 32,		6 * 32,		7 * 32,
	4 * 64,		5 * 64,		6 * 64,		7 * 64,
	4 * 128,	5 * 128,	6 * 128,	7 * 128,
	P2ALIGN(8192 / 7, 64),
	P2ALIGN(8192 / 6, 64),
	P2ALIGN(8192 / 5, 64),
	P2ALIGN(8192 / 4, 64),
	P2ALIGN(8192 / 3, 64),
	P2ALIGN(8192 / 2, 64),
	P2ALIGN(8192 / 1, 64),
	4096 * 3,
	8192 * 2,
};
#define	NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))

#define	UMEM_MAXBUF	16384

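/*
 * Each row supplies the mt_magsize (rounds per magazine), mt_align,
 * mt_minbuf, and mt_maxbuf fields of a umem_magtype_t (defined in
 * umem_impl.h); the magazine layer uses these ranges to pick a magazine
 * size appropriate to each cache's buffer size.
 */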
static umem_magtype_t umem_magtype[] = {
	{ 1,	8,	3200,	65536	},
	{ 3,	16,	256,	32768	},
	{ 7,	32,	64,	16384	},
	{ 15,	64,	0,	8192	},
	{ 31,	64,	0,	4096	},
	{ 47,	64,	0,	2048	},
	{ 63,	64,	0,	1024	},
	{ 95,	64,	0,	512	},
	{ 143,	64,	0,	0	},
};

/*
 * umem tunables
 */
uint32_t umem_max_ncpus;	/* # of CPU caches. */

uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */
uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
uint_t umem_abort = 1;		/* whether to abort on error */
uint_t umem_output = 0;		/* whether to write to standard error */
uint_t umem_logging = 0;	/* umem_log_enter() override */
uint32_t umem_mtbf = 0;		/* mean time between failures [default: off] */
size_t umem_transaction_log_size; /* size of transaction log */
size_t umem_content_log_size;	/* size of content log */
size_t umem_failure_log_size;	/* failure log [4 pages per CPU] */
size_t umem_slab_log_size;	/* slab create log [4 pages per CPU] */
size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
size_t umem_lite_minsize = 0;	/* minimum buffer size for UMF_LITE */
size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
size_t umem_maxverify;		/* maximum bytes to inspect in debug routines */
size_t umem_minfirewall;	/* hardware-enforced redzone threshold */

uint_t umem_flags = 0;

mutex_t			umem_init_lock;		/* locks initialization */
cond_t			umem_init_cv;		/* initialization CV */
thread_t		umem_init_thr;		/* thread initializing */
int			umem_init_env_ready;	/* environ pre-initted */
int			umem_ready = UMEM_READY_STARTUP;

static umem_nofail_callback_t *nofail_callback;
static mutex_t		umem_nofail_exit_lock;
static thread_t		umem_nofail_exit_thr;

static umem_cache_t	*umem_slab_cache;
static umem_cache_t	*umem_bufctl_cache;
static umem_cache_t	*umem_bufctl_audit_cache;

mutex_t			umem_flags_lock;

static vmem_t		*heap_arena;
static vmem_alloc_t	*heap_alloc;
static vmem_free_t	*heap_free;

static vmem_t		*umem_internal_arena;
static vmem_t		*umem_cache_arena;
static vmem_t		*umem_hash_arena;
static vmem_t		*umem_log_arena;
static vmem_t		*umem_oversize_arena;
static vmem_t		*umem_va_arena;
static vmem_t		*umem_default_arena;
static vmem_t		*umem_firewall_va_arena;
static vmem_t		*umem_firewall_arena;

vmem_t			*umem_memalign_arena;

umem_log_header_t *umem_transaction_log;
umem_log_header_t *umem_content_log;
umem_log_header_t *umem_failure_log;
umem_log_header_t *umem_slab_log;

extern thread_t _thr_self(void);
#define	CPUHINT()		(_thr_self())
#define	CPUHINT_MAX()		INT_MAX

#define	CPU(mask)		(umem_cpus + (CPUHINT() & (mask)))
static umem_cpu_t umem_startup_cpu = {	/* initial, single, cpu */
	UMEM_CACHE_SIZE(0),
	0
};

static uint32_t umem_cpu_mask = 0;			/* global cpu mask */
static umem_cpu_t *umem_cpus = &umem_startup_cpu;	/* cpu list */

volatile uint32_t umem_reaping;

thread_t		umem_update_thr;
struct timeval		umem_update_next;	/* timeofday of next update */
volatile thread_t	umem_st_update_thr;	/* only used when single-thd */

#define	IN_UPDATE()	(thr_self() == umem_update_thr || \
			    thr_self() == umem_st_update_thr)
#define	IN_REAP()	IN_UPDATE()

mutex_t			umem_update_lock;	/* cache_u{next,prev,flags} */
cond_t			umem_update_cv;

volatile hrtime_t umem_reap_next;	/* min hrtime of next reap */

mutex_t			umem_cache_lock;	/* inter-cache linkage only */

#ifdef UMEM_STANDALONE
umem_cache_t		umem_null_cache;
static const umem_cache_t umem_null_cache_template = {
#else
umem_cache_t		umem_null_cache = {
#endif
	0, 0, 0, 0, 0,
	0, 0,
	0, 0,
	0, 0,
	"invalid_cache",
	0, 0,
	NULL, NULL, NULL, NULL,
	NULL,
	0, 0, 0, 0,
	&umem_null_cache, &umem_null_cache,
	&umem_null_cache, &umem_null_cache,
	0,
	DEFAULTMUTEX,				/* start of slab layer */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	&umem_null_cache.cache_nullslab,
	{
		&umem_null_cache,
		NULL,
		&umem_null_cache.cache_nullslab,
		&umem_null_cache.cache_nullslab,
		NULL,
		-1,
		0
	},
	NULL,
	NULL,
	DEFAULTMUTEX,				/* start of depot layer */
	NULL, {
		NULL, 0, 0, 0, 0
	}, {
		NULL, 0, 0, 0, 0
	}, {
		{
			DEFAULTMUTEX,		/* start of CPU cache */
			0, 0, NULL, NULL, -1, -1, 0
		}
	}
};

#define	ALLOC_TABLE_4 \
	&umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache

#define	ALLOC_TABLE_64 \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4

#define	ALLOC_TABLE_1024 \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64

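/*
 * umem_alloc_table is indexed by (size - 1) >> UMEM_ALIGN_SHIFT.  Every
 * entry initially points at umem_null_cache, so that allocations made
 * before umem_init() completes fail and trigger umem_alloc_retry(); see
 * section 2.2.1 above.  umem_init() replaces the entries with real caches.
 */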
static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024
};


/* Used to constrain audit-log stack traces */
caddr_t			umem_min_stack;
caddr_t			umem_max_stack;


/*
 * we use the _ versions, since we don't want to be cancelled.
 * Actually, this is automatically taken care of by including "mtlib.h".
 */
extern int _cond_wait(cond_t *cv, mutex_t *mutex);

#define	UMERR_MODIFIED	0	/* buffer modified while on freelist */
#define	UMERR_REDZONE	1	/* redzone violation (write past end of buf) */
#define	UMERR_DUPFREE	2	/* freed a buffer twice */
#define	UMERR_BADADDR	3	/* freed a bad (unallocated) address */
#define	UMERR_BADBUFTAG	4	/* buftag corrupted */
#define	UMERR_BADBUFCTL	5	/* bufctl corrupted */
#define	UMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
#define	UMERR_BADSIZE	7	/* alloc size != free size */
#define	UMERR_BADBASE	8	/* buffer base address wrong */

struct {
	hrtime_t	ump_timestamp;	/* timestamp of error */
	int		ump_error;	/* type of umem error (UMERR_*) */
	void		*ump_buffer;	/* buffer that induced abort */
	void		*ump_realbuf;	/* real start address for buffer */
	umem_cache_t	*ump_cache;	/* buffer's cache according to client */
	umem_cache_t	*ump_realcache;	/* actual cache containing buffer */
	umem_slab_t	*ump_slab;	/* slab according to umem_findslab() */
	umem_bufctl_t	*ump_bufctl;	/* bufctl */
} umem_abort_info;

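/*
 * Debugging pattern helpers: copy_pattern() fills a buffer with a 64-bit
 * pattern, verify_pattern() checks that a buffer still holds one (returning
 * the address of the first mismatched word, or NULL), and
 * verify_and_copy_pattern() checks one pattern while replacing it with
 * another, restoring the old pattern if it finds a mismatch.
 */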
static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf = buf_arg;

	while (buf < bufend)
		*buf++ = pattern;
}

static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf;

	for (buf = buf_arg; buf < bufend; buf++)
		if (*buf != pattern)
			return (buf);
	return (NULL);
}

static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
	uint64_t *buf;

	for (buf = buf_arg; buf < bufend; buf++) {
		if (*buf != old) {
			copy_pattern(old, buf_arg,
			    (char *)buf - (char *)buf_arg);
			return (buf);
		}
		*buf = new;
	}

	return (NULL);
}

void
umem_cache_applyall(void (*func)(umem_cache_t *))
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_cache_lock);
	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		func(cp);
	(void) mutex_unlock(&umem_cache_lock);
}

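/*
 * Add asynchronous work to a cache.  If the cache is Active or already on
 * the global update list, the new flags are simply ORed in; otherwise the
 * cache is appended to the update list rooted at umem_null_cache.  The
 * caller must hold umem_update_lock.
 */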
653*7c478bd9Sstevel@tonic-gate static void
654*7c478bd9Sstevel@tonic-gate umem_add_update_unlocked(umem_cache_t *cp, int flags)
655*7c478bd9Sstevel@tonic-gate {
656*7c478bd9Sstevel@tonic-gate 	umem_cache_t *cnext, *cprev;
657*7c478bd9Sstevel@tonic-gate 
658*7c478bd9Sstevel@tonic-gate 	flags &= ~UMU_ACTIVE;
659*7c478bd9Sstevel@tonic-gate 
660*7c478bd9Sstevel@tonic-gate 	if (!flags)
661*7c478bd9Sstevel@tonic-gate 		return;
662*7c478bd9Sstevel@tonic-gate 
663*7c478bd9Sstevel@tonic-gate 	if (cp->cache_uflags & UMU_ACTIVE) {
664*7c478bd9Sstevel@tonic-gate 		cp->cache_uflags |= flags;
665*7c478bd9Sstevel@tonic-gate 	} else {
666*7c478bd9Sstevel@tonic-gate 		if (cp->cache_unext != NULL) {
667*7c478bd9Sstevel@tonic-gate 			ASSERT(cp->cache_uflags != 0);
668*7c478bd9Sstevel@tonic-gate 			cp->cache_uflags |= flags;
669*7c478bd9Sstevel@tonic-gate 		} else {
670*7c478bd9Sstevel@tonic-gate 			ASSERT(cp->cache_uflags == 0);
671*7c478bd9Sstevel@tonic-gate 			cp->cache_uflags = flags;
672*7c478bd9Sstevel@tonic-gate 			cp->cache_unext = cnext = &umem_null_cache;
673*7c478bd9Sstevel@tonic-gate 			cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
674*7c478bd9Sstevel@tonic-gate 			cnext->cache_uprev = cp;
675*7c478bd9Sstevel@tonic-gate 			cprev->cache_unext = cp;
676*7c478bd9Sstevel@tonic-gate 		}
677*7c478bd9Sstevel@tonic-gate 	}
678*7c478bd9Sstevel@tonic-gate }
679*7c478bd9Sstevel@tonic-gate 
680*7c478bd9Sstevel@tonic-gate static void
681*7c478bd9Sstevel@tonic-gate umem_add_update(umem_cache_t *cp, int flags)
682*7c478bd9Sstevel@tonic-gate {
683*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_update_lock);
684*7c478bd9Sstevel@tonic-gate 
685*7c478bd9Sstevel@tonic-gate 	umem_add_update_unlocked(cp, flags);
686*7c478bd9Sstevel@tonic-gate 
687*7c478bd9Sstevel@tonic-gate 	if (!IN_UPDATE())
688*7c478bd9Sstevel@tonic-gate 		(void) cond_broadcast(&umem_update_cv);
689*7c478bd9Sstevel@tonic-gate 
690*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_update_lock);
691*7c478bd9Sstevel@tonic-gate }
692*7c478bd9Sstevel@tonic-gate 
693*7c478bd9Sstevel@tonic-gate /*
694*7c478bd9Sstevel@tonic-gate  * Remove a cache from the update list, waiting for any in-progress work to
695*7c478bd9Sstevel@tonic-gate  * complete first.
696*7c478bd9Sstevel@tonic-gate  */
697*7c478bd9Sstevel@tonic-gate static void
698*7c478bd9Sstevel@tonic-gate umem_remove_updates(umem_cache_t *cp)
699*7c478bd9Sstevel@tonic-gate {
700*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_update_lock);
701*7c478bd9Sstevel@tonic-gate 
702*7c478bd9Sstevel@tonic-gate 	/*
703*7c478bd9Sstevel@tonic-gate 	 * Get it out of the active state
704*7c478bd9Sstevel@tonic-gate 	 */
705*7c478bd9Sstevel@tonic-gate 	while (cp->cache_uflags & UMU_ACTIVE) {
706*7c478bd9Sstevel@tonic-gate 		ASSERT(cp->cache_unext == NULL);
707*7c478bd9Sstevel@tonic-gate 
708*7c478bd9Sstevel@tonic-gate 		cp->cache_uflags |= UMU_NOTIFY;
709*7c478bd9Sstevel@tonic-gate 
710*7c478bd9Sstevel@tonic-gate 		/*
711*7c478bd9Sstevel@tonic-gate 		 * Make sure the update state is sane, before we wait
712*7c478bd9Sstevel@tonic-gate 		 */
713*7c478bd9Sstevel@tonic-gate 		ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0);
714*7c478bd9Sstevel@tonic-gate 		ASSERT(umem_update_thr != thr_self() &&
715*7c478bd9Sstevel@tonic-gate 		    umem_st_update_thr != thr_self());
716*7c478bd9Sstevel@tonic-gate 
717*7c478bd9Sstevel@tonic-gate 		(void) _cond_wait(&umem_update_cv, &umem_update_lock);
718*7c478bd9Sstevel@tonic-gate 	}
719*7c478bd9Sstevel@tonic-gate 	/*
720*7c478bd9Sstevel@tonic-gate 	 * Get it out of the Work Requested state
721*7c478bd9Sstevel@tonic-gate 	 */
722*7c478bd9Sstevel@tonic-gate 	if (cp->cache_unext != NULL) {
723*7c478bd9Sstevel@tonic-gate 		cp->cache_uprev->cache_unext = cp->cache_unext;
724*7c478bd9Sstevel@tonic-gate 		cp->cache_unext->cache_uprev = cp->cache_uprev;
725*7c478bd9Sstevel@tonic-gate 		cp->cache_uprev = cp->cache_unext = NULL;
726*7c478bd9Sstevel@tonic-gate 		cp->cache_uflags = 0;
727*7c478bd9Sstevel@tonic-gate 	}
728*7c478bd9Sstevel@tonic-gate 	/*
729*7c478bd9Sstevel@tonic-gate 	 * Make sure it is in the Inactive state
730*7c478bd9Sstevel@tonic-gate 	 */
731*7c478bd9Sstevel@tonic-gate 	ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
732*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_update_lock);
733*7c478bd9Sstevel@tonic-gate }
734*7c478bd9Sstevel@tonic-gate 
735*7c478bd9Sstevel@tonic-gate static void
736*7c478bd9Sstevel@tonic-gate umem_updateall(int flags)
737*7c478bd9Sstevel@tonic-gate {
738*7c478bd9Sstevel@tonic-gate 	umem_cache_t *cp;
739*7c478bd9Sstevel@tonic-gate 
740*7c478bd9Sstevel@tonic-gate 	/*
741*7c478bd9Sstevel@tonic-gate 	 * NOTE:  To prevent deadlock, umem_cache_lock is always acquired first.
742*7c478bd9Sstevel@tonic-gate 	 *
743*7c478bd9Sstevel@tonic-gate 	 * (umem_add_update is called from things run via umem_cache_applyall)
744*7c478bd9Sstevel@tonic-gate 	 */
745*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_cache_lock);
746*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_update_lock);
747*7c478bd9Sstevel@tonic-gate 
748*7c478bd9Sstevel@tonic-gate 	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
749*7c478bd9Sstevel@tonic-gate 	    cp = cp->cache_next)
750*7c478bd9Sstevel@tonic-gate 		umem_add_update_unlocked(cp, flags);
751*7c478bd9Sstevel@tonic-gate 
752*7c478bd9Sstevel@tonic-gate 	if (!IN_UPDATE())
753*7c478bd9Sstevel@tonic-gate 		(void) cond_broadcast(&umem_update_cv);
754*7c478bd9Sstevel@tonic-gate 
755*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_update_lock);
756*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_cache_lock);
757*7c478bd9Sstevel@tonic-gate }
758*7c478bd9Sstevel@tonic-gate 
759*7c478bd9Sstevel@tonic-gate /*
760*7c478bd9Sstevel@tonic-gate  * Debugging support.  Given a buffer address, find its slab.
761*7c478bd9Sstevel@tonic-gate  */
762*7c478bd9Sstevel@tonic-gate static umem_slab_t *
763*7c478bd9Sstevel@tonic-gate umem_findslab(umem_cache_t *cp, void *buf)
764*7c478bd9Sstevel@tonic-gate {
765*7c478bd9Sstevel@tonic-gate 	umem_slab_t *sp;
766*7c478bd9Sstevel@tonic-gate 
767*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&cp->cache_lock);
768*7c478bd9Sstevel@tonic-gate 	for (sp = cp->cache_nullslab.slab_next;
769*7c478bd9Sstevel@tonic-gate 	    sp != &cp->cache_nullslab; sp = sp->slab_next) {
770*7c478bd9Sstevel@tonic-gate 		if (UMEM_SLAB_MEMBER(sp, buf)) {
771*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&cp->cache_lock);
772*7c478bd9Sstevel@tonic-gate 			return (sp);
773*7c478bd9Sstevel@tonic-gate 		}
774*7c478bd9Sstevel@tonic-gate 	}
775*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&cp->cache_lock);
776*7c478bd9Sstevel@tonic-gate 
777*7c478bd9Sstevel@tonic-gate 	return (NULL);
778*7c478bd9Sstevel@tonic-gate }
779*7c478bd9Sstevel@tonic-gate 
780*7c478bd9Sstevel@tonic-gate static void
781*7c478bd9Sstevel@tonic-gate umem_error(int error, umem_cache_t *cparg, void *bufarg)
782*7c478bd9Sstevel@tonic-gate {
783*7c478bd9Sstevel@tonic-gate 	umem_buftag_t *btp = NULL;
784*7c478bd9Sstevel@tonic-gate 	umem_bufctl_t *bcp = NULL;
785*7c478bd9Sstevel@tonic-gate 	umem_cache_t *cp = cparg;
786*7c478bd9Sstevel@tonic-gate 	umem_slab_t *sp;
787*7c478bd9Sstevel@tonic-gate 	uint64_t *off;
788*7c478bd9Sstevel@tonic-gate 	void *buf = bufarg;
789*7c478bd9Sstevel@tonic-gate 
790*7c478bd9Sstevel@tonic-gate 	int old_logging = umem_logging;
791*7c478bd9Sstevel@tonic-gate 
792*7c478bd9Sstevel@tonic-gate 	umem_logging = 0;	/* stop logging when a bad thing happens */
793*7c478bd9Sstevel@tonic-gate 
794*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_timestamp = gethrtime();
795*7c478bd9Sstevel@tonic-gate 
796*7c478bd9Sstevel@tonic-gate 	sp = umem_findslab(cp, buf);
797*7c478bd9Sstevel@tonic-gate 	if (sp == NULL) {
798*7c478bd9Sstevel@tonic-gate 		for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
799*7c478bd9Sstevel@tonic-gate 		    cp = cp->cache_prev) {
800*7c478bd9Sstevel@tonic-gate 			if ((sp = umem_findslab(cp, buf)) != NULL)
801*7c478bd9Sstevel@tonic-gate 				break;
802*7c478bd9Sstevel@tonic-gate 		}
803*7c478bd9Sstevel@tonic-gate 	}
804*7c478bd9Sstevel@tonic-gate 
805*7c478bd9Sstevel@tonic-gate 	if (sp == NULL) {
806*7c478bd9Sstevel@tonic-gate 		cp = NULL;
807*7c478bd9Sstevel@tonic-gate 		error = UMERR_BADADDR;
808*7c478bd9Sstevel@tonic-gate 	} else {
809*7c478bd9Sstevel@tonic-gate 		if (cp != cparg)
810*7c478bd9Sstevel@tonic-gate 			error = UMERR_BADCACHE;
811*7c478bd9Sstevel@tonic-gate 		else
812*7c478bd9Sstevel@tonic-gate 			buf = (char *)bufarg - ((uintptr_t)bufarg -
813*7c478bd9Sstevel@tonic-gate 			    (uintptr_t)sp->slab_base) % cp->cache_chunksize;
814*7c478bd9Sstevel@tonic-gate 		if (buf != bufarg)
815*7c478bd9Sstevel@tonic-gate 			error = UMERR_BADBASE;
816*7c478bd9Sstevel@tonic-gate 		if (cp->cache_flags & UMF_BUFTAG)
817*7c478bd9Sstevel@tonic-gate 			btp = UMEM_BUFTAG(cp, buf);
818*7c478bd9Sstevel@tonic-gate 		if (cp->cache_flags & UMF_HASH) {
819*7c478bd9Sstevel@tonic-gate 			(void) mutex_lock(&cp->cache_lock);
820*7c478bd9Sstevel@tonic-gate 			for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
821*7c478bd9Sstevel@tonic-gate 				if (bcp->bc_addr == buf)
822*7c478bd9Sstevel@tonic-gate 					break;
823*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&cp->cache_lock);
824*7c478bd9Sstevel@tonic-gate 			if (bcp == NULL && btp != NULL)
825*7c478bd9Sstevel@tonic-gate 				bcp = btp->bt_bufctl;
826*7c478bd9Sstevel@tonic-gate 			if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
827*7c478bd9Sstevel@tonic-gate 			    NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) ||
828*7c478bd9Sstevel@tonic-gate 			    bcp->bc_addr != buf) {
829*7c478bd9Sstevel@tonic-gate 				error = UMERR_BADBUFCTL;
830*7c478bd9Sstevel@tonic-gate 				bcp = NULL;
831*7c478bd9Sstevel@tonic-gate 			}
832*7c478bd9Sstevel@tonic-gate 		}
833*7c478bd9Sstevel@tonic-gate 	}
834*7c478bd9Sstevel@tonic-gate 
835*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_error = error;
836*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_buffer = bufarg;
837*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_realbuf = buf;
838*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_cache = cparg;
839*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_realcache = cp;
840*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_slab = sp;
841*7c478bd9Sstevel@tonic-gate 	umem_abort_info.ump_bufctl = bcp;
842*7c478bd9Sstevel@tonic-gate 
843*7c478bd9Sstevel@tonic-gate 	umem_printf("umem allocator: ");
844*7c478bd9Sstevel@tonic-gate 
845*7c478bd9Sstevel@tonic-gate 	switch (error) {
846*7c478bd9Sstevel@tonic-gate 
847*7c478bd9Sstevel@tonic-gate 	case UMERR_MODIFIED:
848*7c478bd9Sstevel@tonic-gate 		umem_printf("buffer modified after being freed\n");
849*7c478bd9Sstevel@tonic-gate 		off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
		if (off == NULL)	/* shouldn't happen */
			off = buf;
		umem_printf("modification occurred at offset 0x%lx "
		    "(0x%llx replaced by 0x%llx)\n",
		    (uintptr_t)off - (uintptr_t)buf,
		    (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off);
		break;

	case UMERR_REDZONE:
		umem_printf("redzone violation: write past end of buffer\n");
		break;

	case UMERR_BADADDR:
		umem_printf("invalid free: buffer not in cache\n");
		break;

	case UMERR_DUPFREE:
		umem_printf("duplicate free: buffer freed twice\n");
		break;

	case UMERR_BADBUFTAG:
		umem_printf("boundary tag corrupted\n");
		umem_printf("bcp ^ bxstat = %lx, should be %lx\n",
		    (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
		    UMEM_BUFTAG_FREE);
		break;

	case UMERR_BADBUFCTL:
		umem_printf("bufctl corrupted\n");
		break;

	case UMERR_BADCACHE:
		umem_printf("buffer freed to wrong cache\n");
		umem_printf("buffer was allocated from %s,\n", cp->cache_name);
		umem_printf("caller attempting free to %s.\n",
		    cparg->cache_name);
		break;

	case UMERR_BADSIZE:
		umem_printf("bad free: free size (%u) != alloc size (%u)\n",
		    UMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
		    UMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
		break;

	case UMERR_BADBASE:
		umem_printf("bad free: free address (%p) != alloc address "
		    "(%p)\n", bufarg, buf);
		break;
	}

	umem_printf("buffer=%p  bufctl=%p  cache: %s\n",
	    bufarg, (void *)bcp, cparg->cache_name);

	if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
	    error != UMERR_BADBUFCTL) {
		int d;
		timespec_t ts;
		hrtime_t diff;
		umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp;

		diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp;
		ts.tv_sec = diff / NANOSEC;
		ts.tv_nsec = diff % NANOSEC;

		umem_printf("previous transaction on buffer %p:\n", buf);
		umem_printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
		    (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
		    (void *)sp, cp->cache_name);
		for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) {
			(void) print_sym((void *)bcap->bc_stack[d]);
			umem_printf("\n");
		}
	}

	umem_err_recoverable("umem: heap corruption detected");

	umem_logging = old_logging;	/* resume logging */
}

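/*
 * Install a callback to be invoked when a UMEM_NOFAIL allocation cannot
 * be satisfied.  A minimal sketch of caller code (my_nofail_handler and
 * release_cached_memory() are hypothetical, not part of libumem):
 *
 *	static int
 *	my_nofail_handler(void)
 *	{
 *		if (release_cached_memory() > 0)
 *			return (UMEM_CALLBACK_RETRY);
 *		return (UMEM_CALLBACK_EXIT(1));
 *	}
 *
 *	umem_nofail_callback(my_nofail_handler);
 */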
void
umem_nofail_callback(umem_nofail_callback_t *cb)
{
	nofail_callback = cb;
}

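/*
 * Common failure handling for the allocation paths.  Returns nonzero if
 * the caller should retry the allocation:  either because the library
 * had not yet been initialized (first use), or because a UMEM_NOFAIL
 * callback asked for a retry.  Otherwise carries out the UMEM_NOFAIL
 * exit semantics, or returns zero for ordinary failure.
 */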
static int
umem_alloc_retry(umem_cache_t *cp, int umflag)
{
	if (cp == &umem_null_cache) {
		if (umem_init())
			return (1);				/* retry */
		/*
		 * Initialization failed.  Do normal failure processing.
		 */
	}
	if (umflag & UMEM_NOFAIL) {
		int def_result = UMEM_CALLBACK_EXIT(255);
		int result = def_result;
		umem_nofail_callback_t *callback = nofail_callback;

		if (callback != NULL)
			result = callback();

		if (result == UMEM_CALLBACK_RETRY)
			return (1);

		if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) {
			log_message("nofail callback returned %x\n", result);
			result = def_result;
		}

		/*
		 * Only one thread may call exit():  the first one takes
		 * umem_nofail_exit_lock and never releases it, so any
		 * other failing thread blocks below until the process
		 * exits.  If the exiting thread re-enters (e.g. from an
		 * exit handler that allocates with UMEM_NOFAIL), panic
		 * rather than deadlock on the lock we already hold.
		 */
		if (umem_nofail_exit_thr == thr_self())
			umem_panic("recursive UMEM_CALLBACK_EXIT()\n");

		(void) mutex_lock(&umem_nofail_exit_lock);
		umem_nofail_exit_thr = thr_self();
		exit(result & 0xFF);
		/*NOTREACHED*/
	}
	return (0);
}

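/*
 * Allocate and initialize a transaction log of roughly logsize bytes,
 * divided into four chunks per CPU.  Each CPU starts with a private
 * chunk; the remaining chunks form a circular free list that CPUs draw
 * from as their chunks fill.  Returns NULL if logsize is zero or any
 * allocation fails.
 */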
static umem_log_header_t *
umem_log_init(size_t logsize)
{
	umem_log_header_t *lhp;
	int nchunks = 4 * umem_max_ncpus;
	size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]);
	int i;

	if (logsize == 0)
		return (NULL);

	/*
	 * Make sure that lhp->lh_cpu[] is nicely aligned
	 * to prevent false sharing of cache lines.
	 */
	lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN);
	lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
	    NULL, NULL, VM_NOSLEEP);
	if (lhp == NULL)
		goto fail;

	bzero(lhp, lhsize);

	(void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL);
	lhp->lh_nchunks = nchunks;
	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE);
	if (lhp->lh_chunksize == 0)
		lhp->lh_chunksize = PAGESIZE;

	lhp->lh_base = vmem_alloc(umem_log_arena,
	    lhp->lh_chunksize * nchunks, VM_NOSLEEP);
	if (lhp->lh_base == NULL)
		goto fail;

	lhp->lh_free = vmem_alloc(umem_log_arena,
	    nchunks * sizeof (int), VM_NOSLEEP);
	if (lhp->lh_free == NULL)
		goto fail;

	bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);

	for (i = 0; i < umem_max_ncpus; i++) {
		umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
		(void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL);
		clhp->clh_chunk = i;
	}

	for (i = umem_max_ncpus; i < nchunks; i++)
		lhp->lh_free[i] = i;

	lhp->lh_head = umem_max_ncpus;
	lhp->lh_tail = 0;

	return (lhp);

fail:
	if (lhp != NULL) {
		if (lhp->lh_base != NULL)
			vmem_free(umem_log_arena, lhp->lh_base,
			    lhp->lh_chunksize * nchunks);

		vmem_xfree(umem_log_arena, lhp, lhsize);
	}
	return (NULL);
}

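/*
 * Copy size bytes of data into the current CPU's log chunk, taking a
 * fresh chunk from the global free list (and retiring the full one)
 * when the current chunk has insufficient space.  Returns a pointer to
 * the logged copy, or NULL if logging is disabled.
 */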
static void *
umem_log_enter(umem_log_header_t *lhp, void *data, size_t size)
{
	void *logspace;
	umem_cpu_log_header_t *clhp;

	if (lhp == NULL || umem_logging == 0)
		return (NULL);

	clhp = &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];

	(void) mutex_lock(&clhp->clh_lock);
	clhp->clh_hits++;
	if (size > clhp->clh_avail) {
		(void) mutex_lock(&lhp->lh_lock);
		lhp->lh_hits++;
		lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
		lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
		clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
		lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
		clhp->clh_current = lhp->lh_base +
		    clhp->clh_chunk * lhp->lh_chunksize;
		clhp->clh_avail = lhp->lh_chunksize;
		if (size > lhp->lh_chunksize)
			size = lhp->lh_chunksize;
		(void) mutex_unlock(&lhp->lh_lock);
	}
	logspace = clhp->clh_current;
	clhp->clh_current += size;
	clhp->clh_avail -= size;
	bcopy(data, logspace, size);
	(void) mutex_unlock(&clhp->clh_lock);
	return (logspace);
}

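/*
 * Record a transaction in the given bufctl's audit information:  the
 * current time, thread, and stack trace, plus a copy of the record in
 * the transaction log (if logging is enabled).
 */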
#define	UMEM_AUDIT(lp, cp, bcp)						\
{									\
	umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp);	\
	_bcp->bc_timestamp = gethrtime();				\
	_bcp->bc_thread = thr_self();					\
	_bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth,	\
	    ((cp) != NULL) && ((cp)->cache_flags & UMF_CHECKSIGNAL));	\
	_bcp->bc_lastlog = umem_log_enter((lp), _bcp,			\
	    UMEM_BUFCTL_AUDIT_SIZE);					\
}

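/*
 * Record an address-only event (e.g. slab creation or allocation
 * failure) in the given log, using a scratch audit record on the
 * caller's stack.
 */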
static void
umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
    umem_slab_t *sp, void *addr)
{
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE);
	bcp->bc_addr = addr;
	bcp->bc_slab = sp;
	bcp->bc_cache = cp;
	UMEM_AUDIT(lp, cp, bcp);
}

/*
 * Create a new slab for cache cp.
 */
static umem_slab_t *
umem_slab_create(umem_cache_t *cp, int umflag)
{
	size_t slabsize = cp->cache_slabsize;
	size_t chunksize = cp->cache_chunksize;
	int cache_flags = cp->cache_flags;
	size_t color, chunks;
	char *buf, *slab;
	umem_slab_t *sp;
	umem_bufctl_t *bcp;
	vmem_t *vmp = cp->cache_arena;

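	/*
	 * Slab coloring:  start each new slab's buffers at a slightly
	 * different offset, so that buffers from different slabs land
	 * in different cache lines and hardware cache conflicts are
	 * spread evenly (see the slab paper cited at the top of this
	 * file).
	 */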
	color = cp->cache_color + cp->cache_align;
	if (color > cp->cache_maxcolor)
		color = cp->cache_mincolor;
	cp->cache_color = color;

	slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag));

	if (slab == NULL)
		goto vmem_alloc_failure;

	ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);

	if (!(cp->cache_cflags & UMC_NOTOUCH) &&
	    (cp->cache_flags & UMF_DEADBEEF))
		copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize);

	if (cache_flags & UMF_HASH) {
		if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL)
			goto slab_alloc_failure;
		chunks = (slabsize - color) / chunksize;
	} else {
		sp = UMEM_SLAB(cp, slab);
		chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize;
	}

	sp->slab_cache	= cp;
	sp->slab_head	= NULL;
	sp->slab_refcnt	= 0;
	sp->slab_base	= buf = slab + color;
	sp->slab_chunks	= chunks;

	ASSERT(chunks > 0);
	while (chunks-- != 0) {
		if (cache_flags & UMF_HASH) {
			bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
			if (bcp == NULL)
				goto bufctl_alloc_failure;
			if (cache_flags & UMF_AUDIT) {
				umem_bufctl_audit_t *bcap =
				    (umem_bufctl_audit_t *)bcp;
				bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE);
				bcap->bc_cache = cp;
			}
			bcp->bc_addr = buf;
			bcp->bc_slab = sp;
		} else {
			bcp = UMEM_BUFCTL(cp, buf);
		}
		if (cache_flags & UMF_BUFTAG) {
			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
			btp->bt_redzone = UMEM_REDZONE_PATTERN;
			btp->bt_bufctl = bcp;
			btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
			if (cache_flags & UMF_DEADBEEF) {
				copy_pattern(UMEM_FREE_PATTERN, buf,
				    cp->cache_verify);
			}
		}
		bcp->bc_next = sp->slab_head;
		sp->slab_head = bcp;
		buf += chunksize;
	}

	umem_log_event(umem_slab_log, cp, sp, slab);

	return (sp);

bufctl_alloc_failure:

	while ((bcp = sp->slab_head) != NULL) {
		sp->slab_head = bcp->bc_next;
		_umem_cache_free(cp->cache_bufctl_cache, bcp);
	}
	_umem_cache_free(umem_slab_cache, sp);

slab_alloc_failure:

	vmem_free(vmp, slab, slabsize);

vmem_alloc_failure:

	umem_log_event(umem_failure_log, cp, NULL, NULL);
	atomic_add_64(&cp->cache_alloc_fail, 1);

	return (NULL);
}

/*
 * Destroy a slab.
 */
static void
umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
{
	vmem_t *vmp = cp->cache_arena;
	void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);

	if (cp->cache_flags & UMF_HASH) {
		umem_bufctl_t *bcp;
		while ((bcp = sp->slab_head) != NULL) {
			sp->slab_head = bcp->bc_next;
			_umem_cache_free(cp->cache_bufctl_cache, bcp);
		}
		_umem_cache_free(umem_slab_cache, sp);
	}
	vmem_free(vmp, slab, cp->cache_slabsize);
}

/*
 * Allocate a raw (unconstructed) buffer from cp's slab layer.
 */
static void *
umem_slab_alloc(umem_cache_t *cp, int umflag)
{
	umem_bufctl_t *bcp, **hash_bucket;
	umem_slab_t *sp;
	void *buf;

	(void) mutex_lock(&cp->cache_lock);
	cp->cache_slab_alloc++;
	sp = cp->cache_freelist;
	ASSERT(sp->slab_cache == cp);
	if (sp->slab_head == NULL) {
		/*
		 * The freelist is empty.  Create a new slab.
		 */
		(void) mutex_unlock(&cp->cache_lock);
		if (cp == &umem_null_cache)
			return (NULL);
		if ((sp = umem_slab_create(cp, umflag)) == NULL)
			return (NULL);
		(void) mutex_lock(&cp->cache_lock);
		cp->cache_slab_create++;
		if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
			cp->cache_bufmax = cp->cache_buftotal;
		sp->slab_next = cp->cache_freelist;
		sp->slab_prev = cp->cache_freelist->slab_prev;
		sp->slab_next->slab_prev = sp;
		sp->slab_prev->slab_next = sp;
		cp->cache_freelist = sp;
	}

	sp->slab_refcnt++;
	ASSERT(sp->slab_refcnt <= sp->slab_chunks);

	/*
	 * If we're taking the last buffer in the slab,
	 * remove the slab from the cache's freelist.
	 */
	bcp = sp->slab_head;
	if ((sp->slab_head = bcp->bc_next) == NULL) {
		cp->cache_freelist = sp->slab_next;
		ASSERT(sp->slab_refcnt == sp->slab_chunks);
	}

	if (cp->cache_flags & UMF_HASH) {
		/*
		 * Add buffer to allocated-address hash table.
		 */
		buf = bcp->bc_addr;
		hash_bucket = UMEM_HASH(cp, buf);
		bcp->bc_next = *hash_bucket;
		*hash_bucket = bcp;
		if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
			UMEM_AUDIT(umem_transaction_log, cp, bcp);
		}
	} else {
		buf = UMEM_BUF(cp, bcp);
	}

	ASSERT(UMEM_SLAB_MEMBER(sp, buf));

	(void) mutex_unlock(&cp->cache_lock);

	return (buf);
}

/*
 * Free a raw (unconstructed) buffer to cp's slab layer.
 */
static void
umem_slab_free(umem_cache_t *cp, void *buf)
{
	umem_slab_t *sp;
	umem_bufctl_t *bcp, **prev_bcpp;

	ASSERT(buf != NULL);

	(void) mutex_lock(&cp->cache_lock);
	cp->cache_slab_free++;

	if (cp->cache_flags & UMF_HASH) {
		/*
		 * Look up buffer in allocated-address hash table.
		 */
		prev_bcpp = UMEM_HASH(cp, buf);
		while ((bcp = *prev_bcpp) != NULL) {
			if (bcp->bc_addr == buf) {
				*prev_bcpp = bcp->bc_next;
				sp = bcp->bc_slab;
				break;
			}
			cp->cache_lookup_depth++;
			prev_bcpp = &bcp->bc_next;
		}
	} else {
		bcp = UMEM_BUFCTL(cp, buf);
		sp = UMEM_SLAB(cp, buf);
	}

	if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
		(void) mutex_unlock(&cp->cache_lock);
		umem_error(UMERR_BADADDR, cp, buf);
		return;
	}

	if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
		if (cp->cache_flags & UMF_CONTENTS)
			((umem_bufctl_audit_t *)bcp)->bc_contents =
			    umem_log_enter(umem_content_log, buf,
			    cp->cache_contents);
		UMEM_AUDIT(umem_transaction_log, cp, bcp);
	}

	/*
	 * If this slab isn't currently on the freelist, put it there.
	 */
	if (sp->slab_head == NULL) {
		ASSERT(sp->slab_refcnt == sp->slab_chunks);
		ASSERT(cp->cache_freelist != sp);
		sp->slab_next->slab_prev = sp->slab_prev;
		sp->slab_prev->slab_next = sp->slab_next;
		sp->slab_next = cp->cache_freelist;
		sp->slab_prev = cp->cache_freelist->slab_prev;
		sp->slab_next->slab_prev = sp;
		sp->slab_prev->slab_next = sp;
		cp->cache_freelist = sp;
	}

	bcp->bc_next = sp->slab_head;
	sp->slab_head = bcp;

	ASSERT(sp->slab_refcnt >= 1);
	if (--sp->slab_refcnt == 0) {
		/*
		 * There are no outstanding allocations from this slab,
		 * so we can reclaim the memory.
		 */
		sp->slab_next->slab_prev = sp->slab_prev;
		sp->slab_prev->slab_next = sp->slab_next;
		if (sp == cp->cache_freelist)
			cp->cache_freelist = sp->slab_next;
		cp->cache_slab_destroy++;
		cp->cache_buftotal -= sp->slab_chunks;
		(void) mutex_unlock(&cp->cache_lock);
		umem_slab_destroy(cp, sp);
		return;
	}
	(void) mutex_unlock(&cp->cache_lock);
}

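/*
 * Debug-path allocation:  verify the buffer tag and (under UMF_DEADBEEF)
 * the free pattern, optionally inject a failure according to the cache's
 * mtbf setting, then apply the constructor.  Returns -1 on any failure
 * (after reporting the error or cleaning up), 0 on success.
 */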
static int
umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
{
	umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
	umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
	uint32_t mtbf;
	int flags_nfatal;

	if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
		umem_error(UMERR_BADBUFTAG, cp, buf);
		return (-1);
	}

	btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC;

	if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
		umem_error(UMERR_BADBUFCTL, cp, buf);
		return (-1);
	}

	btp->bt_redzone = UMEM_REDZONE_PATTERN;

	if (cp->cache_flags & UMF_DEADBEEF) {
		if (verify_and_copy_pattern(UMEM_FREE_PATTERN,
		    UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
			umem_error(UMERR_MODIFIED, cp, buf);
			return (-1);
		}
	}

	if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
	    gethrtime() % mtbf == 0 &&
	    (umflag & (UMEM_FATAL_FLAGS)) == 0) {
		umem_log_event(umem_failure_log, cp, NULL, NULL);
	} else {
		mtbf = 0;
	}

	/*
	 * We do not pass fatal flags on to the constructor.  This prevents
	 * leaking buffers in the event of a subordinate constructor failing.
	 */
	flags_nfatal = UMEM_DEFAULT;
	if (mtbf || (cp->cache_constructor != NULL &&
	    cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
		atomic_add_64(&cp->cache_alloc_fail, 1);
		btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
		copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
		umem_slab_free(cp, buf);
		return (-1);
	}

	if (cp->cache_flags & UMF_AUDIT) {
		UMEM_AUDIT(umem_transaction_log, cp, bcp);
	}

	return (0);
}

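/*
 * Debug-path free:  detect duplicate frees, redzone violations, and
 * corrupted buffer tags; record the transaction if auditing; apply the
 * destructor and (under UMF_DEADBEEF) restore the free pattern.
 * Returns -1 if an error was reported, 0 on success.
 */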
static int
umem_cache_free_debug(umem_cache_t *cp, void *buf)
{
	umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
	umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
	umem_slab_t *sp;

	if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) {
		if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
			umem_error(UMERR_DUPFREE, cp, buf);
			return (-1);
		}
		sp = umem_findslab(cp, buf);
		if (sp == NULL || sp->slab_cache != cp)
			umem_error(UMERR_BADADDR, cp, buf);
		else
			umem_error(UMERR_REDZONE, cp, buf);
		return (-1);
	}

	btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;

	if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
		umem_error(UMERR_BADBUFCTL, cp, buf);
		return (-1);
	}

	if (btp->bt_redzone != UMEM_REDZONE_PATTERN) {
		umem_error(UMERR_REDZONE, cp, buf);
		return (-1);
	}

	if (cp->cache_flags & UMF_AUDIT) {
		if (cp->cache_flags & UMF_CONTENTS)
			bcp->bc_contents = umem_log_enter(umem_content_log,
			    buf, cp->cache_contents);
		UMEM_AUDIT(umem_transaction_log, cp, bcp);
	}

	if (cp->cache_destructor != NULL)
		cp->cache_destructor(buf, cp->cache_private);

	if (cp->cache_flags & UMF_DEADBEEF)
		copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);

	return (0);
}

/*
 * Free each object in magazine mp to cp's slab layer, and free mp itself.
 */
static void
umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
{
	int round;

	ASSERT(cp->cache_next == NULL || IN_UPDATE());

	for (round = 0; round < nrounds; round++) {
		void *buf = mp->mag_round[round];

		if ((cp->cache_flags & UMF_DEADBEEF) &&
		    verify_pattern(UMEM_FREE_PATTERN, buf,
		    cp->cache_verify) != NULL) {
			umem_error(UMERR_MODIFIED, cp, buf);
			continue;
		}

		if (!(cp->cache_flags & UMF_BUFTAG) &&
		    cp->cache_destructor != NULL)
			cp->cache_destructor(buf, cp->cache_private);

		umem_slab_free(cp, buf);
	}
	ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
	_umem_cache_free(cp->cache_magtype->mt_cache, mp);
}

/*
 * Allocate a magazine from the depot.
 */
static umem_magazine_t *
umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
{
	umem_magazine_t *mp;

	/*
	 * If we can't get the depot lock without contention,
	 * update our contention count.  We use the depot
	 * contention rate to determine whether we need to
	 * increase the magazine size for better scalability.
	 */
	if (mutex_trylock(&cp->cache_depot_lock) != 0) {
		(void) mutex_lock(&cp->cache_depot_lock);
		cp->cache_depot_contention++;
	}

	if ((mp = mlp->ml_list) != NULL) {
		ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
		mlp->ml_list = mp->mag_next;
		if (--mlp->ml_total < mlp->ml_min)
			mlp->ml_min = mlp->ml_total;
		mlp->ml_alloc++;
	}

	(void) mutex_unlock(&cp->cache_depot_lock);

	return (mp);
}

/*
 * Free a magazine to the depot.
 */
static void
umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
{
	(void) mutex_lock(&cp->cache_depot_lock);
	ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
	mp->mag_next = mlp->ml_list;
	mlp->ml_list = mp;
	mlp->ml_total++;
	(void) mutex_unlock(&cp->cache_depot_lock);
}

/*
 * Update the working set statistics for cp's depot.
 */
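/*
 * ml_min tracks the shortest the magazine list has been since the last
 * update.  Any magazines beyond that minimum went unused for the entire
 * interval, so the next umem_depot_ws_reap() may reclaim up to
 * ml_reaplimit (the previous interval's minimum) of them.
 */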
static void
umem_depot_ws_update(umem_cache_t *cp)
{
	(void) mutex_lock(&cp->cache_depot_lock);
	cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
	cp->cache_full.ml_min = cp->cache_full.ml_total;
	cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
	cp->cache_empty.ml_min = cp->cache_empty.ml_total;
	(void) mutex_unlock(&cp->cache_depot_lock);
}

/*
 * Reap all magazines that have fallen out of the depot's working set.
 */
static void
umem_depot_ws_reap(umem_cache_t *cp)
{
	long reap;
	umem_magazine_t *mp;

	ASSERT(cp->cache_next == NULL || IN_REAP());

	reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
	while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
		umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);

	reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
	while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
		umem_magazine_destroy(cp, mp, 0);
}

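/*
 * Make mp the CPU's loaded magazine (with the given round count),
 * demoting the currently loaded magazine to previously-loaded.
 */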
static void
umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds)
{
	ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
	    (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
	ASSERT(ccp->cc_magsize > 0);

	ccp->cc_ploaded = ccp->cc_loaded;
	ccp->cc_prounds = ccp->cc_rounds;
	ccp->cc_loaded = mp;
	ccp->cc_rounds = rounds;
}

/*
 * Allocate a constructed object from cache cp.
 */
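/*
 * A minimal sketch of typical caller code (my_obj_t, my_ctor, and
 * my_dtor are hypothetical; see umem_cache_create(3MALLOC) for the
 * full interface):
 *
 *	umem_cache_t *cache = umem_cache_create("my_objs",
 *	    sizeof (my_obj_t), 0, my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *	my_obj_t *obj = umem_cache_alloc(cache, UMEM_DEFAULT);
 *	...
 *	umem_cache_free(cache, obj);
 */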
#pragma weak umem_cache_alloc = _umem_cache_alloc
void *
_umem_cache_alloc(umem_cache_t *cp, int umflag)
{
	umem_cpu_cache_t *ccp;
	umem_magazine_t *fmp;
	void *buf;
	int flags_nfatal;

retry:
	ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
	(void) mutex_lock(&ccp->cc_lock);
	for (;;) {
		/*
		 * If there's an object available in the current CPU's
		 * loaded magazine, just take it and return.
		 */
		if (ccp->cc_rounds > 0) {
			buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
			ccp->cc_alloc++;
			(void) mutex_unlock(&ccp->cc_lock);
			if ((ccp->cc_flags & UMF_BUFTAG) &&
			    umem_cache_alloc_debug(cp, buf, umflag) == -1) {
				if (umem_alloc_retry(cp, umflag)) {
					goto retry;
				}

				return (NULL);
			}
			return (buf);
		}

		/*
		 * The loaded magazine is empty.  If the previously loaded
		 * magazine was full, exchange them and try again.
		 */
		if (ccp->cc_prounds > 0) {
			umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
			continue;
		}

		/*
		 * If the magazine layer is disabled, break out now.
		 */
		if (ccp->cc_magsize == 0)
			break;

		/*
		 * Try to get a full magazine from the depot.
		 */
		fmp = umem_depot_alloc(cp, &cp->cache_full);
		if (fmp != NULL) {
			if (ccp->cc_ploaded != NULL)
				umem_depot_free(cp, &cp->cache_empty,
				    ccp->cc_ploaded);
			umem_cpu_reload(ccp, fmp, ccp->cc_magsize);
			continue;
		}

		/*
		 * There are no full magazines in the depot,
		 * so fall through to the slab layer.
		 */
		break;
	}
	(void) mutex_unlock(&ccp->cc_lock);

	/*
	 * We couldn't allocate a constructed object from the magazine layer,
	 * so get a raw buffer from the slab layer and apply its constructor.
	 */
	buf = umem_slab_alloc(cp, umflag);

	if (buf == NULL) {
		if (cp == &umem_null_cache)
			return (NULL);
		if (umem_alloc_retry(cp, umflag)) {
			goto retry;
		}

		return (NULL);
	}

	if (cp->cache_flags & UMF_BUFTAG) {
		/*
		 * Let umem_cache_alloc_debug() apply the constructor for us.
		 */
		if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
			if (umem_alloc_retry(cp, umflag)) {
				goto retry;
			}
			return (NULL);
		}
		return (buf);
	}

	/*
	 * We do not pass fatal flags on to the constructor.  This prevents
	 * leaking buffers in the event of a subordinate constructor failing.
	 */
	flags_nfatal = UMEM_DEFAULT;
	if (cp->cache_constructor != NULL &&
	    cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
		atomic_add_64(&cp->cache_alloc_fail, 1);
		umem_slab_free(cp, buf);

		if (umem_alloc_retry(cp, umflag)) {
			goto retry;
		}
		return (NULL);
	}

	return (buf);
}

/*
 * Free a constructed object to cache cp.
 */
#pragma weak umem_cache_free = _umem_cache_free
void
_umem_cache_free(umem_cache_t *cp, void *buf)
{
	umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
	umem_magazine_t *emp;
	umem_magtype_t *mtp;

	if (ccp->cc_flags & UMF_BUFTAG)
		if (umem_cache_free_debug(cp, buf) == -1)
			return;

	(void) mutex_lock(&ccp->cc_lock);
	for (;;) {
		/*
		 * If there's a slot available in the current CPU's
		 * loaded magazine, just put the object there and return.
		 */
		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
			ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
			ccp->cc_free++;
			(void) mutex_unlock(&ccp->cc_lock);
			return;
		}

		/*
		 * The loaded magazine is full.  If the previously loaded
		 * magazine was empty, exchange them and try again.
		 */
		if (ccp->cc_prounds == 0) {
			umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
			continue;
		}

		/*
		 * If the magazine layer is disabled, break out now.
		 */
		if (ccp->cc_magsize == 0)
			break;

		/*
		 * Try to get an empty magazine from the depot.
		 */
		emp = umem_depot_alloc(cp, &cp->cache_empty);
		if (emp != NULL) {
			if (ccp->cc_ploaded != NULL)
				umem_depot_free(cp, &cp->cache_full,
				    ccp->cc_ploaded);
			umem_cpu_reload(ccp, emp, 0);
			continue;
		}

		/*
		 * There are no empty magazines in the depot,
		 * so try to allocate a new one.  We must drop all locks
		 * across umem_cache_alloc() because lower layers may
		 * attempt to allocate from this cache.
		 */
		mtp = cp->cache_magtype;
		(void) mutex_unlock(&ccp->cc_lock);
		emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT);
		(void) mutex_lock(&ccp->cc_lock);

		if (emp != NULL) {
			/*
			 * We successfully allocated an empty magazine.
			 * However, we had to drop ccp->cc_lock to do it,
			 * so the cache's magazine size may have changed.
			 * If so, free the magazine and try again.
			 */
			if (ccp->cc_magsize != mtp->mt_magsize) {
				(void) mutex_unlock(&ccp->cc_lock);
				_umem_cache_free(mtp->mt_cache, emp);
				(void) mutex_lock(&ccp->cc_lock);
				continue;
			}

			/*
			 * We got a magazine of the right size.  Add it to
			 * the depot and try the whole dance again.
			 */
			umem_depot_free(cp, &cp->cache_empty, emp);
			continue;
		}

		/*
		 * We couldn't allocate an empty magazine,
		 * so fall through to the slab layer.
		 */
		break;
	}
	(void) mutex_unlock(&ccp->cc_lock);

	/*
	 * We couldn't free our constructed object to the magazine layer,
	 * so apply its destructor and free it to the slab layer.
	 * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug()
	 * will have already applied the destructor.
	 */
	if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
		cp->cache_destructor(buf, cp->cache_private);

	umem_slab_free(cp, buf);
}

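/*
 * umem_zalloc() is umem_alloc() followed by a bzero() of the buffer;
 * for the small-allocation caches it also maintains the redzone byte
 * and encoded size that the debug free path checks.
 */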
#pragma weak umem_zalloc = _umem_zalloc
void *
_umem_zalloc(size_t size, int umflag)
{
	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
	void *buf;

retry:
	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
		umem_cache_t *cp = umem_alloc_table[index];
		buf = _umem_cache_alloc(cp, umflag);
		if (buf != NULL) {
			if (cp->cache_flags & UMF_BUFTAG) {
				umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
				((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
				((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
			}
			bzero(buf, size);
		} else if (umem_alloc_retry(cp, umflag))
			goto retry;
	} else {
		buf = _umem_alloc(size, umflag);	/* handles failure */
		if (buf != NULL)
			bzero(buf, size);
	}
	return (buf);
}

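/*
 * Allocations up to UMEM_MAXBUF bytes are satisfied from the fixed-size
 * caches in umem_alloc_table[]; anything larger comes straight from the
 * oversize vmem arena.
 */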
1859*7c478bd9Sstevel@tonic-gate #pragma weak umem_alloc = _umem_alloc
1860*7c478bd9Sstevel@tonic-gate void *
1861*7c478bd9Sstevel@tonic-gate _umem_alloc(size_t size, int umflag)
1862*7c478bd9Sstevel@tonic-gate {
1863*7c478bd9Sstevel@tonic-gate 	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
1864*7c478bd9Sstevel@tonic-gate 	void *buf;
1865*7c478bd9Sstevel@tonic-gate umem_alloc_retry:
1866*7c478bd9Sstevel@tonic-gate 	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
1867*7c478bd9Sstevel@tonic-gate 		umem_cache_t *cp = umem_alloc_table[index];
1868*7c478bd9Sstevel@tonic-gate 		buf = _umem_cache_alloc(cp, umflag);
1869*7c478bd9Sstevel@tonic-gate 		if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
1870*7c478bd9Sstevel@tonic-gate 			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1871*7c478bd9Sstevel@tonic-gate 			((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
1872*7c478bd9Sstevel@tonic-gate 			((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
1873*7c478bd9Sstevel@tonic-gate 		}
1874*7c478bd9Sstevel@tonic-gate 		if (buf == NULL && umem_alloc_retry(cp, umflag))
1875*7c478bd9Sstevel@tonic-gate 			goto umem_alloc_retry;
1876*7c478bd9Sstevel@tonic-gate 		return (buf);
1877*7c478bd9Sstevel@tonic-gate 	}
1878*7c478bd9Sstevel@tonic-gate 	if (size == 0)
1879*7c478bd9Sstevel@tonic-gate 		return (NULL);
1880*7c478bd9Sstevel@tonic-gate 	if (umem_oversize_arena == NULL) {
1881*7c478bd9Sstevel@tonic-gate 		if (umem_init())
1882*7c478bd9Sstevel@tonic-gate 			ASSERT(umem_oversize_arena != NULL);
1883*7c478bd9Sstevel@tonic-gate 		else
1884*7c478bd9Sstevel@tonic-gate 			return (NULL);
1885*7c478bd9Sstevel@tonic-gate 	}
1886*7c478bd9Sstevel@tonic-gate 	buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag));
1887*7c478bd9Sstevel@tonic-gate 	if (buf == NULL) {
1888*7c478bd9Sstevel@tonic-gate 		umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
1889*7c478bd9Sstevel@tonic-gate 		if (umem_alloc_retry(NULL, umflag))
1890*7c478bd9Sstevel@tonic-gate 			goto umem_alloc_retry;
1891*7c478bd9Sstevel@tonic-gate 	}
1892*7c478bd9Sstevel@tonic-gate 	return (buf);
1893*7c478bd9Sstevel@tonic-gate }
1894*7c478bd9Sstevel@tonic-gate 
1895*7c478bd9Sstevel@tonic-gate #pragma weak umem_alloc_align = _umem_alloc_align
1896*7c478bd9Sstevel@tonic-gate void *
1897*7c478bd9Sstevel@tonic-gate _umem_alloc_align(size_t size, size_t align, int umflag)
1898*7c478bd9Sstevel@tonic-gate {
1899*7c478bd9Sstevel@tonic-gate 	void *buf;
1900*7c478bd9Sstevel@tonic-gate 
1901*7c478bd9Sstevel@tonic-gate 	if (size == 0)
1902*7c478bd9Sstevel@tonic-gate 		return (NULL);
1903*7c478bd9Sstevel@tonic-gate 	if ((align & (align - 1)) != 0)
1904*7c478bd9Sstevel@tonic-gate 		return (NULL);
1905*7c478bd9Sstevel@tonic-gate 	if (align < UMEM_ALIGN)
1906*7c478bd9Sstevel@tonic-gate 		align = UMEM_ALIGN;
1907*7c478bd9Sstevel@tonic-gate 
1908*7c478bd9Sstevel@tonic-gate umem_alloc_align_retry:
1909*7c478bd9Sstevel@tonic-gate 	if (umem_memalign_arena == NULL) {
1910*7c478bd9Sstevel@tonic-gate 		if (umem_init())
1911*7c478bd9Sstevel@tonic-gate 			ASSERT(umem_memalign_arena != NULL);
1912*7c478bd9Sstevel@tonic-gate 		else
1913*7c478bd9Sstevel@tonic-gate 			return (NULL);
1914*7c478bd9Sstevel@tonic-gate 	}
1915*7c478bd9Sstevel@tonic-gate 	buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL,
1916*7c478bd9Sstevel@tonic-gate 	    UMEM_VMFLAGS(umflag));
1917*7c478bd9Sstevel@tonic-gate 	if (buf == NULL) {
1918*7c478bd9Sstevel@tonic-gate 		umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
1919*7c478bd9Sstevel@tonic-gate 		if (umem_alloc_retry(NULL, umflag))
1920*7c478bd9Sstevel@tonic-gate 			goto umem_alloc_align_retry;
1921*7c478bd9Sstevel@tonic-gate 	}
1922*7c478bd9Sstevel@tonic-gate 	return (buf);
1923*7c478bd9Sstevel@tonic-gate }
1924*7c478bd9Sstevel@tonic-gate 
1925*7c478bd9Sstevel@tonic-gate #pragma weak umem_free = _umem_free
1926*7c478bd9Sstevel@tonic-gate void
1927*7c478bd9Sstevel@tonic-gate _umem_free(void *buf, size_t size)
1928*7c478bd9Sstevel@tonic-gate {
1929*7c478bd9Sstevel@tonic-gate 	size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
1930*7c478bd9Sstevel@tonic-gate 
1931*7c478bd9Sstevel@tonic-gate 	if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
1932*7c478bd9Sstevel@tonic-gate 		umem_cache_t *cp = umem_alloc_table[index];
1933*7c478bd9Sstevel@tonic-gate 		if (cp->cache_flags & UMF_BUFTAG) {
1934*7c478bd9Sstevel@tonic-gate 			umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1935*7c478bd9Sstevel@tonic-gate 			uint32_t *ip = (uint32_t *)btp;
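			/*
			 * ip[1] holds the size recorded at allocation time
			 * (see _umem_alloc() above).  A mismatch means one
			 * of three things: the buffer was already freed
			 * (its first word holds the free pattern), the
			 * caller passed the wrong size (ip[1] still decodes
			 * as a valid size), or the buftag was corrupted.
			 */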
1936*7c478bd9Sstevel@tonic-gate 			if (ip[1] != UMEM_SIZE_ENCODE(size)) {
1937*7c478bd9Sstevel@tonic-gate 				if (*(uint64_t *)buf == UMEM_FREE_PATTERN) {
1938*7c478bd9Sstevel@tonic-gate 					umem_error(UMERR_DUPFREE, cp, buf);
1939*7c478bd9Sstevel@tonic-gate 					return;
1940*7c478bd9Sstevel@tonic-gate 				}
1941*7c478bd9Sstevel@tonic-gate 				if (UMEM_SIZE_VALID(ip[1])) {
1942*7c478bd9Sstevel@tonic-gate 					ip[0] = UMEM_SIZE_ENCODE(size);
1943*7c478bd9Sstevel@tonic-gate 					umem_error(UMERR_BADSIZE, cp, buf);
1944*7c478bd9Sstevel@tonic-gate 				} else {
1945*7c478bd9Sstevel@tonic-gate 					umem_error(UMERR_REDZONE, cp, buf);
1946*7c478bd9Sstevel@tonic-gate 				}
1947*7c478bd9Sstevel@tonic-gate 				return;
1948*7c478bd9Sstevel@tonic-gate 			}
1949*7c478bd9Sstevel@tonic-gate 			if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) {
1950*7c478bd9Sstevel@tonic-gate 				umem_error(UMERR_REDZONE, cp, buf);
1951*7c478bd9Sstevel@tonic-gate 				return;
1952*7c478bd9Sstevel@tonic-gate 			}
1953*7c478bd9Sstevel@tonic-gate 			btp->bt_redzone = UMEM_REDZONE_PATTERN;
1954*7c478bd9Sstevel@tonic-gate 		}
1955*7c478bd9Sstevel@tonic-gate 		_umem_cache_free(cp, buf);
1956*7c478bd9Sstevel@tonic-gate 	} else {
1957*7c478bd9Sstevel@tonic-gate 		if (buf == NULL && size == 0)
1958*7c478bd9Sstevel@tonic-gate 			return;
1959*7c478bd9Sstevel@tonic-gate 		vmem_free(umem_oversize_arena, buf, size);
1960*7c478bd9Sstevel@tonic-gate 	}
1961*7c478bd9Sstevel@tonic-gate }
1962*7c478bd9Sstevel@tonic-gate 
1963*7c478bd9Sstevel@tonic-gate #pragma weak umem_free_align = _umem_free_align
1964*7c478bd9Sstevel@tonic-gate void
1965*7c478bd9Sstevel@tonic-gate _umem_free_align(void *buf, size_t size)
1966*7c478bd9Sstevel@tonic-gate {
1967*7c478bd9Sstevel@tonic-gate 	if (buf == NULL && size == 0)
1968*7c478bd9Sstevel@tonic-gate 		return;
1969*7c478bd9Sstevel@tonic-gate 	vmem_xfree(umem_memalign_arena, buf, size);
1970*7c478bd9Sstevel@tonic-gate }
1971*7c478bd9Sstevel@tonic-gate 
1972*7c478bd9Sstevel@tonic-gate static void *
1973*7c478bd9Sstevel@tonic-gate umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
1974*7c478bd9Sstevel@tonic-gate {
1975*7c478bd9Sstevel@tonic-gate 	size_t realsize = size + vmp->vm_quantum;
1976*7c478bd9Sstevel@tonic-gate 
1977*7c478bd9Sstevel@tonic-gate 	/*
1978*7c478bd9Sstevel@tonic-gate 	 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
1979*7c478bd9Sstevel@tonic-gate 	 * vm_quantum will cause integer wraparound.  Check for this, and
1980*7c478bd9Sstevel@tonic-gate 	 * blow off the firewall page in this case.  Note that such a
1981*7c478bd9Sstevel@tonic-gate 	 * giant allocation (the entire address space) can never be
1982*7c478bd9Sstevel@tonic-gate 	 * satisfied, so it will either fail immediately (VM_NOSLEEP)
1983*7c478bd9Sstevel@tonic-gate 	 * or sleep forever (VM_SLEEP).  Thus, there is no need for a
1984*7c478bd9Sstevel@tonic-gate 	 * corresponding check in umem_firewall_va_free().
1985*7c478bd9Sstevel@tonic-gate 	 */
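	/*
	 * For example, with a hypothetical 4K quantum, a request for
	 * ULONG_MAX - 100 bytes would make realsize wrap around to 3995;
	 * the check below restores realsize to the original,
	 * unsatisfiable size instead.
	 */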
1986*7c478bd9Sstevel@tonic-gate 	if (realsize < size)
1987*7c478bd9Sstevel@tonic-gate 		realsize = size;
1988*7c478bd9Sstevel@tonic-gate 
1989*7c478bd9Sstevel@tonic-gate 	return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT));
1990*7c478bd9Sstevel@tonic-gate }
1991*7c478bd9Sstevel@tonic-gate 
1992*7c478bd9Sstevel@tonic-gate static void
1993*7c478bd9Sstevel@tonic-gate umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
1994*7c478bd9Sstevel@tonic-gate {
1995*7c478bd9Sstevel@tonic-gate 	vmem_free(vmp, addr, size + vmp->vm_quantum);
1996*7c478bd9Sstevel@tonic-gate }
1997*7c478bd9Sstevel@tonic-gate 
1998*7c478bd9Sstevel@tonic-gate /*
1999*7c478bd9Sstevel@tonic-gate  * Reclaim all unused memory from a cache.
2000*7c478bd9Sstevel@tonic-gate  */
2001*7c478bd9Sstevel@tonic-gate static void
2002*7c478bd9Sstevel@tonic-gate umem_cache_reap(umem_cache_t *cp)
2003*7c478bd9Sstevel@tonic-gate {
2004*7c478bd9Sstevel@tonic-gate 	/*
2005*7c478bd9Sstevel@tonic-gate 	 * Ask the cache's owner to free some memory if possible.
2006*7c478bd9Sstevel@tonic-gate 	 * The idea is to handle things like the inode cache, which
2007*7c478bd9Sstevel@tonic-gate 	 * typically sits on a bunch of memory that it doesn't truly
2008*7c478bd9Sstevel@tonic-gate 	 * *need*.  Reclaim policy is entirely up to the owner; this
2009*7c478bd9Sstevel@tonic-gate 	 * callback is just an advisory plea for help.
2010*7c478bd9Sstevel@tonic-gate 	 */
2011*7c478bd9Sstevel@tonic-gate 	if (cp->cache_reclaim != NULL)
2012*7c478bd9Sstevel@tonic-gate 		cp->cache_reclaim(cp->cache_private);
2013*7c478bd9Sstevel@tonic-gate 
2014*7c478bd9Sstevel@tonic-gate 	umem_depot_ws_reap(cp);
2015*7c478bd9Sstevel@tonic-gate }
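
/*
 * For illustration, a hypothetical consumer that keeps its own free
 * list of objects could pass a callback like the following as the
 * 'reclaim' argument to umem_cache_create(), letting umem_cache_reap()
 * ask it to trim (my_pool_trim() and my_pool_t are placeholders):
 *
 *	static void
 *	my_reclaim(void *arg)
 *	{
 *		my_pool_trim((my_pool_t *)arg);
 *	}
 */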
2016*7c478bd9Sstevel@tonic-gate 
2017*7c478bd9Sstevel@tonic-gate /*
2018*7c478bd9Sstevel@tonic-gate  * Purge all magazines from a cache and set its magazine limit to zero.
2019*7c478bd9Sstevel@tonic-gate  * All calls are serialized through the update thread, except for the
2020*7c478bd9Sstevel@tonic-gate  * final call from umem_cache_destroy().
2021*7c478bd9Sstevel@tonic-gate  */
2022*7c478bd9Sstevel@tonic-gate static void
2023*7c478bd9Sstevel@tonic-gate umem_cache_magazine_purge(umem_cache_t *cp)
2024*7c478bd9Sstevel@tonic-gate {
2025*7c478bd9Sstevel@tonic-gate 	umem_cpu_cache_t *ccp;
2026*7c478bd9Sstevel@tonic-gate 	umem_magazine_t *mp, *pmp;
2027*7c478bd9Sstevel@tonic-gate 	int rounds, prounds, cpu_seqid;
2028*7c478bd9Sstevel@tonic-gate 
2029*7c478bd9Sstevel@tonic-gate 	ASSERT(cp->cache_next == NULL || IN_UPDATE());
2030*7c478bd9Sstevel@tonic-gate 
2031*7c478bd9Sstevel@tonic-gate 	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2032*7c478bd9Sstevel@tonic-gate 		ccp = &cp->cache_cpu[cpu_seqid];
2033*7c478bd9Sstevel@tonic-gate 
2034*7c478bd9Sstevel@tonic-gate 		(void) mutex_lock(&ccp->cc_lock);
2035*7c478bd9Sstevel@tonic-gate 		mp = ccp->cc_loaded;
2036*7c478bd9Sstevel@tonic-gate 		pmp = ccp->cc_ploaded;
2037*7c478bd9Sstevel@tonic-gate 		rounds = ccp->cc_rounds;
2038*7c478bd9Sstevel@tonic-gate 		prounds = ccp->cc_prounds;
2039*7c478bd9Sstevel@tonic-gate 		ccp->cc_loaded = NULL;
2040*7c478bd9Sstevel@tonic-gate 		ccp->cc_ploaded = NULL;
2041*7c478bd9Sstevel@tonic-gate 		ccp->cc_rounds = -1;
2042*7c478bd9Sstevel@tonic-gate 		ccp->cc_prounds = -1;
2043*7c478bd9Sstevel@tonic-gate 		ccp->cc_magsize = 0;
2044*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&ccp->cc_lock);
2045*7c478bd9Sstevel@tonic-gate 
2046*7c478bd9Sstevel@tonic-gate 		if (mp)
2047*7c478bd9Sstevel@tonic-gate 			umem_magazine_destroy(cp, mp, rounds);
2048*7c478bd9Sstevel@tonic-gate 		if (pmp)
2049*7c478bd9Sstevel@tonic-gate 			umem_magazine_destroy(cp, pmp, prounds);
2050*7c478bd9Sstevel@tonic-gate 	}
2051*7c478bd9Sstevel@tonic-gate 
2052*7c478bd9Sstevel@tonic-gate 	/*
2053*7c478bd9Sstevel@tonic-gate 	 * Updating the working set statistics twice in a row has the
2054*7c478bd9Sstevel@tonic-gate 	 * effect of setting the working set size to zero, so everything
2055*7c478bd9Sstevel@tonic-gate 	 * is eligible for reaping.
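	 *
	 * (Each call to umem_depot_ws_update() copies the depot's observed
	 * minimum into its reap limit and resets the minimum to the current
	 * total, so two back-to-back calls leave everything reapable.)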
2056*7c478bd9Sstevel@tonic-gate 	 */
2057*7c478bd9Sstevel@tonic-gate 	umem_depot_ws_update(cp);
2058*7c478bd9Sstevel@tonic-gate 	umem_depot_ws_update(cp);
2059*7c478bd9Sstevel@tonic-gate 
2060*7c478bd9Sstevel@tonic-gate 	umem_depot_ws_reap(cp);
2061*7c478bd9Sstevel@tonic-gate }
2062*7c478bd9Sstevel@tonic-gate 
2063*7c478bd9Sstevel@tonic-gate /*
2064*7c478bd9Sstevel@tonic-gate  * Enable per-cpu magazines on a cache.
2065*7c478bd9Sstevel@tonic-gate  */
2066*7c478bd9Sstevel@tonic-gate static void
2067*7c478bd9Sstevel@tonic-gate umem_cache_magazine_enable(umem_cache_t *cp)
2068*7c478bd9Sstevel@tonic-gate {
2069*7c478bd9Sstevel@tonic-gate 	int cpu_seqid;
2070*7c478bd9Sstevel@tonic-gate 
2071*7c478bd9Sstevel@tonic-gate 	if (cp->cache_flags & UMF_NOMAGAZINE)
2072*7c478bd9Sstevel@tonic-gate 		return;
2073*7c478bd9Sstevel@tonic-gate 
2074*7c478bd9Sstevel@tonic-gate 	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2075*7c478bd9Sstevel@tonic-gate 		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2076*7c478bd9Sstevel@tonic-gate 		(void) mutex_lock(&ccp->cc_lock);
2077*7c478bd9Sstevel@tonic-gate 		ccp->cc_magsize = cp->cache_magtype->mt_magsize;
2078*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&ccp->cc_lock);
2079*7c478bd9Sstevel@tonic-gate 	}
2081*7c478bd9Sstevel@tonic-gate }
2082*7c478bd9Sstevel@tonic-gate 
2083*7c478bd9Sstevel@tonic-gate /*
2084*7c478bd9Sstevel@tonic-gate  * Recompute a cache's magazine size.  The trade-off is that larger magazines
2085*7c478bd9Sstevel@tonic-gate  * provide a higher transfer rate with the depot, while smaller magazines
2086*7c478bd9Sstevel@tonic-gate  * reduce memory consumption.  Magazine resizing is an expensive operation;
2087*7c478bd9Sstevel@tonic-gate  * it should not be done frequently.
2088*7c478bd9Sstevel@tonic-gate  *
2089*7c478bd9Sstevel@tonic-gate  * Changes to the magazine size are serialized by having only one
2090*7c478bd9Sstevel@tonic-gate  * thread (the update thread) perform updates.
2091*7c478bd9Sstevel@tonic-gate  *
2092*7c478bd9Sstevel@tonic-gate  * Note: at present this only grows the magazine size.  It might be useful
2093*7c478bd9Sstevel@tonic-gate  * to allow shrinkage too.
2094*7c478bd9Sstevel@tonic-gate  */
2095*7c478bd9Sstevel@tonic-gate static void
2096*7c478bd9Sstevel@tonic-gate umem_cache_magazine_resize(umem_cache_t *cp)
2097*7c478bd9Sstevel@tonic-gate {
2098*7c478bd9Sstevel@tonic-gate 	umem_magtype_t *mtp = cp->cache_magtype;
2099*7c478bd9Sstevel@tonic-gate 
2100*7c478bd9Sstevel@tonic-gate 	ASSERT(IN_UPDATE());
2101*7c478bd9Sstevel@tonic-gate 
2102*7c478bd9Sstevel@tonic-gate 	if (cp->cache_chunksize < mtp->mt_maxbuf) {
2103*7c478bd9Sstevel@tonic-gate 		umem_cache_magazine_purge(cp);
2104*7c478bd9Sstevel@tonic-gate 		(void) mutex_lock(&cp->cache_depot_lock);
2105*7c478bd9Sstevel@tonic-gate 		cp->cache_magtype = ++mtp;
2106*7c478bd9Sstevel@tonic-gate 		cp->cache_depot_contention_prev =
2107*7c478bd9Sstevel@tonic-gate 		    cp->cache_depot_contention + INT_MAX;
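		/*
		 * Biasing cache_depot_contention_prev by INT_MAX keeps
		 * the (int) contention delta computed in
		 * umem_cache_update() negative, so a freshly resized
		 * cache is not immediately scheduled for another resize.
		 */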
2108*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&cp->cache_depot_lock);
2109*7c478bd9Sstevel@tonic-gate 		umem_cache_magazine_enable(cp);
2110*7c478bd9Sstevel@tonic-gate 	}
2111*7c478bd9Sstevel@tonic-gate }
2112*7c478bd9Sstevel@tonic-gate 
2113*7c478bd9Sstevel@tonic-gate /*
2114*7c478bd9Sstevel@tonic-gate  * Rescale a cache's hash table, so that the table size is roughly the
2115*7c478bd9Sstevel@tonic-gate  * cache size.  We want the average lookup time to be extremely small.
2116*7c478bd9Sstevel@tonic-gate  */
2117*7c478bd9Sstevel@tonic-gate static void
2118*7c478bd9Sstevel@tonic-gate umem_hash_rescale(umem_cache_t *cp)
2119*7c478bd9Sstevel@tonic-gate {
2120*7c478bd9Sstevel@tonic-gate 	umem_bufctl_t **old_table, **new_table, *bcp;
2121*7c478bd9Sstevel@tonic-gate 	size_t old_size, new_size, h;
2122*7c478bd9Sstevel@tonic-gate 
2123*7c478bd9Sstevel@tonic-gate 	ASSERT(IN_UPDATE());
2124*7c478bd9Sstevel@tonic-gate 
2125*7c478bd9Sstevel@tonic-gate 	new_size = MAX(UMEM_HASH_INITIAL,
2126*7c478bd9Sstevel@tonic-gate 	    1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
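	/*
	 * For example, a cache holding 1000 buffers yields
	 * highbit(3 * 1000 + 4) == 12, so new_size = 1 << 10 == 1024
	 * buckets -- roughly one bucket per buffer.
	 */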
2127*7c478bd9Sstevel@tonic-gate 	old_size = cp->cache_hash_mask + 1;
2128*7c478bd9Sstevel@tonic-gate 
2129*7c478bd9Sstevel@tonic-gate 	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
2130*7c478bd9Sstevel@tonic-gate 		return;
2131*7c478bd9Sstevel@tonic-gate 
2132*7c478bd9Sstevel@tonic-gate 	new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *),
2133*7c478bd9Sstevel@tonic-gate 	    VM_NOSLEEP);
2134*7c478bd9Sstevel@tonic-gate 	if (new_table == NULL)
2135*7c478bd9Sstevel@tonic-gate 		return;
2136*7c478bd9Sstevel@tonic-gate 	bzero(new_table, new_size * sizeof (void *));
2137*7c478bd9Sstevel@tonic-gate 
2138*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&cp->cache_lock);
2139*7c478bd9Sstevel@tonic-gate 
2140*7c478bd9Sstevel@tonic-gate 	old_size = cp->cache_hash_mask + 1;
2141*7c478bd9Sstevel@tonic-gate 	old_table = cp->cache_hash_table;
2142*7c478bd9Sstevel@tonic-gate 
2143*7c478bd9Sstevel@tonic-gate 	cp->cache_hash_mask = new_size - 1;
2144*7c478bd9Sstevel@tonic-gate 	cp->cache_hash_table = new_table;
2145*7c478bd9Sstevel@tonic-gate 	cp->cache_rescale++;
2146*7c478bd9Sstevel@tonic-gate 
2147*7c478bd9Sstevel@tonic-gate 	for (h = 0; h < old_size; h++) {
2148*7c478bd9Sstevel@tonic-gate 		bcp = old_table[h];
2149*7c478bd9Sstevel@tonic-gate 		while (bcp != NULL) {
2150*7c478bd9Sstevel@tonic-gate 			void *addr = bcp->bc_addr;
2151*7c478bd9Sstevel@tonic-gate 			umem_bufctl_t *next_bcp = bcp->bc_next;
2152*7c478bd9Sstevel@tonic-gate 			umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
2153*7c478bd9Sstevel@tonic-gate 			bcp->bc_next = *hash_bucket;
2154*7c478bd9Sstevel@tonic-gate 			*hash_bucket = bcp;
2155*7c478bd9Sstevel@tonic-gate 			bcp = next_bcp;
2156*7c478bd9Sstevel@tonic-gate 		}
2157*7c478bd9Sstevel@tonic-gate 	}
2158*7c478bd9Sstevel@tonic-gate 
2159*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&cp->cache_lock);
2160*7c478bd9Sstevel@tonic-gate 
2161*7c478bd9Sstevel@tonic-gate 	vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *));
2162*7c478bd9Sstevel@tonic-gate }
2163*7c478bd9Sstevel@tonic-gate 
2164*7c478bd9Sstevel@tonic-gate /*
2165*7c478bd9Sstevel@tonic-gate  * Perform periodic maintenance on a cache: hash rescaling,
2166*7c478bd9Sstevel@tonic-gate  * depot working-set update, and magazine resizing.
2167*7c478bd9Sstevel@tonic-gate  */
2168*7c478bd9Sstevel@tonic-gate void
2169*7c478bd9Sstevel@tonic-gate umem_cache_update(umem_cache_t *cp)
2170*7c478bd9Sstevel@tonic-gate {
2171*7c478bd9Sstevel@tonic-gate 	int update_flags = 0;
2172*7c478bd9Sstevel@tonic-gate 
2173*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&umem_cache_lock));
2174*7c478bd9Sstevel@tonic-gate 
2175*7c478bd9Sstevel@tonic-gate 	/*
2176*7c478bd9Sstevel@tonic-gate 	 * If the cache has become much larger or smaller than its hash table,
2177*7c478bd9Sstevel@tonic-gate 	 * fire off a request to rescale the hash table.
2178*7c478bd9Sstevel@tonic-gate 	 */
2179*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&cp->cache_lock);
2180*7c478bd9Sstevel@tonic-gate 
2181*7c478bd9Sstevel@tonic-gate 	if ((cp->cache_flags & UMF_HASH) &&
2182*7c478bd9Sstevel@tonic-gate 	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
2183*7c478bd9Sstevel@tonic-gate 	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
2184*7c478bd9Sstevel@tonic-gate 	    cp->cache_hash_mask > UMEM_HASH_INITIAL)))
2185*7c478bd9Sstevel@tonic-gate 		update_flags |= UMU_HASH_RESCALE;
2186*7c478bd9Sstevel@tonic-gate 
2187*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&cp->cache_lock);
2188*7c478bd9Sstevel@tonic-gate 
2189*7c478bd9Sstevel@tonic-gate 	/*
2190*7c478bd9Sstevel@tonic-gate 	 * Update the depot working set statistics.
2191*7c478bd9Sstevel@tonic-gate 	 */
2192*7c478bd9Sstevel@tonic-gate 	umem_depot_ws_update(cp);
2193*7c478bd9Sstevel@tonic-gate 
2194*7c478bd9Sstevel@tonic-gate 	/*
2195*7c478bd9Sstevel@tonic-gate 	 * If there's a lot of contention in the depot,
2196*7c478bd9Sstevel@tonic-gate 	 * increase the magazine size.
2197*7c478bd9Sstevel@tonic-gate 	 */
2198*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&cp->cache_depot_lock);
2199*7c478bd9Sstevel@tonic-gate 
2200*7c478bd9Sstevel@tonic-gate 	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
2201*7c478bd9Sstevel@tonic-gate 	    (int)(cp->cache_depot_contention -
2202*7c478bd9Sstevel@tonic-gate 	    cp->cache_depot_contention_prev) > umem_depot_contention)
2203*7c478bd9Sstevel@tonic-gate 		update_flags |= UMU_MAGAZINE_RESIZE;
2204*7c478bd9Sstevel@tonic-gate 
2205*7c478bd9Sstevel@tonic-gate 	cp->cache_depot_contention_prev = cp->cache_depot_contention;
2206*7c478bd9Sstevel@tonic-gate 
2207*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&cp->cache_depot_lock);
2208*7c478bd9Sstevel@tonic-gate 
2209*7c478bd9Sstevel@tonic-gate 	if (update_flags)
2210*7c478bd9Sstevel@tonic-gate 		umem_add_update(cp, update_flags);
2211*7c478bd9Sstevel@tonic-gate }
2212*7c478bd9Sstevel@tonic-gate 
2213*7c478bd9Sstevel@tonic-gate /*
2214*7c478bd9Sstevel@tonic-gate  * Runs all pending updates.
2215*7c478bd9Sstevel@tonic-gate  *
2216*7c478bd9Sstevel@tonic-gate  * The update lock must be held on entrance, and will be held on exit.
2217*7c478bd9Sstevel@tonic-gate  */
2218*7c478bd9Sstevel@tonic-gate void
2219*7c478bd9Sstevel@tonic-gate umem_process_updates(void)
2220*7c478bd9Sstevel@tonic-gate {
2221*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&umem_update_lock));
2222*7c478bd9Sstevel@tonic-gate 
2223*7c478bd9Sstevel@tonic-gate 	while (umem_null_cache.cache_unext != &umem_null_cache) {
2224*7c478bd9Sstevel@tonic-gate 		int notify = 0;
2225*7c478bd9Sstevel@tonic-gate 		umem_cache_t *cp = umem_null_cache.cache_unext;
2226*7c478bd9Sstevel@tonic-gate 
2227*7c478bd9Sstevel@tonic-gate 		cp->cache_uprev->cache_unext = cp->cache_unext;
2228*7c478bd9Sstevel@tonic-gate 		cp->cache_unext->cache_uprev = cp->cache_uprev;
2229*7c478bd9Sstevel@tonic-gate 		cp->cache_uprev = cp->cache_unext = NULL;
2230*7c478bd9Sstevel@tonic-gate 
2231*7c478bd9Sstevel@tonic-gate 		ASSERT(!(cp->cache_uflags & UMU_ACTIVE));
2232*7c478bd9Sstevel@tonic-gate 
2233*7c478bd9Sstevel@tonic-gate 		while (cp->cache_uflags) {
2234*7c478bd9Sstevel@tonic-gate 			int uflags = (cp->cache_uflags |= UMU_ACTIVE);
2235*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&umem_update_lock);
2236*7c478bd9Sstevel@tonic-gate 
2237*7c478bd9Sstevel@tonic-gate 			/*
2238*7c478bd9Sstevel@tonic-gate 			 * The order here is important.  Each step can speed up
2239*7c478bd9Sstevel@tonic-gate 			 * later steps.
2240*7c478bd9Sstevel@tonic-gate 			 */
2241*7c478bd9Sstevel@tonic-gate 
2242*7c478bd9Sstevel@tonic-gate 			if (uflags & UMU_HASH_RESCALE)
2243*7c478bd9Sstevel@tonic-gate 				umem_hash_rescale(cp);
2244*7c478bd9Sstevel@tonic-gate 
2245*7c478bd9Sstevel@tonic-gate 			if (uflags & UMU_MAGAZINE_RESIZE)
2246*7c478bd9Sstevel@tonic-gate 				umem_cache_magazine_resize(cp);
2247*7c478bd9Sstevel@tonic-gate 
2248*7c478bd9Sstevel@tonic-gate 			if (uflags & UMU_REAP)
2249*7c478bd9Sstevel@tonic-gate 				umem_cache_reap(cp);
2250*7c478bd9Sstevel@tonic-gate 
2251*7c478bd9Sstevel@tonic-gate 			(void) mutex_lock(&umem_update_lock);
2252*7c478bd9Sstevel@tonic-gate 
2253*7c478bd9Sstevel@tonic-gate 			/*
2254*7c478bd9Sstevel@tonic-gate 			 * check if anyone has requested notification
2255*7c478bd9Sstevel@tonic-gate 			 */
2256*7c478bd9Sstevel@tonic-gate 			if (cp->cache_uflags & UMU_NOTIFY) {
2257*7c478bd9Sstevel@tonic-gate 				uflags |= UMU_NOTIFY;
2258*7c478bd9Sstevel@tonic-gate 				notify = 1;
2259*7c478bd9Sstevel@tonic-gate 			}
2260*7c478bd9Sstevel@tonic-gate 			cp->cache_uflags &= ~uflags;
2261*7c478bd9Sstevel@tonic-gate 		}
2262*7c478bd9Sstevel@tonic-gate 		if (notify)
2263*7c478bd9Sstevel@tonic-gate 			(void) cond_broadcast(&umem_update_cv);
2264*7c478bd9Sstevel@tonic-gate 	}
2265*7c478bd9Sstevel@tonic-gate }
2266*7c478bd9Sstevel@tonic-gate 
2267*7c478bd9Sstevel@tonic-gate #ifndef UMEM_STANDALONE
2268*7c478bd9Sstevel@tonic-gate static void
2269*7c478bd9Sstevel@tonic-gate umem_st_update(void)
2270*7c478bd9Sstevel@tonic-gate {
2271*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&umem_update_lock));
2272*7c478bd9Sstevel@tonic-gate 	ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0);
2273*7c478bd9Sstevel@tonic-gate 
2274*7c478bd9Sstevel@tonic-gate 	umem_st_update_thr = thr_self();
2275*7c478bd9Sstevel@tonic-gate 
2276*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_update_lock);
2277*7c478bd9Sstevel@tonic-gate 
2278*7c478bd9Sstevel@tonic-gate 	vmem_update(NULL);
2279*7c478bd9Sstevel@tonic-gate 	umem_cache_applyall(umem_cache_update);
2280*7c478bd9Sstevel@tonic-gate 
2281*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_update_lock);
2282*7c478bd9Sstevel@tonic-gate 
2283*7c478bd9Sstevel@tonic-gate 	umem_process_updates();	/* does all of the requested work */
2284*7c478bd9Sstevel@tonic-gate 
2285*7c478bd9Sstevel@tonic-gate 	umem_reap_next = gethrtime() +
2286*7c478bd9Sstevel@tonic-gate 	    (hrtime_t)umem_reap_interval * NANOSEC;
2287*7c478bd9Sstevel@tonic-gate 
2288*7c478bd9Sstevel@tonic-gate 	umem_reaping = UMEM_REAP_DONE;
2289*7c478bd9Sstevel@tonic-gate 
2290*7c478bd9Sstevel@tonic-gate 	umem_st_update_thr = 0;
2291*7c478bd9Sstevel@tonic-gate }
2292*7c478bd9Sstevel@tonic-gate #endif
2293*7c478bd9Sstevel@tonic-gate 
2294*7c478bd9Sstevel@tonic-gate /*
2295*7c478bd9Sstevel@tonic-gate  * Reclaim all unused memory from all caches.  Called from vmem when memory
2296*7c478bd9Sstevel@tonic-gate  * gets tight.  Must be called with no locks held.
2297*7c478bd9Sstevel@tonic-gate  *
2298*7c478bd9Sstevel@tonic-gate  * This just requests a reap on all caches, and notifies the update thread.
2299*7c478bd9Sstevel@tonic-gate  */
2300*7c478bd9Sstevel@tonic-gate void
2301*7c478bd9Sstevel@tonic-gate umem_reap(void)
2302*7c478bd9Sstevel@tonic-gate {
2303*7c478bd9Sstevel@tonic-gate #ifndef UMEM_STANDALONE
2304*7c478bd9Sstevel@tonic-gate 	extern int __nthreads(void);
2305*7c478bd9Sstevel@tonic-gate #endif
2306*7c478bd9Sstevel@tonic-gate 
2307*7c478bd9Sstevel@tonic-gate 	if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE ||
2308*7c478bd9Sstevel@tonic-gate 	    gethrtime() < umem_reap_next)
2309*7c478bd9Sstevel@tonic-gate 		return;
2310*7c478bd9Sstevel@tonic-gate 
2311*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_update_lock);
2312*7c478bd9Sstevel@tonic-gate 
2313*7c478bd9Sstevel@tonic-gate 	if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) {
2314*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&umem_update_lock);
2315*7c478bd9Sstevel@tonic-gate 		return;
2316*7c478bd9Sstevel@tonic-gate 	}
2317*7c478bd9Sstevel@tonic-gate 	umem_reaping = UMEM_REAP_ADDING;	/* lock out other reaps */
2318*7c478bd9Sstevel@tonic-gate 
2319*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_update_lock);
2320*7c478bd9Sstevel@tonic-gate 
2321*7c478bd9Sstevel@tonic-gate 	umem_updateall(UMU_REAP);
2322*7c478bd9Sstevel@tonic-gate 
2323*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_update_lock);
2324*7c478bd9Sstevel@tonic-gate 
2325*7c478bd9Sstevel@tonic-gate 	umem_reaping = UMEM_REAP_ACTIVE;
2326*7c478bd9Sstevel@tonic-gate 
2327*7c478bd9Sstevel@tonic-gate 	/* Standalone is single-threaded */
2328*7c478bd9Sstevel@tonic-gate #ifndef UMEM_STANDALONE
2329*7c478bd9Sstevel@tonic-gate 	if (umem_update_thr == 0) {
2330*7c478bd9Sstevel@tonic-gate 		/*
2331*7c478bd9Sstevel@tonic-gate 		 * The update thread does not exist.  If the process is
2332*7c478bd9Sstevel@tonic-gate 		 * multi-threaded, create it.  If not, or the creation fails,
2333*7c478bd9Sstevel@tonic-gate 		 * do the update processing inline.
2334*7c478bd9Sstevel@tonic-gate 		 */
2335*7c478bd9Sstevel@tonic-gate 		ASSERT(umem_st_update_thr == 0);
2336*7c478bd9Sstevel@tonic-gate 
2337*7c478bd9Sstevel@tonic-gate 		if (__nthreads() <= 1 || umem_create_update_thread() == 0)
2338*7c478bd9Sstevel@tonic-gate 			umem_st_update();
2339*7c478bd9Sstevel@tonic-gate 	}
2340*7c478bd9Sstevel@tonic-gate 
2341*7c478bd9Sstevel@tonic-gate 	(void) cond_broadcast(&umem_update_cv);	/* wake up the update thread */
2342*7c478bd9Sstevel@tonic-gate #endif
2343*7c478bd9Sstevel@tonic-gate 
2344*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_update_lock);
2345*7c478bd9Sstevel@tonic-gate }
2346*7c478bd9Sstevel@tonic-gate 
2347*7c478bd9Sstevel@tonic-gate umem_cache_t *
2348*7c478bd9Sstevel@tonic-gate umem_cache_create(
2349*7c478bd9Sstevel@tonic-gate 	char *name,		/* descriptive name for this cache */
2350*7c478bd9Sstevel@tonic-gate 	size_t bufsize,		/* size of the objects it manages */
2351*7c478bd9Sstevel@tonic-gate 	size_t align,		/* required object alignment */
2352*7c478bd9Sstevel@tonic-gate 	umem_constructor_t *constructor, /* object constructor */
2353*7c478bd9Sstevel@tonic-gate 	umem_destructor_t *destructor, /* object destructor */
2354*7c478bd9Sstevel@tonic-gate 	umem_reclaim_t *reclaim, /* memory reclaim callback */
2355*7c478bd9Sstevel@tonic-gate 	void *private,		/* pass-thru arg for constr/destr/reclaim */
2356*7c478bd9Sstevel@tonic-gate 	vmem_t *vmp,		/* vmem source for slab allocation */
2357*7c478bd9Sstevel@tonic-gate 	int cflags)		/* cache creation flags */
2358*7c478bd9Sstevel@tonic-gate {
2359*7c478bd9Sstevel@tonic-gate 	int cpu_seqid;
2360*7c478bd9Sstevel@tonic-gate 	size_t chunksize;
2361*7c478bd9Sstevel@tonic-gate 	umem_cache_t *cp, *cnext, *cprev;
2362*7c478bd9Sstevel@tonic-gate 	umem_magtype_t *mtp;
2363*7c478bd9Sstevel@tonic-gate 	size_t csize;
2364*7c478bd9Sstevel@tonic-gate 	size_t phase;
2365*7c478bd9Sstevel@tonic-gate 
2366*7c478bd9Sstevel@tonic-gate 	/*
2367*7c478bd9Sstevel@tonic-gate 	 * The init thread is allowed to create internal and quantum caches.
2368*7c478bd9Sstevel@tonic-gate 	 *
2369*7c478bd9Sstevel@tonic-gate 	 * Other threads must wait until initialization is complete.
2370*7c478bd9Sstevel@tonic-gate 	 */
2371*7c478bd9Sstevel@tonic-gate 	if (umem_init_thr == thr_self())
2372*7c478bd9Sstevel@tonic-gate 		ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0);
2373*7c478bd9Sstevel@tonic-gate 	else {
2374*7c478bd9Sstevel@tonic-gate 		ASSERT(!(cflags & UMC_INTERNAL));
2375*7c478bd9Sstevel@tonic-gate 		if (umem_ready != UMEM_READY && umem_init() == 0) {
2376*7c478bd9Sstevel@tonic-gate 			errno = EAGAIN;
2377*7c478bd9Sstevel@tonic-gate 			return (NULL);
2378*7c478bd9Sstevel@tonic-gate 		}
2379*7c478bd9Sstevel@tonic-gate 	}
2380*7c478bd9Sstevel@tonic-gate 
2381*7c478bd9Sstevel@tonic-gate 	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
2382*7c478bd9Sstevel@tonic-gate 	phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE);
2383*7c478bd9Sstevel@tonic-gate 
2384*7c478bd9Sstevel@tonic-gate 	if (vmp == NULL)
2385*7c478bd9Sstevel@tonic-gate 		vmp = umem_default_arena;
2386*7c478bd9Sstevel@tonic-gate 
2387*7c478bd9Sstevel@tonic-gate 	ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0);
2388*7c478bd9Sstevel@tonic-gate 
2389*7c478bd9Sstevel@tonic-gate 	/*
2390*7c478bd9Sstevel@tonic-gate 	 * Check that the arguments are reasonable
2391*7c478bd9Sstevel@tonic-gate 	 */
2392*7c478bd9Sstevel@tonic-gate 	if ((align & (align - 1)) != 0 || align > vmp->vm_quantum ||
2393*7c478bd9Sstevel@tonic-gate 	    ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) ||
2394*7c478bd9Sstevel@tonic-gate 	    name == NULL || bufsize == 0) {
2395*7c478bd9Sstevel@tonic-gate 		errno = EINVAL;
2396*7c478bd9Sstevel@tonic-gate 		return (NULL);
2397*7c478bd9Sstevel@tonic-gate 	}
2398*7c478bd9Sstevel@tonic-gate 
2399*7c478bd9Sstevel@tonic-gate 	/*
2400*7c478bd9Sstevel@tonic-gate 	 * If align == 0, we set it to the minimum required alignment.
2401*7c478bd9Sstevel@tonic-gate 	 *
2402*7c478bd9Sstevel@tonic-gate 	 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless
2403*7c478bd9Sstevel@tonic-gate 	 * UMC_NOTOUCH was passed.
2404*7c478bd9Sstevel@tonic-gate 	 */
2405*7c478bd9Sstevel@tonic-gate 	if (align == 0) {
2406*7c478bd9Sstevel@tonic-gate 		if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN)
2407*7c478bd9Sstevel@tonic-gate 			align = UMEM_SECOND_ALIGN;
2408*7c478bd9Sstevel@tonic-gate 		else
2409*7c478bd9Sstevel@tonic-gate 			align = UMEM_ALIGN;
2410*7c478bd9Sstevel@tonic-gate 	} else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0)
2411*7c478bd9Sstevel@tonic-gate 		align = UMEM_ALIGN;
2412*7c478bd9Sstevel@tonic-gate 
2414*7c478bd9Sstevel@tonic-gate 	/*
2415*7c478bd9Sstevel@tonic-gate 	 * Get a umem_cache structure.  We arrange that cp->cache_cpu[]
2416*7c478bd9Sstevel@tonic-gate 	 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent
2417*7c478bd9Sstevel@tonic-gate 	 * false sharing of per-CPU data.
2418*7c478bd9Sstevel@tonic-gate 	 */
2419*7c478bd9Sstevel@tonic-gate 	cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
2420*7c478bd9Sstevel@tonic-gate 	    0, NULL, NULL, VM_NOSLEEP);
2421*7c478bd9Sstevel@tonic-gate 
2422*7c478bd9Sstevel@tonic-gate 	if (cp == NULL) {
2423*7c478bd9Sstevel@tonic-gate 		errno = EAGAIN;
2424*7c478bd9Sstevel@tonic-gate 		return (NULL);
2425*7c478bd9Sstevel@tonic-gate 	}
2426*7c478bd9Sstevel@tonic-gate 
2427*7c478bd9Sstevel@tonic-gate 	bzero(cp, csize);
2428*7c478bd9Sstevel@tonic-gate 
2429*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_flags_lock);
2430*7c478bd9Sstevel@tonic-gate 	if (umem_flags & UMF_RANDOMIZE)
2431*7c478bd9Sstevel@tonic-gate 		umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) |
2432*7c478bd9Sstevel@tonic-gate 		    UMF_RANDOMIZE;
2433*7c478bd9Sstevel@tonic-gate 	cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
2434*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_flags_lock);
2435*7c478bd9Sstevel@tonic-gate 
2436*7c478bd9Sstevel@tonic-gate 	/*
2437*7c478bd9Sstevel@tonic-gate 	 * Make sure all the various flags are reasonable.
2438*7c478bd9Sstevel@tonic-gate 	 */
2439*7c478bd9Sstevel@tonic-gate 	if (cp->cache_flags & UMF_LITE) {
2440*7c478bd9Sstevel@tonic-gate 		if (bufsize >= umem_lite_minsize &&
2441*7c478bd9Sstevel@tonic-gate 		    align <= umem_lite_maxalign &&
2442*7c478bd9Sstevel@tonic-gate 		    P2PHASE(bufsize, umem_lite_maxalign) != 0) {
2443*7c478bd9Sstevel@tonic-gate 			cp->cache_flags |= UMF_BUFTAG;
2444*7c478bd9Sstevel@tonic-gate 			cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2445*7c478bd9Sstevel@tonic-gate 		} else {
2446*7c478bd9Sstevel@tonic-gate 			cp->cache_flags &= ~UMF_DEBUG;
2447*7c478bd9Sstevel@tonic-gate 		}
2448*7c478bd9Sstevel@tonic-gate 	}
2449*7c478bd9Sstevel@tonic-gate 
2450*7c478bd9Sstevel@tonic-gate 	if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
2451*7c478bd9Sstevel@tonic-gate 		cp->cache_flags |= UMF_NOMAGAZINE;
2452*7c478bd9Sstevel@tonic-gate 
2453*7c478bd9Sstevel@tonic-gate 	if (cflags & UMC_NODEBUG)
2454*7c478bd9Sstevel@tonic-gate 		cp->cache_flags &= ~UMF_DEBUG;
2455*7c478bd9Sstevel@tonic-gate 
2456*7c478bd9Sstevel@tonic-gate 	if (cflags & UMC_NOTOUCH)
2457*7c478bd9Sstevel@tonic-gate 		cp->cache_flags &= ~UMF_TOUCH;
2458*7c478bd9Sstevel@tonic-gate 
2459*7c478bd9Sstevel@tonic-gate 	if (cflags & UMC_NOHASH)
2460*7c478bd9Sstevel@tonic-gate 		cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2461*7c478bd9Sstevel@tonic-gate 
2462*7c478bd9Sstevel@tonic-gate 	if (cflags & UMC_NOMAGAZINE)
2463*7c478bd9Sstevel@tonic-gate 		cp->cache_flags |= UMF_NOMAGAZINE;
2464*7c478bd9Sstevel@tonic-gate 
2465*7c478bd9Sstevel@tonic-gate 	if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
2466*7c478bd9Sstevel@tonic-gate 		cp->cache_flags |= UMF_REDZONE;
2467*7c478bd9Sstevel@tonic-gate 
2468*7c478bd9Sstevel@tonic-gate 	if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
2469*7c478bd9Sstevel@tonic-gate 	    !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
2470*7c478bd9Sstevel@tonic-gate 		cp->cache_flags |= UMF_FIREWALL;
2471*7c478bd9Sstevel@tonic-gate 
2472*7c478bd9Sstevel@tonic-gate 	if (vmp != umem_default_arena || umem_firewall_arena == NULL)
2473*7c478bd9Sstevel@tonic-gate 		cp->cache_flags &= ~UMF_FIREWALL;
2474*7c478bd9Sstevel@tonic-gate 
2475*7c478bd9Sstevel@tonic-gate 	if (cp->cache_flags & UMF_FIREWALL) {
2476*7c478bd9Sstevel@tonic-gate 		cp->cache_flags &= ~UMF_BUFTAG;
2477*7c478bd9Sstevel@tonic-gate 		cp->cache_flags |= UMF_NOMAGAZINE;
2478*7c478bd9Sstevel@tonic-gate 		ASSERT(vmp == umem_default_arena);
2479*7c478bd9Sstevel@tonic-gate 		vmp = umem_firewall_arena;
2480*7c478bd9Sstevel@tonic-gate 	}
2481*7c478bd9Sstevel@tonic-gate 
2482*7c478bd9Sstevel@tonic-gate 	/*
2483*7c478bd9Sstevel@tonic-gate 	 * Set cache properties.
2484*7c478bd9Sstevel@tonic-gate 	 */
2485*7c478bd9Sstevel@tonic-gate 	(void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
2486*7c478bd9Sstevel@tonic-gate 	cp->cache_bufsize = bufsize;
2487*7c478bd9Sstevel@tonic-gate 	cp->cache_align = align;
2488*7c478bd9Sstevel@tonic-gate 	cp->cache_constructor = constructor;
2489*7c478bd9Sstevel@tonic-gate 	cp->cache_destructor = destructor;
2490*7c478bd9Sstevel@tonic-gate 	cp->cache_reclaim = reclaim;
2491*7c478bd9Sstevel@tonic-gate 	cp->cache_private = private;
2492*7c478bd9Sstevel@tonic-gate 	cp->cache_arena = vmp;
2493*7c478bd9Sstevel@tonic-gate 	cp->cache_cflags = cflags;
2494*7c478bd9Sstevel@tonic-gate 	cp->cache_cpu_mask = umem_cpu_mask;
2495*7c478bd9Sstevel@tonic-gate 
2496*7c478bd9Sstevel@tonic-gate 	/*
2497*7c478bd9Sstevel@tonic-gate 	 * Determine the chunk size.
2498*7c478bd9Sstevel@tonic-gate 	 */
2499*7c478bd9Sstevel@tonic-gate 	chunksize = bufsize;
2500*7c478bd9Sstevel@tonic-gate 
2501*7c478bd9Sstevel@tonic-gate 	if (align >= UMEM_ALIGN) {
2502*7c478bd9Sstevel@tonic-gate 		chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN);
2503*7c478bd9Sstevel@tonic-gate 		cp->cache_bufctl = chunksize - UMEM_ALIGN;
2504*7c478bd9Sstevel@tonic-gate 	}
2505*7c478bd9Sstevel@tonic-gate 
2506*7c478bd9Sstevel@tonic-gate 	if (cp->cache_flags & UMF_BUFTAG) {
2507*7c478bd9Sstevel@tonic-gate 		cp->cache_bufctl = chunksize;
2508*7c478bd9Sstevel@tonic-gate 		cp->cache_buftag = chunksize;
2509*7c478bd9Sstevel@tonic-gate 		chunksize += sizeof (umem_buftag_t);
2510*7c478bd9Sstevel@tonic-gate 	}
2511*7c478bd9Sstevel@tonic-gate 
2512*7c478bd9Sstevel@tonic-gate 	if (cp->cache_flags & UMF_DEADBEEF) {
2513*7c478bd9Sstevel@tonic-gate 		cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
2514*7c478bd9Sstevel@tonic-gate 		if (cp->cache_flags & UMF_LITE)
2515*7c478bd9Sstevel@tonic-gate 			cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
2516*7c478bd9Sstevel@tonic-gate 	}
2517*7c478bd9Sstevel@tonic-gate 
2518*7c478bd9Sstevel@tonic-gate 	cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);
2519*7c478bd9Sstevel@tonic-gate 
2520*7c478bd9Sstevel@tonic-gate 	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
2521*7c478bd9Sstevel@tonic-gate 
2522*7c478bd9Sstevel@tonic-gate 	if (chunksize < bufsize) {
2523*7c478bd9Sstevel@tonic-gate 		errno = ENOMEM;
2524*7c478bd9Sstevel@tonic-gate 		goto fail;
2525*7c478bd9Sstevel@tonic-gate 	}
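	/*
	 * To illustrate with hypothetical numbers: a 40-byte buffer
	 * with align == 8 and no debugging support yields chunksize = 40
	 * and cache_bufctl = 32 (the last UMEM_ALIGN bytes of the chunk
	 * are used for freelist linkage in unhashed caches); enabling
	 * UMF_BUFTAG instead appends a umem_buftag_t, growing the chunk
	 * accordingly.
	 */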
2526*7c478bd9Sstevel@tonic-gate 
2527*7c478bd9Sstevel@tonic-gate 	/*
2528*7c478bd9Sstevel@tonic-gate 	 * Now that we know the chunk size, determine the optimal slab size.
2529*7c478bd9Sstevel@tonic-gate 	 */
2530*7c478bd9Sstevel@tonic-gate 	if (vmp == umem_firewall_arena) {
2531*7c478bd9Sstevel@tonic-gate 		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
2532*7c478bd9Sstevel@tonic-gate 		cp->cache_mincolor = cp->cache_slabsize - chunksize;
2533*7c478bd9Sstevel@tonic-gate 		cp->cache_maxcolor = cp->cache_mincolor;
2534*7c478bd9Sstevel@tonic-gate 		cp->cache_flags |= UMF_HASH;
2535*7c478bd9Sstevel@tonic-gate 		ASSERT(!(cp->cache_flags & UMF_BUFTAG));
2536*7c478bd9Sstevel@tonic-gate 	} else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) &&
2537*7c478bd9Sstevel@tonic-gate 	    !(cp->cache_flags & UMF_AUDIT) &&
2538*7c478bd9Sstevel@tonic-gate 	    chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) {
2539*7c478bd9Sstevel@tonic-gate 		cp->cache_slabsize = vmp->vm_quantum;
2540*7c478bd9Sstevel@tonic-gate 		cp->cache_mincolor = 0;
2541*7c478bd9Sstevel@tonic-gate 		cp->cache_maxcolor =
2542*7c478bd9Sstevel@tonic-gate 		    (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;
2543*7c478bd9Sstevel@tonic-gate 
2544*7c478bd9Sstevel@tonic-gate 		if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
2545*7c478bd9Sstevel@tonic-gate 			errno = EINVAL;
2546*7c478bd9Sstevel@tonic-gate 			goto fail;
2547*7c478bd9Sstevel@tonic-gate 		}
2548*7c478bd9Sstevel@tonic-gate 		ASSERT(!(cp->cache_flags & UMF_AUDIT));
2549*7c478bd9Sstevel@tonic-gate 	} else {
2550*7c478bd9Sstevel@tonic-gate 		size_t chunks, bestfit, waste, slabsize;
2551*7c478bd9Sstevel@tonic-gate 		size_t minwaste = LONG_MAX;
2552*7c478bd9Sstevel@tonic-gate 
2553*7c478bd9Sstevel@tonic-gate 		for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) {
2554*7c478bd9Sstevel@tonic-gate 			slabsize = P2ROUNDUP(chunksize * chunks,
2555*7c478bd9Sstevel@tonic-gate 			    vmp->vm_quantum);
2556*7c478bd9Sstevel@tonic-gate 			/*
2557*7c478bd9Sstevel@tonic-gate 			 * check for overflow
2558*7c478bd9Sstevel@tonic-gate 			 */
2559*7c478bd9Sstevel@tonic-gate 			if ((slabsize / chunks) < chunksize) {
2560*7c478bd9Sstevel@tonic-gate 				errno = ENOMEM;
2561*7c478bd9Sstevel@tonic-gate 				goto fail;
2562*7c478bd9Sstevel@tonic-gate 			}
2563*7c478bd9Sstevel@tonic-gate 			chunks = slabsize / chunksize;
2564*7c478bd9Sstevel@tonic-gate 			waste = (slabsize % chunksize) / chunks;
2565*7c478bd9Sstevel@tonic-gate 			if (waste < minwaste) {
2566*7c478bd9Sstevel@tonic-gate 				minwaste = waste;
2567*7c478bd9Sstevel@tonic-gate 				bestfit = slabsize;
2568*7c478bd9Sstevel@tonic-gate 			}
2569*7c478bd9Sstevel@tonic-gate 		}
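		/*
		 * For example, with a hypothetical 4K quantum and a
		 * 3000-byte chunk: a one-page (4096-byte) slab wastes
		 * 1096 bytes, while a three-page (12288-byte) slab holds
		 * four chunks and wastes only 288 bytes total (72 per
		 * chunk), so the loop settles on 12288.
		 */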
2570*7c478bd9Sstevel@tonic-gate 		if (cflags & UMC_QCACHE)
2571*7c478bd9Sstevel@tonic-gate 			bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64);
2572*7c478bd9Sstevel@tonic-gate 		cp->cache_slabsize = bestfit;
2573*7c478bd9Sstevel@tonic-gate 		cp->cache_mincolor = 0;
2574*7c478bd9Sstevel@tonic-gate 		cp->cache_maxcolor = bestfit % chunksize;
2575*7c478bd9Sstevel@tonic-gate 		cp->cache_flags |= UMF_HASH;
2576*7c478bd9Sstevel@tonic-gate 	}
2577*7c478bd9Sstevel@tonic-gate 
2578*7c478bd9Sstevel@tonic-gate 	if (cp->cache_flags & UMF_HASH) {
2579*7c478bd9Sstevel@tonic-gate 		ASSERT(!(cflags & UMC_NOHASH));
2580*7c478bd9Sstevel@tonic-gate 		cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
2581*7c478bd9Sstevel@tonic-gate 		    umem_bufctl_audit_cache : umem_bufctl_cache;
2582*7c478bd9Sstevel@tonic-gate 	}
2583*7c478bd9Sstevel@tonic-gate 
2584*7c478bd9Sstevel@tonic-gate 	if (cp->cache_maxcolor >= vmp->vm_quantum)
2585*7c478bd9Sstevel@tonic-gate 		cp->cache_maxcolor = vmp->vm_quantum - 1;
2586*7c478bd9Sstevel@tonic-gate 
2587*7c478bd9Sstevel@tonic-gate 	cp->cache_color = cp->cache_mincolor;
2588*7c478bd9Sstevel@tonic-gate 
2589*7c478bd9Sstevel@tonic-gate 	/*
2590*7c478bd9Sstevel@tonic-gate 	 * Initialize the rest of the slab layer.
2591*7c478bd9Sstevel@tonic-gate 	 */
2592*7c478bd9Sstevel@tonic-gate 	(void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);
2593*7c478bd9Sstevel@tonic-gate 
2594*7c478bd9Sstevel@tonic-gate 	cp->cache_freelist = &cp->cache_nullslab;
2595*7c478bd9Sstevel@tonic-gate 	cp->cache_nullslab.slab_cache = cp;
2596*7c478bd9Sstevel@tonic-gate 	cp->cache_nullslab.slab_refcnt = -1;
2597*7c478bd9Sstevel@tonic-gate 	cp->cache_nullslab.slab_next = &cp->cache_nullslab;
2598*7c478bd9Sstevel@tonic-gate 	cp->cache_nullslab.slab_prev = &cp->cache_nullslab;
2599*7c478bd9Sstevel@tonic-gate 
2600*7c478bd9Sstevel@tonic-gate 	if (cp->cache_flags & UMF_HASH) {
2601*7c478bd9Sstevel@tonic-gate 		cp->cache_hash_table = vmem_alloc(umem_hash_arena,
2602*7c478bd9Sstevel@tonic-gate 		    UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
2603*7c478bd9Sstevel@tonic-gate 		if (cp->cache_hash_table == NULL) {
2604*7c478bd9Sstevel@tonic-gate 			errno = EAGAIN;
2605*7c478bd9Sstevel@tonic-gate 			goto fail_lock;
2606*7c478bd9Sstevel@tonic-gate 		}
2607*7c478bd9Sstevel@tonic-gate 		bzero(cp->cache_hash_table,
2608*7c478bd9Sstevel@tonic-gate 		    UMEM_HASH_INITIAL * sizeof (void *));
2609*7c478bd9Sstevel@tonic-gate 		cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
2610*7c478bd9Sstevel@tonic-gate 		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
2611*7c478bd9Sstevel@tonic-gate 	}
2612*7c478bd9Sstevel@tonic-gate 
2613*7c478bd9Sstevel@tonic-gate 	/*
2614*7c478bd9Sstevel@tonic-gate 	 * Initialize the depot.
2615*7c478bd9Sstevel@tonic-gate 	 */
2616*7c478bd9Sstevel@tonic-gate 	(void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);
2617*7c478bd9Sstevel@tonic-gate 
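	/*
	 * The umem_magtype table (defined earlier in this file) is
	 * ordered by increasing magazine size and decreasing mt_minbuf,
	 * so this scan pairs small chunks with large magazines and
	 * large chunks with small ones.
	 */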
2618*7c478bd9Sstevel@tonic-gate 	for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
2619*7c478bd9Sstevel@tonic-gate 		continue;
2620*7c478bd9Sstevel@tonic-gate 
2621*7c478bd9Sstevel@tonic-gate 	cp->cache_magtype = mtp;
2622*7c478bd9Sstevel@tonic-gate 
2623*7c478bd9Sstevel@tonic-gate 	/*
2624*7c478bd9Sstevel@tonic-gate 	 * Initialize the CPU layer.
2625*7c478bd9Sstevel@tonic-gate 	 */
2626*7c478bd9Sstevel@tonic-gate 	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2627*7c478bd9Sstevel@tonic-gate 		umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2628*7c478bd9Sstevel@tonic-gate 		(void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL);
2629*7c478bd9Sstevel@tonic-gate 		ccp->cc_flags = cp->cache_flags;
2630*7c478bd9Sstevel@tonic-gate 		ccp->cc_rounds = -1;
2631*7c478bd9Sstevel@tonic-gate 		ccp->cc_prounds = -1;
2632*7c478bd9Sstevel@tonic-gate 	}
2633*7c478bd9Sstevel@tonic-gate 
2634*7c478bd9Sstevel@tonic-gate 	/*
2635*7c478bd9Sstevel@tonic-gate 	 * Add the cache to the global list.  This makes it visible
2636*7c478bd9Sstevel@tonic-gate 	 * to umem_update(), so the cache must be ready for business.
2637*7c478bd9Sstevel@tonic-gate 	 */
2638*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_cache_lock);
2639*7c478bd9Sstevel@tonic-gate 	cp->cache_next = cnext = &umem_null_cache;
2640*7c478bd9Sstevel@tonic-gate 	cp->cache_prev = cprev = umem_null_cache.cache_prev;
2641*7c478bd9Sstevel@tonic-gate 	cnext->cache_prev = cp;
2642*7c478bd9Sstevel@tonic-gate 	cprev->cache_next = cp;
2643*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_cache_lock);
2644*7c478bd9Sstevel@tonic-gate 
2645*7c478bd9Sstevel@tonic-gate 	if (umem_ready == UMEM_READY)
2646*7c478bd9Sstevel@tonic-gate 		umem_cache_magazine_enable(cp);
2647*7c478bd9Sstevel@tonic-gate 
2648*7c478bd9Sstevel@tonic-gate 	return (cp);
2649*7c478bd9Sstevel@tonic-gate 
2650*7c478bd9Sstevel@tonic-gate fail_lock:
2651*7c478bd9Sstevel@tonic-gate 	(void) mutex_destroy(&cp->cache_lock);
2652*7c478bd9Sstevel@tonic-gate fail:
2653*7c478bd9Sstevel@tonic-gate 	vmem_xfree(umem_cache_arena, cp, csize);
2654*7c478bd9Sstevel@tonic-gate 	return (NULL);
2655*7c478bd9Sstevel@tonic-gate }
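
/*
 * A minimal, hypothetical lifecycle for the interfaces above:
 *
 *	umem_cache_t *cache = umem_cache_create("example_cache",
 *	    sizeof (example_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *	example_t *ep = umem_cache_alloc(cache, UMEM_DEFAULT);
 *	...
 *	umem_cache_free(cache, ep);
 *	umem_cache_destroy(cache);
 *
 * example_t is a placeholder type; umem_cache_alloc() and
 * umem_cache_free() are defined earlier in this file.
 */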
2656*7c478bd9Sstevel@tonic-gate 
2657*7c478bd9Sstevel@tonic-gate void
2658*7c478bd9Sstevel@tonic-gate umem_cache_destroy(umem_cache_t *cp)
2659*7c478bd9Sstevel@tonic-gate {
2660*7c478bd9Sstevel@tonic-gate 	int cpu_seqid;
2661*7c478bd9Sstevel@tonic-gate 
2662*7c478bd9Sstevel@tonic-gate 	/*
2663*7c478bd9Sstevel@tonic-gate 	 * Remove the cache from the global cache list so that no new updates
2664*7c478bd9Sstevel@tonic-gate 	 * will be scheduled on its behalf, wait for any pending tasks to
2665*7c478bd9Sstevel@tonic-gate 	 * complete, purge the cache, and then destroy it.
2666*7c478bd9Sstevel@tonic-gate 	 */
2667*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_cache_lock);
2668*7c478bd9Sstevel@tonic-gate 	cp->cache_prev->cache_next = cp->cache_next;
2669*7c478bd9Sstevel@tonic-gate 	cp->cache_next->cache_prev = cp->cache_prev;
2670*7c478bd9Sstevel@tonic-gate 	cp->cache_prev = cp->cache_next = NULL;
2671*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_cache_lock);
2672*7c478bd9Sstevel@tonic-gate 
2673*7c478bd9Sstevel@tonic-gate 	umem_remove_updates(cp);
2674*7c478bd9Sstevel@tonic-gate 
2675*7c478bd9Sstevel@tonic-gate 	umem_cache_magazine_purge(cp);
2676*7c478bd9Sstevel@tonic-gate 
2677*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&cp->cache_lock);
2678*7c478bd9Sstevel@tonic-gate 	if (cp->cache_buftotal != 0)
2679*7c478bd9Sstevel@tonic-gate 		log_message("umem_cache_destroy: '%s' (%p) not empty\n",
2680*7c478bd9Sstevel@tonic-gate 		    cp->cache_name, (void *)cp);
2681*7c478bd9Sstevel@tonic-gate 	cp->cache_reclaim = NULL;
2682*7c478bd9Sstevel@tonic-gate 	/*
2683*7c478bd9Sstevel@tonic-gate 	 * The cache is now dead.  There should be no further activity.
2684*7c478bd9Sstevel@tonic-gate 	 * We enforce this by setting land mines in the constructor and
2685*7c478bd9Sstevel@tonic-gate 	 * destructor routines that induce a segmentation fault if invoked.
2686*7c478bd9Sstevel@tonic-gate 	 */
2687*7c478bd9Sstevel@tonic-gate 	cp->cache_constructor = (umem_constructor_t *)1;
2688*7c478bd9Sstevel@tonic-gate 	cp->cache_destructor = (umem_destructor_t *)2;
2689*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&cp->cache_lock);
2690*7c478bd9Sstevel@tonic-gate 
2691*7c478bd9Sstevel@tonic-gate 	if (cp->cache_hash_table != NULL)
2692*7c478bd9Sstevel@tonic-gate 		vmem_free(umem_hash_arena, cp->cache_hash_table,
2693*7c478bd9Sstevel@tonic-gate 		    (cp->cache_hash_mask + 1) * sizeof (void *));
2694*7c478bd9Sstevel@tonic-gate 
2695*7c478bd9Sstevel@tonic-gate 	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++)
2696*7c478bd9Sstevel@tonic-gate 		(void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
2697*7c478bd9Sstevel@tonic-gate 
2698*7c478bd9Sstevel@tonic-gate 	(void) mutex_destroy(&cp->cache_depot_lock);
2699*7c478bd9Sstevel@tonic-gate 	(void) mutex_destroy(&cp->cache_lock);
2700*7c478bd9Sstevel@tonic-gate 
2701*7c478bd9Sstevel@tonic-gate 	vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
2702*7c478bd9Sstevel@tonic-gate }
2703*7c478bd9Sstevel@tonic-gate 
2704*7c478bd9Sstevel@tonic-gate static int
2705*7c478bd9Sstevel@tonic-gate umem_cache_init(void)
2706*7c478bd9Sstevel@tonic-gate {
2707*7c478bd9Sstevel@tonic-gate 	int i;
2708*7c478bd9Sstevel@tonic-gate 	size_t size, max_size;
2709*7c478bd9Sstevel@tonic-gate 	umem_cache_t *cp;
2710*7c478bd9Sstevel@tonic-gate 	umem_magtype_t *mtp;
2711*7c478bd9Sstevel@tonic-gate 	char name[UMEM_CACHE_NAMELEN + 1];
2712*7c478bd9Sstevel@tonic-gate 	umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES];
2713*7c478bd9Sstevel@tonic-gate 
2714*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
2715*7c478bd9Sstevel@tonic-gate 		mtp = &umem_magtype[i];
2716*7c478bd9Sstevel@tonic-gate 		(void) snprintf(name, sizeof (name), "umem_magazine_%d",
2717*7c478bd9Sstevel@tonic-gate 		    mtp->mt_magsize);
2718*7c478bd9Sstevel@tonic-gate 		mtp->mt_cache = umem_cache_create(name,
2719*7c478bd9Sstevel@tonic-gate 		    (mtp->mt_magsize + 1) * sizeof (void *),
2720*7c478bd9Sstevel@tonic-gate 		    mtp->mt_align, NULL, NULL, NULL, NULL,
2721*7c478bd9Sstevel@tonic-gate 		    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
2722*7c478bd9Sstevel@tonic-gate 		if (mtp->mt_cache == NULL)
2723*7c478bd9Sstevel@tonic-gate 			return (0);
2724*7c478bd9Sstevel@tonic-gate 	}
2725*7c478bd9Sstevel@tonic-gate 
2726*7c478bd9Sstevel@tonic-gate 	umem_slab_cache = umem_cache_create("umem_slab_cache",
2727*7c478bd9Sstevel@tonic-gate 	    sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL,
2728*7c478bd9Sstevel@tonic-gate 	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
2729*7c478bd9Sstevel@tonic-gate 
2730*7c478bd9Sstevel@tonic-gate 	if (umem_slab_cache == NULL)
2731*7c478bd9Sstevel@tonic-gate 		return (0);
2732*7c478bd9Sstevel@tonic-gate 
2733*7c478bd9Sstevel@tonic-gate 	umem_bufctl_cache = umem_cache_create("umem_bufctl_cache",
2734*7c478bd9Sstevel@tonic-gate 	    sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL,
2735*7c478bd9Sstevel@tonic-gate 	    umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
2736*7c478bd9Sstevel@tonic-gate 
2737*7c478bd9Sstevel@tonic-gate 	if (umem_bufctl_cache == NULL)
2738*7c478bd9Sstevel@tonic-gate 		return (0);
2739*7c478bd9Sstevel@tonic-gate 
2740*7c478bd9Sstevel@tonic-gate 	/*
2741*7c478bd9Sstevel@tonic-gate 	 * The size of the umem_bufctl_audit structure depends upon
2742*7c478bd9Sstevel@tonic-gate 	 * umem_stack_depth.  See umem_impl.h for details on the size
2743*7c478bd9Sstevel@tonic-gate 	 * restrictions.
2744*7c478bd9Sstevel@tonic-gate 	 */
2745*7c478bd9Sstevel@tonic-gate 
2746*7c478bd9Sstevel@tonic-gate 	size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
2747*7c478bd9Sstevel@tonic-gate 	max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE;
2748*7c478bd9Sstevel@tonic-gate 
2749*7c478bd9Sstevel@tonic-gate 	if (size > max_size) {			/* too large -- truncate */
2750*7c478bd9Sstevel@tonic-gate 		int max_frames = UMEM_MAX_STACK_DEPTH;
2751*7c478bd9Sstevel@tonic-gate 
2752*7c478bd9Sstevel@tonic-gate 		ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size);
2753*7c478bd9Sstevel@tonic-gate 
2754*7c478bd9Sstevel@tonic-gate 		umem_stack_depth = max_frames;
2755*7c478bd9Sstevel@tonic-gate 		size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
2756*7c478bd9Sstevel@tonic-gate 	}
2757*7c478bd9Sstevel@tonic-gate 
2758*7c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache",
2759*7c478bd9Sstevel@tonic-gate 	    size, 0, NULL, NULL, NULL, NULL, umem_internal_arena,
2760*7c478bd9Sstevel@tonic-gate 	    UMC_NOHASH | UMC_INTERNAL);
2761*7c478bd9Sstevel@tonic-gate 
2762*7c478bd9Sstevel@tonic-gate 	if (umem_bufctl_audit_cache == NULL)
2763*7c478bd9Sstevel@tonic-gate 		return (0);
2764*7c478bd9Sstevel@tonic-gate 
2765*7c478bd9Sstevel@tonic-gate 	if (vmem_backend & VMEM_BACKEND_MMAP)
2766*7c478bd9Sstevel@tonic-gate 		umem_va_arena = vmem_create("umem_va",
2767*7c478bd9Sstevel@tonic-gate 		    NULL, 0, pagesize,
2768*7c478bd9Sstevel@tonic-gate 		    vmem_alloc, vmem_free, heap_arena,
2769*7c478bd9Sstevel@tonic-gate 		    8 * pagesize, VM_NOSLEEP);
2770*7c478bd9Sstevel@tonic-gate 	else
2771*7c478bd9Sstevel@tonic-gate 		umem_va_arena = heap_arena;
2772*7c478bd9Sstevel@tonic-gate 
2773*7c478bd9Sstevel@tonic-gate 	if (umem_va_arena == NULL)
2774*7c478bd9Sstevel@tonic-gate 		return (0);
2775*7c478bd9Sstevel@tonic-gate 
2776*7c478bd9Sstevel@tonic-gate 	umem_default_arena = vmem_create("umem_default",
2777*7c478bd9Sstevel@tonic-gate 	    NULL, 0, pagesize,
2778*7c478bd9Sstevel@tonic-gate 	    heap_alloc, heap_free, umem_va_arena,
2779*7c478bd9Sstevel@tonic-gate 	    0, VM_NOSLEEP);
2780*7c478bd9Sstevel@tonic-gate 
2781*7c478bd9Sstevel@tonic-gate 	if (umem_default_arena == NULL)
2782*7c478bd9Sstevel@tonic-gate 		return (0);
2783*7c478bd9Sstevel@tonic-gate 
2784*7c478bd9Sstevel@tonic-gate 	/*
2785*7c478bd9Sstevel@tonic-gate 	 * make sure the umem_alloc table initializer is correct
2786*7c478bd9Sstevel@tonic-gate 	 */
2787*7c478bd9Sstevel@tonic-gate 	i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
2788*7c478bd9Sstevel@tonic-gate 	ASSERT(umem_alloc_table[i - 1] == &umem_null_cache);
2789*7c478bd9Sstevel@tonic-gate 
2790*7c478bd9Sstevel@tonic-gate 	/*
2791*7c478bd9Sstevel@tonic-gate 	 * Create the default caches to back umem_alloc()
2792*7c478bd9Sstevel@tonic-gate 	 */
2793*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
2794*7c478bd9Sstevel@tonic-gate 		size_t cache_size = umem_alloc_sizes[i];
2795*7c478bd9Sstevel@tonic-gate 		size_t align = 0;
2796*7c478bd9Sstevel@tonic-gate 		/*
2797*7c478bd9Sstevel@tonic-gate 		 * If they allocate a multiple of the coherency granularity,
2798*7c478bd9Sstevel@tonic-gate 		 * they get a coherency-granularity-aligned address.
2799*7c478bd9Sstevel@tonic-gate 		 */
2800*7c478bd9Sstevel@tonic-gate 		if (IS_P2ALIGNED(cache_size, 64))
2801*7c478bd9Sstevel@tonic-gate 			align = 64;
2802*7c478bd9Sstevel@tonic-gate 		if (IS_P2ALIGNED(cache_size, pagesize))
2803*7c478bd9Sstevel@tonic-gate 			align = pagesize;
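		/*
		 * e.g. a hypothetical 192-byte cache gets 64-byte
		 * alignment; a pagesize-multiple cache gets page
		 * alignment.
		 */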
2804*7c478bd9Sstevel@tonic-gate 		(void) snprintf(name, sizeof (name), "umem_alloc_%lu",
2805*7c478bd9Sstevel@tonic-gate 		    (ulong_t)cache_size);
2806*7c478bd9Sstevel@tonic-gate 
2807*7c478bd9Sstevel@tonic-gate 		cp = umem_cache_create(name, cache_size, align,
2808*7c478bd9Sstevel@tonic-gate 		    NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL);
2809*7c478bd9Sstevel@tonic-gate 		if (cp == NULL)
2810*7c478bd9Sstevel@tonic-gate 			return (0);
2811*7c478bd9Sstevel@tonic-gate 
2812*7c478bd9Sstevel@tonic-gate 		umem_alloc_caches[i] = cp;
2813*7c478bd9Sstevel@tonic-gate 	}
2814*7c478bd9Sstevel@tonic-gate 
2815*7c478bd9Sstevel@tonic-gate 	/*
2816*7c478bd9Sstevel@tonic-gate 	 * Initialization cannot fail at this point.  Make the caches
2817*7c478bd9Sstevel@tonic-gate 	 * visible to umem_alloc() and friends.
2818*7c478bd9Sstevel@tonic-gate 	 */
2819*7c478bd9Sstevel@tonic-gate 	size = UMEM_ALIGN;
2820*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
2821*7c478bd9Sstevel@tonic-gate 		size_t cache_size = umem_alloc_sizes[i];
2822*7c478bd9Sstevel@tonic-gate 
2823*7c478bd9Sstevel@tonic-gate 		cp = umem_alloc_caches[i];
2824*7c478bd9Sstevel@tonic-gate 
2825*7c478bd9Sstevel@tonic-gate 		while (size <= cache_size) {
2826*7c478bd9Sstevel@tonic-gate 			umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
2827*7c478bd9Sstevel@tonic-gate 			size += UMEM_ALIGN;
2828*7c478bd9Sstevel@tonic-gate 		}
2829*7c478bd9Sstevel@tonic-gate 	}
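	/*
	 * After this, a lookup such as umem_alloc(100, ...) computes
	 * index (100 - 1) >> UMEM_ALIGN_SHIFT == 12 (for
	 * UMEM_ALIGN_SHIFT == 3) and lands on the cache for the
	 * smallest size >= 100.
	 */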
2830*7c478bd9Sstevel@tonic-gate 	return (1);
2831*7c478bd9Sstevel@tonic-gate }
2832*7c478bd9Sstevel@tonic-gate 
2833*7c478bd9Sstevel@tonic-gate /*
2834*7c478bd9Sstevel@tonic-gate  * umem_startup() is called early on, and must be called explicitly if we're
2835*7c478bd9Sstevel@tonic-gate  * the standalone version.
2836*7c478bd9Sstevel@tonic-gate  */
2837*7c478bd9Sstevel@tonic-gate #ifdef UMEM_STANDALONE
2838*7c478bd9Sstevel@tonic-gate void
2839*7c478bd9Sstevel@tonic-gate #else
2840*7c478bd9Sstevel@tonic-gate #pragma init(umem_startup)
2841*7c478bd9Sstevel@tonic-gate static void
2842*7c478bd9Sstevel@tonic-gate #endif
2843*7c478bd9Sstevel@tonic-gate umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack,
2844*7c478bd9Sstevel@tonic-gate     caddr_t maxstack)
2845*7c478bd9Sstevel@tonic-gate {
2846*7c478bd9Sstevel@tonic-gate #ifdef UMEM_STANDALONE
2847*7c478bd9Sstevel@tonic-gate 	int idx;
2848*7c478bd9Sstevel@tonic-gate 	/* Standalone doesn't fork */
2849*7c478bd9Sstevel@tonic-gate #else
2850*7c478bd9Sstevel@tonic-gate 	umem_forkhandler_init(); /* register the fork handler */
2851*7c478bd9Sstevel@tonic-gate #endif
2852*7c478bd9Sstevel@tonic-gate 
2853*7c478bd9Sstevel@tonic-gate #ifdef __lint
2854*7c478bd9Sstevel@tonic-gate 	/* make lint happy */
2855*7c478bd9Sstevel@tonic-gate 	minstack = maxstack;
2856*7c478bd9Sstevel@tonic-gate #endif
2857*7c478bd9Sstevel@tonic-gate 
2858*7c478bd9Sstevel@tonic-gate #ifdef UMEM_STANDALONE
2859*7c478bd9Sstevel@tonic-gate 	umem_ready = UMEM_READY_STARTUP;
2860*7c478bd9Sstevel@tonic-gate 	umem_init_env_ready = 0;
2861*7c478bd9Sstevel@tonic-gate 
2862*7c478bd9Sstevel@tonic-gate 	umem_min_stack = minstack;
2863*7c478bd9Sstevel@tonic-gate 	umem_max_stack = maxstack;
2864*7c478bd9Sstevel@tonic-gate 
2865*7c478bd9Sstevel@tonic-gate 	nofail_callback = NULL;
2866*7c478bd9Sstevel@tonic-gate 	umem_slab_cache = NULL;
2867*7c478bd9Sstevel@tonic-gate 	umem_bufctl_cache = NULL;
2868*7c478bd9Sstevel@tonic-gate 	umem_bufctl_audit_cache = NULL;
2869*7c478bd9Sstevel@tonic-gate 	heap_arena = NULL;
2870*7c478bd9Sstevel@tonic-gate 	heap_alloc = NULL;
2871*7c478bd9Sstevel@tonic-gate 	heap_free = NULL;
2872*7c478bd9Sstevel@tonic-gate 	umem_internal_arena = NULL;
2873*7c478bd9Sstevel@tonic-gate 	umem_cache_arena = NULL;
2874*7c478bd9Sstevel@tonic-gate 	umem_hash_arena = NULL;
2875*7c478bd9Sstevel@tonic-gate 	umem_log_arena = NULL;
2876*7c478bd9Sstevel@tonic-gate 	umem_oversize_arena = NULL;
2877*7c478bd9Sstevel@tonic-gate 	umem_va_arena = NULL;
2878*7c478bd9Sstevel@tonic-gate 	umem_default_arena = NULL;
2879*7c478bd9Sstevel@tonic-gate 	umem_firewall_va_arena = NULL;
2880*7c478bd9Sstevel@tonic-gate 	umem_firewall_arena = NULL;
2881*7c478bd9Sstevel@tonic-gate 	umem_memalign_arena = NULL;
2882*7c478bd9Sstevel@tonic-gate 	umem_transaction_log = NULL;
2883*7c478bd9Sstevel@tonic-gate 	umem_content_log = NULL;
2884*7c478bd9Sstevel@tonic-gate 	umem_failure_log = NULL;
2885*7c478bd9Sstevel@tonic-gate 	umem_slab_log = NULL;
2886*7c478bd9Sstevel@tonic-gate 	umem_cpu_mask = 0;
2887*7c478bd9Sstevel@tonic-gate 
2888*7c478bd9Sstevel@tonic-gate 	umem_cpus = &umem_startup_cpu;
2889*7c478bd9Sstevel@tonic-gate 	umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0);
2890*7c478bd9Sstevel@tonic-gate 	umem_startup_cpu.cpu_number = 0;
2891*7c478bd9Sstevel@tonic-gate 
2892*7c478bd9Sstevel@tonic-gate 	bcopy(&umem_null_cache_template, &umem_null_cache,
2893*7c478bd9Sstevel@tonic-gate 	    sizeof (umem_cache_t));
2894*7c478bd9Sstevel@tonic-gate 
2895*7c478bd9Sstevel@tonic-gate 	for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++)
2896*7c478bd9Sstevel@tonic-gate 		umem_alloc_table[idx] = &umem_null_cache;
2897*7c478bd9Sstevel@tonic-gate #endif
2898*7c478bd9Sstevel@tonic-gate 
2899*7c478bd9Sstevel@tonic-gate 	/*
2900*7c478bd9Sstevel@tonic-gate 	 * Perform initialization specific to the way we've been compiled
2901*7c478bd9Sstevel@tonic-gate 	 * (library or standalone)
2902*7c478bd9Sstevel@tonic-gate 	 */
2903*7c478bd9Sstevel@tonic-gate 	umem_type_init(start, len, pagesize);
2904*7c478bd9Sstevel@tonic-gate 
2905*7c478bd9Sstevel@tonic-gate 	vmem_startup();
2906*7c478bd9Sstevel@tonic-gate }
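
/*
 * A minimal sketch of the #pragma init mechanism used above (illustrative
 * only; my_setup is a hypothetical name).  The Studio compilers arrange
 * for the named function to run when the object is loaded, before main():
 *
 *	#pragma init(my_setup)
 *	static void
 *	my_setup(void)
 *	{
 *		... one-time setup, runs at load time ...
 *	}
 *
 * The standalone build has no such hook, which is why its consumer must
 * invoke umem_startup() explicitly.
 */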
2907*7c478bd9Sstevel@tonic-gate 
2908*7c478bd9Sstevel@tonic-gate int
2909*7c478bd9Sstevel@tonic-gate umem_init(void)
2910*7c478bd9Sstevel@tonic-gate {
2911*7c478bd9Sstevel@tonic-gate 	size_t maxverify, minfirewall;
2912*7c478bd9Sstevel@tonic-gate 	size_t size;
2913*7c478bd9Sstevel@tonic-gate 	int idx;
2914*7c478bd9Sstevel@tonic-gate 	umem_cpu_t *new_cpus;
2915*7c478bd9Sstevel@tonic-gate 
2916*7c478bd9Sstevel@tonic-gate 	vmem_t *memalign_arena, *oversize_arena;
2917*7c478bd9Sstevel@tonic-gate 
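	/*
	 * Summary of the protocol below (derived from the code): umem_ready
	 * moves from UMEM_READY_STARTUP to UMEM_READY_INITING while a single
	 * thread (recorded in umem_init_thr) performs initialization, then
	 * to UMEM_READY on success or UMEM_READY_INIT_FAILED on failure.
	 * Any other thread arriving in the meantime blocks on umem_init_cv;
	 * the initializing thread re-entering is the recursion case handled
	 * in the else branches.
	 */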
2918*7c478bd9Sstevel@tonic-gate 	if (thr_self() != umem_init_thr) {
2919*7c478bd9Sstevel@tonic-gate 		/*
2920*7c478bd9Sstevel@tonic-gate 		 * The usual case -- non-recursive invocation of umem_init().
2921*7c478bd9Sstevel@tonic-gate 		 */
2922*7c478bd9Sstevel@tonic-gate 		(void) mutex_lock(&umem_init_lock);
2923*7c478bd9Sstevel@tonic-gate 		if (umem_ready != UMEM_READY_STARTUP) {
2924*7c478bd9Sstevel@tonic-gate 			/*
2925*7c478bd9Sstevel@tonic-gate 			 * Someone else beat us to initializing umem.  Wait
2926*7c478bd9Sstevel@tonic-gate 			 * for them to complete, then return.
2927*7c478bd9Sstevel@tonic-gate 			 */
2928*7c478bd9Sstevel@tonic-gate 			while (umem_ready == UMEM_READY_INITING)
2929*7c478bd9Sstevel@tonic-gate 				(void) _cond_wait(&umem_init_cv,
2930*7c478bd9Sstevel@tonic-gate 				    &umem_init_lock);
2931*7c478bd9Sstevel@tonic-gate 			ASSERT(umem_ready == UMEM_READY ||
2932*7c478bd9Sstevel@tonic-gate 			    umem_ready == UMEM_READY_INIT_FAILED);
2933*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&umem_init_lock);
2934*7c478bd9Sstevel@tonic-gate 			return (umem_ready == UMEM_READY);
2935*7c478bd9Sstevel@tonic-gate 		}
2936*7c478bd9Sstevel@tonic-gate 
2937*7c478bd9Sstevel@tonic-gate 		ASSERT(umem_ready == UMEM_READY_STARTUP);
2938*7c478bd9Sstevel@tonic-gate 		ASSERT(umem_init_env_ready == 0);
2939*7c478bd9Sstevel@tonic-gate 
2940*7c478bd9Sstevel@tonic-gate 		umem_ready = UMEM_READY_INITING;
2941*7c478bd9Sstevel@tonic-gate 		umem_init_thr = thr_self();
2942*7c478bd9Sstevel@tonic-gate 
2943*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&umem_init_lock);
2944*7c478bd9Sstevel@tonic-gate 		umem_setup_envvars(0);		/* can recurse -- see below */
2945*7c478bd9Sstevel@tonic-gate 		if (umem_init_env_ready) {
2946*7c478bd9Sstevel@tonic-gate 			/*
2947*7c478bd9Sstevel@tonic-gate 			 * Initialization was already completed by the recursive call.
2948*7c478bd9Sstevel@tonic-gate 			 */
2949*7c478bd9Sstevel@tonic-gate 			ASSERT(umem_ready == UMEM_READY ||
2950*7c478bd9Sstevel@tonic-gate 			    umem_ready == UMEM_READY_INIT_FAILED);
2951*7c478bd9Sstevel@tonic-gate 			ASSERT(umem_init_thr == 0);
2952*7c478bd9Sstevel@tonic-gate 			return (umem_ready == UMEM_READY);
2953*7c478bd9Sstevel@tonic-gate 		}
2954*7c478bd9Sstevel@tonic-gate 	} else if (!umem_init_env_ready) {
2955*7c478bd9Sstevel@tonic-gate 		/*
2956*7c478bd9Sstevel@tonic-gate 		 * The umem_setup_envvars() call (above) makes calls into
2957*7c478bd9Sstevel@tonic-gate 		 * the dynamic linker and directly into user-supplied code.
2958*7c478bd9Sstevel@tonic-gate 		 * Since we cannot know what that code will do, we could be
2959*7c478bd9Sstevel@tonic-gate 		 * recursively invoked (by, say, a malloc() call in the code
2960*7c478bd9Sstevel@tonic-gate 		 * itself, or in a (C++) _init section it causes to be fired).
2961*7c478bd9Sstevel@tonic-gate 		 *
2962*7c478bd9Sstevel@tonic-gate 		 * This code is where we end up if such recursion occurs.  We
2963*7c478bd9Sstevel@tonic-gate 		 * first clean up any partial results in the envvar code, then
2964*7c478bd9Sstevel@tonic-gate 		 * proceed to finish initialization processing in the recursive
2965*7c478bd9Sstevel@tonic-gate 		 * call.  The original call will notice this, and return
2966*7c478bd9Sstevel@tonic-gate 		 * immediately.
2967*7c478bd9Sstevel@tonic-gate 		 */
2968*7c478bd9Sstevel@tonic-gate 		umem_setup_envvars(1);		/* clean up any partial state */
2969*7c478bd9Sstevel@tonic-gate 	} else {
2970*7c478bd9Sstevel@tonic-gate 		umem_panic(
2971*7c478bd9Sstevel@tonic-gate 		    "recursive allocation while initializing umem\n");
2972*7c478bd9Sstevel@tonic-gate 	}
2973*7c478bd9Sstevel@tonic-gate 	umem_init_env_ready = 1;
2974*7c478bd9Sstevel@tonic-gate 
2975*7c478bd9Sstevel@tonic-gate 	/*
2976*7c478bd9Sstevel@tonic-gate 	 * From this point until we finish, recursion into umem_init() will
2977*7c478bd9Sstevel@tonic-gate 	 * cause a umem_panic().
2978*7c478bd9Sstevel@tonic-gate 	 */
2979*7c478bd9Sstevel@tonic-gate 	maxverify = minfirewall = ULONG_MAX;
2980*7c478bd9Sstevel@tonic-gate 
2981*7c478bd9Sstevel@tonic-gate 	/* LINTED constant condition */
2982*7c478bd9Sstevel@tonic-gate 	if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
2983*7c478bd9Sstevel@tonic-gate 		umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
2984*7c478bd9Sstevel@tonic-gate 		    (int)sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
2985*7c478bd9Sstevel@tonic-gate 	}
2986*7c478bd9Sstevel@tonic-gate 
2987*7c478bd9Sstevel@tonic-gate 	umem_max_ncpus = umem_get_max_ncpus();
2988*7c478bd9Sstevel@tonic-gate 
2989*7c478bd9Sstevel@tonic-gate 	/*
2990*7c478bd9Sstevel@tonic-gate 	 * load tunables from environment
2991*7c478bd9Sstevel@tonic-gate 	 */
2992*7c478bd9Sstevel@tonic-gate 	umem_process_envvars();
2993*7c478bd9Sstevel@tonic-gate 
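	/*
	 * Ignore environment-driven fault injection (umem_mtbf) in set-ID
	 * programs, so that an untrusted environment cannot induce failures
	 * in a privileged process.
	 */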
2994*7c478bd9Sstevel@tonic-gate 	if (issetugid())
2995*7c478bd9Sstevel@tonic-gate 		umem_mtbf = 0;
2996*7c478bd9Sstevel@tonic-gate 
2997*7c478bd9Sstevel@tonic-gate 	/*
2998*7c478bd9Sstevel@tonic-gate 	 * set up vmem
2999*7c478bd9Sstevel@tonic-gate 	 */
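	/*
	 * Arena layout (summarized from the calls below): umem_internal_arena
	 * imports page-sized chunks from heap_arena for the allocator's own
	 * metadata, with umem_cache_arena and umem_hash_arena layered on top
	 * of it.  umem_log_arena and umem_firewall_va_arena import directly
	 * from the heap, umem_firewall_arena from the firewall VA arena, and
	 * the oversize and memalign arenas from either the heap or the
	 * firewall VA arena, depending on whether a firewall is in effect.
	 */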
3000*7c478bd9Sstevel@tonic-gate 	if (!(umem_flags & UMF_AUDIT))
3001*7c478bd9Sstevel@tonic-gate 		vmem_no_debug();
3002*7c478bd9Sstevel@tonic-gate 
3003*7c478bd9Sstevel@tonic-gate 	heap_arena = vmem_heap_arena(&heap_alloc, &heap_free);
3004*7c478bd9Sstevel@tonic-gate 
3005*7c478bd9Sstevel@tonic-gate 	pagesize = heap_arena->vm_quantum;
3006*7c478bd9Sstevel@tonic-gate 
3007*7c478bd9Sstevel@tonic-gate 	umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize,
3008*7c478bd9Sstevel@tonic-gate 	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3009*7c478bd9Sstevel@tonic-gate 
3010*7c478bd9Sstevel@tonic-gate 	umem_default_arena = umem_internal_arena;
3011*7c478bd9Sstevel@tonic-gate 
3012*7c478bd9Sstevel@tonic-gate 	if (umem_internal_arena == NULL)
3013*7c478bd9Sstevel@tonic-gate 		goto fail;
3014*7c478bd9Sstevel@tonic-gate 
3015*7c478bd9Sstevel@tonic-gate 	umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN,
3016*7c478bd9Sstevel@tonic-gate 	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3017*7c478bd9Sstevel@tonic-gate 
3018*7c478bd9Sstevel@tonic-gate 	umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN,
3019*7c478bd9Sstevel@tonic-gate 	    vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3020*7c478bd9Sstevel@tonic-gate 
3021*7c478bd9Sstevel@tonic-gate 	umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN,
3022*7c478bd9Sstevel@tonic-gate 	    heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3023*7c478bd9Sstevel@tonic-gate 
3024*7c478bd9Sstevel@tonic-gate 	umem_firewall_va_arena = vmem_create("umem_firewall_va",
3025*7c478bd9Sstevel@tonic-gate 	    NULL, 0, pagesize,
3026*7c478bd9Sstevel@tonic-gate 	    umem_firewall_va_alloc, umem_firewall_va_free, heap_arena,
3027*7c478bd9Sstevel@tonic-gate 	    0, VM_NOSLEEP);
3028*7c478bd9Sstevel@tonic-gate 
3029*7c478bd9Sstevel@tonic-gate 	if (umem_cache_arena == NULL || umem_hash_arena == NULL ||
3030*7c478bd9Sstevel@tonic-gate 	    umem_log_arena == NULL || umem_firewall_va_arena == NULL)
3031*7c478bd9Sstevel@tonic-gate 		goto fail;
3032*7c478bd9Sstevel@tonic-gate 
3033*7c478bd9Sstevel@tonic-gate 	umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize,
3034*7c478bd9Sstevel@tonic-gate 	    heap_alloc, heap_free, umem_firewall_va_arena, 0,
3035*7c478bd9Sstevel@tonic-gate 	    VM_NOSLEEP);
3036*7c478bd9Sstevel@tonic-gate 
3037*7c478bd9Sstevel@tonic-gate 	if (umem_firewall_arena == NULL)
3038*7c478bd9Sstevel@tonic-gate 		goto fail;
3039*7c478bd9Sstevel@tonic-gate 
3040*7c478bd9Sstevel@tonic-gate 	oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize,
3041*7c478bd9Sstevel@tonic-gate 	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3042*7c478bd9Sstevel@tonic-gate 	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3043*7c478bd9Sstevel@tonic-gate 
3044*7c478bd9Sstevel@tonic-gate 	memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN,
3045*7c478bd9Sstevel@tonic-gate 	    heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3046*7c478bd9Sstevel@tonic-gate 	    umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3047*7c478bd9Sstevel@tonic-gate 
3048*7c478bd9Sstevel@tonic-gate 	if (oversize_arena == NULL || memalign_arena == NULL)
3049*7c478bd9Sstevel@tonic-gate 		goto fail;
3050*7c478bd9Sstevel@tonic-gate 
3051*7c478bd9Sstevel@tonic-gate 	if (umem_max_ncpus > CPUHINT_MAX())
3052*7c478bd9Sstevel@tonic-gate 		umem_max_ncpus = CPUHINT_MAX();
3053*7c478bd9Sstevel@tonic-gate 
3054*7c478bd9Sstevel@tonic-gate 	while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0)
3055*7c478bd9Sstevel@tonic-gate 		umem_max_ncpus++;
3056*7c478bd9Sstevel@tonic-gate 
3057*7c478bd9Sstevel@tonic-gate 	if (umem_max_ncpus == 0)
3058*7c478bd9Sstevel@tonic-gate 		umem_max_ncpus = 1;
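
	/*
	 * Worked example (added for clarity): the loop above rounds
	 * umem_max_ncpus up to a power of two -- e.g. 6 becomes 8 -- so
	 * that the (umem_max_ncpus - 1) value assigned to umem_cpu_mask
	 * below works as a bit mask when hashing a thread onto a CPU cache.
	 */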
3059*7c478bd9Sstevel@tonic-gate 
3060*7c478bd9Sstevel@tonic-gate 	size = umem_max_ncpus * sizeof (umem_cpu_t);
3061*7c478bd9Sstevel@tonic-gate 	new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP);
3062*7c478bd9Sstevel@tonic-gate 	if (new_cpus == NULL)
3063*7c478bd9Sstevel@tonic-gate 		goto fail;
3064*7c478bd9Sstevel@tonic-gate 
3065*7c478bd9Sstevel@tonic-gate 	bzero(new_cpus, size);
3066*7c478bd9Sstevel@tonic-gate 	for (idx = 0; idx < umem_max_ncpus; idx++) {
3067*7c478bd9Sstevel@tonic-gate 		new_cpus[idx].cpu_number = idx;
3068*7c478bd9Sstevel@tonic-gate 		new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx);
3069*7c478bd9Sstevel@tonic-gate 	}
3070*7c478bd9Sstevel@tonic-gate 	umem_cpus = new_cpus;
3071*7c478bd9Sstevel@tonic-gate 	umem_cpu_mask = (umem_max_ncpus - 1);
3072*7c478bd9Sstevel@tonic-gate 
3073*7c478bd9Sstevel@tonic-gate 	if (umem_maxverify == 0)
3074*7c478bd9Sstevel@tonic-gate 		umem_maxverify = maxverify;
3075*7c478bd9Sstevel@tonic-gate 
3076*7c478bd9Sstevel@tonic-gate 	if (umem_minfirewall == 0)
3077*7c478bd9Sstevel@tonic-gate 		umem_minfirewall = minfirewall;
3078*7c478bd9Sstevel@tonic-gate 
3079*7c478bd9Sstevel@tonic-gate 	/*
3080*7c478bd9Sstevel@tonic-gate 	 * Set up updating and reaping
3081*7c478bd9Sstevel@tonic-gate 	 */
3082*7c478bd9Sstevel@tonic-gate 	umem_reap_next = gethrtime() + NANOSEC;
3083*7c478bd9Sstevel@tonic-gate 
3084*7c478bd9Sstevel@tonic-gate #ifndef UMEM_STANDALONE
3085*7c478bd9Sstevel@tonic-gate 	(void) gettimeofday(&umem_update_next, NULL);
3086*7c478bd9Sstevel@tonic-gate #endif
3087*7c478bd9Sstevel@tonic-gate 
3088*7c478bd9Sstevel@tonic-gate 	/*
3089*7c478bd9Sstevel@tonic-gate 	 * Set up logging -- failure here is okay, since it will just disable
3090*7c478bd9Sstevel@tonic-gate 	 * the logs
3091*7c478bd9Sstevel@tonic-gate 	 */
3092*7c478bd9Sstevel@tonic-gate 	if (umem_logging) {
3093*7c478bd9Sstevel@tonic-gate 		umem_transaction_log = umem_log_init(umem_transaction_log_size);
3094*7c478bd9Sstevel@tonic-gate 		umem_content_log = umem_log_init(umem_content_log_size);
3095*7c478bd9Sstevel@tonic-gate 		umem_failure_log = umem_log_init(umem_failure_log_size);
3096*7c478bd9Sstevel@tonic-gate 		umem_slab_log = umem_log_init(umem_slab_log_size);
3097*7c478bd9Sstevel@tonic-gate 	}
3098*7c478bd9Sstevel@tonic-gate 
3099*7c478bd9Sstevel@tonic-gate 	/*
3100*7c478bd9Sstevel@tonic-gate 	 * Set up caches -- once this succeeds, initialization can no longer
3101*7c478bd9Sstevel@tonic-gate 	 * fail, since allocations from other threads can now succeed.
3102*7c478bd9Sstevel@tonic-gate 	 */
3103*7c478bd9Sstevel@tonic-gate 	if (umem_cache_init() == 0) {
3104*7c478bd9Sstevel@tonic-gate 		log_message("unable to create initial caches\n");
3105*7c478bd9Sstevel@tonic-gate 		goto fail;
3106*7c478bd9Sstevel@tonic-gate 	}
3107*7c478bd9Sstevel@tonic-gate 	umem_oversize_arena = oversize_arena;
3108*7c478bd9Sstevel@tonic-gate 	umem_memalign_arena = memalign_arena;
3109*7c478bd9Sstevel@tonic-gate 
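	/*
	 * With the per-CPU structures in place, enable the magazine layer
	 * (the per-CPU object caches described in the papers cited at the
	 * top of this file) for every existing cache.
	 */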
3110*7c478bd9Sstevel@tonic-gate 	umem_cache_applyall(umem_cache_magazine_enable);
3111*7c478bd9Sstevel@tonic-gate 
3112*7c478bd9Sstevel@tonic-gate 	/*
3113*7c478bd9Sstevel@tonic-gate 	 * initialization done, ready to go
3114*7c478bd9Sstevel@tonic-gate 	 */
3115*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_init_lock);
3116*7c478bd9Sstevel@tonic-gate 	umem_ready = UMEM_READY;
3117*7c478bd9Sstevel@tonic-gate 	umem_init_thr = 0;
3118*7c478bd9Sstevel@tonic-gate 	(void) cond_broadcast(&umem_init_cv);
3119*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_init_lock);
3120*7c478bd9Sstevel@tonic-gate 	return (1);
3121*7c478bd9Sstevel@tonic-gate 
3122*7c478bd9Sstevel@tonic-gate fail:
3123*7c478bd9Sstevel@tonic-gate 	log_message("umem initialization failed\n");
3124*7c478bd9Sstevel@tonic-gate 
3125*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&umem_init_lock);
3126*7c478bd9Sstevel@tonic-gate 	umem_ready = UMEM_READY_INIT_FAILED;
3127*7c478bd9Sstevel@tonic-gate 	umem_init_thr = 0;
3128*7c478bd9Sstevel@tonic-gate 	(void) cond_broadcast(&umem_init_cv);
3129*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&umem_init_lock);
3130*7c478bd9Sstevel@tonic-gate 	return (0);
3131*7c478bd9Sstevel@tonic-gate }
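
/*
 * A minimal sketch of how a lazy call site might use umem_init()
 * (illustrative only -- the real call sites live in the allocation slow
 * paths).  It returns 1 once umem is ready and 0 if initialization
 * failed, so a caller can write:
 *
 *	if (umem_ready != UMEM_READY && umem_init() == 0)
 *		return (NULL);		... allocator unavailable ...
 */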
3132