1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
24 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
25 * Copyright 2018, Joyent, Inc.
26 * Copyright 2020 Oxide Computer Company
27 */
28
29/*
30 * Kernel memory allocator, as described in the following two papers and a
31 * statement about the consolidator:
32 *
33 * Jeff Bonwick,
34 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
35 * Proceedings of the Summer 1994 Usenix Conference.
36 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
37 *
38 * Jeff Bonwick and Jonathan Adams,
39 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
40 * Arbitrary Resources.
41 * Proceedings of the 2001 Usenix Conference.
42 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
43 *
44 * kmem Slab Consolidator Big Theory Statement:
45 *
46 * 1. Motivation
47 *
48 * As stated in Bonwick94, slabs provide the following advantages over other
49 * allocation structures in terms of memory fragmentation:
50 *
51 *  - Internal fragmentation (per-buffer wasted space) is minimal.
52 *  - Severe external fragmentation (unused buffers on the free list) is
53 *    unlikely.
54 *
55 * Segregating objects by size eliminates one source of external fragmentation,
56 * and according to Bonwick:
57 *
58 *   The other reason that slabs reduce external fragmentation is that all
59 *   objects in a slab are of the same type, so they have the same lifetime
60 *   distribution. The resulting segregation of short-lived and long-lived
61 *   objects at slab granularity reduces the likelihood of an entire page being
62 *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
63 *
64 * While unlikely, severe external fragmentation remains possible. Clients that
65 * allocate both short- and long-lived objects from the same cache cannot
66 * anticipate the distribution of long-lived objects within the allocator's slab
67 * implementation. Even a small percentage of long-lived objects distributed
68 * randomly across many slabs can lead to a worst case scenario where the client
69 * frees the majority of its objects and the system gets back almost none of the
70 * slabs. Despite the client doing what it reasonably can to help the system
71 * reclaim memory, the allocator cannot shake free enough slabs because of
72 * lonely allocations stubbornly hanging on. Although the allocator is in a
73 * position to diagnose the fragmentation, there is nothing that the allocator
74 * by itself can do about it. It only takes a single allocated object to prevent
75 * an entire slab from being reclaimed, and any object handed out by
76 * kmem_cache_alloc() is by definition in the client's control. Conversely,
77 * although the client is in a position to move a long-lived object, it has no
78 * way of knowing if the object is causing fragmentation, and if so, where to
79 * move it. A solution necessarily requires further cooperation between the
80 * allocator and the client.
81 *
82 * 2. Move Callback
83 *
84 * The kmem slab consolidator therefore adds a move callback to the
85 * allocator/client interface, improving worst-case external fragmentation in
86 * kmem caches that supply a function to move objects from one memory location
87 * to another. In a situation of low memory kmem attempts to consolidate all of
88 * a cache's slabs at once; otherwise it works slowly to bring external
89 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
90 * thereby helping to avoid a low memory situation in the future.
91 *
92 * The callback has the following signature:
93 *
94 *   kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
95 *
96 * It supplies the kmem client with two addresses: the allocated object that
97 * kmem wants to move and a buffer selected by kmem for the client to use as the
98 * copy destination. The callback is kmem's way of saying "Please get off of
99 * this buffer and use this one instead." kmem knows where it wants to move the
100 * object in order to best reduce fragmentation. All the client needs to know
101 * about the second argument (void *new) is that it is an allocated, constructed
102 * object ready to take the contents of the old object. When the move function
103 * is called, the system is likely to be low on memory, and the new object
104 * spares the client from having to worry about allocating memory for the
105 * requested move. The third argument supplies the size of the object, in case a
106 * single move function handles multiple caches whose objects differ only in
107 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
108 * user argument passed to the constructor, destructor, and reclaim functions is
109 * also passed to the move callback.
110 *
111 * 2.1 Setting the Move Callback
112 *
113 * The client sets the move callback after creating the cache and before
114 * allocating from it:
115 *
116 *	object_cache = kmem_cache_create(...);
117 *      kmem_cache_set_move(object_cache, object_move);
118 *
119 * 2.2 Move Callback Return Values
120 *
121 * Only the client knows about its own data and when is a good time to move it.
122 * The client is cooperating with kmem to return unused memory to the system,
123 * and kmem respectfully accepts this help at the client's convenience. When
124 * asked to move an object, the client can respond with any of the following:
125 *
126 *   typedef enum kmem_cbrc {
127 *           KMEM_CBRC_YES,
128 *           KMEM_CBRC_NO,
129 *           KMEM_CBRC_LATER,
130 *           KMEM_CBRC_DONT_NEED,
131 *           KMEM_CBRC_DONT_KNOW
132 *   } kmem_cbrc_t;
133 *
134 * The client must not explicitly kmem_cache_free() either of the objects passed
135 * to the callback, since kmem wants to free them directly to the slab layer
136 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
137 * objects to free:
138 *
139 *       YES: (Did it) The client moved the object, so kmem frees the old one.
140 *        NO: (Never) The client refused, so kmem frees the new object (the
141 *            unused copy destination). kmem also marks the slab of the old
142 *            object so as not to bother the client with further callbacks for
143 *            that object as long as the slab remains on the partial slab list.
144 *            (The system won't be getting the slab back as long as the
145 *            immovable object holds it hostage, so there's no point in moving
146 *            any of its objects.)
147 *     LATER: The client is using the object and cannot move it now, so kmem
148 *            frees the new object (the unused copy destination). kmem still
149 *            attempts to move other objects off the slab, since it expects to
150 *            succeed in clearing the slab in a later callback. The client
151 *            should use LATER instead of NO if the object is likely to become
152 *            movable very soon.
153 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
154 *            with the new object (the unused copy destination). This response
155 *            is the client's opportunity to be a model citizen and give back as
156 *            much as it can.
157 * DONT_KNOW: The client does not know about the object because
158 *            a) the client has just allocated the object and not yet put it
159 *               wherever it expects to find known objects
160 *            b) the client has removed the object from wherever it expects to
161 *               find known objects and is about to free it, or
162 *            c) the client has freed the object.
163 *            In all these cases (a, b, and c) kmem frees the new object (the
164 *            unused copy destination).  In the first case, the object is in
165 *            use and the correct action is that for LATER; in the latter two
166 *            cases, we know that the object is either freed or about to be
167 *            freed, in which case it is either already in a magazine or about
168 *            to be in one.  In these cases, we know that the object will either
169 *            be reallocated and reused, or it will end up in a full magazine
170 *            that will be reaped (thereby liberating the slab).  Because it
171 *            is prohibitively expensive to differentiate these cases, and
172 *            because the defrag code is executed when we're low on memory
173 *            (thereby biasing the system to reclaim full magazines) we treat
174 *            all DONT_KNOW cases as LATER and rely on cache reaping to
175 *            generally clean up full magazines.  While we take the same action
176 *            for these cases, we maintain their semantic distinction:  if
177 *            defragmentation is not occurring, it is useful to know if this
178 *            is due to objects in use (LATER) or objects in an unknown state
179 *            of transition (DONT_KNOW).
180 *
181 * 2.3 Object States
182 *
183 * Neither kmem nor the client can be assumed to know the object's whereabouts
184 * at the time of the callback. An object belonging to a kmem cache may be in
185 * any of the following states:
186 *
187 * 1. Uninitialized on the slab
188 * 2. Allocated from the slab but not constructed (still uninitialized)
189 * 3. Allocated from the slab, constructed, but not yet ready for business
190 *    (not in a valid state for the move callback)
191 * 4. In use (valid and known to the client)
192 * 5. About to be freed (no longer in a valid state for the move callback)
193 * 6. Freed to a magazine (still constructed)
194 * 7. Allocated from a magazine, not yet ready for business (not in a valid
195 *    state for the move callback), and about to return to state #4
196 * 8. Deconstructed on a magazine that is about to be freed
197 * 9. Freed to the slab
198 *
199 * Since the move callback may be called at any time while the object is in any
200 * of the above states (except state #1), the client needs a safe way to
201 * determine whether or not it knows about the object. Specifically, the client
202 * needs to know whether or not the object is in state #4, the only state in
203 * which a move is valid. If the object is in any other state, the client should
204 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
205 * the object's fields.
206 *
207 * Note that although an object may be in state #4 when kmem initiates the move
208 * request, the object may no longer be in that state by the time kmem actually
209 * calls the move function. Not only does the client free objects
210 * asynchronously, kmem itself puts move requests on a queue where thay are
211 * pending until kmem processes them from another context. Also, objects freed
212 * to a magazine appear allocated from the point of view of the slab layer, so
213 * kmem may even initiate requests for objects in a state other than state #4.
214 *
215 * 2.3.1 Magazine Layer
216 *
217 * An important insight revealed by the states listed above is that the magazine
218 * layer is populated only by kmem_cache_free(). Magazines of constructed
219 * objects are never populated directly from the slab layer (which contains raw,
220 * unconstructed objects). Whenever an allocation request cannot be satisfied
221 * from the magazine layer, the magazines are bypassed and the request is
222 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
223 * the object constructor only when allocating from the slab layer, and only in
224 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
225 * the move callback. kmem does not preconstruct objects in anticipation of
226 * kmem_cache_alloc().
227 *
228 * 2.3.2 Object Constructor and Destructor
229 *
230 * If the client supplies a destructor, it must be valid to call the destructor
231 * on a newly created object (immediately after the constructor).
232 *
233 * 2.4 Recognizing Known Objects
234 *
235 * There is a simple test to determine safely whether or not the client knows
236 * about a given object in the move callback. It relies on the fact that kmem
237 * guarantees that the object of the move callback has only been touched by the
238 * client itself or else by kmem. kmem does this by ensuring that none of the
239 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
240 * callback is pending. When the last object on a slab is freed, if there is a
241 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
242 * slabs on that list until all pending callbacks are completed. That way,
243 * clients can be certain that the object of a move callback is in one of the
244 * states listed above, making it possible to distinguish known objects (in
245 * state #4) using the two low order bits of any pointer member (with the
246 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
247 * platforms).
248 *
249 * The test works as long as the client always transitions objects from state #4
250 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
251 * order bit of the client-designated pointer member. Since kmem only writes
252 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
253 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
254 * guaranteed to set at least one of the two low order bits. Therefore, given an
255 * object with a back pointer to a 'container_t *o_container', the client can
256 * test
257 *
258 *      container_t *container = object->o_container;
259 *      if ((uintptr_t)container & 0x3) {
260 *              return (KMEM_CBRC_DONT_KNOW);
261 *      }
262 *
263 * Typically, an object will have a pointer to some structure with a list or
264 * hash where objects from the cache are kept while in use. Assuming that the
265 * client has some way of knowing that the container structure is valid and will
266 * not go away during the move, and assuming that the structure includes a lock
267 * to protect whatever collection is used, then the client would continue as
268 * follows:
269 *
270 *	// Ensure that the container structure does not go away.
271 *      if (container_hold(container) == 0) {
272 *              return (KMEM_CBRC_DONT_KNOW);
273 *      }
274 *      mutex_enter(&container->c_objects_lock);
275 *      if (container != object->o_container) {
276 *              mutex_exit(&container->c_objects_lock);
277 *              container_rele(container);
278 *              return (KMEM_CBRC_DONT_KNOW);
279 *      }
280 *
281 * At this point the client knows that the object cannot be freed as long as
282 * c_objects_lock is held. Note that after acquiring the lock, the client must
283 * recheck the o_container pointer in case the object was removed just before
284 * acquiring the lock.
285 *
286 * When the client is about to free an object, it must first remove that object
287 * from the list, hash, or other structure where it is kept. At that time, to
288 * mark the object so it can be distinguished from the remaining, known objects,
289 * the client sets the designated low order bit:
290 *
291 *      mutex_enter(&container->c_objects_lock);
292 *      object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
293 *      list_remove(&container->c_objects, object);
294 *      mutex_exit(&container->c_objects_lock);
295 *
296 * In the common case, the object is freed to the magazine layer, where it may
297 * be reused on a subsequent allocation without the overhead of calling the
298 * constructor. While in the magazine it appears allocated from the point of
299 * view of the slab layer, making it a candidate for the move callback. Most
300 * objects unrecognized by the client in the move callback fall into this
301 * category and are cheaply distinguished from known objects by the test
302 * described earlier. Because searching magazines is prohibitively expensive
303 * for kmem, clients that do not mark freed objects (and therefore return
304 * KMEM_CBRC_DONT_KNOW for large numbers of objects) may find defragmentation
305 * efficacy reduced.
306 *
307 * Invalidating the designated pointer member before freeing the object marks
308 * the object to be avoided in the callback, and conversely, assigning a valid
309 * value to the designated pointer member after allocating the object makes the
310 * object fair game for the callback:
311 *
312 *      ... allocate object ...
313 *      ... set any initial state not set by the constructor ...
314 *
315 *      mutex_enter(&container->c_objects_lock);
316 *      list_insert_tail(&container->c_objects, object);
317 *      membar_producer();
318 *      object->o_container = container;
319 *      mutex_exit(&container->c_objects_lock);
320 *
321 * Note that everything else must be valid before setting o_container makes the
322 * object fair game for the move callback. The membar_producer() call ensures
323 * that all the object's state is written to memory before setting the pointer
324 * that transitions the object from state #3 or #7 (allocated, constructed, not
325 * yet in use) to state #4 (in use, valid). That's important because the move
326 * function has to check the validity of the pointer before it can safely
327 * acquire the lock protecting the collection where it expects to find known
328 * objects.
329 *
330 * This method of distinguishing known objects observes the usual symmetry:
331 * invalidating the designated pointer is the first thing the client does before
332 * freeing the object, and setting the designated pointer is the last thing the
333 * client does after allocating the object. Of course, the client is not
334 * required to use this method. Fundamentally, how the client recognizes known
335 * objects is completely up to the client, but this method is recommended as an
336 * efficient and safe way to take advantage of the guarantees made by kmem. If
337 * the entire object is arbitrary data without any markable bits from a suitable
338 * pointer member, then the client must find some other method, such as
339 * searching a hash table of known objects.
340 *
341 * 2.5 Preventing Objects From Moving
342 *
343 * Besides a way to distinguish known objects, the other thing that the client
344 * needs is a strategy to ensure that an object will not move while the client
345 * is actively using it. The details of satisfying this requirement tend to be
346 * highly cache-specific. It might seem that the same rules that let a client
347 * remove an object safely should also decide when an object can be moved
348 * safely. However, any object state that makes a removal attempt invalid is
349 * likely to be long-lasting for objects that the client does not expect to
350 * remove. kmem knows nothing about the object state and is equally likely (from
351 * the client's point of view) to request a move for any object in the cache,
352 * whether prepared for removal or not. Even a low percentage of objects stuck
353 * in place by unremovability will defeat the consolidator if the stuck objects
354 * are the same long-lived allocations likely to hold slabs hostage.
355 * Fundamentally, the consolidator is not aimed at common cases. Severe external
356 * fragmentation is a worst case scenario manifested as sparsely allocated
357 * slabs, by definition a low percentage of the cache's objects. When deciding
358 * what makes an object movable, keep in mind the goal of the consolidator: to
359 * bring worst-case external fragmentation within the limits guaranteed for
360 * internal fragmentation. Removability is a poor criterion if it is likely to
361 * exclude more than an insignificant percentage of objects for long periods of
362 * time.
363 *
364 * A tricky general solution exists, and it has the advantage of letting you
365 * move any object at almost any moment, practically eliminating the likelihood
366 * that an object can hold a slab hostage. However, if there is a cache-specific
367 * way to ensure that an object is not actively in use in the vast majority of
368 * cases, a simpler solution that leverages this cache-specific knowledge is
369 * preferred.
370 *
371 * 2.5.1 Cache-Specific Solution
372 *
373 * As an example of a cache-specific solution, the ZFS znode cache takes
374 * advantage of the fact that the vast majority of znodes are only being
375 * referenced from the DNLC. (A typical case might be a few hundred in active
376 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
377 * client has established that it recognizes the znode and can access its fields
378 * safely (using the method described earlier), it then tests whether the znode
379 * is referenced by anything other than the DNLC. If so, it assumes that the
380 * znode may be in active use and is unsafe to move, so it drops its locks and
381 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
382 * else znodes are used, no change is needed to protect against the possibility
383 * of the znode moving. The disadvantage is that it remains possible for an
384 * application to hold a znode slab hostage with an open file descriptor.
385 * However, this case ought to be rare and the consolidator has a way to deal
386 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
387 * object, kmem eventually stops believing it and treats the slab as if the
388 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
389 * then focus on getting it off of the partial slab list by allocating rather
390 * than freeing all of its objects. (Either way of getting a slab off the
391 * free list reduces fragmentation.)
392 *
393 * 2.5.2 General Solution
394 *
395 * The general solution, on the other hand, requires an explicit hold everywhere
396 * the object is used to prevent it from moving. To keep the client locking
397 * strategy as uncomplicated as possible, kmem guarantees the simplifying
398 * assumption that move callbacks are sequential, even across multiple caches.
399 * Internally, a global queue processed by a single thread supports all caches
400 * implementing the callback function. No matter how many caches supply a move
401 * function, the consolidator never moves more than one object at a time, so the
402 * client does not have to worry about tricky lock ordering involving several
403 * related objects from different kmem caches.
404 *
405 * The general solution implements the explicit hold as a read-write lock, which
406 * allows multiple readers to access an object from the cache simultaneously
407 * while a single writer is excluded from moving it. A single rwlock for the
408 * entire cache would lock out all threads from using any of the cache's objects
409 * even though only a single object is being moved, so to reduce contention,
410 * the client can fan out the single rwlock into an array of rwlocks hashed by
411 * the object address, making it probable that moving one object will not
412 * prevent other threads from using a different object. The rwlock cannot be a
413 * member of the object itself, because the possibility of the object moving
414 * makes it unsafe to access any of the object's fields until the lock is
415 * acquired.
416 *
417 * Assuming a small, fixed number of locks, it's possible that multiple objects
418 * will hash to the same lock. A thread that needs to use multiple objects in
419 * the same function may acquire the same lock multiple times. Since rwlocks are
420 * reentrant for readers, and since there is never more than a single writer at
421 * a time (assuming that the client acquires the lock as a writer only when
422 * moving an object inside the callback), there would seem to be no problem.
423 * However, a client locking multiple objects in the same function must handle
424 * one case of potential deadlock: Assume that thread A needs to prevent both
425 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
426 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
427 * same lock, that thread A will acquire the lock for object 1 as a reader
428 * before thread B sets the lock's write-wanted bit, preventing thread A from
429 * reacquiring the lock for object 2 as a reader. Unable to make forward
430 * progress, thread A will never release the lock for object 1, resulting in
431 * deadlock.
432 *
433 * There are two ways of avoiding the deadlock just described. The first is to
434 * use rw_tryenter() rather than rw_enter() in the callback function when
435 * attempting to acquire the lock as a writer. If tryenter discovers that the
436 * same object (or another object hashed to the same lock) is already in use, it
437 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
438 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
439 * since it allows a thread to acquire the lock as a reader in spite of a
440 * waiting writer. This second approach insists on moving the object now, no
441 * matter how many readers the move function must wait for in order to do so,
442 * and could delay the completion of the callback indefinitely (blocking
443 * callbacks to other clients). In practice, a less insistent callback using
444 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
445 * little reason to use anything else.
446 *
447 * Avoiding deadlock is not the only problem that an implementation using an
448 * explicit hold needs to solve. Locking the object in the first place (to
449 * prevent it from moving) remains a problem, since the object could move
450 * between the time you obtain a pointer to the object and the time you acquire
451 * the rwlock hashed to that pointer value. Therefore the client needs to
452 * recheck the value of the pointer after acquiring the lock, drop the lock if
453 * the value has changed, and try again. This requires a level of indirection:
454 * something that points to the object rather than the object itself, that the
455 * client can access safely while attempting to acquire the lock. (The object
456 * itself cannot be referenced safely because it can move at any time.)
457 * The following lock-acquisition function takes whatever is safe to reference
458 * (arg), follows its pointer to the object (using function f), and tries as
459 * often as necessary to acquire the hashed lock and verify that the object
460 * still has not moved:
461 *
462 *      object_t *
463 *      object_hold(object_f f, void *arg)
464 *      {
465 *              object_t *op;
466 *
467 *              op = f(arg);
468 *              if (op == NULL) {
469 *                      return (NULL);
470 *              }
471 *
472 *              rw_enter(OBJECT_RWLOCK(op), RW_READER);
473 *              while (op != f(arg)) {
474 *                      rw_exit(OBJECT_RWLOCK(op));
475 *                      op = f(arg);
476 *                      if (op == NULL) {
477 *                              break;
478 *                      }
479 *                      rw_enter(OBJECT_RWLOCK(op), RW_READER);
480 *              }
481 *
482 *              return (op);
483 *      }
484 *
485 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
486 * lock reacquisition loop, while necessary, almost never executes. The function
487 * pointer f (used to obtain the object pointer from arg) has the following type
488 * definition:
489 *
490 *      typedef object_t *(*object_f)(void *arg);
491 *
492 * An object_f implementation is likely to be as simple as accessing a structure
493 * member:
494 *
495 *      object_t *
496 *      s_object(void *arg)
497 *      {
498 *              something_t *sp = arg;
499 *              return (sp->s_object);
500 *      }
501 *
502 * The flexibility of a function pointer allows the path to the object to be
503 * arbitrarily complex and also supports the notion that depending on where you
504 * are using the object, you may need to get it from someplace different.
505 *
506 * The function that releases the explicit hold is simpler because it does not
507 * have to worry about the object moving:
508 *
509 *      void
510 *      object_rele(object_t *op)
511 *      {
512 *              rw_exit(OBJECT_RWLOCK(op));
513 *      }
514 *
515 * The caller is spared these details so that obtaining and releasing an
516 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
517 * of object_hold() only needs to know that the returned object pointer is valid
518 * if not NULL and that the object will not move until released.
519 *
520 * Although object_hold() prevents an object from moving, it does not prevent it
521 * from being freed. The caller must take measures before calling object_hold()
522 * (afterwards is too late) to ensure that the held object cannot be freed. The
523 * caller must do so without accessing the unsafe object reference, so any lock
524 * or reference count used to ensure the continued existence of the object must
525 * live outside the object itself.
526 *
527 * Obtaining a new object is a special case where an explicit hold is impossible
528 * for the caller. Any function that returns a newly allocated object (either as
529 * a return value, or as an in-out paramter) must return it already held; after
530 * the caller gets it is too late, since the object cannot be safely accessed
531 * without the level of indirection described earlier. The following
532 * object_alloc() example uses the same code shown earlier to transition a new
533 * object into the state of being recognized (by the client) as a known object.
534 * The function must acquire the hold (rw_enter) before that state transition
535 * makes the object movable:
536 *
537 *      static object_t *
538 *      object_alloc(container_t *container)
539 *      {
540 *              object_t *object = kmem_cache_alloc(object_cache, 0);
541 *              ... set any initial state not set by the constructor ...
542 *              rw_enter(OBJECT_RWLOCK(object), RW_READER);
543 *              mutex_enter(&container->c_objects_lock);
544 *              list_insert_tail(&container->c_objects, object);
545 *              membar_producer();
546 *              object->o_container = container;
547 *              mutex_exit(&container->c_objects_lock);
548 *              return (object);
549 *      }
550 *
551 * Functions that implicitly acquire an object hold (any function that calls
552 * object_alloc() to supply an object for the caller) need to be carefully noted
553 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
554 * prevent all objects hashed to the affected rwlocks from ever being moved.
555 *
556 * The pointer to a held object can be hashed to the holding rwlock even after
557 * the object has been freed. Although it is possible to release the hold
558 * after freeing the object, you may decide to release the hold implicitly in
559 * whatever function frees the object, so as to release the hold as soon as
560 * possible, and for the sake of symmetry with the function that implicitly
561 * acquires the hold when it allocates the object. Here, object_free() releases
562 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
563 * matching pair with object_hold():
564 *
565 *      void
566 *      object_free(object_t *object)
567 *      {
568 *              container_t *container;
569 *
570 *              ASSERT(object_held(object));
571 *              container = object->o_container;
572 *              mutex_enter(&container->c_objects_lock);
573 *              object->o_container =
574 *                  (void *)((uintptr_t)object->o_container | 0x1);
575 *              list_remove(&container->c_objects, object);
576 *              mutex_exit(&container->c_objects_lock);
577 *              object_rele(object);
578 *              kmem_cache_free(object_cache, object);
579 *      }
580 *
581 * Note that object_free() cannot safely accept an object pointer as an argument
582 * unless the object is already held. Any function that calls object_free()
583 * needs to be carefully noted since it similarly forms a matching pair with
584 * object_hold().
585 *
586 * To complete the picture, the following callback function implements the
587 * general solution by moving objects only if they are currently unheld:
588 *
589 *      static kmem_cbrc_t
590 *      object_move(void *buf, void *newbuf, size_t size, void *arg)
591 *      {
592 *              object_t *op = buf, *np = newbuf;
593 *              container_t *container;
594 *
595 *              container = op->o_container;
596 *              if ((uintptr_t)container & 0x3) {
597 *                      return (KMEM_CBRC_DONT_KNOW);
598 *              }
599 *
600 *	        // Ensure that the container structure does not go away.
601 *              if (container_hold(container) == 0) {
602 *                      return (KMEM_CBRC_DONT_KNOW);
603 *              }
604 *
605 *              mutex_enter(&container->c_objects_lock);
606 *              if (container != op->o_container) {
607 *                      mutex_exit(&container->c_objects_lock);
608 *                      container_rele(container);
609 *                      return (KMEM_CBRC_DONT_KNOW);
610 *              }
611 *
612 *              if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
613 *                      mutex_exit(&container->c_objects_lock);
614 *                      container_rele(container);
615 *                      return (KMEM_CBRC_LATER);
616 *              }
617 *
618 *              object_move_impl(op, np); // critical section
619 *              rw_exit(OBJECT_RWLOCK(op));
620 *
621 *              op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
622 *              list_link_replace(&op->o_link_node, &np->o_link_node);
623 *              mutex_exit(&container->c_objects_lock);
624 *              container_rele(container);
625 *              return (KMEM_CBRC_YES);
626 *      }
627 *
628 * Note that object_move() must invalidate the designated o_container pointer of
629 * the old object in the same way that object_free() does, since kmem will free
630 * the object in response to the KMEM_CBRC_YES return value.
631 *
632 * The lock order in object_move() differs from object_alloc(), which locks
633 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
634 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
635 * not a problem. Holding the lock on the object list in the example above
636 * through the entire callback not only prevents the object from going away, it
637 * also allows you to lock the list elsewhere and know that none of its elements
638 * will move during iteration.
639 *
640 * Adding an explicit hold everywhere an object from the cache is used is tricky
641 * and involves much more change to client code than a cache-specific solution
642 * that leverages existing state to decide whether or not an object is
643 * movable. However, this approach has the advantage that no object remains
644 * immovable for any significant length of time, making it extremely unlikely
645 * that long-lived allocations can continue holding slabs hostage; and it works
646 * for any cache.
647 *
648 * 3. Consolidator Implementation
649 *
650 * Once the client supplies a move function that a) recognizes known objects and
651 * b) avoids moving objects that are actively in use, the remaining work is up
652 * to the consolidator to decide which objects to move and when to issue
653 * callbacks.
654 *
655 * The consolidator relies on the fact that a cache's slabs are ordered by
656 * usage. Each slab has a fixed number of objects. Depending on the slab's
657 * "color" (the offset of the first object from the beginning of the slab;
658 * offsets are staggered to mitigate false sharing of cache lines) it is either
659 * the maximum number of objects per slab determined at cache creation time or
660 * else the number closest to the maximum that fits within the space remaining
661 * after the initial offset. A completely allocated slab may contribute some
662 * internal fragmentation (per-slab overhead) but no external fragmentation, so
663 * it is of no interest to the consolidator. At the other extreme, slabs whose
664 * objects have all been freed to the slab are released to the virtual memory
665 * (VM) subsystem (objects freed to magazines are still allocated as far as the
666 * slab is concerned). External fragmentation exists when there are slabs
667 * somewhere between these extremes. A partial slab has at least one but not all
668 * of its objects allocated. The more partial slabs, and the fewer allocated
669 * objects on each of them, the higher the fragmentation. Hence the
670 * consolidator's overall strategy is to reduce the number of partial slabs by
671 * moving allocated objects from the least allocated slabs to the most allocated
672 * slabs.
673 *
674 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
675 * slabs are kept separately in an unordered list. Since the majority of slabs
676 * tend to be completely allocated (a typical unfragmented cache may have
677 * thousands of complete slabs and only a single partial slab), separating
678 * complete slabs improves the efficiency of partial slab ordering, since the
679 * complete slabs do not affect the depth or balance of the AVL tree. This
680 * ordered sequence of partial slabs acts as a "free list" supplying objects for
681 * allocation requests.
682 *
683 * Objects are always allocated from the first partial slab in the free list,
684 * where the allocation is most likely to eliminate a partial slab (by
685 * completely allocating it). Conversely, when a single object from a completely
686 * allocated slab is freed to the slab, that slab is added to the front of the
687 * free list. Since most free list activity involves highly allocated slabs
688 * coming and going at the front of the list, slabs tend naturally toward the
689 * ideal order: highly allocated at the front, sparsely allocated at the back.
690 * Slabs with few allocated objects are likely to become completely free if they
691 * keep a safe distance away from the front of the free list. Slab misorders
692 * interfere with the natural tendency of slabs to become completely free or
693 * completely allocated. For example, a slab with a single allocated object
694 * needs only a single free to escape the cache; its natural desire is
695 * frustrated when it finds itself at the front of the list where a second
696 * allocation happens just before the free could have released it. Another slab
697 * with all but one object allocated might have supplied the buffer instead, so
698 * that both (as opposed to neither) of the slabs would have been taken off the
699 * free list.
700 *
701 * Although slabs tend naturally toward the ideal order, misorders allowed by a
702 * simple list implementation defeat the consolidator's strategy of merging
703 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
704 * needs another way to fix misorders to optimize its callback strategy. One
705 * approach is to periodically scan a limited number of slabs, advancing a
706 * marker to hold the current scan position, and to move extreme misorders to
707 * the front or back of the free list and to the front or back of the current
708 * scan range. By making consecutive scan ranges overlap by one slab, the least
709 * allocated slab in the current range can be carried along from the end of one
710 * scan to the start of the next.
711 *
712 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
713 * task, however. Since most of the cache's activity is in the magazine layer,
714 * and allocations from the slab layer represent only a startup cost, the
715 * overhead of maintaining a balanced tree is not a significant concern compared
716 * to the opportunity of reducing complexity by eliminating the partial slab
717 * scanner just described. The overhead of an AVL tree is minimized by
718 * maintaining only partial slabs in the tree and keeping completely allocated
719 * slabs separately in a list. To avoid increasing the size of the slab
720 * structure the AVL linkage pointers are reused for the slab's list linkage,
721 * since the slab will always be either partial or complete, never stored both
722 * ways at the same time. To further minimize the overhead of the AVL tree the
723 * compare function that orders partial slabs by usage divides the range of
724 * allocated object counts into bins such that counts within the same bin are
725 * considered equal. Binning partial slabs makes it less likely that allocating
726 * or freeing a single object will change the slab's order, requiring a tree
727 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
728 * requiring some rebalancing of the tree). Allocation counts closest to
729 * completely free and completely allocated are left unbinned (finely sorted) to
730 * better support the consolidator's strategy of merging slabs at either
731 * extreme.
732 *
733 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
734 *
735 * The consolidator piggybacks on the kmem maintenance thread and is called on
736 * the same interval as kmem_cache_update(), once per cache every fifteen
737 * seconds. kmem maintains a running count of unallocated objects in the slab
738 * layer (cache_bufslab). The consolidator checks whether that number exceeds
739 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
740 * there is a significant number of slabs in the cache (arbitrarily a minimum
741 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
742 * working set are included in the assessment, and magazines in the depot are
743 * reaped if those objects would lift cache_bufslab above the fragmentation
744 * threshold. Once the consolidator decides that a cache is fragmented, it looks
745 * for a candidate slab to reclaim, starting at the end of the partial slab free
746 * list and scanning backwards. At first the consolidator is choosy: only a slab
747 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
748 * single allocated object, regardless of percentage). If there is difficulty
749 * finding a candidate slab, kmem raises the allocation threshold incrementally,
750 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
751 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
752 * even in the worst case of every slab in the cache being almost 7/8 allocated.
753 * The threshold can also be lowered incrementally when candidate slabs are easy
754 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
755 * is no longer fragmented.
756 *
757 * 3.2 Generating Callbacks
758 *
759 * Once an eligible slab is chosen, a callback is generated for every allocated
760 * object on the slab, in the hope that the client will move everything off the
761 * slab and make it reclaimable. Objects selected as move destinations are
762 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
763 * order (most allocated at the front, least allocated at the back) and a
764 * cooperative client, the consolidator will succeed in removing slabs from both
765 * ends of the free list, completely allocating on the one hand and completely
766 * freeing on the other. Objects selected as move destinations are allocated in
767 * the kmem maintenance thread where move requests are enqueued. A separate
768 * callback thread removes pending callbacks from the queue and calls the
769 * client. The separate thread ensures that client code (the move function) does
770 * not interfere with internal kmem maintenance tasks. A map of pending
771 * callbacks keyed by object address (the object to be moved) is checked to
772 * ensure that duplicate callbacks are not generated for the same object.
773 * Allocating the move destination (the object to move to) prevents subsequent
774 * callbacks from selecting the same destination as an earlier pending callback.
775 *
776 * Move requests can also be generated by kmem_cache_reap() when the system is
777 * desperate for memory and by kmem_cache_move_notify(), called by the client to
778 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
779 * The map of pending callbacks is protected by the same lock that protects the
780 * slab layer.
781 *
782 * When the system is desperate for memory, kmem does not bother to determine
783 * whether or not the cache exceeds the fragmentation threshold, but tries to
784 * consolidate as many slabs as possible. Normally, the consolidator chews
785 * slowly, one sparsely allocated slab at a time during each maintenance
786 * interval that the cache is fragmented. When desperate, the consolidator
787 * starts at the last partial slab and enqueues callbacks for every allocated
788 * object on every partial slab, working backwards until it reaches the first
789 * partial slab. The first partial slab, meanwhile, advances in pace with the
790 * consolidator as allocations to supply move destinations for the enqueued
791 * callbacks use up the highly allocated slabs at the front of the free list.
792 * Ideally, the overgrown free list collapses like an accordion, starting at
793 * both ends and ending at the center with a single partial slab.
794 *
795 * 3.3 Client Responses
796 *
797 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
798 * marks the slab that supplied the stuck object non-reclaimable and moves it to
799 * front of the free list. The slab remains marked as long as it remains on the
800 * free list, and it appears more allocated to the partial slab compare function
801 * than any unmarked slab, no matter how many of its objects are allocated.
802 * Since even one immovable object ties up the entire slab, the goal is to
803 * completely allocate any slab that cannot be completely freed. kmem does not
804 * bother generating callbacks to move objects from a marked slab unless the
805 * system is desperate.
806 *
807 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
808 * slab. If the client responds LATER too many times, kmem disbelieves and
809 * treats the response as a NO. The count is cleared when the slab is taken off
810 * the partial slab list or when the client moves one of the slab's objects.
811 *
812 * 4. Observability
813 *
814 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
815 * the ::kmem_slabs dcmd. For a complete description of the command, enter
816 * '::help kmem_slabs' at the mdb prompt.
817 */
818
819#include <sys/kmem_impl.h>
820#include <sys/vmem_impl.h>
821#include <sys/param.h>
822#include <sys/sysmacros.h>
823#include <sys/vm.h>
824#include <sys/proc.h>
825#include <sys/tuneable.h>
826#include <sys/systm.h>
827#include <sys/cmn_err.h>
828#include <sys/debug.h>
829#include <sys/sdt.h>
830#include <sys/mutex.h>
831#include <sys/bitmap.h>
832#include <sys/atomic.h>
833#include <sys/kobj.h>
834#include <sys/disp.h>
835#include <vm/seg_kmem.h>
836#include <sys/log.h>
837#include <sys/callb.h>
838#include <sys/taskq.h>
839#include <sys/modctl.h>
840#include <sys/reboot.h>
841#include <sys/id32.h>
842#include <sys/zone.h>
843#include <sys/netstack.h>
844#ifdef	DEBUG
845#include <sys/random.h>
846#endif
847
848extern void streams_msg_init(void);
849extern int segkp_fromheap;
850extern void segkp_cache_free(void);
851extern int callout_init_done;
852
853struct kmem_cache_kstat {
854	kstat_named_t	kmc_buf_size;
855	kstat_named_t	kmc_align;
856	kstat_named_t	kmc_chunk_size;
857	kstat_named_t	kmc_slab_size;
858	kstat_named_t	kmc_alloc;
859	kstat_named_t	kmc_alloc_fail;
860	kstat_named_t	kmc_free;
861	kstat_named_t	kmc_depot_alloc;
862	kstat_named_t	kmc_depot_free;
863	kstat_named_t	kmc_depot_contention;
864	kstat_named_t	kmc_slab_alloc;
865	kstat_named_t	kmc_slab_free;
866	kstat_named_t	kmc_buf_constructed;
867	kstat_named_t	kmc_buf_avail;
868	kstat_named_t	kmc_buf_inuse;
869	kstat_named_t	kmc_buf_total;
870	kstat_named_t	kmc_buf_max;
871	kstat_named_t	kmc_slab_create;
872	kstat_named_t	kmc_slab_destroy;
873	kstat_named_t	kmc_vmem_source;
874	kstat_named_t	kmc_hash_size;
875	kstat_named_t	kmc_hash_lookup_depth;
876	kstat_named_t	kmc_hash_rescale;
877	kstat_named_t	kmc_full_magazines;
878	kstat_named_t	kmc_empty_magazines;
879	kstat_named_t	kmc_magazine_size;
880	kstat_named_t	kmc_reap; /* number of kmem_cache_reap() calls */
881	kstat_named_t	kmc_defrag; /* attempts to defrag all partial slabs */
882	kstat_named_t	kmc_scan; /* attempts to defrag one partial slab */
883	kstat_named_t	kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
884	kstat_named_t	kmc_move_yes;
885	kstat_named_t	kmc_move_no;
886	kstat_named_t	kmc_move_later;
887	kstat_named_t	kmc_move_dont_need;
888	kstat_named_t	kmc_move_dont_know; /* obj unrecognized by client ... */
889	kstat_named_t	kmc_move_hunt_found; /* ... but found in mag layer */
890	kstat_named_t	kmc_move_slabs_freed; /* slabs freed by consolidator */
891	kstat_named_t	kmc_move_reclaimable; /* buffers, if consolidator ran */
892} kmem_cache_kstat = {
893	{ "buf_size",		KSTAT_DATA_UINT64 },
894	{ "align",		KSTAT_DATA_UINT64 },
895	{ "chunk_size",		KSTAT_DATA_UINT64 },
896	{ "slab_size",		KSTAT_DATA_UINT64 },
897	{ "alloc",		KSTAT_DATA_UINT64 },
898	{ "alloc_fail",		KSTAT_DATA_UINT64 },
899	{ "free",		KSTAT_DATA_UINT64 },
900	{ "depot_alloc",	KSTAT_DATA_UINT64 },
901	{ "depot_free",		KSTAT_DATA_UINT64 },
902	{ "depot_contention",	KSTAT_DATA_UINT64 },
903	{ "slab_alloc",		KSTAT_DATA_UINT64 },
904	{ "slab_free",		KSTAT_DATA_UINT64 },
905	{ "buf_constructed",	KSTAT_DATA_UINT64 },
906	{ "buf_avail",		KSTAT_DATA_UINT64 },
907	{ "buf_inuse",		KSTAT_DATA_UINT64 },
908	{ "buf_total",		KSTAT_DATA_UINT64 },
909	{ "buf_max",		KSTAT_DATA_UINT64 },
910	{ "slab_create",	KSTAT_DATA_UINT64 },
911	{ "slab_destroy",	KSTAT_DATA_UINT64 },
912	{ "vmem_source",	KSTAT_DATA_UINT64 },
913	{ "hash_size",		KSTAT_DATA_UINT64 },
914	{ "hash_lookup_depth",	KSTAT_DATA_UINT64 },
915	{ "hash_rescale",	KSTAT_DATA_UINT64 },
916	{ "full_magazines",	KSTAT_DATA_UINT64 },
917	{ "empty_magazines",	KSTAT_DATA_UINT64 },
918	{ "magazine_size",	KSTAT_DATA_UINT64 },
919	{ "reap",		KSTAT_DATA_UINT64 },
920	{ "defrag",		KSTAT_DATA_UINT64 },
921	{ "scan",		KSTAT_DATA_UINT64 },
922	{ "move_callbacks",	KSTAT_DATA_UINT64 },
923	{ "move_yes",		KSTAT_DATA_UINT64 },
924	{ "move_no",		KSTAT_DATA_UINT64 },
925	{ "move_later",		KSTAT_DATA_UINT64 },
926	{ "move_dont_need",	KSTAT_DATA_UINT64 },
927	{ "move_dont_know",	KSTAT_DATA_UINT64 },
928	{ "move_hunt_found",	KSTAT_DATA_UINT64 },
929	{ "move_slabs_freed",	KSTAT_DATA_UINT64 },
930	{ "move_reclaimable",	KSTAT_DATA_UINT64 },
931};
932
933static kmutex_t kmem_cache_kstat_lock;
934
935/*
936 * The default set of caches to back kmem_alloc().
937 * These sizes should be reevaluated periodically.
938 *
939 * We want allocations that are multiples of the coherency granularity
940 * (64 bytes) to be satisfied from a cache which is a multiple of 64
941 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
942 * the next kmem_cache_size greater than or equal to it must be a
943 * multiple of 64.
944 *
945 * We split the table into two sections:  size <= 4k and size > 4k.  This
946 * saves a lot of space and cache footprint in our cache tables.
947 */
948static const int kmem_alloc_sizes[] = {
949	1 * 8,
950	2 * 8,
951	3 * 8,
952	4 * 8,		5 * 8,		6 * 8,		7 * 8,
953	4 * 16,		5 * 16,		6 * 16,		7 * 16,
954	4 * 32,		5 * 32,		6 * 32,		7 * 32,
955	4 * 64,		5 * 64,		6 * 64,		7 * 64,
956	4 * 128,	5 * 128,	6 * 128,	7 * 128,
957	P2ALIGN(8192 / 7, 64),
958	P2ALIGN(8192 / 6, 64),
959	P2ALIGN(8192 / 5, 64),
960	P2ALIGN(8192 / 4, 64),
961	P2ALIGN(8192 / 3, 64),
962	P2ALIGN(8192 / 2, 64),
963};
964
965static const int kmem_big_alloc_sizes[] = {
966	2 * 4096,	3 * 4096,
967	2 * 8192,	3 * 8192,
968	4 * 8192,	5 * 8192,	6 * 8192,	7 * 8192,
969	8 * 8192,	9 * 8192,	10 * 8192,	11 * 8192,
970	12 * 8192,	13 * 8192,	14 * 8192,	15 * 8192,
971	16 * 8192
972};
973
974#define	KMEM_MAXBUF		4096
975#define	KMEM_BIG_MAXBUF_32BIT	32768
976#define	KMEM_BIG_MAXBUF		131072
977
978#define	KMEM_BIG_MULTIPLE	4096	/* big_alloc_sizes must be a multiple */
979#define	KMEM_BIG_SHIFT		12	/* lg(KMEM_BIG_MULTIPLE) */
980
981static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
982static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
983
984#define	KMEM_ALLOC_TABLE_MAX	(KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
985static size_t kmem_big_alloc_table_max = 0;	/* # of filled elements */
986
987static kmem_magtype_t kmem_magtype[] = {
988	{ 1,	8,	3200,	65536	},
989	{ 3,	16,	256,	32768	},
990	{ 7,	32,	64,	16384	},
991	{ 15,	64,	0,	8192	},
992	{ 31,	64,	0,	4096	},
993	{ 47,	64,	0,	2048	},
994	{ 63,	64,	0,	1024	},
995	{ 95,	64,	0,	512	},
996	{ 143,	64,	0,	0	},
997};
998
999static uint32_t kmem_reaping;
1000static uint32_t kmem_reaping_idspace;
1001
1002/*
1003 * kmem tunables
1004 */
1005clock_t kmem_reap_interval;	/* cache reaping rate [15 * HZ ticks] */
1006int kmem_depot_contention = 3;	/* max failed tryenters per real interval */
1007pgcnt_t kmem_reapahead = 0;	/* start reaping N pages before pageout */
1008int kmem_panic = 1;		/* whether to panic on error */
1009int kmem_logging = 1;		/* kmem_log_enter() override */
1010uint32_t kmem_mtbf = 0;		/* mean time between failures [default: off] */
1011size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
1012size_t kmem_content_log_size;	/* content log size [2% of memory] */
1013size_t kmem_failure_log_size;	/* failure log [4 pages per CPU] */
1014size_t kmem_slab_log_size;	/* slab create log [4 pages per CPU] */
1015size_t kmem_zerosized_log_size;	/* zero-sized log [4 pages per CPU] */
1016size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1017size_t kmem_lite_minsize = 0;	/* minimum buffer size for KMF_LITE */
1018size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1019int kmem_lite_pcs = 4;		/* number of PCs to store in KMF_LITE mode */
1020size_t kmem_maxverify;		/* maximum bytes to inspect in debug routines */
1021size_t kmem_minfirewall;	/* hardware-enforced redzone threshold */
1022
1023#ifdef DEBUG
1024int kmem_warn_zerosized = 1;	/* whether to warn on zero-sized KM_SLEEP */
1025#else
1026int kmem_warn_zerosized = 0;	/* whether to warn on zero-sized KM_SLEEP */
1027#endif
1028
1029int kmem_panic_zerosized = 0;	/* whether to panic on zero-sized KM_SLEEP */
1030
1031#ifdef _LP64
1032size_t	kmem_max_cached = KMEM_BIG_MAXBUF;	/* maximum kmem_alloc cache */
1033#else
1034size_t	kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1035#endif
1036
1037#ifdef DEBUG
1038int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1039#else
1040int kmem_flags = 0;
1041#endif
1042int kmem_ready;
1043
1044static kmem_cache_t	*kmem_slab_cache;
1045static kmem_cache_t	*kmem_bufctl_cache;
1046static kmem_cache_t	*kmem_bufctl_audit_cache;
1047
1048static kmutex_t		kmem_cache_lock;	/* inter-cache linkage only */
1049static list_t		kmem_caches;
1050
1051static taskq_t		*kmem_taskq;
1052static kmutex_t		kmem_flags_lock;
1053static vmem_t		*kmem_metadata_arena;
1054static vmem_t		*kmem_msb_arena;	/* arena for metadata caches */
1055static vmem_t		*kmem_cache_arena;
1056static vmem_t		*kmem_hash_arena;
1057static vmem_t		*kmem_log_arena;
1058static vmem_t		*kmem_oversize_arena;
1059static vmem_t		*kmem_va_arena;
1060static vmem_t		*kmem_default_arena;
1061static vmem_t		*kmem_firewall_va_arena;
1062static vmem_t		*kmem_firewall_arena;
1063
1064static int		kmem_zerosized;		/* # of zero-sized allocs */
1065
1066/*
1067 * kmem slab consolidator thresholds (tunables)
1068 */
1069size_t kmem_frag_minslabs = 101;	/* minimum total slabs */
1070size_t kmem_frag_numer = 1;		/* free buffers (numerator) */
1071size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1072/*
1073 * Maximum number of slabs from which to move buffers during a single
1074 * maintenance interval while the system is not low on memory.
1075 */
1076size_t kmem_reclaim_max_slabs = 1;
1077/*
1078 * Number of slabs to scan backwards from the end of the partial slab list
1079 * when searching for buffers to relocate.
1080 */
1081size_t kmem_reclaim_scan_range = 12;
1082
1083/* consolidator knobs */
1084boolean_t kmem_move_noreap;
1085boolean_t kmem_move_blocked;
1086boolean_t kmem_move_fulltilt;
1087boolean_t kmem_move_any_partial;
1088
1089#ifdef	DEBUG
1090/*
1091 * kmem consolidator debug tunables:
1092 * Ensure code coverage by occasionally running the consolidator even when the
1093 * caches are not fragmented (they may never be). These intervals are mean time
1094 * in cache maintenance intervals (kmem_cache_update).
1095 */
1096uint32_t kmem_mtb_move = 60;	/* defrag 1 slab (~15min) */
1097uint32_t kmem_mtb_reap = 1800;	/* defrag all slabs (~7.5hrs) */
1098#endif	/* DEBUG */
1099
1100static kmem_cache_t	*kmem_defrag_cache;
1101static kmem_cache_t	*kmem_move_cache;
1102static taskq_t		*kmem_move_taskq;
1103
1104static void kmem_cache_scan(kmem_cache_t *);
1105static void kmem_cache_defrag(kmem_cache_t *);
1106static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1107
1108
1109kmem_log_header_t	*kmem_transaction_log;
1110kmem_log_header_t	*kmem_content_log;
1111kmem_log_header_t	*kmem_failure_log;
1112kmem_log_header_t	*kmem_slab_log;
1113kmem_log_header_t	*kmem_zerosized_log;
1114
1115static int		kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1116
1117#define	KMEM_BUFTAG_LITE_ENTER(bt, count, caller)			\
1118	if ((count) > 0) {						\
1119		pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history;	\
1120		pc_t *_e;						\
1121		/* memmove() the old entries down one notch */		\
1122		for (_e = &_s[(count) - 1]; _e > _s; _e--)		\
1123			*_e = *(_e - 1);				\
1124		*_s = (uintptr_t)(caller);				\
1125	}
1126
1127#define	KMERR_MODIFIED	0	/* buffer modified while on freelist */
1128#define	KMERR_REDZONE	1	/* redzone violation (write past end of buf) */
1129#define	KMERR_DUPFREE	2	/* freed a buffer twice */
1130#define	KMERR_BADADDR	3	/* freed a bad (unallocated) address */
1131#define	KMERR_BADBUFTAG	4	/* buftag corrupted */
1132#define	KMERR_BADBUFCTL	5	/* bufctl corrupted */
1133#define	KMERR_BADCACHE	6	/* freed a buffer to the wrong cache */
1134#define	KMERR_BADSIZE	7	/* alloc size != free size */
1135#define	KMERR_BADBASE	8	/* buffer base address wrong */
1136
1137struct {
1138	hrtime_t	kmp_timestamp;	/* timestamp of panic */
1139	int		kmp_error;	/* type of kmem error */
1140	void		*kmp_buffer;	/* buffer that induced panic */
1141	void		*kmp_realbuf;	/* real start address for buffer */
1142	kmem_cache_t	*kmp_cache;	/* buffer's cache according to client */
1143	kmem_cache_t	*kmp_realcache;	/* actual cache containing buffer */
1144	kmem_slab_t	*kmp_slab;	/* slab accoring to kmem_findslab() */
1145	kmem_bufctl_t	*kmp_bufctl;	/* bufctl */
1146} kmem_panic_info;
1147
1148
1149static void
1150copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1151{
1152	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1153	uint64_t *buf = buf_arg;
1154
1155	while (buf < bufend)
1156		*buf++ = pattern;
1157}
1158
1159static void *
1160verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1161{
1162	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1163	uint64_t *buf;
1164
1165	for (buf = buf_arg; buf < bufend; buf++)
1166		if (*buf != pattern)
1167			return (buf);
1168	return (NULL);
1169}
1170
1171static void *
1172verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1173{
1174	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1175	uint64_t *buf;
1176
1177	for (buf = buf_arg; buf < bufend; buf++) {
1178		if (*buf != old) {
1179			copy_pattern(old, buf_arg,
1180			    (char *)buf - (char *)buf_arg);
1181			return (buf);
1182		}
1183		*buf = new;
1184	}
1185
1186	return (NULL);
1187}
1188
1189static void
1190kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1191{
1192	kmem_cache_t *cp;
1193
1194	mutex_enter(&kmem_cache_lock);
1195	for (cp = list_head(&kmem_caches); cp != NULL;
1196	    cp = list_next(&kmem_caches, cp))
1197		if (tq != NULL)
1198			(void) taskq_dispatch(tq, (task_func_t *)func, cp,
1199			    tqflag);
1200		else
1201			func(cp);
1202	mutex_exit(&kmem_cache_lock);
1203}
1204
1205static void
1206kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1207{
1208	kmem_cache_t *cp;
1209
1210	mutex_enter(&kmem_cache_lock);
1211	for (cp = list_head(&kmem_caches); cp != NULL;
1212	    cp = list_next(&kmem_caches, cp)) {
1213		if (!(cp->cache_cflags & KMC_IDENTIFIER))
1214			continue;
1215		if (tq != NULL)
1216			(void) taskq_dispatch(tq, (task_func_t *)func, cp,
1217			    tqflag);
1218		else
1219			func(cp);
1220	}
1221	mutex_exit(&kmem_cache_lock);
1222}
1223
1224/*
1225 * Debugging support.  Given a buffer address, find its slab.
1226 */
1227static kmem_slab_t *
1228kmem_findslab(kmem_cache_t *cp, void *buf)
1229{
1230	kmem_slab_t *sp;
1231
1232	mutex_enter(&cp->cache_lock);
1233	for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1234	    sp = list_next(&cp->cache_complete_slabs, sp)) {
1235		if (KMEM_SLAB_MEMBER(sp, buf)) {
1236			mutex_exit(&cp->cache_lock);
1237			return (sp);
1238		}
1239	}
1240	for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1241	    sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1242		if (KMEM_SLAB_MEMBER(sp, buf)) {
1243			mutex_exit(&cp->cache_lock);
1244			return (sp);
1245		}
1246	}
1247	mutex_exit(&cp->cache_lock);
1248
1249	return (NULL);
1250}
1251
1252static void
1253kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1254{
1255	kmem_buftag_t *btp = NULL;
1256	kmem_bufctl_t *bcp = NULL;
1257	kmem_cache_t *cp = cparg;
1258	kmem_slab_t *sp;
1259	uint64_t *off;
1260	void *buf = bufarg;
1261
1262	kmem_logging = 0;	/* stop logging when a bad thing happens */
1263
1264	kmem_panic_info.kmp_timestamp = gethrtime();
1265
1266	sp = kmem_findslab(cp, buf);
1267	if (sp == NULL) {
1268		for (cp = list_tail(&kmem_caches); cp != NULL;
1269		    cp = list_prev(&kmem_caches, cp)) {
1270			if ((sp = kmem_findslab(cp, buf)) != NULL)
1271				break;
1272		}
1273	}
1274
1275	if (sp == NULL) {
1276		cp = NULL;
1277		error = KMERR_BADADDR;
1278	} else {
1279		if (cp != cparg)
1280			error = KMERR_BADCACHE;
1281		else
1282			buf = (char *)bufarg - ((uintptr_t)bufarg -
1283			    (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1284		if (buf != bufarg)
1285			error = KMERR_BADBASE;
1286		if (cp->cache_flags & KMF_BUFTAG)
1287			btp = KMEM_BUFTAG(cp, buf);
1288		if (cp->cache_flags & KMF_HASH) {
1289			mutex_enter(&cp->cache_lock);
1290			for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1291				if (bcp->bc_addr == buf)
1292					break;
1293			mutex_exit(&cp->cache_lock);
1294			if (bcp == NULL && btp != NULL)
1295				bcp = btp->bt_bufctl;
1296			if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1297			    NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1298			    bcp->bc_addr != buf) {
1299				error = KMERR_BADBUFCTL;
1300				bcp = NULL;
1301			}
1302		}
1303	}
1304
1305	kmem_panic_info.kmp_error = error;
1306	kmem_panic_info.kmp_buffer = bufarg;
1307	kmem_panic_info.kmp_realbuf = buf;
1308	kmem_panic_info.kmp_cache = cparg;
1309	kmem_panic_info.kmp_realcache = cp;
1310	kmem_panic_info.kmp_slab = sp;
1311	kmem_panic_info.kmp_bufctl = bcp;
1312
1313	printf("kernel memory allocator: ");
1314
1315	switch (error) {
1316
1317	case KMERR_MODIFIED:
1318		printf("buffer modified after being freed\n");
1319		off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1320		if (off == NULL)	/* shouldn't happen */
1321			off = buf;
1322		printf("modification occurred at offset 0x%lx "
1323		    "(0x%llx replaced by 0x%llx)\n",
1324		    (uintptr_t)off - (uintptr_t)buf,
1325		    (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1326		break;
1327
1328	case KMERR_REDZONE:
1329		printf("redzone violation: write past end of buffer\n");
1330		break;
1331
1332	case KMERR_BADADDR:
1333		printf("invalid free: buffer not in cache\n");
1334		break;
1335
1336	case KMERR_DUPFREE:
1337		printf("duplicate free: buffer freed twice\n");
1338		break;
1339
1340	case KMERR_BADBUFTAG:
1341		printf("boundary tag corrupted\n");
1342		printf("bcp ^ bxstat = %lx, should be %lx\n",
1343		    (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1344		    KMEM_BUFTAG_FREE);
1345		break;
1346
1347	case KMERR_BADBUFCTL:
1348		printf("bufctl corrupted\n");
1349		break;
1350
1351	case KMERR_BADCACHE:
1352		printf("buffer freed to wrong cache\n");
1353		printf("buffer was allocated from %s,\n", cp->cache_name);
1354		printf("caller attempting free to %s.\n", cparg->cache_name);
1355		break;
1356
1357	case KMERR_BADSIZE:
1358		printf("bad free: free size (%u) != alloc size (%u)\n",
1359		    KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1360		    KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1361		break;
1362
1363	case KMERR_BADBASE:
1364		printf("bad free: free address (%p) != alloc address (%p)\n",
1365		    bufarg, buf);
1366		break;
1367	}
1368
1369	printf("buffer=%p  bufctl=%p  cache: %s\n",
1370	    bufarg, (void *)bcp, cparg->cache_name);
1371
1372	if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1373	    error != KMERR_BADBUFCTL) {
1374		int d;
1375		timestruc_t ts;
1376		kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1377
1378		hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1379		printf("previous transaction on buffer %p:\n", buf);
1380		printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
1381		    (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1382		    (void *)sp, cp->cache_name);
1383		for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1384			ulong_t off;
1385			char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1386			printf("%s+%lx\n", sym ? sym : "?", off);
1387		}
1388	}
1389	if (kmem_panic > 0)
1390		panic("kernel heap corruption detected");
1391	if (kmem_panic == 0)
1392		debug_enter(NULL);
1393	kmem_logging = 1;	/* resume logging */
1394}
1395
1396static kmem_log_header_t *
1397kmem_log_init(size_t logsize)
1398{
1399	kmem_log_header_t *lhp;
1400	int nchunks = 4 * max_ncpus;
1401	size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1402	int i;
1403
1404	/*
1405	 * Make sure that lhp->lh_cpu[] is nicely aligned
1406	 * to prevent false sharing of cache lines.
1407	 */
1408	lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1409	lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1410	    NULL, NULL, VM_SLEEP);
1411	bzero(lhp, lhsize);
1412
1413	mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1414	lhp->lh_nchunks = nchunks;
1415	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1416	lhp->lh_base = vmem_alloc(kmem_log_arena,
1417	    lhp->lh_chunksize * nchunks, VM_SLEEP);
1418	lhp->lh_free = vmem_alloc(kmem_log_arena,
1419	    nchunks * sizeof (int), VM_SLEEP);
1420	bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1421
1422	for (i = 0; i < max_ncpus; i++) {
1423		kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1424		mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1425		clhp->clh_chunk = i;
1426	}
1427
1428	for (i = max_ncpus; i < nchunks; i++)
1429		lhp->lh_free[i] = i;
1430
1431	lhp->lh_head = max_ncpus;
1432	lhp->lh_tail = 0;
1433
1434	return (lhp);
1435}
1436
1437static void *
1438kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1439{
1440	void *logspace;
1441	kmem_cpu_log_header_t *clhp;
1442
1443	if (lhp == NULL || kmem_logging == 0 || panicstr)
1444		return (NULL);
1445
1446	clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1447
1448	mutex_enter(&clhp->clh_lock);
1449	clhp->clh_hits++;
1450	if (size > clhp->clh_avail) {
1451		mutex_enter(&lhp->lh_lock);
1452		lhp->lh_hits++;
1453		lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1454		lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1455		clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1456		lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1457		clhp->clh_current = lhp->lh_base +
1458		    clhp->clh_chunk * lhp->lh_chunksize;
1459		clhp->clh_avail = lhp->lh_chunksize;
1460		if (size > lhp->lh_chunksize)
1461			size = lhp->lh_chunksize;
1462		mutex_exit(&lhp->lh_lock);
1463	}
1464	logspace = clhp->clh_current;
1465	clhp->clh_current += size;
1466	clhp->clh_avail -= size;
1467	bcopy(data, logspace, size);
1468	mutex_exit(&clhp->clh_lock);
1469	return (logspace);
1470}
1471
1472#define	KMEM_AUDIT(lp, cp, bcp)						\
1473{									\
1474	kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp);	\
1475	_bcp->bc_timestamp = gethrtime();				\
1476	_bcp->bc_thread = curthread;					\
1477	_bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH);	\
1478	_bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp));	\
1479}
1480
1481static void
1482kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1483    kmem_slab_t *sp, void *addr)
1484{
1485	kmem_bufctl_audit_t bca;
1486
1487	bzero(&bca, sizeof (kmem_bufctl_audit_t));
1488	bca.bc_addr = addr;
1489	bca.bc_slab = sp;
1490	bca.bc_cache = cp;
1491	KMEM_AUDIT(lp, cp, &bca);
1492}
1493
1494/*
1495 * Create a new slab for cache cp.
1496 */
1497static kmem_slab_t *
1498kmem_slab_create(kmem_cache_t *cp, int kmflag)
1499{
1500	size_t slabsize = cp->cache_slabsize;
1501	size_t chunksize = cp->cache_chunksize;
1502	int cache_flags = cp->cache_flags;
1503	size_t color, chunks;
1504	char *buf, *slab;
1505	kmem_slab_t *sp;
1506	kmem_bufctl_t *bcp;
1507	vmem_t *vmp = cp->cache_arena;
1508
1509	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1510
1511	color = cp->cache_color + cp->cache_align;
1512	if (color > cp->cache_maxcolor)
1513		color = cp->cache_mincolor;
1514	cp->cache_color = color;
1515
1516	slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1517
1518	if (slab == NULL)
1519		goto vmem_alloc_failure;
1520
1521	ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1522
1523	/*
1524	 * Reverify what was already checked in kmem_cache_set_move(), since the
1525	 * consolidator depends (for correctness) on slabs being initialized
1526	 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1527	 * clients to distinguish uninitialized memory from known objects).
1528	 */
1529	ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1530	if (!(cp->cache_cflags & KMC_NOTOUCH))
1531		copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1532
1533	if (cache_flags & KMF_HASH) {
1534		if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1535			goto slab_alloc_failure;
1536		chunks = (slabsize - color) / chunksize;
1537	} else {
1538		sp = KMEM_SLAB(cp, slab);
1539		chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1540	}
1541
1542	sp->slab_cache	= cp;
1543	sp->slab_head	= NULL;
1544	sp->slab_refcnt	= 0;
1545	sp->slab_base	= buf = slab + color;
1546	sp->slab_chunks	= chunks;
1547	sp->slab_stuck_offset = (uint32_t)-1;
1548	sp->slab_later_count = 0;
1549	sp->slab_flags = 0;
1550
1551	ASSERT(chunks > 0);
1552	while (chunks-- != 0) {
1553		if (cache_flags & KMF_HASH) {
1554			bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1555			if (bcp == NULL)
1556				goto bufctl_alloc_failure;
1557			if (cache_flags & KMF_AUDIT) {
1558				kmem_bufctl_audit_t *bcap =
1559				    (kmem_bufctl_audit_t *)bcp;
1560				bzero(bcap, sizeof (kmem_bufctl_audit_t));
1561				bcap->bc_cache = cp;
1562			}
1563			bcp->bc_addr = buf;
1564			bcp->bc_slab = sp;
1565		} else {
1566			bcp = KMEM_BUFCTL(cp, buf);
1567		}
1568		if (cache_flags & KMF_BUFTAG) {
1569			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1570			btp->bt_redzone = KMEM_REDZONE_PATTERN;
1571			btp->bt_bufctl = bcp;
1572			btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1573			if (cache_flags & KMF_DEADBEEF) {
1574				copy_pattern(KMEM_FREE_PATTERN, buf,
1575				    cp->cache_verify);
1576			}
1577		}
1578		bcp->bc_next = sp->slab_head;
1579		sp->slab_head = bcp;
1580		buf += chunksize;
1581	}
1582
1583	kmem_log_event(kmem_slab_log, cp, sp, slab);
1584
1585	return (sp);
1586
1587bufctl_alloc_failure:
1588
1589	while ((bcp = sp->slab_head) != NULL) {
1590		sp->slab_head = bcp->bc_next;
1591		kmem_cache_free(cp->cache_bufctl_cache, bcp);
1592	}
1593	kmem_cache_free(kmem_slab_cache, sp);
1594
1595slab_alloc_failure:
1596
1597	vmem_free(vmp, slab, slabsize);
1598
1599vmem_alloc_failure:
1600
1601	kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1602	atomic_inc_64(&cp->cache_alloc_fail);
1603
1604	return (NULL);
1605}
1606
1607/*
1608 * Destroy a slab.
1609 */
1610static void
1611kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1612{
1613	vmem_t *vmp = cp->cache_arena;
1614	void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1615
1616	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1617	ASSERT(sp->slab_refcnt == 0);
1618
1619	if (cp->cache_flags & KMF_HASH) {
1620		kmem_bufctl_t *bcp;
1621		while ((bcp = sp->slab_head) != NULL) {
1622			sp->slab_head = bcp->bc_next;
1623			kmem_cache_free(cp->cache_bufctl_cache, bcp);
1624		}
1625		kmem_cache_free(kmem_slab_cache, sp);
1626	}
1627	vmem_free(vmp, slab, cp->cache_slabsize);
1628}
1629
1630static void *
1631kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1632{
1633	kmem_bufctl_t *bcp, **hash_bucket;
1634	void *buf;
1635	boolean_t new_slab = (sp->slab_refcnt == 0);
1636
1637	ASSERT(MUTEX_HELD(&cp->cache_lock));
1638	/*
1639	 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1640	 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1641	 * slab is newly created.
1642	 */
1643	ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1644	    (sp == avl_first(&cp->cache_partial_slabs))));
1645	ASSERT(sp->slab_cache == cp);
1646
1647	cp->cache_slab_alloc++;
1648	cp->cache_bufslab--;
1649	sp->slab_refcnt++;
1650
1651	bcp = sp->slab_head;
1652	sp->slab_head = bcp->bc_next;
1653
1654	if (cp->cache_flags & KMF_HASH) {
1655		/*
1656		 * Add buffer to allocated-address hash table.
1657		 */
1658		buf = bcp->bc_addr;
1659		hash_bucket = KMEM_HASH(cp, buf);
1660		bcp->bc_next = *hash_bucket;
1661		*hash_bucket = bcp;
1662		if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1663			KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1664		}
1665	} else {
1666		buf = KMEM_BUF(cp, bcp);
1667	}
1668
1669	ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1670
1671	if (sp->slab_head == NULL) {
1672		ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1673		if (new_slab) {
1674			ASSERT(sp->slab_chunks == 1);
1675		} else {
1676			ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1677			avl_remove(&cp->cache_partial_slabs, sp);
1678			sp->slab_later_count = 0; /* clear history */
1679			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1680			sp->slab_stuck_offset = (uint32_t)-1;
1681		}
1682		list_insert_head(&cp->cache_complete_slabs, sp);
1683		cp->cache_complete_slab_count++;
1684		return (buf);
1685	}
1686
1687	ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1688	/*
1689	 * Peek to see if the magazine layer is enabled before
1690	 * we prefill.  We're not holding the cpu cache lock,
1691	 * so the peek could be wrong, but there's no harm in it.
1692	 */
1693	if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1694	    (KMEM_CPU_CACHE(cp)->cc_magsize != 0))  {
1695		kmem_slab_prefill(cp, sp);
1696		return (buf);
1697	}
1698
1699	if (new_slab) {
1700		avl_add(&cp->cache_partial_slabs, sp);
1701		return (buf);
1702	}
1703
1704	/*
1705	 * The slab is now more allocated than it was, so the
1706	 * order remains unchanged.
1707	 */
1708	ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1709	return (buf);
1710}
1711
1712/*
1713 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1714 */
1715static void *
1716kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1717{
1718	kmem_slab_t *sp;
1719	void *buf;
1720	boolean_t test_destructor;
1721
1722	mutex_enter(&cp->cache_lock);
1723	test_destructor = (cp->cache_slab_alloc == 0);
1724	sp = avl_first(&cp->cache_partial_slabs);
1725	if (sp == NULL) {
1726		ASSERT(cp->cache_bufslab == 0);
1727
1728		/*
1729		 * The freelist is empty.  Create a new slab.
1730		 */
1731		mutex_exit(&cp->cache_lock);
1732		if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1733			return (NULL);
1734		}
1735		mutex_enter(&cp->cache_lock);
1736		cp->cache_slab_create++;
1737		if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1738			cp->cache_bufmax = cp->cache_buftotal;
1739		cp->cache_bufslab += sp->slab_chunks;
1740	}
1741
1742	buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1743	ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1744	    (cp->cache_complete_slab_count +
1745	    avl_numnodes(&cp->cache_partial_slabs) +
1746	    (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1747	mutex_exit(&cp->cache_lock);
1748
1749	if (test_destructor && cp->cache_destructor != NULL) {
1750		/*
1751		 * On the first kmem_slab_alloc(), assert that it is valid to
1752		 * call the destructor on a newly constructed object without any
1753		 * client involvement.
1754		 */
1755		if ((cp->cache_constructor == NULL) ||
1756		    cp->cache_constructor(buf, cp->cache_private,
1757		    kmflag) == 0) {
1758			cp->cache_destructor(buf, cp->cache_private);
1759		}
1760		copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1761		    cp->cache_bufsize);
1762		if (cp->cache_flags & KMF_DEADBEEF) {
1763			copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1764		}
1765	}
1766
1767	return (buf);
1768}
1769
1770static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1771
1772/*
1773 * Free a raw (unconstructed) buffer to cp's slab layer.
1774 */
1775static void
1776kmem_slab_free(kmem_cache_t *cp, void *buf)
1777{
1778	kmem_slab_t *sp;
1779	kmem_bufctl_t *bcp, **prev_bcpp;
1780
1781	ASSERT(buf != NULL);
1782
1783	mutex_enter(&cp->cache_lock);
1784	cp->cache_slab_free++;
1785
1786	if (cp->cache_flags & KMF_HASH) {
1787		/*
1788		 * Look up buffer in allocated-address hash table.
1789		 */
1790		prev_bcpp = KMEM_HASH(cp, buf);
1791		while ((bcp = *prev_bcpp) != NULL) {
1792			if (bcp->bc_addr == buf) {
1793				*prev_bcpp = bcp->bc_next;
1794				sp = bcp->bc_slab;
1795				break;
1796			}
1797			cp->cache_lookup_depth++;
1798			prev_bcpp = &bcp->bc_next;
1799		}
1800	} else {
1801		bcp = KMEM_BUFCTL(cp, buf);
1802		sp = KMEM_SLAB(cp, buf);
1803	}
1804
1805	if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1806		mutex_exit(&cp->cache_lock);
1807		kmem_error(KMERR_BADADDR, cp, buf);
1808		return;
1809	}
1810
1811	if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1812		/*
1813		 * If this is the buffer that prevented the consolidator from
1814		 * clearing the slab, we can reset the slab flags now that the
1815		 * buffer is freed. (It makes sense to do this in
1816		 * kmem_cache_free(), where the client gives up ownership of the
1817		 * buffer, but on the hot path the test is too expensive.)
1818		 */
1819		kmem_slab_move_yes(cp, sp, buf);
1820	}
1821
1822	if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1823		if (cp->cache_flags & KMF_CONTENTS)
1824			((kmem_bufctl_audit_t *)bcp)->bc_contents =
1825			    kmem_log_enter(kmem_content_log, buf,
1826			    cp->cache_contents);
1827		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1828	}
1829
1830	bcp->bc_next = sp->slab_head;
1831	sp->slab_head = bcp;
1832
1833	cp->cache_bufslab++;
1834	ASSERT(sp->slab_refcnt >= 1);
1835
1836	if (--sp->slab_refcnt == 0) {
1837		/*
1838		 * There are no outstanding allocations from this slab,
1839		 * so we can reclaim the memory.
1840		 */
1841		if (sp->slab_chunks == 1) {
1842			list_remove(&cp->cache_complete_slabs, sp);
1843			cp->cache_complete_slab_count--;
1844		} else {
1845			avl_remove(&cp->cache_partial_slabs, sp);
1846		}
1847
1848		cp->cache_buftotal -= sp->slab_chunks;
1849		cp->cache_bufslab -= sp->slab_chunks;
1850		/*
1851		 * Defer releasing the slab to the virtual memory subsystem
1852		 * while there is a pending move callback, since we guarantee
1853		 * that buffers passed to the move callback have only been
1854		 * touched by kmem or by the client itself. Since the memory
1855		 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1856		 * set at least one of the two lowest order bits, the client can
1857		 * test those bits in the move callback to determine whether or
1858		 * not it knows about the buffer (assuming that the client also
1859		 * sets one of those low order bits whenever it frees a buffer).
1860		 */
1861		if (cp->cache_defrag == NULL ||
1862		    (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1863		    !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1864			cp->cache_slab_destroy++;
1865			mutex_exit(&cp->cache_lock);
1866			kmem_slab_destroy(cp, sp);
1867		} else {
1868			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1869			/*
1870			 * Slabs are inserted at both ends of the deadlist to
1871			 * distinguish between slabs freed while move callbacks
1872			 * are pending (list head) and a slab freed while the
1873			 * lock is dropped in kmem_move_buffers() (list tail) so
1874			 * that in both cases slab_destroy() is called from the
1875			 * right context.
1876			 */
1877			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1878				list_insert_tail(deadlist, sp);
1879			} else {
1880				list_insert_head(deadlist, sp);
1881			}
1882			cp->cache_defrag->kmd_deadcount++;
1883			mutex_exit(&cp->cache_lock);
1884		}
1885		return;
1886	}
1887
1888	if (bcp->bc_next == NULL) {
1889		/* Transition the slab from completely allocated to partial. */
1890		ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1891		ASSERT(sp->slab_chunks > 1);
1892		list_remove(&cp->cache_complete_slabs, sp);
1893		cp->cache_complete_slab_count--;
1894		avl_add(&cp->cache_partial_slabs, sp);
1895	} else {
1896		(void) avl_update_gt(&cp->cache_partial_slabs, sp);
1897	}
1898
1899	ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1900	    (cp->cache_complete_slab_count +
1901	    avl_numnodes(&cp->cache_partial_slabs) +
1902	    (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1903	mutex_exit(&cp->cache_lock);
1904}
1905
1906/*
1907 * Return -1 if kmem_error, 1 if constructor fails, 0 if successful.
1908 */
1909static int
1910kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1911    caddr_t caller)
1912{
1913	kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1914	kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1915	uint32_t mtbf;
1916
1917	if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1918		kmem_error(KMERR_BADBUFTAG, cp, buf);
1919		return (-1);
1920	}
1921
1922	btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1923
1924	if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1925		kmem_error(KMERR_BADBUFCTL, cp, buf);
1926		return (-1);
1927	}
1928
1929	if (cp->cache_flags & KMF_DEADBEEF) {
1930		if (!construct && (cp->cache_flags & KMF_LITE)) {
1931			if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1932				kmem_error(KMERR_MODIFIED, cp, buf);
1933				return (-1);
1934			}
1935			if (cp->cache_constructor != NULL)
1936				*(uint64_t *)buf = btp->bt_redzone;
1937			else
1938				*(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1939		} else {
1940			construct = 1;
1941			if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1942			    KMEM_UNINITIALIZED_PATTERN, buf,
1943			    cp->cache_verify)) {
1944				kmem_error(KMERR_MODIFIED, cp, buf);
1945				return (-1);
1946			}
1947		}
1948	}
1949	btp->bt_redzone = KMEM_REDZONE_PATTERN;
1950
1951	if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1952	    gethrtime() % mtbf == 0 &&
1953	    (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1954		kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1955		if (!construct && cp->cache_destructor != NULL)
1956			cp->cache_destructor(buf, cp->cache_private);
1957	} else {
1958		mtbf = 0;
1959	}
1960
1961	if (mtbf || (construct && cp->cache_constructor != NULL &&
1962	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1963		atomic_inc_64(&cp->cache_alloc_fail);
1964		btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1965		if (cp->cache_flags & KMF_DEADBEEF)
1966			copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1967		kmem_slab_free(cp, buf);
1968		return (1);
1969	}
1970
1971	if (cp->cache_flags & KMF_AUDIT) {
1972		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1973	}
1974
1975	if ((cp->cache_flags & KMF_LITE) &&
1976	    !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1977		KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1978	}
1979
1980	return (0);
1981}
1982
1983static int
1984kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1985{
1986	kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1987	kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1988	kmem_slab_t *sp;
1989
1990	if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
1991		if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1992			kmem_error(KMERR_DUPFREE, cp, buf);
1993			return (-1);
1994		}
1995		sp = kmem_findslab(cp, buf);
1996		if (sp == NULL || sp->slab_cache != cp)
1997			kmem_error(KMERR_BADADDR, cp, buf);
1998		else
1999			kmem_error(KMERR_REDZONE, cp, buf);
2000		return (-1);
2001	}
2002
2003	btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2004
2005	if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2006		kmem_error(KMERR_BADBUFCTL, cp, buf);
2007		return (-1);
2008	}
2009
2010	if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2011		kmem_error(KMERR_REDZONE, cp, buf);
2012		return (-1);
2013	}
2014
2015	if (cp->cache_flags & KMF_AUDIT) {
2016		if (cp->cache_flags & KMF_CONTENTS)
2017			bcp->bc_contents = kmem_log_enter(kmem_content_log,
2018			    buf, cp->cache_contents);
2019		KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2020	}
2021
2022	if ((cp->cache_flags & KMF_LITE) &&
2023	    !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2024		KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2025	}
2026
2027	if (cp->cache_flags & KMF_DEADBEEF) {
2028		if (cp->cache_flags & KMF_LITE)
2029			btp->bt_redzone = *(uint64_t *)buf;
2030		else if (cp->cache_destructor != NULL)
2031			cp->cache_destructor(buf, cp->cache_private);
2032
2033		copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2034	}
2035
2036	return (0);
2037}
2038
2039/*
2040 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2041 */
2042static void
2043kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2044{
2045	int round;
2046
2047	ASSERT(!list_link_active(&cp->cache_link) ||
2048	    taskq_member(kmem_taskq, curthread));
2049
2050	for (round = 0; round < nrounds; round++) {
2051		void *buf = mp->mag_round[round];
2052
2053		if (cp->cache_flags & KMF_DEADBEEF) {
2054			if (verify_pattern(KMEM_FREE_PATTERN, buf,
2055			    cp->cache_verify) != NULL) {
2056				kmem_error(KMERR_MODIFIED, cp, buf);
2057				continue;
2058			}
2059			if ((cp->cache_flags & KMF_LITE) &&
2060			    cp->cache_destructor != NULL) {
2061				kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2062				*(uint64_t *)buf = btp->bt_redzone;
2063				cp->cache_destructor(buf, cp->cache_private);
2064				*(uint64_t *)buf = KMEM_FREE_PATTERN;
2065			}
2066		} else if (cp->cache_destructor != NULL) {
2067			cp->cache_destructor(buf, cp->cache_private);
2068		}
2069
2070		kmem_slab_free(cp, buf);
2071	}
2072	ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2073	kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2074}
2075
2076/*
2077 * Allocate a magazine from the depot.
2078 */
2079static kmem_magazine_t *
2080kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2081{
2082	kmem_magazine_t *mp;
2083
2084	/*
2085	 * If we can't get the depot lock without contention,
2086	 * update our contention count.  We use the depot
2087	 * contention rate to determine whether we need to
2088	 * increase the magazine size for better scalability.
2089	 */
2090	if (!mutex_tryenter(&cp->cache_depot_lock)) {
2091		mutex_enter(&cp->cache_depot_lock);
2092		cp->cache_depot_contention++;
2093	}
2094
2095	if ((mp = mlp->ml_list) != NULL) {
2096		ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2097		mlp->ml_list = mp->mag_next;
2098		if (--mlp->ml_total < mlp->ml_min)
2099			mlp->ml_min = mlp->ml_total;
2100		mlp->ml_alloc++;
2101	}
2102
2103	mutex_exit(&cp->cache_depot_lock);
2104
2105	return (mp);
2106}
2107
2108/*
2109 * Free a magazine to the depot.
2110 */
2111static void
2112kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2113{
2114	mutex_enter(&cp->cache_depot_lock);
2115	ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2116	mp->mag_next = mlp->ml_list;
2117	mlp->ml_list = mp;
2118	mlp->ml_total++;
2119	mutex_exit(&cp->cache_depot_lock);
2120}
2121
2122/*
2123 * Update the working set statistics for cp's depot.
2124 */
2125static void
2126kmem_depot_ws_update(kmem_cache_t *cp)
2127{
2128	mutex_enter(&cp->cache_depot_lock);
2129	cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2130	cp->cache_full.ml_min = cp->cache_full.ml_total;
2131	cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2132	cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2133	mutex_exit(&cp->cache_depot_lock);
2134}
2135
2136/*
2137 * Set the working set statistics for cp's depot to zero.  (Everything is
2138 * eligible for reaping.)
2139 */
2140static void
2141kmem_depot_ws_zero(kmem_cache_t *cp)
2142{
2143	mutex_enter(&cp->cache_depot_lock);
2144	cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2145	cp->cache_full.ml_min = cp->cache_full.ml_total;
2146	cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2147	cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2148	mutex_exit(&cp->cache_depot_lock);
2149}
2150
2151/*
2152 * The number of bytes to reap before we call kpreempt(). The default (1MB)
2153 * causes us to preempt reaping up to hundreds of times per second. Using a
2154 * larger value (1GB) causes this to have virtually no effect.
2155 */
2156size_t kmem_reap_preempt_bytes = 1024 * 1024;
2157
2158/*
2159 * Reap all magazines that have fallen out of the depot's working set.
2160 */
2161static void
2162kmem_depot_ws_reap(kmem_cache_t *cp)
2163{
2164	size_t bytes = 0;
2165	long reap;
2166	kmem_magazine_t *mp;
2167
2168	ASSERT(!list_link_active(&cp->cache_link) ||
2169	    taskq_member(kmem_taskq, curthread));
2170
2171	reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2172	while (reap-- &&
2173	    (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2174		kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2175		bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2176		if (bytes > kmem_reap_preempt_bytes) {
2177			kpreempt(KPREEMPT_SYNC);
2178			bytes = 0;
2179		}
2180	}
2181
2182	reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2183	while (reap-- &&
2184	    (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2185		kmem_magazine_destroy(cp, mp, 0);
2186		bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2187		if (bytes > kmem_reap_preempt_bytes) {
2188			kpreempt(KPREEMPT_SYNC);
2189			bytes = 0;
2190		}
2191	}
2192}
2193
2194static void
2195kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2196{
2197	ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2198	    (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2199	ASSERT(ccp->cc_magsize > 0);
2200
2201	ccp->cc_ploaded = ccp->cc_loaded;
2202	ccp->cc_prounds = ccp->cc_rounds;
2203	ccp->cc_loaded = mp;
2204	ccp->cc_rounds = rounds;
2205}
2206
2207/*
2208 * Intercept kmem alloc/free calls during crash dump in order to avoid
2209 * changing kmem state while memory is being saved to the dump device.
2210 * Otherwise, ::kmem_verify will report "corrupt buffers".  Note that
2211 * there are no locks because only one CPU calls kmem during a crash
2212 * dump. To enable this feature, first create the associated vmem
2213 * arena with VMC_DUMPSAFE.
2214 */
2215static void *kmem_dump_start;	/* start of pre-reserved heap */
2216static void *kmem_dump_end;	/* end of heap area */
2217static void *kmem_dump_curr;	/* current free heap pointer */
2218static size_t kmem_dump_size;	/* size of heap area */
2219
2220/* append to each buf created in the pre-reserved heap */
2221typedef struct kmem_dumpctl {
2222	void	*kdc_next;	/* cache dump free list linkage */
2223} kmem_dumpctl_t;
2224
2225#define	KMEM_DUMPCTL(cp, buf)	\
2226	((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2227	    sizeof (void *)))
2228
2229/* set non zero for full report */
2230uint_t kmem_dump_verbose = 0;
2231
2232/* stats for overize heap */
2233uint_t kmem_dump_oversize_allocs = 0;
2234uint_t kmem_dump_oversize_max = 0;
2235
2236static void
2237kmem_dumppr(char **pp, char *e, const char *format, ...)
2238{
2239	char *p = *pp;
2240
2241	if (p < e) {
2242		int n;
2243		va_list ap;
2244
2245		va_start(ap, format);
2246		n = vsnprintf(p, e - p, format, ap);
2247		va_end(ap);
2248		*pp = p + n;
2249	}
2250}
2251
2252/*
2253 * Called when dumpadm(1M) configures dump parameters.
2254 */
2255void
2256kmem_dump_init(size_t size)
2257{
2258	/* Our caller ensures size is always set. */
2259	ASSERT3U(size, >, 0);
2260
2261	if (kmem_dump_start != NULL)
2262		kmem_free(kmem_dump_start, kmem_dump_size);
2263
2264	kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2265	kmem_dump_size = size;
2266	kmem_dump_curr = kmem_dump_start;
2267	kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2268	copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2269}
2270
2271/*
2272 * Set flag for each kmem_cache_t if is safe to use alternate dump
2273 * memory. Called just before panic crash dump starts. Set the flag
2274 * for the calling CPU.
2275 */
2276void
2277kmem_dump_begin(void)
2278{
2279	kmem_cache_t *cp;
2280
2281	ASSERT(panicstr != NULL);
2282
2283	for (cp = list_head(&kmem_caches); cp != NULL;
2284	    cp = list_next(&kmem_caches, cp)) {
2285		kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2286
2287		if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2288			cp->cache_flags |= KMF_DUMPDIVERT;
2289			ccp->cc_flags |= KMF_DUMPDIVERT;
2290			ccp->cc_dump_rounds = ccp->cc_rounds;
2291			ccp->cc_dump_prounds = ccp->cc_prounds;
2292			ccp->cc_rounds = ccp->cc_prounds = -1;
2293		} else {
2294			cp->cache_flags |= KMF_DUMPUNSAFE;
2295			ccp->cc_flags |= KMF_DUMPUNSAFE;
2296		}
2297	}
2298}
2299
2300/*
2301 * finished dump intercept
2302 * print any warnings on the console
2303 * return verbose information to dumpsys() in the given buffer
2304 */
2305size_t
2306kmem_dump_finish(char *buf, size_t size)
2307{
2308	int percent = 0;
2309	size_t used;
2310	char *e = buf + size;
2311	char *p = buf;
2312
2313	if (kmem_dump_curr == kmem_dump_end) {
2314		cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
2315		    "bytes: kmem state in dump may be inconsistent",
2316		    kmem_dump_size);
2317	}
2318
2319	if (kmem_dump_verbose == 0)
2320		return (0);
2321
2322	used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2323	percent = (used * 100) / kmem_dump_size;
2324
2325	kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2326	kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2327	kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2328	kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2329	    kmem_dump_oversize_allocs);
2330	kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2331	    kmem_dump_oversize_max);
2332
2333	/* return buffer size used */
2334	if (p < e)
2335		bzero(p, e - p);
2336	return (p - buf);
2337}
2338
2339/*
2340 * Allocate a constructed object from alternate dump memory.
2341 */
2342void *
2343kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2344{
2345	void *buf;
2346	void *curr;
2347	char *bufend;
2348
2349	/* return a constructed object */
2350	if ((buf = cp->cache_dump.kd_freelist) != NULL) {
2351		cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2352		return (buf);
2353	}
2354
2355	/* create a new constructed object */
2356	curr = kmem_dump_curr;
2357	buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2358	bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2359
2360	/* hat layer objects cannot cross a page boundary */
2361	if (cp->cache_align < PAGESIZE) {
2362		char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2363		if (bufend > page) {
2364			bufend += page - (char *)buf;
2365			buf = (void *)page;
2366		}
2367	}
2368
2369	/* fall back to normal alloc if reserved area is used up */
2370	if (bufend > (char *)kmem_dump_end) {
2371		kmem_dump_curr = kmem_dump_end;
2372		cp->cache_dump.kd_alloc_fails++;
2373		return (NULL);
2374	}
2375
2376	/*
2377	 * Must advance curr pointer before calling a constructor that
2378	 * may also allocate memory.
2379	 */
2380	kmem_dump_curr = bufend;
2381
2382	/* run constructor */
2383	if (cp->cache_constructor != NULL &&
2384	    cp->cache_constructor(buf, cp->cache_private, kmflag)
2385	    != 0) {
2386#ifdef DEBUG
2387		printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2388		    cp->cache_name, (void *)cp);
2389#endif
2390		/* reset curr pointer iff no allocs were done */
2391		if (kmem_dump_curr == bufend)
2392			kmem_dump_curr = curr;
2393
2394		cp->cache_dump.kd_alloc_fails++;
2395		/* fall back to normal alloc if the constructor fails */
2396		return (NULL);
2397	}
2398
2399	return (buf);
2400}
2401
2402/*
2403 * Free a constructed object in alternate dump memory.
2404 */
2405int
2406kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2407{
2408	/* save constructed buffers for next time */
2409	if ((char *)buf >= (char *)kmem_dump_start &&
2410	    (char *)buf < (char *)kmem_dump_end) {
2411		KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2412		cp->cache_dump.kd_freelist = buf;
2413		return (0);
2414	}
2415
2416	/* just drop buffers that were allocated before dump started */
2417	if (kmem_dump_curr < kmem_dump_end)
2418		return (0);
2419
2420	/* fall back to normal free if reserved area is used up */
2421	return (1);
2422}
2423
2424/*
2425 * Allocate a constructed object from cache cp.
2426 */
2427void *
2428kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2429{
2430	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2431	kmem_magazine_t *fmp;
2432	void *buf;
2433
2434	mutex_enter(&ccp->cc_lock);
2435	for (;;) {
2436		/*
2437		 * If there's an object available in the current CPU's
2438		 * loaded magazine, just take it and return.
2439		 */
2440		if (ccp->cc_rounds > 0) {
2441			buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2442			ccp->cc_alloc++;
2443			mutex_exit(&ccp->cc_lock);
2444			if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2445				if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2446					ASSERT(!(ccp->cc_flags &
2447					    KMF_DUMPDIVERT));
2448					cp->cache_dump.kd_unsafe++;
2449				}
2450				if ((ccp->cc_flags & KMF_BUFTAG) &&
2451				    kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2452				    caller()) != 0) {
2453					if (kmflag & KM_NOSLEEP)
2454						return (NULL);
2455					mutex_enter(&ccp->cc_lock);
2456					continue;
2457				}
2458			}
2459			return (buf);
2460		}
2461
2462		/*
2463		 * The loaded magazine is empty.  If the previously loaded
2464		 * magazine was full, exchange them and try again.
2465		 */
2466		if (ccp->cc_prounds > 0) {
2467			kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2468			continue;
2469		}
2470
2471		/*
2472		 * Return an alternate buffer at dump time to preserve
2473		 * the heap.
2474		 */
2475		if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2476			if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2477				ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2478				/* log it so that we can warn about it */
2479				cp->cache_dump.kd_unsafe++;
2480			} else {
2481				if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2482				    NULL) {
2483					mutex_exit(&ccp->cc_lock);
2484					return (buf);
2485				}
2486				break;		/* fall back to slab layer */
2487			}
2488		}
2489
2490		/*
2491		 * If the magazine layer is disabled, break out now.
2492		 */
2493		if (ccp->cc_magsize == 0)
2494			break;
2495
2496		/*
2497		 * Try to get a full magazine from the depot.
2498		 */
2499		fmp = kmem_depot_alloc(cp, &cp->cache_full);
2500		if (fmp != NULL) {
2501			if (ccp->cc_ploaded != NULL)
2502				kmem_depot_free(cp, &cp->cache_empty,
2503				    ccp->cc_ploaded);
2504			kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2505			continue;
2506		}
2507
2508		/*
2509		 * There are no full magazines in the depot,
2510		 * so fall through to the slab layer.
2511		 */
2512		break;
2513	}
2514	mutex_exit(&ccp->cc_lock);
2515
2516	/*
2517	 * We couldn't allocate a constructed object from the magazine layer,
2518	 * so get a raw buffer from the slab layer and apply its constructor.
2519	 */
2520	buf = kmem_slab_alloc(cp, kmflag);
2521
2522	if (buf == NULL)
2523		return (NULL);
2524
2525	if (cp->cache_flags & KMF_BUFTAG) {
2526		/*
2527		 * Make kmem_cache_alloc_debug() apply the constructor for us.
2528		 */
2529		int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2530		if (rc != 0) {
2531			if (kmflag & KM_NOSLEEP)
2532				return (NULL);
2533			/*
2534			 * kmem_cache_alloc_debug() detected corruption
2535			 * but didn't panic (kmem_panic <= 0). We should not be
2536			 * here because the constructor failed (indicated by a
2537			 * return code of 1). Try again.
2538			 */
2539			ASSERT(rc == -1);
2540			return (kmem_cache_alloc(cp, kmflag));
2541		}
2542		return (buf);
2543	}
2544
2545	if (cp->cache_constructor != NULL &&
2546	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2547		atomic_inc_64(&cp->cache_alloc_fail);
2548		kmem_slab_free(cp, buf);
2549		return (NULL);
2550	}
2551
2552	return (buf);
2553}
2554
2555/*
2556 * The freed argument tells whether or not kmem_cache_free_debug() has already
2557 * been called so that we can avoid the duplicate free error. For example, a
2558 * buffer on a magazine has already been freed by the client but is still
2559 * constructed.
2560 */
2561static void
2562kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2563{
2564	if (!freed && (cp->cache_flags & KMF_BUFTAG))
2565		if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2566			return;
2567
2568	/*
2569	 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2570	 * kmem_cache_free_debug() will have already applied the destructor.
2571	 */
2572	if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2573	    cp->cache_destructor != NULL) {
2574		if (cp->cache_flags & KMF_DEADBEEF) {	/* KMF_LITE implied */
2575			kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2576			*(uint64_t *)buf = btp->bt_redzone;
2577			cp->cache_destructor(buf, cp->cache_private);
2578			*(uint64_t *)buf = KMEM_FREE_PATTERN;
2579		} else {
2580			cp->cache_destructor(buf, cp->cache_private);
2581		}
2582	}
2583
2584	kmem_slab_free(cp, buf);
2585}
2586
2587/*
2588 * Used when there's no room to free a buffer to the per-CPU cache.
2589 * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2590 * caller should try freeing to the per-CPU cache again.
2591 * Note that we don't directly install the magazine in the cpu cache,
2592 * since its state may have changed wildly while the lock was dropped.
2593 */
2594static int
2595kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2596{
2597	kmem_magazine_t *emp;
2598	kmem_magtype_t *mtp;
2599
2600	ASSERT(MUTEX_HELD(&ccp->cc_lock));
2601	ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2602	    ((uint_t)ccp->cc_rounds == -1)) &&
2603	    ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2604	    ((uint_t)ccp->cc_prounds == -1)));
2605
2606	emp = kmem_depot_alloc(cp, &cp->cache_empty);
2607	if (emp != NULL) {
2608		if (ccp->cc_ploaded != NULL)
2609			kmem_depot_free(cp, &cp->cache_full,
2610			    ccp->cc_ploaded);
2611		kmem_cpu_reload(ccp, emp, 0);
2612		return (1);
2613	}
2614	/*
2615	 * There are no empty magazines in the depot,
2616	 * so try to allocate a new one.  We must drop all locks
2617	 * across kmem_cache_alloc() because lower layers may
2618	 * attempt to allocate from this cache.
2619	 */
2620	mtp = cp->cache_magtype;
2621	mutex_exit(&ccp->cc_lock);
2622	emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2623	mutex_enter(&ccp->cc_lock);
2624
2625	if (emp != NULL) {
2626		/*
2627		 * We successfully allocated an empty magazine.
2628		 * However, we had to drop ccp->cc_lock to do it,
2629		 * so the cache's magazine size may have changed.
2630		 * If so, free the magazine and try again.
2631		 */
2632		if (ccp->cc_magsize != mtp->mt_magsize) {
2633			mutex_exit(&ccp->cc_lock);
2634			kmem_cache_free(mtp->mt_cache, emp);
2635			mutex_enter(&ccp->cc_lock);
2636			return (1);
2637		}
2638
2639		/*
2640		 * We got a magazine of the right size.  Add it to
2641		 * the depot and try the whole dance again.
2642		 */
2643		kmem_depot_free(cp, &cp->cache_empty, emp);
2644		return (1);
2645	}
2646
2647	/*
2648	 * We couldn't allocate an empty magazine,
2649	 * so fall through to the slab layer.
2650	 */
2651	return (0);
2652}
2653
2654/*
2655 * Free a constructed object to cache cp.
2656 */
2657void
2658kmem_cache_free(kmem_cache_t *cp, void *buf)
2659{
2660	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2661
2662	/*
2663	 * The client must not free either of the buffers passed to the move
2664	 * callback function.
2665	 */
2666	ASSERT(cp->cache_defrag == NULL ||
2667	    cp->cache_defrag->kmd_thread != curthread ||
2668	    (buf != cp->cache_defrag->kmd_from_buf &&
2669	    buf != cp->cache_defrag->kmd_to_buf));
2670
2671	if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2672		if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2673			ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2674			/* log it so that we can warn about it */
2675			cp->cache_dump.kd_unsafe++;
2676		} else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2677			return;
2678		}
2679		if (ccp->cc_flags & KMF_BUFTAG) {
2680			if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2681				return;
2682		}
2683	}
2684
2685	mutex_enter(&ccp->cc_lock);
2686	/*
2687	 * Any changes to this logic should be reflected in kmem_slab_prefill()
2688	 */
2689	for (;;) {
2690		/*
2691		 * If there's a slot available in the current CPU's
2692		 * loaded magazine, just put the object there and return.
2693		 */
2694		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2695			ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2696			ccp->cc_free++;
2697			mutex_exit(&ccp->cc_lock);
2698			return;
2699		}
2700
2701		/*
2702		 * The loaded magazine is full.  If the previously loaded
2703		 * magazine was empty, exchange them and try again.
2704		 */
2705		if (ccp->cc_prounds == 0) {
2706			kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2707			continue;
2708		}
2709
2710		/*
2711		 * If the magazine layer is disabled, break out now.
2712		 */
2713		if (ccp->cc_magsize == 0)
2714			break;
2715
2716		if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2717			/*
2718			 * We couldn't free our constructed object to the
2719			 * magazine layer, so apply its destructor and free it
2720			 * to the slab layer.
2721			 */
2722			break;
2723		}
2724	}
2725	mutex_exit(&ccp->cc_lock);
2726	kmem_slab_free_constructed(cp, buf, B_TRUE);
2727}
2728
2729static void
2730kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2731{
2732	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2733	int cache_flags = cp->cache_flags;
2734
2735	kmem_bufctl_t *next, *head;
2736	size_t nbufs;
2737
2738	/*
2739	 * Completely allocate the newly created slab and put the pre-allocated
2740	 * buffers in magazines. Any of the buffers that cannot be put in
2741	 * magazines must be returned to the slab.
2742	 */
2743	ASSERT(MUTEX_HELD(&cp->cache_lock));
2744	ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2745	ASSERT(cp->cache_constructor == NULL);
2746	ASSERT(sp->slab_cache == cp);
2747	ASSERT(sp->slab_refcnt == 1);
2748	ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2749	ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2750
2751	head = sp->slab_head;
2752	nbufs = (sp->slab_chunks - sp->slab_refcnt);
2753	sp->slab_head = NULL;
2754	sp->slab_refcnt += nbufs;
2755	cp->cache_bufslab -= nbufs;
2756	cp->cache_slab_alloc += nbufs;
2757	list_insert_head(&cp->cache_complete_slabs, sp);
2758	cp->cache_complete_slab_count++;
2759	mutex_exit(&cp->cache_lock);
2760	mutex_enter(&ccp->cc_lock);
2761
2762	while (head != NULL) {
2763		void *buf = KMEM_BUF(cp, head);
2764		/*
2765		 * If there's a slot available in the current CPU's
2766		 * loaded magazine, just put the object there and
2767		 * continue.
2768		 */
2769		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2770			ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2771			    buf;
2772			ccp->cc_free++;
2773			nbufs--;
2774			head = head->bc_next;
2775			continue;
2776		}
2777
2778		/*
2779		 * The loaded magazine is full.  If the previously
2780		 * loaded magazine was empty, exchange them and try
2781		 * again.
2782		 */
2783		if (ccp->cc_prounds == 0) {
2784			kmem_cpu_reload(ccp, ccp->cc_ploaded,
2785			    ccp->cc_prounds);
2786			continue;
2787		}
2788
2789		/*
2790		 * If the magazine layer is disabled, break out now.
2791		 */
2792
2793		if (ccp->cc_magsize == 0) {
2794			break;
2795		}
2796
2797		if (!kmem_cpucache_magazine_alloc(ccp, cp))
2798			break;
2799	}
2800	mutex_exit(&ccp->cc_lock);
2801	if (nbufs != 0) {
2802		ASSERT(head != NULL);
2803
2804		/*
2805		 * If there was a failure, return remaining objects to
2806		 * the slab
2807		 */
2808		while (head != NULL) {
2809			ASSERT(nbufs != 0);
2810			next = head->bc_next;
2811			head->bc_next = NULL;
2812			kmem_slab_free(cp, KMEM_BUF(cp, head));
2813			head = next;
2814			nbufs--;
2815		}
2816	}
2817	ASSERT(head == NULL);
2818	ASSERT(nbufs == 0);
2819	mutex_enter(&cp->cache_lock);
2820}
2821
2822void *
2823kmem_zalloc(size_t size, int kmflag)
2824{
2825	size_t index;
2826	void *buf;
2827
2828	if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2829		kmem_cache_t *cp = kmem_alloc_table[index];
2830		buf = kmem_cache_alloc(cp, kmflag);
2831		if (buf != NULL) {
2832			if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2833				kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2834				((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2835				((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2836
2837				if (cp->cache_flags & KMF_LITE) {
2838					KMEM_BUFTAG_LITE_ENTER(btp,
2839					    kmem_lite_count, caller());
2840				}
2841			}
2842			bzero(buf, size);
2843		}
2844	} else {
2845		buf = kmem_alloc(size, kmflag);
2846		if (buf != NULL)
2847			bzero(buf, size);
2848	}
2849	return (buf);
2850}
2851
2852void *
2853kmem_alloc(size_t size, int kmflag)
2854{
2855	size_t index;
2856	kmem_cache_t *cp;
2857	void *buf;
2858
2859	if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2860		cp = kmem_alloc_table[index];
2861		/* fall through to kmem_cache_alloc() */
2862
2863	} else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2864	    kmem_big_alloc_table_max) {
2865		cp = kmem_big_alloc_table[index];
2866		/* fall through to kmem_cache_alloc() */
2867
2868	} else {
2869		if (size == 0) {
2870			if (kmflag != KM_SLEEP && !(kmflag & KM_PANIC))
2871				return (NULL);
2872
2873			/*
2874			 * If this is a sleeping allocation or one that has
2875			 * been specified to panic on allocation failure, we
2876			 * consider it to be deprecated behavior to allocate
2877			 * 0 bytes.  If we have been configured to panic under
2878			 * this condition, we panic; if to warn, we warn -- and
2879			 * regardless, we log to the kmem_zerosized_log that
2880			 * that this condition has occurred (which gives us
2881			 * enough information to be able to debug it).
2882			 */
2883			if (kmem_panic && kmem_panic_zerosized)
2884				panic("attempted to kmem_alloc() size of 0");
2885
2886			if (kmem_warn_zerosized) {
2887				cmn_err(CE_WARN, "kmem_alloc(): sleeping "
2888				    "allocation with size of 0; "
2889				    "see kmem_zerosized_log for details");
2890			}
2891
2892			kmem_log_event(kmem_zerosized_log, NULL, NULL, NULL);
2893
2894			return (NULL);
2895		}
2896
2897		buf = vmem_alloc(kmem_oversize_arena, size,
2898		    kmflag & KM_VMFLAGS);
2899		if (buf == NULL)
2900			kmem_log_event(kmem_failure_log, NULL, NULL,
2901			    (void *)size);
2902		else if (KMEM_DUMP(kmem_slab_cache)) {
2903			/* stats for dump intercept */
2904			kmem_dump_oversize_allocs++;
2905			if (size > kmem_dump_oversize_max)
2906				kmem_dump_oversize_max = size;
2907		}
2908		return (buf);
2909	}
2910
2911	buf = kmem_cache_alloc(cp, kmflag);
2912	if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2913		kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2914		((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2915		((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2916
2917		if (cp->cache_flags & KMF_LITE) {
2918			KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2919		}
2920	}
2921	return (buf);
2922}
2923
2924void
2925kmem_free(void *buf, size_t size)
2926{
2927	size_t index;
2928	kmem_cache_t *cp;
2929
2930	if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2931		cp = kmem_alloc_table[index];
2932		/* fall through to kmem_cache_free() */
2933
2934	} else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2935	    kmem_big_alloc_table_max) {
2936		cp = kmem_big_alloc_table[index];
2937		/* fall through to kmem_cache_free() */
2938
2939	} else {
2940		EQUIV(buf == NULL, size == 0);
2941		if (buf == NULL && size == 0)
2942			return;
2943		vmem_free(kmem_oversize_arena, buf, size);
2944		return;
2945	}
2946
2947	if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2948		kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2949		uint32_t *ip = (uint32_t *)btp;
2950		if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2951			if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2952				kmem_error(KMERR_DUPFREE, cp, buf);
2953				return;
2954			}
2955			if (KMEM_SIZE_VALID(ip[1])) {
2956				ip[0] = KMEM_SIZE_ENCODE(size);
2957				kmem_error(KMERR_BADSIZE, cp, buf);
2958			} else {
2959				kmem_error(KMERR_REDZONE, cp, buf);
2960			}
2961			return;
2962		}
2963		if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2964			kmem_error(KMERR_REDZONE, cp, buf);
2965			return;
2966		}
2967		btp->bt_redzone = KMEM_REDZONE_PATTERN;
2968		if (cp->cache_flags & KMF_LITE) {
2969			KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2970			    caller());
2971		}
2972	}
2973	kmem_cache_free(cp, buf);
2974}
2975
2976void *
2977kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2978{
2979	size_t realsize = size + vmp->vm_quantum;
2980	void *addr;
2981
2982	/*
2983	 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2984	 * vm_quantum will cause integer wraparound.  Check for this, and
2985	 * blow off the firewall page in this case.  Note that such a
2986	 * giant allocation (the entire kernel address space) can never
2987	 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
2988	 * or sleep forever (VM_SLEEP).  Thus, there is no need for a
2989	 * corresponding check in kmem_firewall_va_free().
2990	 */
2991	if (realsize < size)
2992		realsize = size;
2993
2994	/*
2995	 * While boot still owns resource management, make sure that this
2996	 * redzone virtual address allocation is properly accounted for in
2997	 * OBPs "virtual-memory" "available" lists because we're
2998	 * effectively claiming them for a red zone.  If we don't do this,
2999	 * the available lists become too fragmented and too large for the
3000	 * current boot/kernel memory list interface.
3001	 */
3002	addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3003
3004	if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3005		(void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3006
3007	return (addr);
3008}
3009
3010void
3011kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3012{
3013	ASSERT((kvseg.s_base == NULL ?
3014	    va_to_pfn((char *)addr + size) :
3015	    hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3016
3017	vmem_free(vmp, addr, size + vmp->vm_quantum);
3018}
3019
3020/*
3021 * Try to allocate at least `size' bytes of memory without sleeping or
3022 * panicking. Return actual allocated size in `asize'. If allocation failed,
3023 * try final allocation with sleep or panic allowed.
3024 */
3025void *
3026kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3027{
3028	void *p;
3029
3030	*asize = P2ROUNDUP(size, KMEM_ALIGN);
3031	do {
3032		p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3033		if (p != NULL)
3034			return (p);
3035		*asize += KMEM_ALIGN;
3036	} while (*asize <= PAGESIZE);
3037
3038	*asize = P2ROUNDUP(size, KMEM_ALIGN);
3039	return (kmem_alloc(*asize, kmflag));
3040}
3041
3042/*
3043 * Reclaim all unused memory from a cache.
3044 */
3045static void
3046kmem_cache_reap(kmem_cache_t *cp)
3047{
3048	ASSERT(taskq_member(kmem_taskq, curthread));
3049	cp->cache_reap++;
3050
3051	/*
3052	 * Ask the cache's owner to free some memory if possible.
3053	 * The idea is to handle things like the inode cache, which
3054	 * typically sits on a bunch of memory that it doesn't truly
3055	 * *need*.  Reclaim policy is entirely up to the owner; this
3056	 * callback is just an advisory plea for help.
3057	 */
3058	if (cp->cache_reclaim != NULL) {
3059		long delta;
3060
3061		/*
3062		 * Reclaimed memory should be reapable (not included in the
3063		 * depot's working set).
3064		 */
3065		delta = cp->cache_full.ml_total;
3066		cp->cache_reclaim(cp->cache_private);
3067		delta = cp->cache_full.ml_total - delta;
3068		if (delta > 0) {
3069			mutex_enter(&cp->cache_depot_lock);
3070			cp->cache_full.ml_reaplimit += delta;
3071			cp->cache_full.ml_min += delta;
3072			mutex_exit(&cp->cache_depot_lock);
3073		}
3074	}
3075
3076	kmem_depot_ws_reap(cp);
3077
3078	if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3079		kmem_cache_defrag(cp);
3080	}
3081}
3082
3083static void
3084kmem_reap_timeout(void *flag_arg)
3085{
3086	uint32_t *flag = (uint32_t *)flag_arg;
3087
3088	ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3089	*flag = 0;
3090}
3091
3092static void
3093kmem_reap_done(void *flag)
3094{
3095	if (!callout_init_done) {
3096		/* can't schedule a timeout at this point */
3097		kmem_reap_timeout(flag);
3098	} else {
3099		(void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3100	}
3101}
3102
3103static void
3104kmem_reap_start(void *flag)
3105{
3106	ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3107
3108	if (flag == &kmem_reaping) {
3109		kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3110		/*
3111		 * if we have segkp under heap, reap segkp cache.
3112		 */
3113		if (segkp_fromheap)
3114			segkp_cache_free();
3115	}
3116	else
3117		kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3118
3119	/*
3120	 * We use taskq_dispatch() to schedule a timeout to clear
3121	 * the flag so that kmem_reap() becomes self-throttling:
3122	 * we won't reap again until the current reap completes *and*
3123	 * at least kmem_reap_interval ticks have elapsed.
3124	 */
3125	if (taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP) ==
3126	    TASKQID_INVALID)
3127		kmem_reap_done(flag);
3128}
3129
3130static void
3131kmem_reap_common(void *flag_arg)
3132{
3133	uint32_t *flag = (uint32_t *)flag_arg;
3134
3135	if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3136	    atomic_cas_32(flag, 0, 1) != 0)
3137		return;
3138
3139	/*
3140	 * It may not be kosher to do memory allocation when a reap is called
3141	 * (for example, if vmem_populate() is in the call chain).  So we
3142	 * start the reap going with a TQ_NOALLOC dispatch.  If the dispatch
3143	 * fails, we reset the flag, and the next reap will try again.
3144	 */
3145	if (taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC) ==
3146	    TASKQID_INVALID)
3147		*flag = 0;
3148}
3149
3150/*
3151 * Reclaim all unused memory from all caches.  Called from the VM system
3152 * when memory gets tight.
3153 */
3154void
3155kmem_reap(void)
3156{
3157	kmem_reap_common(&kmem_reaping);
3158}
3159
3160/*
3161 * Reclaim all unused memory from identifier arenas, called when a vmem
3162 * arena not back by memory is exhausted.  Since reaping memory-backed caches
3163 * cannot help with identifier exhaustion, we avoid both a large amount of
3164 * work and unwanted side-effects from reclaim callbacks.
3165 */
3166void
3167kmem_reap_idspace(void)
3168{
3169	kmem_reap_common(&kmem_reaping_idspace);
3170}
3171
3172/*
3173 * Purge all magazines from a cache and set its magazine limit to zero.
3174 * All calls are serialized by the kmem_taskq lock, except for the final
3175 * call from kmem_cache_destroy().
3176 */
3177static void
3178kmem_cache_magazine_purge(kmem_cache_t *cp)
3179{
3180	kmem_cpu_cache_t *ccp;
3181	kmem_magazine_t *mp, *pmp;
3182	int rounds, prounds, cpu_seqid;
3183
3184	ASSERT(!list_link_active(&cp->cache_link) ||
3185	    taskq_member(kmem_taskq, curthread));
3186	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3187
3188	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3189		ccp = &cp->cache_cpu[cpu_seqid];
3190
3191		mutex_enter(&ccp->cc_lock);
3192		mp = ccp->cc_loaded;
3193		pmp = ccp->cc_ploaded;
3194		rounds = ccp->cc_rounds;
3195		prounds = ccp->cc_prounds;
3196		ccp->cc_loaded = NULL;
3197		ccp->cc_ploaded = NULL;
3198		ccp->cc_rounds = -1;
3199		ccp->cc_prounds = -1;
3200		ccp->cc_magsize = 0;
3201		mutex_exit(&ccp->cc_lock);
3202
3203		if (mp)
3204			kmem_magazine_destroy(cp, mp, rounds);
3205		if (pmp)
3206			kmem_magazine_destroy(cp, pmp, prounds);
3207	}
3208
3209	kmem_depot_ws_zero(cp);
3210	kmem_depot_ws_reap(cp);
3211}
3212
3213/*
3214 * Enable per-cpu magazines on a cache.
3215 */
3216static void
3217kmem_cache_magazine_enable(kmem_cache_t *cp)
3218{
3219	int cpu_seqid;
3220
3221	if (cp->cache_flags & KMF_NOMAGAZINE)
3222		return;
3223
3224	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3225		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3226		mutex_enter(&ccp->cc_lock);
3227		ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3228		mutex_exit(&ccp->cc_lock);
3229	}
3230
3231}
3232
3233/*
3234 * Allow our caller to determine if there are running reaps.
3235 *
3236 * This call is very conservative and may return B_TRUE even when
3237 * reaping activity isn't active. If it returns B_FALSE, then reaping
3238 * activity is definitely inactive.
3239 */
3240boolean_t
3241kmem_cache_reap_active(void)
3242{
3243	return (!taskq_empty(kmem_taskq));
3244}
3245
3246/*
3247 * Reap (almost) everything soon.
3248 *
3249 * Note: this does not wait for the reap-tasks to complete. Caller
3250 * should use kmem_cache_reap_active() (above) and/or moderation to
3251 * avoid scheduling too many reap-tasks.
3252 */
3253void
3254kmem_cache_reap_soon(kmem_cache_t *cp)
3255{
3256	ASSERT(list_link_active(&cp->cache_link));
3257
3258	kmem_depot_ws_zero(cp);
3259
3260	(void) taskq_dispatch(kmem_taskq,
3261	    (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3262}
3263
3264/*
3265 * Recompute a cache's magazine size.  The trade-off is that larger magazines
3266 * provide a higher transfer rate with the depot, while smaller magazines
3267 * reduce memory consumption.  Magazine resizing is an expensive operation;
3268 * it should not be done frequently.
3269 *
3270 * Changes to the magazine size are serialized by the kmem_taskq lock.
3271 *
3272 * Note: at present this only grows the magazine size.  It might be useful
3273 * to allow shrinkage too.
3274 */
3275static void
3276kmem_cache_magazine_resize(kmem_cache_t *cp)
3277{
3278	kmem_magtype_t *mtp = cp->cache_magtype;
3279
3280	ASSERT(taskq_member(kmem_taskq, curthread));
3281
3282	if (cp->cache_chunksize < mtp->mt_maxbuf) {
3283		kmem_cache_magazine_purge(cp);
3284		mutex_enter(&cp->cache_depot_lock);
3285		cp->cache_magtype = ++mtp;
3286		cp->cache_depot_contention_prev =
3287		    cp->cache_depot_contention + INT_MAX;
3288		mutex_exit(&cp->cache_depot_lock);
3289		kmem_cache_magazine_enable(cp);
3290	}
3291}
3292
3293/*
3294 * Rescale a cache's hash table, so that the table size is roughly the
3295 * cache size.  We want the average lookup time to be extremely small.
3296 */
3297static void
3298kmem_hash_rescale(kmem_cache_t *cp)
3299{
3300	kmem_bufctl_t **old_table, **new_table, *bcp;
3301	size_t old_size, new_size, h;
3302
3303	ASSERT(taskq_member(kmem_taskq, curthread));
3304
3305	new_size = MAX(KMEM_HASH_INITIAL,
3306	    1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3307	old_size = cp->cache_hash_mask + 1;
3308
3309	if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3310		return;
3311
3312	new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3313	    VM_NOSLEEP);
3314	if (new_table == NULL)
3315		return;
3316	bzero(new_table, new_size * sizeof (void *));
3317
3318	mutex_enter(&cp->cache_lock);
3319
3320	old_size = cp->cache_hash_mask + 1;
3321	old_table = cp->cache_hash_table;
3322
3323	cp->cache_hash_mask = new_size - 1;
3324	cp->cache_hash_table = new_table;
3325	cp->cache_rescale++;
3326
3327	for (h = 0; h < old_size; h++) {
3328		bcp = old_table[h];
3329		while (bcp != NULL) {
3330			void *addr = bcp->bc_addr;
3331			kmem_bufctl_t *next_bcp = bcp->bc_next;
3332			kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3333			bcp->bc_next = *hash_bucket;
3334			*hash_bucket = bcp;
3335			bcp = next_bcp;
3336		}
3337	}
3338
3339	mutex_exit(&cp->cache_lock);
3340
3341	vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3342}
3343
3344/*
3345 * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3346 * update, magazine resizing, and slab consolidation.
3347 */
3348static void
3349kmem_cache_update(kmem_cache_t *cp)
3350{
3351	int need_hash_rescale = 0;
3352	int need_magazine_resize = 0;
3353
3354	ASSERT(MUTEX_HELD(&kmem_cache_lock));
3355
3356	/*
3357	 * If the cache has become much larger or smaller than its hash table,
3358	 * fire off a request to rescale the hash table.
3359	 */
3360	mutex_enter(&cp->cache_lock);
3361
3362	if ((cp->cache_flags & KMF_HASH) &&
3363	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3364	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3365	    cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3366		need_hash_rescale = 1;
3367
3368	mutex_exit(&cp->cache_lock);
3369
3370	/*
3371	 * Update the depot working set statistics.
3372	 */
3373	kmem_depot_ws_update(cp);
3374
3375	/*
3376	 * If there's a lot of contention in the depot,
3377	 * increase the magazine size.
3378	 */
3379	mutex_enter(&cp->cache_depot_lock);
3380
3381	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3382	    (int)(cp->cache_depot_contention -
3383	    cp->cache_depot_contention_prev) > kmem_depot_contention)
3384		need_magazine_resize = 1;
3385
3386	cp->cache_depot_contention_prev = cp->cache_depot_contention;
3387
3388	mutex_exit(&cp->cache_depot_lock);
3389
3390	if (need_hash_rescale)
3391		(void) taskq_dispatch(kmem_taskq,
3392		    (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3393
3394	if (need_magazine_resize)
3395		(void) taskq_dispatch(kmem_taskq,
3396		    (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3397
3398	if (cp->cache_defrag != NULL)
3399		(void) taskq_dispatch(kmem_taskq,
3400		    (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3401}
3402
3403static void kmem_update(void *);
3404
3405static void
3406kmem_update_timeout(void *dummy)
3407{
3408	(void) timeout(kmem_update, dummy, kmem_reap_interval);
3409}
3410
3411static void
3412kmem_update(void *dummy)
3413{
3414	kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3415
3416	/*
3417	 * We use taskq_dispatch() to reschedule the timeout so that
3418	 * kmem_update() becomes self-throttling: it won't schedule
3419	 * new tasks until all previous tasks have completed.
3420	 */
3421	if (taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP)
3422	    == TASKQID_INVALID)
3423		kmem_update_timeout(NULL);
3424}
3425
3426static int
3427kmem_cache_kstat_update(kstat_t *ksp, int rw)
3428{
3429	struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3430	kmem_cache_t *cp = ksp->ks_private;
3431	uint64_t cpu_buf_avail;
3432	uint64_t buf_avail = 0;
3433	int cpu_seqid;
3434	long reap;
3435
3436	ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3437
3438	if (rw == KSTAT_WRITE)
3439		return (EACCES);
3440
3441	mutex_enter(&cp->cache_lock);
3442
3443	kmcp->kmc_alloc_fail.value.ui64		= cp->cache_alloc_fail;
3444	kmcp->kmc_alloc.value.ui64		= cp->cache_slab_alloc;
3445	kmcp->kmc_free.value.ui64		= cp->cache_slab_free;
3446	kmcp->kmc_slab_alloc.value.ui64		= cp->cache_slab_alloc;
3447	kmcp->kmc_slab_free.value.ui64		= cp->cache_slab_free;
3448
3449	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3450		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3451
3452		mutex_enter(&ccp->cc_lock);
3453
3454		cpu_buf_avail = 0;
3455		if (ccp->cc_rounds > 0)
3456			cpu_buf_avail += ccp->cc_rounds;
3457		if (ccp->cc_prounds > 0)
3458			cpu_buf_avail += ccp->cc_prounds;
3459
3460		kmcp->kmc_alloc.value.ui64	+= ccp->cc_alloc;
3461		kmcp->kmc_free.value.ui64	+= ccp->cc_free;
3462		buf_avail			+= cpu_buf_avail;
3463
3464		mutex_exit(&ccp->cc_lock);
3465	}
3466
3467	mutex_enter(&cp->cache_depot_lock);
3468
3469	kmcp->kmc_depot_alloc.value.ui64	= cp->cache_full.ml_alloc;
3470	kmcp->kmc_depot_free.value.ui64		= cp->cache_empty.ml_alloc;
3471	kmcp->kmc_depot_contention.value.ui64	= cp->cache_depot_contention;
3472	kmcp->kmc_full_magazines.value.ui64	= cp->cache_full.ml_total;
3473	kmcp->kmc_empty_magazines.value.ui64	= cp->cache_empty.ml_total;
3474	kmcp->kmc_magazine_size.value.ui64	=
3475	    (cp->cache_flags & KMF_NOMAGAZINE) ?
3476	    0 : cp->cache_magtype->mt_magsize;
3477
3478	kmcp->kmc_alloc.value.ui64		+= cp->cache_full.ml_alloc;
3479	kmcp->kmc_free.value.ui64		+= cp->cache_empty.ml_alloc;
3480	buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3481
3482	reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3483	reap = MIN(reap, cp->cache_full.ml_total);
3484
3485	mutex_exit(&cp->cache_depot_lock);
3486
3487	kmcp->kmc_buf_size.value.ui64	= cp->cache_bufsize;
3488	kmcp->kmc_align.value.ui64	= cp->cache_align;
3489	kmcp->kmc_chunk_size.value.ui64	= cp->cache_chunksize;
3490	kmcp->kmc_slab_size.value.ui64	= cp->cache_slabsize;
3491	kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3492	buf_avail += cp->cache_bufslab;
3493	kmcp->kmc_buf_avail.value.ui64	= buf_avail;
3494	kmcp->kmc_buf_inuse.value.ui64	= cp->cache_buftotal - buf_avail;
3495	kmcp->kmc_buf_total.value.ui64	= cp->cache_buftotal;
3496	kmcp->kmc_buf_max.value.ui64	= cp->cache_bufmax;
3497	kmcp->kmc_slab_create.value.ui64	= cp->cache_slab_create;
3498	kmcp->kmc_slab_destroy.value.ui64	= cp->cache_slab_destroy;
3499	kmcp->kmc_hash_size.value.ui64	= (cp->cache_flags & KMF_HASH) ?
3500	    cp->cache_hash_mask + 1 : 0;
3501	kmcp->kmc_hash_lookup_depth.value.ui64	= cp->cache_lookup_depth;
3502	kmcp->kmc_hash_rescale.value.ui64	= cp->cache_rescale;
3503	kmcp->kmc_vmem_source.value.ui64	= cp->cache_arena->vm_id;
3504	kmcp->kmc_reap.value.ui64	= cp->cache_reap;
3505
3506	if (cp->cache_defrag == NULL) {
3507		kmcp->kmc_move_callbacks.value.ui64	= 0;
3508		kmcp->kmc_move_yes.value.ui64		= 0;
3509		kmcp->kmc_move_no.value.ui64		= 0;
3510		kmcp->kmc_move_later.value.ui64		= 0;
3511		kmcp->kmc_move_dont_need.value.ui64	= 0;
3512		kmcp->kmc_move_dont_know.value.ui64	= 0;
3513		kmcp->kmc_move_hunt_found.value.ui64	= 0;
3514		kmcp->kmc_move_slabs_freed.value.ui64	= 0;
3515		kmcp->kmc_defrag.value.ui64		= 0;
3516		kmcp->kmc_scan.value.ui64		= 0;
3517		kmcp->kmc_move_reclaimable.value.ui64	= 0;
3518	} else {
3519		int64_t reclaimable;
3520
3521		kmem_defrag_t *kd = cp->cache_defrag;
3522		kmcp->kmc_move_callbacks.value.ui64	= kd->kmd_callbacks;
3523		kmcp->kmc_move_yes.value.ui64		= kd->kmd_yes;
3524		kmcp->kmc_move_no.value.ui64		= kd->kmd_no;
3525		kmcp->kmc_move_later.value.ui64		= kd->kmd_later;
3526		kmcp->kmc_move_dont_need.value.ui64	= kd->kmd_dont_need;
3527		kmcp->kmc_move_dont_know.value.ui64	= kd->kmd_dont_know;
3528		kmcp->kmc_move_hunt_found.value.ui64	= 0;
3529		kmcp->kmc_move_slabs_freed.value.ui64	= kd->kmd_slabs_freed;
3530		kmcp->kmc_defrag.value.ui64		= kd->kmd_defrags;
3531		kmcp->kmc_scan.value.ui64		= kd->kmd_scans;
3532
3533		reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3534		reclaimable = MAX(reclaimable, 0);
3535		reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3536		kmcp->kmc_move_reclaimable.value.ui64	= reclaimable;
3537	}
3538
3539	mutex_exit(&cp->cache_lock);
3540	return (0);
3541}
3542
3543/*
3544 * Return a named statistic about a particular cache.
3545 * This shouldn't be called very often, so it's currently designed for
3546 * simplicity (leverages existing kstat support) rather than efficiency.
3547 */
3548uint64_t
3549kmem_cache_stat(kmem_cache_t *cp, char *name)
3550{
3551	int i;
3552	kstat_t *ksp = cp->cache_kstat;
3553	kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3554	uint64_t value = 0;
3555
3556	if (ksp != NULL) {
3557		mutex_enter(&kmem_cache_kstat_lock);
3558		(void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3559		for (i = 0; i < ksp->ks_ndata; i++) {
3560			if (strcmp(knp[i].name, name) == 0) {
3561				value = knp[i].value.ui64;
3562				break;
3563			}
3564		}
3565		mutex_exit(&kmem_cache_kstat_lock);
3566	}
3567	return (value);
3568}
3569
3570/*
3571 * Return an estimate of currently available kernel heap memory.
3572 * On 32-bit systems, physical memory may exceed virtual memory,
3573 * we just truncate the result at 1GB.
3574 */
3575size_t
3576kmem_avail(void)
3577{
3578	spgcnt_t rmem = availrmem - tune.t_minarmem;
3579	spgcnt_t fmem = freemem - minfree;
3580
3581	return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3582	    1 << (30 - PAGESHIFT))));
3583}
3584
3585/*
3586 * Return the maximum amount of memory that is (in theory) allocatable
3587 * from the heap. This may be used as an estimate only since there
3588 * is no guarentee this space will still be available when an allocation
3589 * request is made, nor that the space may be allocated in one big request
3590 * due to kernel heap fragmentation.
3591 */
3592size_t
3593kmem_maxavail(void)
3594{
3595	spgcnt_t pmem = availrmem - tune.t_minarmem;
3596	spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3597
3598	return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3599}
3600
3601/*
3602 * Indicate whether memory-intensive kmem debugging is enabled.
3603 */
3604int
3605kmem_debugging(void)
3606{
3607	return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3608}
3609
3610/* binning function, sorts finely at the two extremes */
3611#define	KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift)				\
3612	((((sp)->slab_refcnt <= (binshift)) ||				\
3613	    (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift)))	\
3614	    ? -(sp)->slab_refcnt					\
3615	    : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3616
3617/*
3618 * Minimizing the number of partial slabs on the freelist minimizes
3619 * fragmentation (the ratio of unused buffers held by the slab layer). There are
3620 * two ways to get a slab off of the freelist: 1) free all the buffers on the
3621 * slab, and 2) allocate all the buffers on the slab. It follows that we want
3622 * the most-used slabs at the front of the list where they have the best chance
3623 * of being completely allocated, and the least-used slabs at a safe distance
3624 * from the front to improve the odds that the few remaining buffers will all be
3625 * freed before another allocation can tie up the slab. For that reason a slab
3626 * with a higher slab_refcnt sorts less than than a slab with a lower
3627 * slab_refcnt.
3628 *
3629 * However, if a slab has at least one buffer that is deemed unfreeable, we
3630 * would rather have that slab at the front of the list regardless of
3631 * slab_refcnt, since even one unfreeable buffer makes the entire slab
3632 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3633 * callback, the slab is marked unfreeable for as long as it remains on the
3634 * freelist.
3635 */
3636static int
3637kmem_partial_slab_cmp(const void *p0, const void *p1)
3638{
3639	const kmem_cache_t *cp;
3640	const kmem_slab_t *s0 = p0;
3641	const kmem_slab_t *s1 = p1;
3642	int w0, w1;
3643	size_t binshift;
3644
3645	ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3646	ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3647	ASSERT(s0->slab_cache == s1->slab_cache);
3648	cp = s1->slab_cache;
3649	ASSERT(MUTEX_HELD(&cp->cache_lock));
3650	binshift = cp->cache_partial_binshift;
3651
3652	/* weight of first slab */
3653	w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3654	if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3655		w0 -= cp->cache_maxchunks;
3656	}
3657
3658	/* weight of second slab */
3659	w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3660	if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3661		w1 -= cp->cache_maxchunks;
3662	}
3663
3664	if (w0 < w1)
3665		return (-1);
3666	if (w0 > w1)
3667		return (1);
3668
3669	/* compare pointer values */
3670	if ((uintptr_t)s0 < (uintptr_t)s1)
3671		return (-1);
3672	if ((uintptr_t)s0 > (uintptr_t)s1)
3673		return (1);
3674
3675	return (0);
3676}
3677
3678/*
3679 * It must be valid to call the destructor (if any) on a newly created object.
3680 * That is, the constructor (if any) must leave the object in a valid state for
3681 * the destructor.
3682 */
3683kmem_cache_t *
3684kmem_cache_create(
3685	char *name,		/* descriptive name for this cache */
3686	size_t bufsize,		/* size of the objects it manages */
3687	size_t align,		/* required object alignment */
3688	int (*constructor)(void *, void *, int), /* object constructor */
3689	void (*destructor)(void *, void *),	/* object destructor */
3690	void (*reclaim)(void *), /* memory reclaim callback */
3691	void *private,		/* pass-thru arg for constr/destr/reclaim */
3692	vmem_t *vmp,		/* vmem source for slab allocation */
3693	int cflags)		/* cache creation flags */
3694{
3695	int cpu_seqid;
3696	size_t chunksize;
3697	kmem_cache_t *cp;
3698	kmem_magtype_t *mtp;
3699	size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3700
3701#ifdef	DEBUG
3702	/*
3703	 * Cache names should conform to the rules for valid C identifiers
3704	 */
3705	if (!strident_valid(name)) {
3706		cmn_err(CE_CONT,
3707		    "kmem_cache_create: '%s' is an invalid cache name\n"
3708		    "cache names must conform to the rules for "
3709		    "C identifiers\n", name);
3710	}
3711#endif	/* DEBUG */
3712
3713	if (vmp == NULL)
3714		vmp = kmem_default_arena;
3715
3716	/*
3717	 * If this kmem cache has an identifier vmem arena as its source, mark
3718	 * it such to allow kmem_reap_idspace().
3719	 */
3720	ASSERT(!(cflags & KMC_IDENTIFIER));   /* consumer should not set this */
3721	if (vmp->vm_cflags & VMC_IDENTIFIER)
3722		cflags |= KMC_IDENTIFIER;
3723
3724	/*
3725	 * Get a kmem_cache structure.  We arrange that cp->cache_cpu[]
3726	 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3727	 * false sharing of per-CPU data.
3728	 */
3729	cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3730	    P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3731	bzero(cp, csize);
3732	list_link_init(&cp->cache_link);
3733
3734	if (align == 0)
3735		align = KMEM_ALIGN;
3736
3737	/*
3738	 * If we're not at least KMEM_ALIGN aligned, we can't use free
3739	 * memory to hold bufctl information (because we can't safely
3740	 * perform word loads and stores on it).
3741	 */
3742	if (align < KMEM_ALIGN)
3743		cflags |= KMC_NOTOUCH;
3744
3745	if (!ISP2(align) || align > vmp->vm_quantum)
3746		panic("kmem_cache_create: bad alignment %lu", align);
3747
3748	mutex_enter(&kmem_flags_lock);
3749	if (kmem_flags & KMF_RANDOMIZE)
3750		kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3751		    KMF_RANDOMIZE;
3752	cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3753	mutex_exit(&kmem_flags_lock);
3754
3755	/*
3756	 * Make sure all the various flags are reasonable.
3757	 */
3758	ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3759
3760	if (cp->cache_flags & KMF_LITE) {
3761		if (bufsize >= kmem_lite_minsize &&
3762		    align <= kmem_lite_maxalign &&
3763		    P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3764			cp->cache_flags |= KMF_BUFTAG;
3765			cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3766		} else {
3767			cp->cache_flags &= ~KMF_DEBUG;
3768		}
3769	}
3770
3771	if (cp->cache_flags & KMF_DEADBEEF)
3772		cp->cache_flags |= KMF_REDZONE;
3773
3774	if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3775		cp->cache_flags |= KMF_NOMAGAZINE;
3776
3777	if (cflags & KMC_NODEBUG)
3778		cp->cache_flags &= ~KMF_DEBUG;
3779
3780	if (cflags & KMC_NOTOUCH)
3781		cp->cache_flags &= ~KMF_TOUCH;
3782
3783	if (cflags & KMC_PREFILL)
3784		cp->cache_flags |= KMF_PREFILL;
3785
3786	if (cflags & KMC_NOHASH)
3787		cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3788
3789	if (cflags & KMC_NOMAGAZINE)
3790		cp->cache_flags |= KMF_NOMAGAZINE;
3791
3792	if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3793		cp->cache_flags |= KMF_REDZONE;
3794
3795	if (!(cp->cache_flags & KMF_AUDIT))
3796		cp->cache_flags &= ~KMF_CONTENTS;
3797
3798	if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3799	    !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3800		cp->cache_flags |= KMF_FIREWALL;
3801
3802	if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3803		cp->cache_flags &= ~KMF_FIREWALL;
3804
3805	if (cp->cache_flags & KMF_FIREWALL) {
3806		cp->cache_flags &= ~KMF_BUFTAG;
3807		cp->cache_flags |= KMF_NOMAGAZINE;
3808		ASSERT(vmp == kmem_default_arena);
3809		vmp = kmem_firewall_arena;
3810	}
3811
3812	/*
3813	 * Set cache properties.
3814	 */
3815	(void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3816	strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3817	cp->cache_bufsize = bufsize;
3818	cp->cache_align = align;
3819	cp->cache_constructor = constructor;
3820	cp->cache_destructor = destructor;
3821	cp->cache_reclaim = reclaim;
3822	cp->cache_private = private;
3823	cp->cache_arena = vmp;
3824	cp->cache_cflags = cflags;
3825
3826	/*
3827	 * Determine the chunk size.
3828	 */
3829	chunksize = bufsize;
3830
3831	if (align >= KMEM_ALIGN) {
3832		chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3833		cp->cache_bufctl = chunksize - KMEM_ALIGN;
3834	}
3835
3836	if (cp->cache_flags & KMF_BUFTAG) {
3837		cp->cache_bufctl = chunksize;
3838		cp->cache_buftag = chunksize;
3839		if (cp->cache_flags & KMF_LITE)
3840			chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3841		else
3842			chunksize += sizeof (kmem_buftag_t);
3843	}
3844
3845	if (cp->cache_flags & KMF_DEADBEEF) {
3846		cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3847		if (cp->cache_flags & KMF_LITE)
3848			cp->cache_verify = sizeof (uint64_t);
3849	}
3850
3851	cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3852
3853	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3854
3855	/*
3856	 * Now that we know the chunk size, determine the optimal slab size.
3857	 */
3858	if (vmp == kmem_firewall_arena) {
3859		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3860		cp->cache_mincolor = cp->cache_slabsize - chunksize;
3861		cp->cache_maxcolor = cp->cache_mincolor;
3862		cp->cache_flags |= KMF_HASH;
3863		ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3864	} else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3865	    !(cp->cache_flags & KMF_AUDIT) &&
3866	    chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3867		cp->cache_slabsize = vmp->vm_quantum;
3868		cp->cache_mincolor = 0;
3869		cp->cache_maxcolor =
3870		    (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3871		ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3872		ASSERT(!(cp->cache_flags & KMF_AUDIT));
3873	} else {
3874		size_t chunks, bestfit, waste, slabsize;
3875		size_t minwaste = LONG_MAX;
3876
3877		bestfit = 0;
3878		for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3879			slabsize = P2ROUNDUP(chunksize * chunks,
3880			    vmp->vm_quantum);
3881			chunks = slabsize / chunksize;
3882			waste = (slabsize % chunksize) / chunks;
3883			if (waste < minwaste) {
3884				minwaste = waste;
3885				bestfit = slabsize;
3886			}
3887		}
3888		if (cflags & KMC_QCACHE)
3889			bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3890		cp->cache_slabsize = bestfit;
3891		cp->cache_mincolor = 0;
3892		cp->cache_maxcolor = bestfit % chunksize;
3893		cp->cache_flags |= KMF_HASH;
3894	}
3895
3896	cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3897	cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3898
3899	/*
3900	 * Disallowing prefill when either the DEBUG or HASH flag is set or when
3901	 * there is a constructor avoids some tricky issues with debug setup
3902	 * that may be revisited later. We cannot allow prefill in a
3903	 * metadata cache because of potential recursion.
3904	 */
3905	if (vmp == kmem_msb_arena ||
3906	    cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3907	    cp->cache_constructor != NULL)
3908		cp->cache_flags &= ~KMF_PREFILL;
3909
3910	if (cp->cache_flags & KMF_HASH) {
3911		ASSERT(!(cflags & KMC_NOHASH));
3912		cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3913		    kmem_bufctl_audit_cache : kmem_bufctl_cache;
3914	}
3915
3916	if (cp->cache_maxcolor >= vmp->vm_quantum)
3917		cp->cache_maxcolor = vmp->vm_quantum - 1;
3918
3919	cp->cache_color = cp->cache_mincolor;
3920
3921	/*
3922	 * Initialize the rest of the slab layer.
3923	 */
3924	mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3925
3926	avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3927	    sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3928	/* LINTED: E_TRUE_LOGICAL_EXPR */
3929	ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3930	/* reuse partial slab AVL linkage for complete slab list linkage */
3931	list_create(&cp->cache_complete_slabs,
3932	    sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3933
3934	if (cp->cache_flags & KMF_HASH) {
3935		cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3936		    KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3937		bzero(cp->cache_hash_table,
3938		    KMEM_HASH_INITIAL * sizeof (void *));
3939		cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3940		cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3941	}
3942
3943	/*
3944	 * Initialize the depot.
3945	 */
3946	mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3947
3948	for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3949		continue;
3950
3951	cp->cache_magtype = mtp;
3952
3953	/*
3954	 * Initialize the CPU layer.
3955	 */
3956	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3957		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3958		mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3959		ccp->cc_flags = cp->cache_flags;
3960		ccp->cc_rounds = -1;
3961		ccp->cc_prounds = -1;
3962	}
3963
3964	/*
3965	 * Create the cache's kstats.
3966	 */
3967	if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3968	    "kmem_cache", KSTAT_TYPE_NAMED,
3969	    sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3970	    KSTAT_FLAG_VIRTUAL)) != NULL) {
3971		cp->cache_kstat->ks_data = &kmem_cache_kstat;
3972		cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3973		cp->cache_kstat->ks_private = cp;
3974		cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3975		kstat_install(cp->cache_kstat);
3976	}
3977
3978	/*
3979	 * Add the cache to the global list.  This makes it visible
3980	 * to kmem_update(), so the cache must be ready for business.
3981	 */
3982	mutex_enter(&kmem_cache_lock);
3983	list_insert_tail(&kmem_caches, cp);
3984	mutex_exit(&kmem_cache_lock);
3985
3986	if (kmem_ready)
3987		kmem_cache_magazine_enable(cp);
3988
3989	return (cp);
3990}
3991
3992static int
3993kmem_move_cmp(const void *buf, const void *p)
3994{
3995	const kmem_move_t *kmm = p;
3996	uintptr_t v1 = (uintptr_t)buf;
3997	uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
3998	return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
3999}
4000
4001static void
4002kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4003{
4004	kmd->kmd_reclaim_numer = 1;
4005}
4006
4007/*
4008 * Initially, when choosing candidate slabs for buffers to move, we want to be
4009 * very selective and take only slabs that are less than
4010 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4011 * slabs, then we raise the allocation ceiling incrementally. The reclaim
4012 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4013 * longer fragmented.
4014 */
4015static void
4016kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4017{
4018	if (direction > 0) {
4019		/* make it easier to find a candidate slab */
4020		if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4021			kmd->kmd_reclaim_numer++;
4022		}
4023	} else {
4024		/* be more selective */
4025		if (kmd->kmd_reclaim_numer > 1) {
4026			kmd->kmd_reclaim_numer--;
4027		}
4028	}
4029}
4030
4031void
4032kmem_cache_set_move(kmem_cache_t *cp,
4033    kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4034{
4035	kmem_defrag_t *defrag;
4036
4037	ASSERT(move != NULL);
4038	/*
4039	 * The consolidator does not support NOTOUCH caches because kmem cannot
4040	 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4041	 * a low order bit usable by clients to distinguish uninitialized memory
4042	 * from known objects (see kmem_slab_create).
4043	 */
4044	ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4045	ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4046
4047	/*
4048	 * We should not be holding anyone's cache lock when calling
4049	 * kmem_cache_alloc(), so allocate in all cases before acquiring the
4050	 * lock.
4051	 */
4052	defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4053
4054	mutex_enter(&cp->cache_lock);
4055
4056	if (KMEM_IS_MOVABLE(cp)) {
4057		if (cp->cache_move == NULL) {
4058			ASSERT(cp->cache_slab_alloc == 0);
4059
4060			cp->cache_defrag = defrag;
4061			defrag = NULL; /* nothing to free */
4062			bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4063			avl_create(&cp->cache_defrag->kmd_moves_pending,
4064			    kmem_move_cmp, sizeof (kmem_move_t),
4065			    offsetof(kmem_move_t, kmm_entry));
4066			/* LINTED: E_TRUE_LOGICAL_EXPR */
4067			ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4068			/* reuse the slab's AVL linkage for deadlist linkage */
4069			list_create(&cp->cache_defrag->kmd_deadlist,
4070			    sizeof (kmem_slab_t),
4071			    offsetof(kmem_slab_t, slab_link));
4072			kmem_reset_reclaim_threshold(cp->cache_defrag);
4073		}
4074		cp->cache_move = move;
4075	}
4076
4077	mutex_exit(&cp->cache_lock);
4078
4079	if (defrag != NULL) {
4080		kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4081	}
4082}
4083
4084void
4085kmem_cache_destroy(kmem_cache_t *cp)
4086{
4087	int cpu_seqid;
4088
4089	/*
4090	 * Remove the cache from the global cache list so that no one else
4091	 * can schedule tasks on its behalf, wait for any pending tasks to
4092	 * complete, purge the cache, and then destroy it.
4093	 */
4094	mutex_enter(&kmem_cache_lock);
4095	list_remove(&kmem_caches, cp);
4096	mutex_exit(&kmem_cache_lock);
4097
4098	if (kmem_taskq != NULL)
4099		taskq_wait(kmem_taskq);
4100
4101	if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4102		taskq_wait(kmem_move_taskq);
4103
4104	kmem_cache_magazine_purge(cp);
4105
4106	mutex_enter(&cp->cache_lock);
4107	if (cp->cache_buftotal != 0)
4108		cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4109		    cp->cache_name, (void *)cp);
4110	if (cp->cache_defrag != NULL) {
4111		avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4112		list_destroy(&cp->cache_defrag->kmd_deadlist);
4113		kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4114		cp->cache_defrag = NULL;
4115	}
4116	/*
4117	 * The cache is now dead.  There should be no further activity.  We
4118	 * enforce this by setting land mines in the constructor, destructor,
4119	 * reclaim, and move routines that induce a kernel text fault if
4120	 * invoked.
4121	 */
4122	cp->cache_constructor = (int (*)(void *, void *, int))1;
4123	cp->cache_destructor = (void (*)(void *, void *))2;
4124	cp->cache_reclaim = (void (*)(void *))3;
4125	cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4126	mutex_exit(&cp->cache_lock);
4127
4128	kstat_delete(cp->cache_kstat);
4129
4130	if (cp->cache_hash_table != NULL)
4131		vmem_free(kmem_hash_arena, cp->cache_hash_table,
4132		    (cp->cache_hash_mask + 1) * sizeof (void *));
4133
4134	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4135		mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4136
4137	mutex_destroy(&cp->cache_depot_lock);
4138	mutex_destroy(&cp->cache_lock);
4139
4140	vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4141}
4142
4143/*ARGSUSED*/
4144static int
4145kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4146{
4147	ASSERT(MUTEX_HELD(&cpu_lock));
4148	if (what == CPU_UNCONFIG) {
4149		kmem_cache_applyall(kmem_cache_magazine_purge,
4150		    kmem_taskq, TQ_SLEEP);
4151		kmem_cache_applyall(kmem_cache_magazine_enable,
4152		    kmem_taskq, TQ_SLEEP);
4153	}
4154	return (0);
4155}
4156
4157static void
4158kmem_alloc_caches_create(const int *array, size_t count,
4159    kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4160{
4161	char name[KMEM_CACHE_NAMELEN + 1];
4162	size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4163	size_t size = table_unit;
4164	int i;
4165
4166	for (i = 0; i < count; i++) {
4167		size_t cache_size = array[i];
4168		size_t align = KMEM_ALIGN;
4169		kmem_cache_t *cp;
4170
4171		/* if the table has an entry for maxbuf, we're done */
4172		if (size > maxbuf)
4173			break;
4174
4175		/* cache size must be a multiple of the table unit */
4176		ASSERT(P2PHASE(cache_size, table_unit) == 0);
4177
4178		/*
4179		 * If they allocate a multiple of the coherency granularity,
4180		 * they get a coherency-granularity-aligned address.
4181		 */
4182		if (IS_P2ALIGNED(cache_size, 64))
4183			align = 64;
4184		if (IS_P2ALIGNED(cache_size, PAGESIZE))
4185			align = PAGESIZE;
4186		(void) snprintf(name, sizeof (name),
4187		    "kmem_alloc_%lu", cache_size);
4188		cp = kmem_cache_create(name, cache_size, align,
4189		    NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4190
4191		while (size <= cache_size) {
4192			alloc_table[(size - 1) >> shift] = cp;
4193			size += table_unit;
4194		}
4195	}
4196
4197	ASSERT(size > maxbuf);		/* i.e. maxbuf <= max(cache_size) */
4198}
4199
4200static void
4201kmem_cache_init(int pass, int use_large_pages)
4202{
4203	int i;
4204	size_t maxbuf;
4205	kmem_magtype_t *mtp;
4206
4207	for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4208		char name[KMEM_CACHE_NAMELEN + 1];
4209
4210		mtp = &kmem_magtype[i];
4211		(void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4212		mtp->mt_cache = kmem_cache_create(name,
4213		    (mtp->mt_magsize + 1) * sizeof (void *),
4214		    mtp->mt_align, NULL, NULL, NULL, NULL,
4215		    kmem_msb_arena, KMC_NOHASH);
4216	}
4217
4218	kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4219	    sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4220	    kmem_msb_arena, KMC_NOHASH);
4221
4222	kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4223	    sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4224	    kmem_msb_arena, KMC_NOHASH);
4225
4226	kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4227	    sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4228	    kmem_msb_arena, KMC_NOHASH);
4229
4230	if (pass == 2) {
4231		kmem_va_arena = vmem_create("kmem_va",
4232		    NULL, 0, PAGESIZE,
4233		    vmem_alloc, vmem_free, heap_arena,
4234		    8 * PAGESIZE, VM_SLEEP);
4235
4236		if (use_large_pages) {
4237			kmem_default_arena = vmem_xcreate("kmem_default",
4238			    NULL, 0, PAGESIZE,
4239			    segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4240			    0, VMC_DUMPSAFE | VM_SLEEP);
4241		} else {
4242			kmem_default_arena = vmem_create("kmem_default",
4243			    NULL, 0, PAGESIZE,
4244			    segkmem_alloc, segkmem_free, kmem_va_arena,
4245			    0, VMC_DUMPSAFE | VM_SLEEP);
4246		}
4247
4248		/* Figure out what our maximum cache size is */
4249		maxbuf = kmem_max_cached;
4250		if (maxbuf <= KMEM_MAXBUF) {
4251			maxbuf = 0;
4252			kmem_max_cached = KMEM_MAXBUF;
4253		} else {
4254			size_t size = 0;
4255			size_t max =
4256			    sizeof (kmem_big_alloc_sizes) / sizeof (int);
4257			/*
4258			 * Round maxbuf up to an existing cache size.  If maxbuf
4259			 * is larger than the largest cache, we truncate it to
4260			 * the largest cache's size.
4261			 */
4262			for (i = 0; i < max; i++) {
4263				size = kmem_big_alloc_sizes[i];
4264				if (maxbuf <= size)
4265					break;
4266			}
4267			kmem_max_cached = maxbuf = size;
4268		}
4269
4270		/*
4271		 * The big alloc table may not be completely overwritten, so
4272		 * we clear out any stale cache pointers from the first pass.
4273		 */
4274		bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4275	} else {
4276		/*
4277		 * During the first pass, the kmem_alloc_* caches
4278		 * are treated as metadata.
4279		 */
4280		kmem_default_arena = kmem_msb_arena;
4281		maxbuf = KMEM_BIG_MAXBUF_32BIT;
4282	}
4283
4284	/*
4285	 * Set up the default caches to back kmem_alloc()
4286	 */
4287	kmem_alloc_caches_create(
4288	    kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4289	    kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4290
4291	kmem_alloc_caches_create(
4292	    kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4293	    kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4294
4295	kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4296}
4297
4298void
4299kmem_init(void)
4300{
4301	kmem_cache_t *cp;
4302	int old_kmem_flags = kmem_flags;
4303	int use_large_pages = 0;
4304	size_t maxverify, minfirewall;
4305
4306	kstat_init();
4307
4308	/*
4309	 * Don't do firewalled allocations if the heap is less than 1TB
4310	 * (i.e. on a 32-bit kernel)
4311	 * The resulting VM_NEXTFIT allocations would create too much
4312	 * fragmentation in a small heap.
4313	 */
4314#if defined(_LP64)
4315	maxverify = minfirewall = PAGESIZE / 2;
4316#else
4317	maxverify = minfirewall = ULONG_MAX;
4318#endif
4319
4320	/* LINTED */
4321	ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4322
4323	list_create(&kmem_caches, sizeof (kmem_cache_t),
4324	    offsetof(kmem_cache_t, cache_link));
4325
4326	kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4327	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4328	    VM_SLEEP | VMC_NO_QCACHE);
4329
4330	kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4331	    PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4332	    VMC_DUMPSAFE | VM_SLEEP);
4333
4334	kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4335	    segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4336
4337	kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4338	    segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4339
4340	kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4341	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4342
4343	kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4344	    NULL, 0, PAGESIZE,
4345	    kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4346	    0, VM_SLEEP);
4347
4348	kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4349	    segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4350	    VMC_DUMPSAFE | VM_SLEEP);
4351
4352	/* temporary oversize arena for mod_read_system_file */
4353	kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4354	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4355
4356	kmem_reap_interval = 15 * hz;
4357
4358	/*
4359	 * Read /etc/system.  This is a chicken-and-egg problem because
4360	 * kmem_flags may be set in /etc/system, but mod_read_system_file()
4361	 * needs to use the allocator.  The simplest solution is to create
4362	 * all the standard kmem caches, read /etc/system, destroy all the
4363	 * caches we just created, and then create them all again in light
4364	 * of the (possibly) new kmem_flags and other kmem tunables.
4365	 */
4366	kmem_cache_init(1, 0);
4367
4368	mod_read_system_file(boothowto & RB_ASKNAME);
4369
4370	while ((cp = list_tail(&kmem_caches)) != NULL)
4371		kmem_cache_destroy(cp);
4372
4373	vmem_destroy(kmem_oversize_arena);
4374
4375	if (old_kmem_flags & KMF_STICKY)
4376		kmem_flags = old_kmem_flags;
4377
4378	if (!(kmem_flags & KMF_AUDIT))
4379		vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4380
4381	if (kmem_maxverify == 0)
4382		kmem_maxverify = maxverify;
4383
4384	if (kmem_minfirewall == 0)
4385		kmem_minfirewall = minfirewall;
4386
4387	/*
4388	 * give segkmem a chance to figure out if we are using large pages
4389	 * for the kernel heap
4390	 */
4391	use_large_pages = segkmem_lpsetup();
4392
4393	/*
4394	 * To protect against corruption, we keep the actual number of callers
4395	 * KMF_LITE records seperate from the tunable.  We arbitrarily clamp
4396	 * to 16, since the overhead for small buffers quickly gets out of
4397	 * hand.
4398	 *
4399	 * The real limit would depend on the needs of the largest KMC_NOHASH
4400	 * cache.
4401	 */
4402	kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4403	kmem_lite_pcs = kmem_lite_count;
4404
4405	/*
4406	 * Normally, we firewall oversized allocations when possible, but
4407	 * if we are using large pages for kernel memory, and we don't have
4408	 * any non-LITE debugging flags set, we want to allocate oversized
4409	 * buffers from large pages, and so skip the firewalling.
4410	 */
4411	if (use_large_pages &&
4412	    ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4413		kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4414		    PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4415		    0, VMC_DUMPSAFE | VM_SLEEP);
4416	} else {
4417		kmem_oversize_arena = vmem_create("kmem_oversize",
4418		    NULL, 0, PAGESIZE,
4419		    segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
4420		    kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4421		    VM_SLEEP);
4422	}
4423
4424	kmem_cache_init(2, use_large_pages);
4425
4426	if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4427		if (kmem_transaction_log_size == 0)
4428			kmem_transaction_log_size = kmem_maxavail() / 50;
4429		kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4430	}
4431
4432	if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4433		if (kmem_content_log_size == 0)
4434			kmem_content_log_size = kmem_maxavail() / 50;
4435		kmem_content_log = kmem_log_init(kmem_content_log_size);
4436	}
4437
4438	kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4439	kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4440	kmem_zerosized_log = kmem_log_init(kmem_zerosized_log_size);
4441
4442	/*
4443	 * Initialize STREAMS message caches so allocb() is available.
4444	 * This allows us to initialize the logging framework (cmn_err(9F),
4445	 * strlog(9F), etc) so we can start recording messages.
4446	 */
4447	streams_msg_init();
4448
4449	/*
4450	 * Initialize the ZSD framework in Zones so modules loaded henceforth
4451	 * can register their callbacks.
4452	 */
4453	zone_zsd_init();
4454
4455	log_init();
4456	taskq_init();
4457
4458	/*
4459	 * Warn about invalid or dangerous values of kmem_flags.
4460	 * Always warn about unsupported values.
4461	 */
4462	if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4463	    KMF_CONTENTS | KMF_LITE)) != 0) ||
4464	    ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4465		cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4466		    "See the Solaris Tunable Parameters Reference Manual.",
4467		    kmem_flags);
4468
4469#ifdef DEBUG
4470	if ((kmem_flags & KMF_DEBUG) == 0)
4471		cmn_err(CE_NOTE, "kmem debugging disabled.");
4472#else
4473	/*
4474	 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4475	 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4476	 * if KMF_AUDIT is set). We should warn the user about the performance
4477	 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4478	 * isn't set (since that disables AUDIT).
4479	 */
4480	if (!(kmem_flags & KMF_LITE) &&
4481	    (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4482		cmn_err(CE_WARN, "High-overhead kmem debugging features "
4483		    "enabled (kmem_flags = 0x%x).  Performance degradation "
4484		    "and large memory overhead possible. See the Solaris "
4485		    "Tunable Parameters Reference Manual.", kmem_flags);
4486#endif /* not DEBUG */
4487
4488	kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4489
4490	kmem_ready = 1;
4491
4492	/*
4493	 * Initialize the platform-specific aligned/DMA memory allocator.
4494	 */
4495	ka_init();
4496
4497	/*
4498	 * Initialize 32-bit ID cache.
4499	 */
4500	id32_init();
4501
4502	/*
4503	 * Initialize the networking stack so modules loaded can
4504	 * register their callbacks.
4505	 */
4506	netstack_init();
4507}
4508
4509static void
4510kmem_move_init(void)
4511{
4512	kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4513	    sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4514	    kmem_msb_arena, KMC_NOHASH);
4515	kmem_move_cache = kmem_cache_create("kmem_move_cache",
4516	    sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4517	    kmem_msb_arena, KMC_NOHASH);
4518
4519	/*
4520	 * kmem guarantees that move callbacks are sequential and that even
4521	 * across multiple caches no two moves ever execute simultaneously.
4522	 * Move callbacks are processed on a separate taskq so that client code
4523	 * does not interfere with internal maintenance tasks.
4524	 */
4525	kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4526	    minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4527}
4528
4529void
4530kmem_thread_init(void)
4531{
4532	kmem_move_init();
4533
4534	/*
4535	 * This taskq is used for various kmem maintenance functions, including
4536	 * kmem_reap().   When maintenance is required on every cache,
4537	 * kmem_cache_applyall() dispatches one task per cache onto this queue.
4538	 *
4539	 * In the case of kmem_reap(), the system may be under increasingly
4540	 * dire memory pressure and may not be able to allocate a new task
4541	 * entry.  The count of entries to prepopulate (below) should cover at
4542	 * least as many caches as we generally expect to exist on the system
4543	 * so that they may all be scheduled for reaping under those
4544	 * conditions.
4545	 */
4546	kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4547	    600, INT_MAX, TASKQ_PREPOPULATE);
4548}
4549
4550void
4551kmem_mp_init(void)
4552{
4553	mutex_enter(&cpu_lock);
4554	register_cpu_setup_func(kmem_cpu_setup, NULL);
4555	mutex_exit(&cpu_lock);
4556
4557	kmem_update_timeout(NULL);
4558
4559	taskq_mp_init();
4560}
4561
4562/*
4563 * Return the slab of the allocated buffer, or NULL if the buffer is not
4564 * allocated. This function may be called with a known slab address to determine
4565 * whether or not the buffer is allocated, or with a NULL slab address to obtain
4566 * an allocated buffer's slab.
4567 */
4568static kmem_slab_t *
4569kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4570{
4571	kmem_bufctl_t *bcp, *bufbcp;
4572
4573	ASSERT(MUTEX_HELD(&cp->cache_lock));
4574	ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4575
4576	if (cp->cache_flags & KMF_HASH) {
4577		for (bcp = *KMEM_HASH(cp, buf);
4578		    (bcp != NULL) && (bcp->bc_addr != buf);
4579		    bcp = bcp->bc_next) {
4580			continue;
4581		}
4582		ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4583		return (bcp == NULL ? NULL : bcp->bc_slab);
4584	}
4585
4586	if (sp == NULL) {
4587		sp = KMEM_SLAB(cp, buf);
4588	}
4589	bufbcp = KMEM_BUFCTL(cp, buf);
4590	for (bcp = sp->slab_head;
4591	    (bcp != NULL) && (bcp != bufbcp);
4592	    bcp = bcp->bc_next) {
4593		continue;
4594	}
4595	return (bcp == NULL ? sp : NULL);
4596}
4597
4598static boolean_t
4599kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4600{
4601	long refcnt = sp->slab_refcnt;
4602
4603	ASSERT(cp->cache_defrag != NULL);
4604
4605	/*
4606	 * For code coverage we want to be able to move an object within the
4607	 * same slab (the only partial slab) even if allocating the destination
4608	 * buffer resulted in a completely allocated slab.
4609	 */
4610	if (flags & KMM_DEBUG) {
4611		return ((flags & KMM_DESPERATE) ||
4612		    ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4613	}
4614
4615	/* If we're desperate, we don't care if the client said NO. */
4616	if (flags & KMM_DESPERATE) {
4617		return (refcnt < sp->slab_chunks); /* any partial */
4618	}
4619
4620	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4621		return (B_FALSE);
4622	}
4623
4624	if ((refcnt == 1) || kmem_move_any_partial) {
4625		return (refcnt < sp->slab_chunks);
4626	}
4627
4628	/*
4629	 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4630	 * slabs with a progressively higher percentage of used buffers can be
4631	 * reclaimed until the cache as a whole is no longer fragmented.
4632	 *
4633	 *	sp->slab_refcnt   kmd_reclaim_numer
4634	 *	--------------- < ------------------
4635	 *	sp->slab_chunks   KMEM_VOID_FRACTION
4636	 */
4637	return ((refcnt * KMEM_VOID_FRACTION) <
4638	    (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4639}
4640
4641/*
4642 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4643 * or when the buffer is freed.
4644 */
4645static void
4646kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4647{
4648	ASSERT(MUTEX_HELD(&cp->cache_lock));
4649	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4650
4651	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4652		return;
4653	}
4654
4655	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4656		if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4657			avl_remove(&cp->cache_partial_slabs, sp);
4658			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4659			sp->slab_stuck_offset = (uint32_t)-1;
4660			avl_add(&cp->cache_partial_slabs, sp);
4661		}
4662	} else {
4663		sp->slab_later_count = 0;
4664		sp->slab_stuck_offset = (uint32_t)-1;
4665	}
4666}
4667
4668static void
4669kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4670{
4671	ASSERT(taskq_member(kmem_move_taskq, curthread));
4672	ASSERT(MUTEX_HELD(&cp->cache_lock));
4673	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4674
4675	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4676		return;
4677	}
4678
4679	avl_remove(&cp->cache_partial_slabs, sp);
4680	sp->slab_later_count = 0;
4681	sp->slab_flags |= KMEM_SLAB_NOMOVE;
4682	sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4683	avl_add(&cp->cache_partial_slabs, sp);
4684}
4685
4686static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4687
4688/*
4689 * The move callback takes two buffer addresses, the buffer to be moved, and a
4690 * newly allocated and constructed buffer selected by kmem as the destination.
4691 * It also takes the size of the buffer and an optional user argument specified
4692 * at cache creation time. kmem guarantees that the buffer to be moved has not
4693 * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4694 * guarantee the present whereabouts of the buffer to be moved, so it is up to
4695 * the client to safely determine whether or not it is still using the buffer.
4696 * The client must not free either of the buffers passed to the move callback,
4697 * since kmem wants to free them directly to the slab layer. The client response
4698 * tells kmem which of the two buffers to free:
4699 *
4700 * YES		kmem frees the old buffer (the move was successful)
4701 * NO		kmem frees the new buffer, marks the slab of the old buffer
4702 *              non-reclaimable to avoid bothering the client again
4703 * LATER	kmem frees the new buffer, increments slab_later_count
4704 * DONT_KNOW	kmem frees the new buffer
4705 * DONT_NEED	kmem frees both the old buffer and the new buffer
4706 *
4707 * The pending callback argument now being processed contains both of the
4708 * buffers (old and new) passed to the move callback function, the slab of the
4709 * old buffer, and flags related to the move request, such as whether or not the
4710 * system was desperate for memory.
4711 *
4712 * Slabs are not freed while there is a pending callback, but instead are kept
4713 * on a deadlist, which is drained after the last callback completes. This means
4714 * that slabs are safe to access until kmem_move_end(), no matter how many of
4715 * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4716 * zero for as long as the slab remains on the deadlist and until the slab is
4717 * freed.
4718 */
4719static void
4720kmem_move_buffer(kmem_move_t *callback)
4721{
4722	kmem_cbrc_t response;
4723	kmem_slab_t *sp = callback->kmm_from_slab;
4724	kmem_cache_t *cp = sp->slab_cache;
4725	boolean_t free_on_slab;
4726
4727	ASSERT(taskq_member(kmem_move_taskq, curthread));
4728	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4729	ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4730
4731	/*
4732	 * The number of allocated buffers on the slab may have changed since we
4733	 * last checked the slab's reclaimability (when the pending move was
4734	 * enqueued), or the client may have responded NO when asked to move
4735	 * another buffer on the same slab.
4736	 */
4737	if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4738		kmem_slab_free(cp, callback->kmm_to_buf);
4739		kmem_move_end(cp, callback);
4740		return;
4741	}
4742
4743	/*
4744	 * Checking the slab layer is easy, so we might as well do that here
4745	 * in case we can avoid bothering the client.
4746	 */
4747	mutex_enter(&cp->cache_lock);
4748	free_on_slab = (kmem_slab_allocated(cp, sp,
4749	    callback->kmm_from_buf) == NULL);
4750	mutex_exit(&cp->cache_lock);
4751
4752	if (free_on_slab) {
4753		kmem_slab_free(cp, callback->kmm_to_buf);
4754		kmem_move_end(cp, callback);
4755		return;
4756	}
4757
4758	if (cp->cache_flags & KMF_BUFTAG) {
4759		/*
4760		 * Make kmem_cache_alloc_debug() apply the constructor for us.
4761		 */
4762		if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4763		    KM_NOSLEEP, 1, caller()) != 0) {
4764			kmem_move_end(cp, callback);
4765			return;
4766		}
4767	} else if (cp->cache_constructor != NULL &&
4768	    cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4769	    KM_NOSLEEP) != 0) {
4770		atomic_inc_64(&cp->cache_alloc_fail);
4771		kmem_slab_free(cp, callback->kmm_to_buf);
4772		kmem_move_end(cp, callback);
4773		return;
4774	}
4775
4776	cp->cache_defrag->kmd_callbacks++;
4777	cp->cache_defrag->kmd_thread = curthread;
4778	cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4779	cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4780	DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4781	    callback);
4782
4783	response = cp->cache_move(callback->kmm_from_buf,
4784	    callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4785
4786	DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4787	    callback, kmem_cbrc_t, response);
4788	cp->cache_defrag->kmd_thread = NULL;
4789	cp->cache_defrag->kmd_from_buf = NULL;
4790	cp->cache_defrag->kmd_to_buf = NULL;
4791
4792	if (response == KMEM_CBRC_YES) {
4793		cp->cache_defrag->kmd_yes++;
4794		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4795		/* slab safe to access until kmem_move_end() */
4796		if (sp->slab_refcnt == 0)
4797			cp->cache_defrag->kmd_slabs_freed++;
4798		mutex_enter(&cp->cache_lock);
4799		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4800		mutex_exit(&cp->cache_lock);
4801		kmem_move_end(cp, callback);
4802		return;
4803	}
4804
4805	switch (response) {
4806	case KMEM_CBRC_NO:
4807		cp->cache_defrag->kmd_no++;
4808		mutex_enter(&cp->cache_lock);
4809		kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4810		mutex_exit(&cp->cache_lock);
4811		break;
4812	case KMEM_CBRC_LATER:
4813		cp->cache_defrag->kmd_later++;
4814		mutex_enter(&cp->cache_lock);
4815		if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4816			mutex_exit(&cp->cache_lock);
4817			break;
4818		}
4819
4820		if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4821			kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4822		} else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4823			sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4824			    callback->kmm_from_buf);
4825		}
4826		mutex_exit(&cp->cache_lock);
4827		break;
4828	case KMEM_CBRC_DONT_NEED:
4829		cp->cache_defrag->kmd_dont_need++;
4830		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4831		if (sp->slab_refcnt == 0)
4832			cp->cache_defrag->kmd_slabs_freed++;
4833		mutex_enter(&cp->cache_lock);
4834		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4835		mutex_exit(&cp->cache_lock);
4836		break;
4837	case KMEM_CBRC_DONT_KNOW:
4838		/*
4839		 * If we don't know if we can move this buffer or not, we'll
4840		 * just assume that we can't:  if the buffer is in fact free,
4841		 * then it is sitting in one of the per-CPU magazines or in
4842		 * a full magazine in the depot layer.  Either way, because
4843		 * defrag is induced in the same logic that reaps a cache,
4844		 * it's likely that full magazines will be returned to the
4845		 * system soon (thereby accomplishing what we're trying to
4846		 * accomplish here: return those magazines to their slabs).
4847		 * Given this, any work that we might do now to locate a buffer
4848		 * in a magazine is wasted (and expensive!) work; we bump
4849		 * a counter in this case and otherwise assume that we can't
4850		 * move it.
4851		 */
4852		cp->cache_defrag->kmd_dont_know++;
4853		break;
4854	default:
4855		panic("'%s' (%p) unexpected move callback response %d\n",
4856		    cp->cache_name, (void *)cp, response);
4857	}
4858
4859	kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4860	kmem_move_end(cp, callback);
4861}
4862
4863/* Return B_FALSE if there is insufficient memory for the move request. */
4864static boolean_t
4865kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4866{
4867	void *to_buf;
4868	avl_index_t index;
4869	kmem_move_t *callback, *pending;
4870	ulong_t n;
4871
4872	ASSERT(taskq_member(kmem_taskq, curthread));
4873	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4874	ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4875
4876	callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4877
4878	if (callback == NULL)
4879		return (B_FALSE);
4880
4881	callback->kmm_from_slab = sp;
4882	callback->kmm_from_buf = buf;
4883	callback->kmm_flags = flags;
4884
4885	mutex_enter(&cp->cache_lock);
4886
4887	n = avl_numnodes(&cp->cache_partial_slabs);
4888	if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
4889		mutex_exit(&cp->cache_lock);
4890		kmem_cache_free(kmem_move_cache, callback);
4891		return (B_TRUE); /* there is no need for the move request */
4892	}
4893
4894	pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4895	if (pending != NULL) {
4896		/*
4897		 * If the move is already pending and we're desperate now,
4898		 * update the move flags.
4899		 */
4900		if (flags & KMM_DESPERATE) {
4901			pending->kmm_flags |= KMM_DESPERATE;
4902		}
4903		mutex_exit(&cp->cache_lock);
4904		kmem_cache_free(kmem_move_cache, callback);
4905		return (B_TRUE);
4906	}
4907
4908	to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4909	    B_FALSE);
4910	callback->kmm_to_buf = to_buf;
4911	avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4912
4913	mutex_exit(&cp->cache_lock);
4914
4915	if (taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4916	    callback, TQ_NOSLEEP) == TASKQID_INVALID) {
4917		mutex_enter(&cp->cache_lock);
4918		avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4919		mutex_exit(&cp->cache_lock);
4920		kmem_slab_free(cp, to_buf);
4921		kmem_cache_free(kmem_move_cache, callback);
4922		return (B_FALSE);
4923	}
4924
4925	return (B_TRUE);
4926}
4927
4928static void
4929kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4930{
4931	avl_index_t index;
4932
4933	ASSERT(cp->cache_defrag != NULL);
4934	ASSERT(taskq_member(kmem_move_taskq, curthread));
4935	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4936
4937	mutex_enter(&cp->cache_lock);
4938	VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4939	    callback->kmm_from_buf, &index) != NULL);
4940	avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4941	if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4942		list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4943		kmem_slab_t *sp;
4944
4945		/*
4946		 * The last pending move completed. Release all slabs from the
4947		 * front of the dead list except for any slab at the tail that
4948		 * needs to be released from the context of kmem_move_buffers().
4949		 * kmem deferred unmapping the buffers on these slabs in order
4950		 * to guarantee that buffers passed to the move callback have
4951		 * been touched only by kmem or by the client itself.
4952		 */
4953		while ((sp = list_remove_head(deadlist)) != NULL) {
4954			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
4955				list_insert_tail(deadlist, sp);
4956				break;
4957			}
4958			cp->cache_defrag->kmd_deadcount--;
4959			cp->cache_slab_destroy++;
4960			mutex_exit(&cp->cache_lock);
4961			kmem_slab_destroy(cp, sp);
4962			mutex_enter(&cp->cache_lock);
4963		}
4964	}
4965	mutex_exit(&cp->cache_lock);
4966	kmem_cache_free(kmem_move_cache, callback);
4967}
4968
4969/*
4970 * Move buffers from least used slabs first by scanning backwards from the end
4971 * of the partial slab list. Scan at most max_scan candidate slabs and move
4972 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
4973 * If desperate to reclaim memory, move buffers from any partial slab, otherwise
4974 * skip slabs with a ratio of allocated buffers at or above the current
4975 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
4976 * scan is aborted) so that the caller can adjust the reclaimability threshold
4977 * depending on how many reclaimable slabs it finds.
4978 *
4979 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
4980 * move request, since it is not valid for kmem_move_begin() to call
4981 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
4982 */
4983static int
4984kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
4985    int flags)
4986{
4987	kmem_slab_t *sp;
4988	void *buf;
4989	int i, j; /* slab index, buffer index */
4990	int s; /* reclaimable slabs */
4991	int b; /* allocated (movable) buffers on reclaimable slab */
4992	boolean_t success;
4993	int refcnt;
4994	int nomove;
4995
4996	ASSERT(taskq_member(kmem_taskq, curthread));
4997	ASSERT(MUTEX_HELD(&cp->cache_lock));
4998	ASSERT(kmem_move_cache != NULL);
4999	ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
5000	ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
5001	    avl_numnodes(&cp->cache_partial_slabs) > 1);
5002
5003	if (kmem_move_blocked) {
5004		return (0);
5005	}
5006
5007	if (kmem_move_fulltilt) {
5008		flags |= KMM_DESPERATE;
5009	}
5010
5011	if (max_scan == 0 || (flags & KMM_DESPERATE)) {
5012		/*
5013		 * Scan as many slabs as needed to find the desired number of
5014		 * candidate slabs.
5015		 */
5016		max_scan = (size_t)-1;
5017	}
5018
5019	if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
5020		/* Find as many candidate slabs as possible. */
5021		max_slabs = (size_t)-1;
5022	}
5023
5024	sp = avl_last(&cp->cache_partial_slabs);
5025	ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
5026	for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
5027	    ((sp != avl_first(&cp->cache_partial_slabs)) ||
5028	    (flags & KMM_DEBUG));
5029	    sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5030
5031		if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5032			continue;
5033		}
5034		s++;
5035
5036		/* Look for allocated buffers to move. */
5037		for (j = 0, b = 0, buf = sp->slab_base;
5038		    (j < sp->slab_chunks) && (b < sp->slab_refcnt);
5039		    buf = (((char *)buf) + cp->cache_chunksize), j++) {
5040
5041			if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5042				continue;
5043			}
5044
5045			b++;
5046
5047			/*
5048			 * Prevent the slab from being destroyed while we drop
5049			 * cache_lock and while the pending move is not yet
5050			 * registered. Flag the pending move while
5051			 * kmd_moves_pending may still be empty, since we can't
5052			 * yet rely on a non-zero pending move count to prevent
5053			 * the slab from being destroyed.
5054			 */
5055			ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5056			sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5057			/*
5058			 * Recheck refcnt and nomove after reacquiring the lock,
5059			 * since these control the order of partial slabs, and
5060			 * we want to know if we can pick up the scan where we
5061			 * left off.
5062			 */
5063			refcnt = sp->slab_refcnt;
5064			nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5065			mutex_exit(&cp->cache_lock);
5066
5067			success = kmem_move_begin(cp, sp, buf, flags);
5068
5069			/*
5070			 * Now, before the lock is reacquired, kmem could
5071			 * process all pending move requests and purge the
5072			 * deadlist, so that upon reacquiring the lock, sp has
5073			 * been remapped. Or, the client may free all the
5074			 * objects on the slab while the pending moves are still
5075			 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
5076			 * flag causes the slab to be put at the end of the
5077			 * deadlist and prevents it from being destroyed, since
5078			 * we plan to destroy it here after reacquiring the
5079			 * lock.
5080			 */
5081			mutex_enter(&cp->cache_lock);
5082			ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5083			sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5084
5085			if (sp->slab_refcnt == 0) {
5086				list_t *deadlist =
5087				    &cp->cache_defrag->kmd_deadlist;
5088				list_remove(deadlist, sp);
5089
5090				if (!avl_is_empty(
5091				    &cp->cache_defrag->kmd_moves_pending)) {
5092					/*
5093					 * A pending move makes it unsafe to
5094					 * destroy the slab, because even though
5095					 * the move is no longer needed, the
5096					 * context where that is determined
5097					 * requires the slab to exist.
5098					 * Fortunately, a pending move also
5099					 * means we don't need to destroy the
5100					 * slab here, since it will get
5101					 * destroyed along with any other slabs
5102					 * on the deadlist after the last
5103					 * pending move completes.
5104					 */
5105					list_insert_head(deadlist, sp);
5106					return (-1);
5107				}
5108
5109				/*
5110				 * Destroy the slab now if it was completely
5111				 * freed while we dropped cache_lock and there
5112				 * are no pending moves. Since slab_refcnt
5113				 * cannot change once it reaches zero, no new
5114				 * pending moves from that slab are possible.
5115				 */
5116				cp->cache_defrag->kmd_deadcount--;
5117				cp->cache_slab_destroy++;
5118				mutex_exit(&cp->cache_lock);
5119				kmem_slab_destroy(cp, sp);
5120				mutex_enter(&cp->cache_lock);
5121				/*
5122				 * Since we can't pick up the scan where we left
5123				 * off, abort the scan and say nothing about the
5124				 * number of reclaimable slabs.
5125				 */
5126				return (-1);
5127			}
5128
5129			if (!success) {
5130				/*
5131				 * Abort the scan if there is not enough memory
5132				 * for the request and say nothing about the
5133				 * number of reclaimable slabs.
5134				 */
5135				return (-1);
5136			}
5137
5138			/*
5139			 * The slab's position changed while the lock was
5140			 * dropped, so we don't know where we are in the
5141			 * sequence any more.
5142			 */
5143			if (sp->slab_refcnt != refcnt) {
5144				/*
5145				 * If this is a KMM_DEBUG move, the slab_refcnt
5146				 * may have changed because we allocated a
5147				 * destination buffer on the same slab. In that
5148				 * case, we're not interested in counting it.
5149				 */
5150				return (-1);
5151			}
5152			if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
5153				return (-1);
5154
5155			/*
5156			 * Generating a move request allocates a destination
5157			 * buffer from the slab layer, bumping the first partial
5158			 * slab if it is completely allocated. If the current
5159			 * slab becomes the first partial slab as a result, we
5160			 * can't continue to scan backwards.
5161			 *
5162			 * If this is a KMM_DEBUG move and we allocated the
5163			 * destination buffer from the last partial slab, then
5164			 * the buffer we're moving is on the same slab and our
5165			 * slab_refcnt has changed, causing us to return before
5166			 * reaching here if there are no partial slabs left.
5167			 */
5168			ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5169			if (sp == avl_first(&cp->cache_partial_slabs)) {
5170				/*
5171				 * We're not interested in a second KMM_DEBUG
5172				 * move.
5173				 */
5174				goto end_scan;
5175			}
5176		}
5177	}
5178end_scan:
5179
5180	return (s);
5181}
5182
5183typedef struct kmem_move_notify_args {
5184	kmem_cache_t *kmna_cache;
5185	void *kmna_buf;
5186} kmem_move_notify_args_t;
5187
5188static void
5189kmem_cache_move_notify_task(void *arg)
5190{
5191	kmem_move_notify_args_t *args = arg;
5192	kmem_cache_t *cp = args->kmna_cache;
5193	void *buf = args->kmna_buf;
5194	kmem_slab_t *sp;
5195
5196	ASSERT(taskq_member(kmem_taskq, curthread));
5197	ASSERT(list_link_active(&cp->cache_link));
5198
5199	kmem_free(args, sizeof (kmem_move_notify_args_t));
5200	mutex_enter(&cp->cache_lock);
5201	sp = kmem_slab_allocated(cp, NULL, buf);
5202
5203	/* Ignore the notification if the buffer is no longer allocated. */
5204	if (sp == NULL) {
5205		mutex_exit(&cp->cache_lock);
5206		return;
5207	}
5208
5209	/* Ignore the notification if there's no reason to move the buffer. */
5210	if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5211		/*
5212		 * So far the notification is not ignored. Ignore the
5213		 * notification if the slab is not marked by an earlier refusal
5214		 * to move a buffer.
5215		 */
5216		if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
5217		    (sp->slab_later_count == 0)) {
5218			mutex_exit(&cp->cache_lock);
5219			return;
5220		}
5221
5222		kmem_slab_move_yes(cp, sp, buf);
5223		ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5224		sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5225		mutex_exit(&cp->cache_lock);
5226		/* see kmem_move_buffers() about dropping the lock */
5227		(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5228		mutex_enter(&cp->cache_lock);
5229		ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5230		sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5231		if (sp->slab_refcnt == 0) {
5232			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5233			list_remove(deadlist, sp);
5234
5235			if (!avl_is_empty(
5236			    &cp->cache_defrag->kmd_moves_pending)) {
5237				list_insert_head(deadlist, sp);
5238				mutex_exit(&cp->cache_lock);
5239				return;
5240			}
5241
5242			cp->cache_defrag->kmd_deadcount--;
5243			cp->cache_slab_destroy++;
5244			mutex_exit(&cp->cache_lock);
5245			kmem_slab_destroy(cp, sp);
5246			return;
5247		}
5248	} else {
5249		kmem_slab_move_yes(cp, sp, buf);
5250	}
5251	mutex_exit(&cp->cache_lock);
5252}
5253
5254void
5255kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5256{
5257	kmem_move_notify_args_t *args;
5258
5259	args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
5260	if (args != NULL) {
5261		args->kmna_cache = cp;
5262		args->kmna_buf = buf;
5263		if (taskq_dispatch(kmem_taskq,
5264		    (task_func_t *)kmem_cache_move_notify_task, args,
5265		    TQ_NOSLEEP) == TASKQID_INVALID)
5266			kmem_free(args, sizeof (kmem_move_notify_args_t));
5267	}
5268}
5269
5270static void
5271kmem_cache_defrag(kmem_cache_t *cp)
5272{
5273	size_t n;
5274
5275	ASSERT(cp->cache_defrag != NULL);
5276
5277	mutex_enter(&cp->cache_lock);
5278	n = avl_numnodes(&cp->cache_partial_slabs);
5279	if (n > 1) {
5280		/* kmem_move_buffers() drops and reacquires cache_lock */
5281		cp->cache_defrag->kmd_defrags++;
5282		(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5283	}
5284	mutex_exit(&cp->cache_lock);
5285}
5286
5287/* Is this cache above the fragmentation threshold? */
5288static boolean_t
5289kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5290{
5291	/*
5292	 *	nfree		kmem_frag_numer
5293	 * ------------------ > ---------------
5294	 * cp->cache_buftotal	kmem_frag_denom
5295	 */
5296	return ((nfree * kmem_frag_denom) >
5297	    (cp->cache_buftotal * kmem_frag_numer));
5298}
5299
5300static boolean_t
5301kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5302{
5303	boolean_t fragmented;
5304	uint64_t nfree;
5305
5306	ASSERT(MUTEX_HELD(&cp->cache_lock));
5307	*doreap = B_FALSE;
5308
5309	if (kmem_move_fulltilt) {
5310		if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5311			return (B_TRUE);
5312		}
5313	} else {
5314		if ((cp->cache_complete_slab_count + avl_numnodes(
5315		    &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5316			return (B_FALSE);
5317		}
5318	}
5319
5320	nfree = cp->cache_bufslab;
5321	fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5322	    kmem_cache_frag_threshold(cp, nfree));
5323
5324	/*
5325	 * Free buffers in the magazine layer appear allocated from the point of
5326	 * view of the slab layer. We want to know if the slab layer would
5327	 * appear fragmented if we included free buffers from magazines that
5328	 * have fallen out of the working set.
5329	 */
5330	if (!fragmented) {
5331		long reap;
5332
5333		mutex_enter(&cp->cache_depot_lock);
5334		reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5335		reap = MIN(reap, cp->cache_full.ml_total);
5336		mutex_exit(&cp->cache_depot_lock);
5337
5338		nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5339		if (kmem_cache_frag_threshold(cp, nfree)) {
5340			*doreap = B_TRUE;
5341		}
5342	}
5343
5344	return (fragmented);
5345}
5346
5347/* Called periodically from kmem_taskq */
5348static void
5349kmem_cache_scan(kmem_cache_t *cp)
5350{
5351	boolean_t reap = B_FALSE;
5352	kmem_defrag_t *kmd;
5353
5354	ASSERT(taskq_member(kmem_taskq, curthread));
5355
5356	mutex_enter(&cp->cache_lock);
5357
5358	kmd = cp->cache_defrag;
5359	if (kmd->kmd_consolidate > 0) {
5360		kmd->kmd_consolidate--;
5361		mutex_exit(&cp->cache_lock);
5362		kmem_cache_reap(cp);
5363		return;
5364	}
5365
5366	if (kmem_cache_is_fragmented(cp, &reap)) {
5367		size_t slabs_found;
5368
5369		/*
5370		 * Consolidate reclaimable slabs from the end of the partial
5371		 * slab list (scan at most kmem_reclaim_scan_range slabs to find
5372		 * reclaimable slabs). Keep track of how many candidate slabs we
5373		 * looked for and how many we actually found so we can adjust
5374		 * the definition of a candidate slab if we're having trouble
5375		 * finding them.
5376		 *
5377		 * kmem_move_buffers() drops and reacquires cache_lock.
5378		 */
5379		kmd->kmd_scans++;
5380		slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5381		    kmem_reclaim_max_slabs, 0);
5382		if (slabs_found >= 0) {
5383			kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
5384			kmd->kmd_slabs_found += slabs_found;
5385		}
5386
5387		if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
5388			kmd->kmd_tries = 0;
5389
5390			/*
5391			 * If we had difficulty finding candidate slabs in
5392			 * previous scans, adjust the threshold so that
5393			 * candidates are easier to find.
5394			 */
5395			if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
5396				kmem_adjust_reclaim_threshold(kmd, -1);
5397			} else if ((kmd->kmd_slabs_found * 2) <
5398			    kmd->kmd_slabs_sought) {
5399				kmem_adjust_reclaim_threshold(kmd, 1);
5400			}
5401			kmd->kmd_slabs_sought = 0;
5402			kmd->kmd_slabs_found = 0;
5403		}
5404	} else {
5405		kmem_reset_reclaim_threshold(cp->cache_defrag);
5406#ifdef	DEBUG
5407		if (!avl_is_empty(&cp->cache_partial_slabs)) {
5408			/*
5409			 * In a debug kernel we want the consolidator to
5410			 * run occasionally even when there is plenty of
5411			 * memory.
5412			 */
5413			uint16_t debug_rand;
5414
5415			(void) random_get_bytes((uint8_t *)&debug_rand, 2);
5416			if (!kmem_move_noreap &&
5417			    ((debug_rand % kmem_mtb_reap) == 0)) {
5418				mutex_exit(&cp->cache_lock);
5419				kmem_cache_reap(cp);
5420				return;
5421			} else if ((debug_rand % kmem_mtb_move) == 0) {
5422				kmd->kmd_scans++;
5423				(void) kmem_move_buffers(cp,
5424				    kmem_reclaim_scan_range, 1, KMM_DEBUG);
5425			}
5426		}
5427#endif	/* DEBUG */
5428	}
5429
5430	mutex_exit(&cp->cache_lock);
5431
5432	if (reap)
5433		kmem_depot_ws_reap(cp);
5434}
5435