/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel memory allocator, as described in the following two papers and a
 * statement about the consolidator:
 *
 * Jeff Bonwick,
 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 * Proceedings of the Summer 1994 Usenix Conference.
 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 * Jeff Bonwick and Jonathan Adams,
 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 * Arbitrary Resources.
 * Proceedings of the 2001 Usenix Conference.
 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * kmem Slab Consolidator Big Theory Statement:
 *
 * 1. Motivation
 *
 * As stated in Bonwick94, slabs provide the following advantages over other
 * allocation structures in terms of memory fragmentation:
 *
 *  - Internal fragmentation (per-buffer wasted space) is minimal.
 *  - Severe external fragmentation (unused buffers on the free list) is
 *    unlikely.
 *
 * Segregating objects by size eliminates one source of external fragmentation,
 * and according to Bonwick:
 *
 *   The other reason that slabs reduce external fragmentation is that all
 *   objects in a slab are of the same type, so they have the same lifetime
 *   distribution.
 *   The resulting segregation of short-lived and long-lived objects at slab
 *   granularity reduces the likelihood of an entire page being held hostage
 *   due to a single long-lived allocation [Barrett93, Hanson90].
 *
 * While unlikely, severe external fragmentation remains possible. Clients that
 * allocate both short- and long-lived objects from the same cache cannot
 * anticipate the distribution of long-lived objects within the allocator's
 * slab implementation. Even a small percentage of long-lived objects
 * distributed randomly across many slabs can lead to a worst case scenario
 * where the client frees the majority of its objects and the system gets back
 * almost none of the slabs. Despite the client doing what it reasonably can to
 * help the system reclaim memory, the allocator cannot shake free enough slabs
 * because of lonely allocations stubbornly hanging on. Although the allocator
 * is in a position to diagnose the fragmentation, there is nothing that the
 * allocator by itself can do about it. It only takes a single allocated object
 * to prevent an entire slab from being reclaimed, and any object handed out by
 * kmem_cache_alloc() is by definition in the client's control. Conversely,
 * although the client is in a position to move a long-lived object, it has no
 * way of knowing if the object is causing fragmentation, and if so, where to
 * move it. A solution necessarily requires further cooperation between the
 * allocator and the client.
 *
 * 2. Move Callback
 *
 * The kmem slab consolidator therefore adds a move callback to the
 * allocator/client interface, improving worst-case external fragmentation in
 * kmem caches that supply a function to move objects from one memory location
 * to another. When the system is low on memory, kmem attempts to consolidate
 * all of a cache's slabs at once; otherwise it works slowly to bring external
 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
 * thereby helping to avoid a low memory situation in the future.
 *
 * The callback has the following signature:
 *
 *	kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
 *
 * It supplies the kmem client with two addresses: the allocated object that
 * kmem wants to move and a buffer selected by kmem for the client to use as
 * the copy destination. The callback is kmem's way of saying "Please get off
 * of this buffer and use this one instead." kmem knows where it wants to move
 * the object in order to best reduce fragmentation. All the client needs to
 * know about the second argument (void *new) is that it is an allocated,
 * constructed object ready to take the contents of the old object.
 * When the move function is called, the system is likely to be low on memory,
 * and the new object spares the client from having to worry about allocating
 * memory for the requested move. The third argument supplies the size of the
 * object, in case a single move function handles multiple caches whose objects
 * differ only in size (such as zio_buf_512, zio_buf_1024, etc.). Finally, the
 * same optional user argument passed to the constructor, destructor, and
 * reclaim functions is also passed to the move callback.
 *
 * 2.1 Setting the Move Callback
 *
 * The client sets the move callback after creating the cache and before
 * allocating from it:
 *
 *	object_cache = kmem_cache_create(...);
 *	kmem_cache_set_move(object_cache, object_move);
 *
 * 2.2 Move Callback Return Values
 *
 * Only the client knows about its own data and when it is a good time to move
 * it. The client is cooperating with kmem to return unused memory to the
 * system, and kmem respectfully accepts this help at the client's convenience.
 * When asked to move an object, the client can respond with any of the
 * following:
 *
 *	typedef enum kmem_cbrc {
 *		KMEM_CBRC_YES,
 *		KMEM_CBRC_NO,
 *		KMEM_CBRC_LATER,
 *		KMEM_CBRC_DONT_NEED,
 *		KMEM_CBRC_DONT_KNOW
 *	} kmem_cbrc_t;
 *
 * The client must not explicitly kmem_cache_free() either of the objects
 * passed to the callback, since kmem wants to free them directly to the slab
 * layer (bypassing the per-CPU magazine layer). The response tells kmem which
 * of the objects to free:
 *
 *       YES: (Did it) The client moved the object, so kmem frees the old one.
 *        NO: (Never) The client refused, so kmem frees the new object (the
 *            unused copy destination). kmem also marks the slab of the old
 *            object so as not to bother the client with further callbacks for
 *            that object as long as the slab remains on the partial slab list.
 *            (The system won't be getting the slab back as long as the
 *            immovable object holds it hostage, so there's no point in moving
 *            any of its objects.)
 *     LATER: The client is using the object and cannot move it now, so kmem
 *            frees the new object (the unused copy destination). kmem still
 *            attempts to move other objects off the slab, since it expects to
 *            succeed in clearing the slab in a later callback. The client
 *            should use LATER instead of NO if the object is likely to become
 *            movable very soon.
 * DONT_NEED: The client no longer needs the object, so kmem frees the old
 *            along with the new object (the unused copy destination).
 *            This response is the client's opportunity to be a model citizen
 *            and give back as much as it can.
 * DONT_KNOW: The client does not know about the object because
 *            a) the client has just allocated the object and not yet put it
 *               wherever it expects to find known objects,
 *            b) the client has removed the object from wherever it expects to
 *               find known objects and is about to free it, or
 *            c) the client has freed the object.
 *            In all these cases (a, b, and c) kmem frees the new object (the
 *            unused copy destination) and searches for the old object in the
 *            magazine layer. If found, the object is removed from the magazine
 *            layer and freed to the slab layer so it will no longer hold the
 *            slab hostage.
 *
 * 2.3 Object States
 *
 * Neither kmem nor the client can be assumed to know the object's whereabouts
 * at the time of the callback. An object belonging to a kmem cache may be in
 * any of the following states:
 *
 * 1. Uninitialized on the slab
 * 2. Allocated from the slab but not constructed (still uninitialized)
 * 3. Allocated from the slab, constructed, but not yet ready for business
 *    (not in a valid state for the move callback)
 * 4. In use (valid and known to the client)
 * 5. About to be freed (no longer in a valid state for the move callback)
 * 6. Freed to a magazine (still constructed)
 * 7. Allocated from a magazine, not yet ready for business (not in a valid
 *    state for the move callback), and about to return to state #4
 * 8. Deconstructed on a magazine that is about to be freed
 * 9. Freed to the slab
 *
 * Since the move callback may be called at any time while the object is in any
 * of the above states (except state #1), the client needs a safe way to
 * determine whether or not it knows about the object. Specifically, the client
 * needs to know whether or not the object is in state #4, the only state in
 * which a move is valid. If the object is in any other state, the client
 * should immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access
 * any of the object's fields.
 *
 * Note that although an object may be in state #4 when kmem initiates the move
 * request, the object may no longer be in that state by the time kmem actually
 * calls the move function. Not only does the client free objects
 * asynchronously, kmem itself puts move requests on a queue where they are
 * pending until kmem processes them from another context. Also, objects freed
 * to a magazine appear allocated from the point of view of the slab layer, so
 * kmem may even initiate requests for objects in a state other than state #4.
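 *
 * Because KMEM_CBRC_DONT_KNOW is the safe answer whenever the client cannot
 * establish that an object is in state #4, a client can even start with a
 * maximally conservative callback and refine it later. The following sketch
 * (an illustration only, not part of the kmem interface) declines every
 * request:
 *
 *	static kmem_cbrc_t
 *	object_move(void *old, void *new, size_t size, void *user_arg)
 *	{
 *		// Always safe: kmem frees 'new' (the unused copy
 *		// destination) and hunts for 'old' in the magazine layer.
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}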
 *
 * 2.3.1 Magazine Layer
 *
 * An important insight revealed by the states listed above is that the
 * magazine layer is populated only by kmem_cache_free(). Magazines of
 * constructed objects are never populated directly from the slab layer (which
 * contains raw, unconstructed objects). Whenever an allocation request cannot
 * be satisfied from the magazine layer, the magazines are bypassed and the
 * request is satisfied from the slab layer (creating a new slab if necessary).
 * kmem calls the object constructor only when allocating from the slab layer,
 * and only in response to kmem_cache_alloc() or to prepare the destination
 * buffer passed in the move callback. kmem does not preconstruct objects in
 * anticipation of kmem_cache_alloc().
 *
 * 2.3.2 Object Constructor and Destructor
 *
 * If the client supplies a destructor, it must be valid to call the destructor
 * on a newly created object (immediately after the constructor).
 *
 * 2.4 Recognizing Known Objects
 *
 * There is a simple test to determine safely whether or not the client knows
 * about a given object in the move callback. It relies on the fact that kmem
 * guarantees that the object of the move callback has only been touched by the
 * client itself or else by kmem. kmem does this by ensuring that none of the
 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
 * callback is pending. When the last object on a slab is freed, if there is a
 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
 * slabs on that list until all pending callbacks are completed. That way,
 * clients can be certain that the object of a move callback is in one of the
 * states listed above, making it possible to distinguish known objects (in
 * state #4) using the two low order bits of any pointer member (with the
 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
 * platforms).
 *
 * The test works as long as the client always transitions objects from state
 * #4 (known, in use) to state #5 (about to be freed, invalid) by setting the
 * low order bit of the client-designated pointer member. Since kmem only
 * writes invalid memory patterns, such as 0xbaddcafe to uninitialized memory
 * and 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
 * guaranteed to set at least one of the two low order bits.
 * Therefore, given an object with a back pointer to a
 * 'container_t *o_container', the client can test
 *
 *	container_t *container = object->o_container;
 *	if ((uintptr_t)container & 0x3) {
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}
 *
 * Typically, an object will have a pointer to some structure with a list or
 * hash where objects from the cache are kept while in use. Assuming that the
 * client has some way of knowing that the container structure is valid and
 * will not go away during the move, and assuming that the structure includes a
 * lock to protect whatever collection is used, then the client would continue
 * as follows:
 *
 *	// Ensure that the container structure does not go away.
 *	if (container_hold(container) == 0) {
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}
 *	mutex_enter(&container->c_objects_lock);
 *	if (container != object->o_container) {
 *		mutex_exit(&container->c_objects_lock);
 *		container_rele(container);
 *		return (KMEM_CBRC_DONT_KNOW);
 *	}
 *
 * At this point the client knows that the object cannot be freed as long as
 * c_objects_lock is held. Note that after acquiring the lock, the client must
 * recheck the o_container pointer in case the object was removed just before
 * acquiring the lock.
 *
 * When the client is about to free an object, it must first remove that object
 * from the list, hash, or other structure where it is kept. At that time, to
 * mark the object so it can be distinguished from the remaining, known
 * objects, the client sets the designated low order bit:
 *
 *	mutex_enter(&container->c_objects_lock);
 *	object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
 *	list_remove(&container->c_objects, object);
 *	mutex_exit(&container->c_objects_lock);
 *
 * In the common case, the object is freed to the magazine layer, where it may
 * be reused on a subsequent allocation without the overhead of calling the
 * constructor. While in the magazine it appears allocated from the point of
 * view of the slab layer, making it a candidate for the move callback. Most
 * objects unrecognized by the client in the move callback fall into this
 * category and are cheaply distinguished from known objects by the test
 * described earlier. Since recognition is cheap for the client, and searching
 * magazines is expensive for kmem, kmem defers searching until the client
 * first returns KMEM_CBRC_DONT_KNOW. As long as the needed effort is
 * reasonable, kmem elsewhere does what it can to avoid bothering the client
 * unnecessarily.
 *
 * Invalidating the designated pointer member before freeing the object marks
 * the object to be avoided in the callback, and conversely, assigning a valid
 * value to the designated pointer member after allocating the object makes the
 * object fair game for the callback:
 *
 *	... allocate object ...
 *	... set any initial state not set by the constructor ...
 *
 *	mutex_enter(&container->c_objects_lock);
 *	list_insert_tail(&container->c_objects, object);
 *	membar_producer();
 *	object->o_container = container;
 *	mutex_exit(&container->c_objects_lock);
 *
 * Note that everything else must be valid before setting o_container makes the
 * object fair game for the move callback. The membar_producer() call ensures
 * that all the object's state is written to memory before setting the pointer
 * that transitions the object from state #3 or #7 (allocated, constructed, not
 * yet in use) to state #4 (in use, valid). That's important because the move
 * function has to check the validity of the pointer before it can safely
 * acquire the lock protecting the collection where it expects to find known
 * objects.
 *
 * This method of distinguishing known objects observes the usual symmetry:
 * invalidating the designated pointer is the first thing the client does
 * before freeing the object, and setting the designated pointer is the last
 * thing the client does after allocating the object. Of course, the client is
 * not required to use this method. Fundamentally, how the client recognizes
 * known objects is completely up to the client, but this method is recommended
 * as an efficient and safe way to take advantage of the guarantees made by
 * kmem. If the entire object is arbitrary data without any markable bits from
 * a suitable pointer member, then the client must find some other method, such
 * as searching a hash table of known objects.
 *
 * 2.5 Preventing Objects From Moving
 *
 * Besides a way to distinguish known objects, the other thing that the client
 * needs is a strategy to ensure that an object will not move while the client
 * is actively using it. The details of satisfying this requirement tend to be
 * highly cache-specific. It might seem that the same rules that let a client
 * remove an object safely should also decide when an object can be moved
 * safely. However, any object state that makes a removal attempt invalid is
 * likely to be long-lasting for objects that the client does not expect to
 * remove. kmem knows nothing about the object state and is equally likely
 * (from the client's point of view) to request a move for any object in the
 * cache, whether prepared for removal or not.
 * Even a low percentage of objects stuck in place by unremovability will
 * defeat the consolidator if the stuck objects are the same long-lived
 * allocations likely to hold slabs hostage. Fundamentally, the consolidator is
 * not aimed at common cases. Severe external fragmentation is a worst case
 * scenario manifested as sparsely allocated slabs, by definition a low
 * percentage of the cache's objects. When deciding what makes an object
 * movable, keep in mind the goal of the consolidator: to bring worst-case
 * external fragmentation within the limits guaranteed for internal
 * fragmentation. Removability is a poor criterion if it is likely to exclude
 * more than an insignificant percentage of objects for long periods of time.
 *
 * A tricky general solution exists, and it has the advantage of letting you
 * move any object at almost any moment, practically eliminating the likelihood
 * that an object can hold a slab hostage. However, if there is a
 * cache-specific way to ensure that an object is not actively in use in the
 * vast majority of cases, a simpler solution that leverages this
 * cache-specific knowledge is preferred.
 *
 * 2.5.1 Cache-Specific Solution
 *
 * As an example of a cache-specific solution, the ZFS znode cache takes
 * advantage of the fact that the vast majority of znodes are only being
 * referenced from the DNLC. (A typical case might be a few hundred in active
 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
 * client has established that it recognizes the znode and can access its
 * fields safely (using the method described earlier), it then tests whether
 * the znode is referenced by anything other than the DNLC. If so, it assumes
 * that the znode may be in active use and is unsafe to move, so it drops its
 * locks and returns KMEM_CBRC_LATER. The advantage of this strategy is that
 * everywhere else znodes are used, no change is needed to protect against the
 * possibility of the znode moving. The disadvantage is that it remains
 * possible for an application to hold a znode slab hostage with an open file
 * descriptor. However, this case ought to be rare and the consolidator has a
 * way to deal with it: If the client responds KMEM_CBRC_LATER repeatedly for
 * the same object, kmem eventually stops believing it and treats the slab as
 * if the client had responded KMEM_CBRC_NO. Having marked the hostage slab,
 * kmem can then focus on getting it off of the partial slab list by allocating
 * rather than freeing all of its objects. (Either way of getting a slab off
 * the free list reduces fragmentation.)
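 *
 * Schematically, the test described above reduces to a fragment like the
 * following (a sketch with a hypothetical helper; the actual ZFS code differs
 * in detail):
 *
 *	if (znode_is_referenced_beyond_dnlc(zp)) {
 *		... drop the locks acquired while recognizing the znode ...
 *		return (KMEM_CBRC_LATER);
 *	}
 *	... safe to move: copy *zp to the new buffer, fix up pointers ...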
 *
 * 2.5.2 General Solution
 *
 * The general solution, on the other hand, requires an explicit hold
 * everywhere the object is used to prevent it from moving. To keep the client
 * locking strategy as uncomplicated as possible, kmem guarantees the
 * simplifying assumption that move callbacks are sequential, even across
 * multiple caches. Internally, a global queue processed by a single thread
 * supports all caches implementing the callback function. No matter how many
 * caches supply a move function, the consolidator never moves more than one
 * object at a time, so the client does not have to worry about tricky lock
 * ordering involving several related objects from different kmem caches.
 *
 * The general solution implements the explicit hold as a read-write lock,
 * which allows multiple readers to access an object from the cache
 * simultaneously while a single writer is excluded from moving it. A single
 * rwlock for the entire cache would lock out all threads from using any of the
 * cache's objects even though only a single object is being moved, so to
 * reduce contention, the client can fan out the single rwlock into an array of
 * rwlocks hashed by the object address, making it probable that moving one
 * object will not prevent other threads from using a different object. The
 * rwlock cannot be a member of the object itself, because the possibility of
 * the object moving makes it unsafe to access any of the object's fields until
 * the lock is acquired.
 *
 * Assuming a small, fixed number of locks, it's possible that multiple objects
 * will hash to the same lock. A thread that needs to use multiple objects in
 * the same function may acquire the same lock multiple times. Since rwlocks
 * are reentrant for readers, and since there is never more than a single
 * writer at a time (assuming that the client acquires the lock as a writer
 * only when moving an object inside the callback), there would seem to be no
 * problem. However, a client locking multiple objects in the same function
 * must handle one case of potential deadlock: Assume that thread A needs to
 * prevent both object 1 and object 2 from moving, and thread B, the callback,
 * meanwhile tries to move object 3. It's possible, if objects 1, 2, and 3 all
 * hash to the same lock, that thread A will acquire the lock for object 1 as a
 * reader before thread B sets the lock's write-wanted bit, preventing thread A
 * from reacquiring the lock for object 2 as a reader. Unable to make forward
 * progress, thread A will never release the lock for object 1, resulting in
 * deadlock.
 *
 * There are two ways of avoiding the deadlock just described.
 * The first is to use rw_tryenter() rather than rw_enter() in the callback
 * function when attempting to acquire the lock as a writer. If tryenter
 * discovers that the same object (or another object hashed to the same lock)
 * is already in use, it aborts the callback and returns KMEM_CBRC_LATER. The
 * second way is to use rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h)
 * instead of rwlock_t, since it allows a thread to acquire the lock as a
 * reader in spite of a waiting writer. This second approach insists on moving
 * the object now, no matter how many readers the move function must wait for
 * in order to do so, and could delay the completion of the callback
 * indefinitely (blocking callbacks to other clients). In practice, a less
 * insistent callback using rw_tryenter() returns KMEM_CBRC_LATER infrequently
 * enough that there seems little reason to use anything else.
 *
 * Avoiding deadlock is not the only problem that an implementation using an
 * explicit hold needs to solve. Locking the object in the first place (to
 * prevent it from moving) remains a problem, since the object could move
 * between the time you obtain a pointer to the object and the time you acquire
 * the rwlock hashed to that pointer value. Therefore the client needs to
 * recheck the value of the pointer after acquiring the lock, drop the lock if
 * the value has changed, and try again. This requires a level of indirection:
 * something that points to the object rather than the object itself, that the
 * client can access safely while attempting to acquire the lock. (The object
 * itself cannot be referenced safely because it can move at any time.)
 * The following lock-acquisition function takes whatever is safe to reference
 * (arg), follows its pointer to the object (using function f), and tries as
 * often as necessary to acquire the hashed lock and verify that the object
 * still has not moved:
 *
 *	object_t *
 *	object_hold(object_f f, void *arg)
 *	{
 *		object_t *op;
 *
 *		op = f(arg);
 *		if (op == NULL) {
 *			return (NULL);
 *		}
 *
 *		rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *		while (op != f(arg)) {
 *			rw_exit(OBJECT_RWLOCK(op));
 *			op = f(arg);
 *			if (op == NULL) {
 *				break;
 *			}
 *			rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *		}
 *
 *		return (op);
 *	}
 *
 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
 * lock reacquisition loop, while necessary, almost never executes.
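 *
 * As an illustration, OBJECT_RWLOCK might be defined as follows (a sketch
 * only; the table size and shift amount are arbitrary assumptions, and the
 * best shift depends on the cache's object size):
 *
 *	#define	OBJECT_RWLOCK_COUNT	64	// assumed power of two
 *	static krwlock_t object_rwlock_table[OBJECT_RWLOCK_COUNT];
 *
 *	// Discard low-order address bits (mostly identical because of
 *	// object alignment) before masking off the table index.
 *	#define	OBJECT_RWLOCK(op)					\
 *	    (&object_rwlock_table[((uintptr_t)(op) >> 6) &		\
 *	    (OBJECT_RWLOCK_COUNT - 1)])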
 *
 * The function pointer f (used to obtain the object pointer from arg) has the
 * following type definition:
 *
 *	typedef object_t *(*object_f)(void *arg);
 *
 * An object_f implementation is likely to be as simple as accessing a
 * structure member:
 *
 *	object_t *
 *	s_object(void *arg)
 *	{
 *		something_t *sp = arg;
 *		return (sp->s_object);
 *	}
 *
 * The flexibility of a function pointer allows the path to the object to be
 * arbitrarily complex and also supports the notion that depending on where you
 * are using the object, you may need to get it from someplace different.
 *
 * The function that releases the explicit hold is simpler because it does not
 * have to worry about the object moving:
 *
 *	void
 *	object_rele(object_t *op)
 *	{
 *		rw_exit(OBJECT_RWLOCK(op));
 *	}
 *
 * The caller is spared these details so that obtaining and releasing an
 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The
 * caller of object_hold() only needs to know that the returned object pointer
 * is valid if not NULL and that the object will not move until released.
 *
 * Although object_hold() prevents an object from moving, it does not prevent
 * it from being freed. The caller must take measures before calling
 * object_hold() (afterwards is too late) to ensure that the held object cannot
 * be freed. The caller must do so without accessing the unsafe object
 * reference, so any lock or reference count used to ensure the continued
 * existence of the object must live outside the object itself.
 *
 * Obtaining a new object is a special case where an explicit hold is
 * impossible for the caller. Any function that returns a newly allocated
 * object (either as a return value, or as an in-out parameter) must return it
 * already held; once the caller gets it, it is already too late, since the
 * object cannot be safely accessed without the level of indirection described
 * earlier. The following object_alloc() example uses the same code shown
 * earlier to transition a new object into the state of being recognized (by
 * the client) as a known object. The function must acquire the hold (rw_enter)
 * before that state transition makes the object movable:
 *
 *	static object_t *
 *	object_alloc(container_t *container)
 *	{
 *		object_t *object = kmem_cache_alloc(object_cache, 0);
 *		... set any initial state not set by the constructor ...
 *		rw_enter(OBJECT_RWLOCK(object), RW_READER);
 *		mutex_enter(&container->c_objects_lock);
 *		list_insert_tail(&container->c_objects, object);
 *		membar_producer();
 *		object->o_container = container;
 *		mutex_exit(&container->c_objects_lock);
 *		return (object);
 *	}
 *
 * Functions that implicitly acquire an object hold (any function that calls
 * object_alloc() to supply an object for the caller) need to be carefully
 * noted so that the matching object_rele() is not neglected. Otherwise, leaked
 * holds prevent all objects hashed to the affected rwlocks from ever being
 * moved.
 *
 * The pointer to a held object can be hashed to the holding rwlock even after
 * the object has been freed. Although it is possible to release the hold
 * after freeing the object, you may decide to release the hold implicitly in
 * whatever function frees the object, so as to release the hold as soon as
 * possible, and for the sake of symmetry with the function that implicitly
 * acquires the hold when it allocates the object. Here, object_free() releases
 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
 * matching pair with object_hold():
 *
 *	void
 *	object_free(object_t *object)
 *	{
 *		container_t *container;
 *
 *		ASSERT(object_held(object));
 *		container = object->o_container;
 *		mutex_enter(&container->c_objects_lock);
 *		object->o_container =
 *		    (void *)((uintptr_t)object->o_container | 0x1);
 *		list_remove(&container->c_objects, object);
 *		mutex_exit(&container->c_objects_lock);
 *		object_rele(object);
 *		kmem_cache_free(object_cache, object);
 *	}
 *
 * Note that object_free() cannot safely accept an object pointer as an
 * argument unless the object is already held. Any function that calls
 * object_free() needs to be carefully noted since it similarly forms a
 * matching pair with object_hold().
 *
 * To complete the picture, the following callback function implements the
 * general solution by moving objects only if they are currently unheld:
 *
 *	static kmem_cbrc_t
 *	object_move(void *buf, void *newbuf, size_t size, void *arg)
 *	{
 *		object_t *op = buf, *np = newbuf;
 *		container_t *container;
 *
 *		container = op->o_container;
 *		if ((uintptr_t)container & 0x3) {
 *			return (KMEM_CBRC_DONT_KNOW);
 *		}
 *
 *		// Ensure that the container structure does not go away.
 *		if (container_hold(container) == 0) {
 *			return (KMEM_CBRC_DONT_KNOW);
 *		}
 *
 *		mutex_enter(&container->c_objects_lock);
 *		if (container != op->o_container) {
 *			mutex_exit(&container->c_objects_lock);
 *			container_rele(container);
 *			return (KMEM_CBRC_DONT_KNOW);
 *		}
 *
 *		if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
 *			mutex_exit(&container->c_objects_lock);
 *			container_rele(container);
 *			return (KMEM_CBRC_LATER);
 *		}
 *
 *		object_move_impl(op, np);	// critical section
 *		rw_exit(OBJECT_RWLOCK(op));
 *
 *		op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
 *		list_link_replace(&op->o_link_node, &np->o_link_node);
 *		mutex_exit(&container->c_objects_lock);
 *		container_rele(container);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 * Note that object_move() must invalidate the designated o_container pointer
 * of the old object in the same way that object_free() does, since kmem will
 * free the object in response to the KMEM_CBRC_YES return value.
 *
 * The lock order in object_move() differs from object_alloc(), which locks
 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as
 * the callback uses rw_tryenter() (preventing the deadlock described earlier),
 * it's not a problem. Holding the lock on the object list in the example above
 * through the entire callback not only prevents the object from going away, it
 * also allows you to lock the list elsewhere and know that none of its
 * elements will move during iteration.
 *
 * Adding an explicit hold everywhere an object from the cache is used is
 * tricky and involves much more change to client code than a cache-specific
 * solution that leverages existing state to decide whether or not an object is
 * movable. However, this approach has the advantage that no object remains
 * immovable for any significant length of time, making it extremely unlikely
 * that long-lived allocations can continue holding slabs hostage; and it works
 * for any cache.
 *
 * 3. Consolidator Implementation
 *
 * Once the client supplies a move function that a) recognizes known objects
 * and b) avoids moving objects that are actively in use, the remaining work is
 * up to the consolidator to decide which objects to move and when to issue
 * callbacks.
 *
 * The consolidator relies on the fact that a cache's slabs are ordered by
 * usage. Each slab has a fixed number of objects.
 * Depending on the slab's "color" (the offset of the first object from the
 * beginning of the slab; offsets are staggered to mitigate false sharing of
 * cache lines) that number is either the maximum number of objects per slab
 * determined at cache creation time or else the number closest to the maximum
 * that fits within the space remaining after the initial offset. A completely
 * allocated slab may contribute some internal fragmentation (per-slab
 * overhead) but no external fragmentation, so it is of no interest to the
 * consolidator. At the other extreme, slabs whose objects have all been freed
 * to the slab are released to the virtual memory (VM) subsystem (objects freed
 * to magazines are still allocated as far as the slab is concerned). External
 * fragmentation exists when there are slabs somewhere between these extremes.
 * A partial slab has at least one but not all of its objects allocated. The
 * more partial slabs, and the fewer allocated objects on each of them, the
 * higher the fragmentation. Hence the consolidator's overall strategy is to
 * reduce the number of partial slabs by moving allocated objects from the
 * least allocated slabs to the most allocated slabs.
 *
 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
 * slabs are kept separately in an unordered list. Since the majority of slabs
 * tend to be completely allocated (a typical unfragmented cache may have
 * thousands of complete slabs and only a single partial slab), separating
 * complete slabs improves the efficiency of partial slab ordering, since the
 * complete slabs do not affect the depth or balance of the AVL tree. This
 * ordered sequence of partial slabs acts as a "free list" supplying objects
 * for allocation requests.
 *
 * Objects are always allocated from the first partial slab in the free list,
 * where the allocation is most likely to eliminate a partial slab (by
 * completely allocating it). Conversely, when a single object from a
 * completely allocated slab is freed to the slab, that slab is added to the
 * front of the free list. Since most free list activity involves highly
 * allocated slabs coming and going at the front of the list, slabs tend
 * naturally toward the ideal order: highly allocated at the front, sparsely
 * allocated at the back. Slabs with few allocated objects are likely to become
 * completely free if they keep a safe distance away from the front of the free
 * list. Slab misorders interfere with the natural tendency of slabs to become
 * completely free or completely allocated.
 * For example, a slab with a single allocated object needs only a single free
 * to escape the cache; its natural desire is frustrated when it finds itself
 * at the front of the list where a second allocation happens just before the
 * free could have released it. Another slab with all but one object allocated
 * might have supplied the buffer instead, so that both (as opposed to neither)
 * of the slabs would have been taken off the free list.
 *
 * Although slabs tend naturally toward the ideal order, misorders allowed by a
 * simple list implementation defeat the consolidator's strategy of merging
 * least- and most-allocated slabs. Without an AVL tree to guarantee order,
 * kmem needs another way to fix misorders to optimize its callback strategy.
 * One approach is to periodically scan a limited number of slabs, advancing a
 * marker to hold the current scan position, and to move extreme misorders to
 * the front or back of the free list and to the front or back of the current
 * scan range. By making consecutive scan ranges overlap by one slab, the least
 * allocated slab in the current range can be carried along from the end of one
 * scan to the start of the next.
 *
 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
 * task, however. Since most of the cache's activity is in the magazine layer,
 * and allocations from the slab layer represent only a startup cost, the
 * overhead of maintaining a balanced tree is not a significant concern
 * compared to the opportunity of reducing complexity by eliminating the
 * partial slab scanner just described. The overhead of an AVL tree is
 * minimized by maintaining only partial slabs in the tree and keeping
 * completely allocated slabs separately in a list. To avoid increasing the
 * size of the slab structure the AVL linkage pointers are reused for the
 * slab's list linkage, since the slab will always be either partial or
 * complete, never stored both ways at the same time. To further minimize the
 * overhead of the AVL tree the compare function that orders partial slabs by
 * usage divides the range of allocated object counts into bins such that
 * counts within the same bin are considered equal. Binning partial slabs makes
 * it less likely that allocating or freeing a single object will change the
 * slab's order, requiring a tree reinsertion (an avl_remove() followed by an
 * avl_add(), both potentially requiring some rebalancing of the tree).
 * Allocation counts closest to completely free and completely allocated are
 * left unbinned (finely sorted) to better support the consolidator's strategy
 * of merging slabs at either extreme.
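 *
 * A binned compare function might look like the following sketch (an
 * illustration of the binning idea, not the actual kmem implementation;
 * slab_t, s_allocated, s_max, and SLAB_BIN() are hypothetical):
 *
 *	// Map an allocated-object count to a coarse bin of 8, leaving the
 *	// extremes (nearly free, nearly full) finely sorted.
 *	#define	SLAB_BIN(count, max)					\
 *	    (((count) < 8 || (count) > (max) - 8) ?			\
 *	    (count) : ((count) & ~7))
 *
 *	static int
 *	slab_usage_compare(const void *a, const void *b)
 *	{
 *		const slab_t *s0 = a;
 *		const slab_t *s1 = b;
 *		int w0 = SLAB_BIN(s0->s_allocated, s0->s_max);
 *		int w1 = SLAB_BIN(s1->s_allocated, s1->s_max);
 *
 *		if (w0 < w1)
 *			return (-1);
 *		if (w0 > w1)
 *			return (1);
 *		// Break ties by address so that AVL keys stay unique;
 *		// counts within the same bin compare equal by usage.
 *		if ((uintptr_t)s0 < (uintptr_t)s1)
 *			return (-1);
 *		if ((uintptr_t)s0 > (uintptr_t)s1)
 *			return (1);
 *		return (0);
 *	}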
 *
 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
 *
 * The consolidator piggybacks on the kmem maintenance thread and is called on
 * the same interval as kmem_cache_update(), once per cache every fifteen
 * seconds. kmem maintains a running count of unallocated objects in the slab
 * layer (cache_bufslab). The consolidator checks whether that number exceeds
 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
 * there is a significant number of slabs in the cache (arbitrarily a minimum
 * 101 total slabs). Unused objects that have fallen out of the magazine
 * layer's working set are included in the assessment, and magazines in the
 * depot are reaped if those objects would lift cache_bufslab above the
 * fragmentation threshold. Once the consolidator decides that a cache is
 * fragmented, it looks for a candidate slab to reclaim, starting at the end of
 * the partial slab free list and scanning backwards. At first the consolidator
 * is choosy: only a slab with fewer than 12.5% (1/8) of its objects allocated
 * qualifies (or else a single allocated object, regardless of percentage). If
 * there is difficulty finding a candidate slab, kmem raises the allocation
 * threshold incrementally, up to a maximum 87.5% (7/8), so that eventually the
 * consolidator will reduce external fragmentation (unused objects on the free
 * list) below 12.5% (1/8), even in the worst case of every slab in the cache
 * being almost 7/8 allocated. The threshold can also be lowered incrementally
 * when candidate slabs are easy to find, and the threshold is reset to the
 * minimum 1/8 as soon as the cache is no longer fragmented.
 *
 * 3.2 Generating Callbacks
 *
 * Once an eligible slab is chosen, a callback is generated for every allocated
 * object on the slab, in the hope that the client will move everything off the
 * slab and make it reclaimable. Objects selected as move destinations are
 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
 * order (most allocated at the front, least allocated at the back) and a
 * cooperative client, the consolidator will succeed in removing slabs from
 * both ends of the free list, completely allocating on the one hand and
 * completely freeing on the other. Objects selected as move destinations are
 * allocated in the kmem maintenance thread where move requests are enqueued. A
 * separate callback thread removes pending callbacks from the queue and calls
 * the client. The separate thread ensures that client code (the move function)
 * does not interfere with internal kmem maintenance tasks. A map of pending
 * callbacks keyed by object address (the object to be moved) is checked to
 * ensure that duplicate callbacks are not generated for the same object.
760*b5fca8f8Stomee * Allocating the move destination (the object to move to) prevents subsequent 761*b5fca8f8Stomee * callbacks from selecting the same destination as an earlier pending callback. 762*b5fca8f8Stomee * 763*b5fca8f8Stomee * Move requests can also be generated by kmem_cache_reap() when the system is 764*b5fca8f8Stomee * desperate for memory and by kmem_cache_move_notify(), called by the client to 765*b5fca8f8Stomee * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible. 766*b5fca8f8Stomee * The map of pending callbacks is protected by the same lock that protects the 767*b5fca8f8Stomee * slab layer. 768*b5fca8f8Stomee * 769*b5fca8f8Stomee * When the system is desperate for memory, kmem does not bother to determine 770*b5fca8f8Stomee * whether or not the cache exceeds the fragmentation threshold, but tries to 771*b5fca8f8Stomee * consolidate as many slabs as possible. Normally, the consolidator chews 772*b5fca8f8Stomee * slowly, one sparsely allocated slab at a time during each maintenance 773*b5fca8f8Stomee * interval that the cache is fragmented. When desperate, the consolidator 774*b5fca8f8Stomee * starts at the last partial slab and enqueues callbacks for every allocated 775*b5fca8f8Stomee * object on every partial slab, working backwards until it reaches the first 776*b5fca8f8Stomee * partial slab. The first partial slab, meanwhile, advances in pace with the 777*b5fca8f8Stomee * consolidator as allocations to supply move destinations for the enqueued 778*b5fca8f8Stomee * callbacks use up the highly allocated slabs at the front of the free list. 779*b5fca8f8Stomee * Ideally, the overgrown free list collapses like an accordion, starting at 780*b5fca8f8Stomee * both ends and ending at the center with a single partial slab. 781*b5fca8f8Stomee * 782*b5fca8f8Stomee * 3.3 Client Responses 783*b5fca8f8Stomee * 784*b5fca8f8Stomee * When the client returns KMEM_CBRC_NO in response to the move callback, kmem 785*b5fca8f8Stomee * marks the slab that supplied the stuck object non-reclaimable and moves it to 786*b5fca8f8Stomee * front of the free list. The slab remains marked as long as it remains on the 787*b5fca8f8Stomee * free list, and it appears more allocated to the partial slab compare function 788*b5fca8f8Stomee * than any unmarked slab, no matter how many of its objects are allocated. 789*b5fca8f8Stomee * Since even one immovable object ties up the entire slab, the goal is to 790*b5fca8f8Stomee * completely allocate any slab that cannot be completely freed. kmem does not 791*b5fca8f8Stomee * bother generating callbacks to move objects from a marked slab unless the 792*b5fca8f8Stomee * system is desperate. 793*b5fca8f8Stomee * 794*b5fca8f8Stomee * When the client responds KMEM_CBRC_LATER, kmem increments a count for the 795*b5fca8f8Stomee * slab. If the client responds LATER too many times, kmem disbelieves and 796*b5fca8f8Stomee * treats the response as a NO. The count is cleared when the slab is taken off 797*b5fca8f8Stomee * the partial slab list or when the client moves one of the slab's objects. 798*b5fca8f8Stomee * 799*b5fca8f8Stomee * 4. Observability 800*b5fca8f8Stomee * 801*b5fca8f8Stomee * A kmem cache's external fragmentation is best observed with 'mdb -k' using 802*b5fca8f8Stomee * the ::kmem_slabs dcmd. For a complete description of the command, enter 803*b5fca8f8Stomee * '::help kmem_slabs' at the mdb prompt. 
8047c478bd9Sstevel@tonic-gate */ 8057c478bd9Sstevel@tonic-gate 8067c478bd9Sstevel@tonic-gate #include <sys/kmem_impl.h> 8077c478bd9Sstevel@tonic-gate #include <sys/vmem_impl.h> 8087c478bd9Sstevel@tonic-gate #include <sys/param.h> 8097c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 8107c478bd9Sstevel@tonic-gate #include <sys/vm.h> 8117c478bd9Sstevel@tonic-gate #include <sys/proc.h> 8127c478bd9Sstevel@tonic-gate #include <sys/tuneable.h> 8137c478bd9Sstevel@tonic-gate #include <sys/systm.h> 8147c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 8157c478bd9Sstevel@tonic-gate #include <sys/debug.h> 816*b5fca8f8Stomee #include <sys/sdt.h> 8177c478bd9Sstevel@tonic-gate #include <sys/mutex.h> 8187c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 8197c478bd9Sstevel@tonic-gate #include <sys/atomic.h> 8207c478bd9Sstevel@tonic-gate #include <sys/kobj.h> 8217c478bd9Sstevel@tonic-gate #include <sys/disp.h> 8227c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h> 8237c478bd9Sstevel@tonic-gate #include <sys/log.h> 8247c478bd9Sstevel@tonic-gate #include <sys/callb.h> 8257c478bd9Sstevel@tonic-gate #include <sys/taskq.h> 8267c478bd9Sstevel@tonic-gate #include <sys/modctl.h> 8277c478bd9Sstevel@tonic-gate #include <sys/reboot.h> 8287c478bd9Sstevel@tonic-gate #include <sys/id32.h> 8297c478bd9Sstevel@tonic-gate #include <sys/zone.h> 830f4b3ec61Sdh #include <sys/netstack.h> 831*b5fca8f8Stomee #ifdef DEBUG 832*b5fca8f8Stomee #include <sys/random.h> 833*b5fca8f8Stomee #endif 8347c478bd9Sstevel@tonic-gate 8357c478bd9Sstevel@tonic-gate extern void streams_msg_init(void); 8367c478bd9Sstevel@tonic-gate extern int segkp_fromheap; 8377c478bd9Sstevel@tonic-gate extern void segkp_cache_free(void); 8387c478bd9Sstevel@tonic-gate 8397c478bd9Sstevel@tonic-gate struct kmem_cache_kstat { 8407c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_size; 8417c478bd9Sstevel@tonic-gate kstat_named_t kmc_align; 8427c478bd9Sstevel@tonic-gate kstat_named_t kmc_chunk_size; 8437c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_size; 8447c478bd9Sstevel@tonic-gate kstat_named_t kmc_alloc; 8457c478bd9Sstevel@tonic-gate kstat_named_t kmc_alloc_fail; 8467c478bd9Sstevel@tonic-gate kstat_named_t kmc_free; 8477c478bd9Sstevel@tonic-gate kstat_named_t kmc_depot_alloc; 8487c478bd9Sstevel@tonic-gate kstat_named_t kmc_depot_free; 8497c478bd9Sstevel@tonic-gate kstat_named_t kmc_depot_contention; 8507c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_alloc; 8517c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_free; 8527c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_constructed; 8537c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_avail; 8547c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_inuse; 8557c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_total; 8567c478bd9Sstevel@tonic-gate kstat_named_t kmc_buf_max; 8577c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_create; 8587c478bd9Sstevel@tonic-gate kstat_named_t kmc_slab_destroy; 8597c478bd9Sstevel@tonic-gate kstat_named_t kmc_vmem_source; 8607c478bd9Sstevel@tonic-gate kstat_named_t kmc_hash_size; 8617c478bd9Sstevel@tonic-gate kstat_named_t kmc_hash_lookup_depth; 8627c478bd9Sstevel@tonic-gate kstat_named_t kmc_hash_rescale; 8637c478bd9Sstevel@tonic-gate kstat_named_t kmc_full_magazines; 8647c478bd9Sstevel@tonic-gate kstat_named_t kmc_empty_magazines; 8657c478bd9Sstevel@tonic-gate kstat_named_t kmc_magazine_size; 866*b5fca8f8Stomee kstat_named_t kmc_move_callbacks; 867*b5fca8f8Stomee kstat_named_t kmc_move_yes; 868*b5fca8f8Stomee kstat_named_t kmc_move_no; 869*b5fca8f8Stomee kstat_named_t kmc_move_later; 
870*b5fca8f8Stomee kstat_named_t kmc_move_dont_need; 871*b5fca8f8Stomee kstat_named_t kmc_move_dont_know; 872*b5fca8f8Stomee kstat_named_t kmc_move_hunt_found; 8737c478bd9Sstevel@tonic-gate } kmem_cache_kstat = { 8747c478bd9Sstevel@tonic-gate { "buf_size", KSTAT_DATA_UINT64 }, 8757c478bd9Sstevel@tonic-gate { "align", KSTAT_DATA_UINT64 }, 8767c478bd9Sstevel@tonic-gate { "chunk_size", KSTAT_DATA_UINT64 }, 8777c478bd9Sstevel@tonic-gate { "slab_size", KSTAT_DATA_UINT64 }, 8787c478bd9Sstevel@tonic-gate { "alloc", KSTAT_DATA_UINT64 }, 8797c478bd9Sstevel@tonic-gate { "alloc_fail", KSTAT_DATA_UINT64 }, 8807c478bd9Sstevel@tonic-gate { "free", KSTAT_DATA_UINT64 }, 8817c478bd9Sstevel@tonic-gate { "depot_alloc", KSTAT_DATA_UINT64 }, 8827c478bd9Sstevel@tonic-gate { "depot_free", KSTAT_DATA_UINT64 }, 8837c478bd9Sstevel@tonic-gate { "depot_contention", KSTAT_DATA_UINT64 }, 8847c478bd9Sstevel@tonic-gate { "slab_alloc", KSTAT_DATA_UINT64 }, 8857c478bd9Sstevel@tonic-gate { "slab_free", KSTAT_DATA_UINT64 }, 8867c478bd9Sstevel@tonic-gate { "buf_constructed", KSTAT_DATA_UINT64 }, 8877c478bd9Sstevel@tonic-gate { "buf_avail", KSTAT_DATA_UINT64 }, 8887c478bd9Sstevel@tonic-gate { "buf_inuse", KSTAT_DATA_UINT64 }, 8897c478bd9Sstevel@tonic-gate { "buf_total", KSTAT_DATA_UINT64 }, 8907c478bd9Sstevel@tonic-gate { "buf_max", KSTAT_DATA_UINT64 }, 8917c478bd9Sstevel@tonic-gate { "slab_create", KSTAT_DATA_UINT64 }, 8927c478bd9Sstevel@tonic-gate { "slab_destroy", KSTAT_DATA_UINT64 }, 8937c478bd9Sstevel@tonic-gate { "vmem_source", KSTAT_DATA_UINT64 }, 8947c478bd9Sstevel@tonic-gate { "hash_size", KSTAT_DATA_UINT64 }, 8957c478bd9Sstevel@tonic-gate { "hash_lookup_depth", KSTAT_DATA_UINT64 }, 8967c478bd9Sstevel@tonic-gate { "hash_rescale", KSTAT_DATA_UINT64 }, 8977c478bd9Sstevel@tonic-gate { "full_magazines", KSTAT_DATA_UINT64 }, 8987c478bd9Sstevel@tonic-gate { "empty_magazines", KSTAT_DATA_UINT64 }, 8997c478bd9Sstevel@tonic-gate { "magazine_size", KSTAT_DATA_UINT64 }, 900*b5fca8f8Stomee { "move_callbacks", KSTAT_DATA_UINT64 }, 901*b5fca8f8Stomee { "move_yes", KSTAT_DATA_UINT64 }, 902*b5fca8f8Stomee { "move_no", KSTAT_DATA_UINT64 }, 903*b5fca8f8Stomee { "move_later", KSTAT_DATA_UINT64 }, 904*b5fca8f8Stomee { "move_dont_need", KSTAT_DATA_UINT64 }, 905*b5fca8f8Stomee { "move_dont_know", KSTAT_DATA_UINT64 }, 906*b5fca8f8Stomee { "move_hunt_found", KSTAT_DATA_UINT64 }, 9077c478bd9Sstevel@tonic-gate }; 9087c478bd9Sstevel@tonic-gate 9097c478bd9Sstevel@tonic-gate static kmutex_t kmem_cache_kstat_lock; 9107c478bd9Sstevel@tonic-gate 9117c478bd9Sstevel@tonic-gate /* 9127c478bd9Sstevel@tonic-gate * The default set of caches to back kmem_alloc(). 9137c478bd9Sstevel@tonic-gate * These sizes should be reevaluated periodically. 9147c478bd9Sstevel@tonic-gate * 9157c478bd9Sstevel@tonic-gate * We want allocations that are multiples of the coherency granularity 9167c478bd9Sstevel@tonic-gate * (64 bytes) to be satisfied from a cache which is a multiple of 64 9177c478bd9Sstevel@tonic-gate * bytes, so that it will be 64-byte aligned. For all multiples of 64, 9187c478bd9Sstevel@tonic-gate * the next kmem_cache_size greater than or equal to it must be a 9197c478bd9Sstevel@tonic-gate * multiple of 64. 
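 *
 * For example, P2ALIGN(8192 / 7, 64) in the table below rounds
 * 8192 / 7 = 1170 down to 1152 (18 * 64), so even the sizes chosen to
 * approximate fractions of a page are themselves multiples of 64 and
 * therefore satisfy the rule above.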
9207c478bd9Sstevel@tonic-gate */ 9217c478bd9Sstevel@tonic-gate static const int kmem_alloc_sizes[] = { 9227c478bd9Sstevel@tonic-gate 1 * 8, 9237c478bd9Sstevel@tonic-gate 2 * 8, 9247c478bd9Sstevel@tonic-gate 3 * 8, 9257c478bd9Sstevel@tonic-gate 4 * 8, 5 * 8, 6 * 8, 7 * 8, 9267c478bd9Sstevel@tonic-gate 4 * 16, 5 * 16, 6 * 16, 7 * 16, 9277c478bd9Sstevel@tonic-gate 4 * 32, 5 * 32, 6 * 32, 7 * 32, 9287c478bd9Sstevel@tonic-gate 4 * 64, 5 * 64, 6 * 64, 7 * 64, 9297c478bd9Sstevel@tonic-gate 4 * 128, 5 * 128, 6 * 128, 7 * 128, 9307c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 7, 64), 9317c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 6, 64), 9327c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 5, 64), 9337c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 4, 64), 9347c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 3, 64), 9357c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 2, 64), 9367c478bd9Sstevel@tonic-gate P2ALIGN(8192 / 1, 64), 9377c478bd9Sstevel@tonic-gate 4096 * 3, 9387c478bd9Sstevel@tonic-gate 8192 * 2, 939ad23a2dbSjohansen 8192 * 3, 940ad23a2dbSjohansen 8192 * 4, 9417c478bd9Sstevel@tonic-gate }; 9427c478bd9Sstevel@tonic-gate 943ad23a2dbSjohansen #define KMEM_MAXBUF 32768 9447c478bd9Sstevel@tonic-gate 9457c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT]; 9467c478bd9Sstevel@tonic-gate 9477c478bd9Sstevel@tonic-gate static kmem_magtype_t kmem_magtype[] = { 9487c478bd9Sstevel@tonic-gate { 1, 8, 3200, 65536 }, 9497c478bd9Sstevel@tonic-gate { 3, 16, 256, 32768 }, 9507c478bd9Sstevel@tonic-gate { 7, 32, 64, 16384 }, 9517c478bd9Sstevel@tonic-gate { 15, 64, 0, 8192 }, 9527c478bd9Sstevel@tonic-gate { 31, 64, 0, 4096 }, 9537c478bd9Sstevel@tonic-gate { 47, 64, 0, 2048 }, 9547c478bd9Sstevel@tonic-gate { 63, 64, 0, 1024 }, 9557c478bd9Sstevel@tonic-gate { 95, 64, 0, 512 }, 9567c478bd9Sstevel@tonic-gate { 143, 64, 0, 0 }, 9577c478bd9Sstevel@tonic-gate }; 9587c478bd9Sstevel@tonic-gate 9597c478bd9Sstevel@tonic-gate static uint32_t kmem_reaping; 9607c478bd9Sstevel@tonic-gate static uint32_t kmem_reaping_idspace; 9617c478bd9Sstevel@tonic-gate 9627c478bd9Sstevel@tonic-gate /* 9637c478bd9Sstevel@tonic-gate * kmem tunables 9647c478bd9Sstevel@tonic-gate */ 9657c478bd9Sstevel@tonic-gate clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */ 9667c478bd9Sstevel@tonic-gate int kmem_depot_contention = 3; /* max failed tryenters per real interval */ 9677c478bd9Sstevel@tonic-gate pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */ 9687c478bd9Sstevel@tonic-gate int kmem_panic = 1; /* whether to panic on error */ 9697c478bd9Sstevel@tonic-gate int kmem_logging = 1; /* kmem_log_enter() override */ 9707c478bd9Sstevel@tonic-gate uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */ 9717c478bd9Sstevel@tonic-gate size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */ 9727c478bd9Sstevel@tonic-gate size_t kmem_content_log_size; /* content log size [2% of memory] */ 9737c478bd9Sstevel@tonic-gate size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */ 9747c478bd9Sstevel@tonic-gate size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */ 9757c478bd9Sstevel@tonic-gate size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */ 9767c478bd9Sstevel@tonic-gate size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */ 9777c478bd9Sstevel@tonic-gate size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */ 9787c478bd9Sstevel@tonic-gate int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */ 
9797c478bd9Sstevel@tonic-gate size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */ 9807c478bd9Sstevel@tonic-gate size_t kmem_minfirewall; /* hardware-enforced redzone threshold */ 9817c478bd9Sstevel@tonic-gate 9827c478bd9Sstevel@tonic-gate #ifdef DEBUG 9837c478bd9Sstevel@tonic-gate int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS; 9847c478bd9Sstevel@tonic-gate #else 9857c478bd9Sstevel@tonic-gate int kmem_flags = 0; 9867c478bd9Sstevel@tonic-gate #endif 9877c478bd9Sstevel@tonic-gate int kmem_ready; 9887c478bd9Sstevel@tonic-gate 9897c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_slab_cache; 9907c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_bufctl_cache; 9917c478bd9Sstevel@tonic-gate static kmem_cache_t *kmem_bufctl_audit_cache; 9927c478bd9Sstevel@tonic-gate 9937c478bd9Sstevel@tonic-gate static kmutex_t kmem_cache_lock; /* inter-cache linkage only */ 994*b5fca8f8Stomee static list_t kmem_caches; 9957c478bd9Sstevel@tonic-gate 9967c478bd9Sstevel@tonic-gate static taskq_t *kmem_taskq; 9977c478bd9Sstevel@tonic-gate static kmutex_t kmem_flags_lock; 9987c478bd9Sstevel@tonic-gate static vmem_t *kmem_metadata_arena; 9997c478bd9Sstevel@tonic-gate static vmem_t *kmem_msb_arena; /* arena for metadata caches */ 10007c478bd9Sstevel@tonic-gate static vmem_t *kmem_cache_arena; 10017c478bd9Sstevel@tonic-gate static vmem_t *kmem_hash_arena; 10027c478bd9Sstevel@tonic-gate static vmem_t *kmem_log_arena; 10037c478bd9Sstevel@tonic-gate static vmem_t *kmem_oversize_arena; 10047c478bd9Sstevel@tonic-gate static vmem_t *kmem_va_arena; 10057c478bd9Sstevel@tonic-gate static vmem_t *kmem_default_arena; 10067c478bd9Sstevel@tonic-gate static vmem_t *kmem_firewall_va_arena; 10077c478bd9Sstevel@tonic-gate static vmem_t *kmem_firewall_arena; 10087c478bd9Sstevel@tonic-gate 1009*b5fca8f8Stomee /* 1010*b5fca8f8Stomee * Define KMEM_STATS to turn on statistic gathering. By default, it is only 1011*b5fca8f8Stomee * turned on when DEBUG is also defined. 1012*b5fca8f8Stomee */ 1013*b5fca8f8Stomee #ifdef DEBUG 1014*b5fca8f8Stomee #define KMEM_STATS 1015*b5fca8f8Stomee #endif /* DEBUG */ 1016*b5fca8f8Stomee 1017*b5fca8f8Stomee #ifdef KMEM_STATS 1018*b5fca8f8Stomee #define KMEM_STAT_ADD(stat) ((stat)++) 1019*b5fca8f8Stomee #define KMEM_STAT_COND_ADD(cond, stat) ((void) (!(cond) || (stat)++)) 1020*b5fca8f8Stomee #else 1021*b5fca8f8Stomee #define KMEM_STAT_ADD(stat) /* nothing */ 1022*b5fca8f8Stomee #define KMEM_STAT_COND_ADD(cond, stat) /* nothing */ 1023*b5fca8f8Stomee #endif /* KMEM_STATS */ 1024*b5fca8f8Stomee 1025*b5fca8f8Stomee /* 1026*b5fca8f8Stomee * kmem slab consolidator thresholds (tunables) 1027*b5fca8f8Stomee */ 1028*b5fca8f8Stomee static size_t kmem_frag_minslabs = 101; /* minimum total slabs */ 1029*b5fca8f8Stomee static size_t kmem_frag_numer = 1; /* free buffers (numerator) */ 1030*b5fca8f8Stomee static size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */ 1031*b5fca8f8Stomee /* 1032*b5fca8f8Stomee * Maximum number of slabs from which to move buffers during a single 1033*b5fca8f8Stomee * maintenance interval while the system is not low on memory. 1034*b5fca8f8Stomee */ 1035*b5fca8f8Stomee static size_t kmem_reclaim_max_slabs = 1; 1036*b5fca8f8Stomee /* 1037*b5fca8f8Stomee * Number of slabs to scan backwards from the end of the partial slab list 1038*b5fca8f8Stomee * when searching for buffers to relocate. 
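 *
 * For example, with the default of 12 below, each pass inspects at most the
 * twelve slabs nearest the tail of the partial slab list (the least
 * allocated slabs, given the ideal ordering described in section 3.2 of the
 * big theory statement); difficulty finding a candidate within that range
 * is what drives the allocation threshold up (section 3.1).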
1039*b5fca8f8Stomee */ 1040*b5fca8f8Stomee static size_t kmem_reclaim_scan_range = 12; 1041*b5fca8f8Stomee 1042*b5fca8f8Stomee #ifdef KMEM_STATS 1043*b5fca8f8Stomee static struct { 1044*b5fca8f8Stomee uint64_t kms_callbacks; 1045*b5fca8f8Stomee uint64_t kms_yes; 1046*b5fca8f8Stomee uint64_t kms_no; 1047*b5fca8f8Stomee uint64_t kms_later; 1048*b5fca8f8Stomee uint64_t kms_dont_need; 1049*b5fca8f8Stomee uint64_t kms_dont_know; 1050*b5fca8f8Stomee uint64_t kms_hunt_found_slab; 1051*b5fca8f8Stomee uint64_t kms_hunt_found_mag; 1052*b5fca8f8Stomee uint64_t kms_hunt_notfound; 1053*b5fca8f8Stomee uint64_t kms_hunt_alloc_fail; 1054*b5fca8f8Stomee uint64_t kms_hunt_lucky; 1055*b5fca8f8Stomee uint64_t kms_notify; 1056*b5fca8f8Stomee uint64_t kms_notify_callbacks; 1057*b5fca8f8Stomee uint64_t kms_disbelief; 1058*b5fca8f8Stomee uint64_t kms_already_pending; 1059*b5fca8f8Stomee uint64_t kms_callback_alloc_fail; 1060*b5fca8f8Stomee uint64_t kms_endscan_slab_destroyed; 1061*b5fca8f8Stomee uint64_t kms_endscan_nomem; 1062*b5fca8f8Stomee uint64_t kms_endscan_slab_all_used; 1063*b5fca8f8Stomee uint64_t kms_endscan_refcnt_changed; 1064*b5fca8f8Stomee uint64_t kms_endscan_nomove_changed; 1065*b5fca8f8Stomee uint64_t kms_endscan_freelist; 1066*b5fca8f8Stomee uint64_t kms_avl_update; 1067*b5fca8f8Stomee uint64_t kms_avl_noupdate; 1068*b5fca8f8Stomee uint64_t kms_no_longer_reclaimable; 1069*b5fca8f8Stomee uint64_t kms_notify_no_longer_reclaimable; 1070*b5fca8f8Stomee uint64_t kms_alloc_fail; 1071*b5fca8f8Stomee uint64_t kms_constructor_fail; 1072*b5fca8f8Stomee uint64_t kms_dead_slabs_freed; 1073*b5fca8f8Stomee uint64_t kms_defrags; 1074*b5fca8f8Stomee uint64_t kms_scan_depot_ws_reaps; 1075*b5fca8f8Stomee uint64_t kms_debug_reaps; 1076*b5fca8f8Stomee uint64_t kms_debug_move_scans; 1077*b5fca8f8Stomee } kmem_move_stats; 1078*b5fca8f8Stomee #endif /* KMEM_STATS */ 1079*b5fca8f8Stomee 1080*b5fca8f8Stomee /* consolidator knobs */ 1081*b5fca8f8Stomee static boolean_t kmem_move_noreap; 1082*b5fca8f8Stomee static boolean_t kmem_move_blocked; 1083*b5fca8f8Stomee static boolean_t kmem_move_fulltilt; 1084*b5fca8f8Stomee static boolean_t kmem_move_any_partial; 1085*b5fca8f8Stomee 1086*b5fca8f8Stomee #ifdef DEBUG 1087*b5fca8f8Stomee /* 1088*b5fca8f8Stomee * Ensure code coverage by occasionally running the consolidator even when the 1089*b5fca8f8Stomee * caches are not fragmented (they may never be). These intervals are mean time 1090*b5fca8f8Stomee * in cache maintenance intervals (kmem_cache_update). 
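 *
 * For example, since kmem_cache_update() visits each cache every fifteen
 * seconds, kmem_mtb_move = 60 forces a one-slab defrag on average every
 * 60 * 15s = 15 minutes, and kmem_mtb_reap = 1800 forces a full defrag
 * reap on average every 1800 * 15s = 7.5 hours.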
1091*b5fca8f8Stomee */
1092*b5fca8f8Stomee static int kmem_mtb_move = 60; /* defrag 1 slab (~15min) */
1093*b5fca8f8Stomee static int kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */
1094*b5fca8f8Stomee #endif /* DEBUG */
1095*b5fca8f8Stomee 
1096*b5fca8f8Stomee static kmem_cache_t *kmem_defrag_cache;
1097*b5fca8f8Stomee static kmem_cache_t *kmem_move_cache;
1098*b5fca8f8Stomee static taskq_t *kmem_move_taskq;
1099*b5fca8f8Stomee 
1100*b5fca8f8Stomee static void kmem_cache_scan(kmem_cache_t *);
1101*b5fca8f8Stomee static void kmem_cache_defrag(kmem_cache_t *);
1102*b5fca8f8Stomee 
1103*b5fca8f8Stomee 
11047c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_transaction_log;
11057c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_content_log;
11067c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_failure_log;
11077c478bd9Sstevel@tonic-gate kmem_log_header_t *kmem_slab_log;
11087c478bd9Sstevel@tonic-gate 
11097c478bd9Sstevel@tonic-gate static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
11107c478bd9Sstevel@tonic-gate 
11117c478bd9Sstevel@tonic-gate #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
11127c478bd9Sstevel@tonic-gate 	if ((count) > 0) { \
11137c478bd9Sstevel@tonic-gate 		pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
11147c478bd9Sstevel@tonic-gate 		pc_t *_e; \
11157c478bd9Sstevel@tonic-gate 		/* memmove() the old entries down one notch */ \
11167c478bd9Sstevel@tonic-gate 		for (_e = &_s[(count) - 1]; _e > _s; _e--) \
11177c478bd9Sstevel@tonic-gate 			*_e = *(_e - 1); \
11187c478bd9Sstevel@tonic-gate 		*_s = (uintptr_t)(caller); \
11197c478bd9Sstevel@tonic-gate 	}
11207c478bd9Sstevel@tonic-gate 
11217c478bd9Sstevel@tonic-gate #define KMERR_MODIFIED 0 /* buffer modified while on freelist */
11227c478bd9Sstevel@tonic-gate #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
11237c478bd9Sstevel@tonic-gate #define KMERR_DUPFREE 2 /* freed a buffer twice */
11247c478bd9Sstevel@tonic-gate #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */
11257c478bd9Sstevel@tonic-gate #define KMERR_BADBUFTAG 4 /* buftag corrupted */
11267c478bd9Sstevel@tonic-gate #define KMERR_BADBUFCTL 5 /* bufctl corrupted */
11277c478bd9Sstevel@tonic-gate #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
11287c478bd9Sstevel@tonic-gate #define KMERR_BADSIZE 7 /* alloc size != free size */
11297c478bd9Sstevel@tonic-gate #define KMERR_BADBASE 8 /* buffer base address wrong */
11307c478bd9Sstevel@tonic-gate 
11317c478bd9Sstevel@tonic-gate struct {
11327c478bd9Sstevel@tonic-gate 	hrtime_t kmp_timestamp; /* timestamp of panic */
11337c478bd9Sstevel@tonic-gate 	int kmp_error; /* type of kmem error */
11347c478bd9Sstevel@tonic-gate 	void *kmp_buffer; /* buffer that induced panic */
11357c478bd9Sstevel@tonic-gate 	void *kmp_realbuf; /* real start address for buffer */
11367c478bd9Sstevel@tonic-gate 	kmem_cache_t *kmp_cache; /* buffer's cache according to client */
11377c478bd9Sstevel@tonic-gate 	kmem_cache_t *kmp_realcache; /* actual cache containing buffer */
11387c478bd9Sstevel@tonic-gate 	kmem_slab_t *kmp_slab; /* slab according to kmem_findslab() */
11397c478bd9Sstevel@tonic-gate 	kmem_bufctl_t *kmp_bufctl; /* bufctl */
11407c478bd9Sstevel@tonic-gate } kmem_panic_info;
11417c478bd9Sstevel@tonic-gate 
11427c478bd9Sstevel@tonic-gate 
11437c478bd9Sstevel@tonic-gate static void
11447c478bd9Sstevel@tonic-gate copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
11457c478bd9Sstevel@tonic-gate {
11467c478bd9Sstevel@tonic-gate 	uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
11477c478bd9Sstevel@tonic-gate uint64_t *buf = buf_arg; 11487c478bd9Sstevel@tonic-gate 11497c478bd9Sstevel@tonic-gate while (buf < bufend) 11507c478bd9Sstevel@tonic-gate *buf++ = pattern; 11517c478bd9Sstevel@tonic-gate } 11527c478bd9Sstevel@tonic-gate 11537c478bd9Sstevel@tonic-gate static void * 11547c478bd9Sstevel@tonic-gate verify_pattern(uint64_t pattern, void *buf_arg, size_t size) 11557c478bd9Sstevel@tonic-gate { 11567c478bd9Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 11577c478bd9Sstevel@tonic-gate uint64_t *buf; 11587c478bd9Sstevel@tonic-gate 11597c478bd9Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) 11607c478bd9Sstevel@tonic-gate if (*buf != pattern) 11617c478bd9Sstevel@tonic-gate return (buf); 11627c478bd9Sstevel@tonic-gate return (NULL); 11637c478bd9Sstevel@tonic-gate } 11647c478bd9Sstevel@tonic-gate 11657c478bd9Sstevel@tonic-gate static void * 11667c478bd9Sstevel@tonic-gate verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size) 11677c478bd9Sstevel@tonic-gate { 11687c478bd9Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 11697c478bd9Sstevel@tonic-gate uint64_t *buf; 11707c478bd9Sstevel@tonic-gate 11717c478bd9Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++) { 11727c478bd9Sstevel@tonic-gate if (*buf != old) { 11737c478bd9Sstevel@tonic-gate copy_pattern(old, buf_arg, 11749f1b636aStomee (char *)buf - (char *)buf_arg); 11757c478bd9Sstevel@tonic-gate return (buf); 11767c478bd9Sstevel@tonic-gate } 11777c478bd9Sstevel@tonic-gate *buf = new; 11787c478bd9Sstevel@tonic-gate } 11797c478bd9Sstevel@tonic-gate 11807c478bd9Sstevel@tonic-gate return (NULL); 11817c478bd9Sstevel@tonic-gate } 11827c478bd9Sstevel@tonic-gate 11837c478bd9Sstevel@tonic-gate static void 11847c478bd9Sstevel@tonic-gate kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag) 11857c478bd9Sstevel@tonic-gate { 11867c478bd9Sstevel@tonic-gate kmem_cache_t *cp; 11877c478bd9Sstevel@tonic-gate 11887c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock); 1189*b5fca8f8Stomee for (cp = list_head(&kmem_caches); cp != NULL; 1190*b5fca8f8Stomee cp = list_next(&kmem_caches, cp)) 11917c478bd9Sstevel@tonic-gate if (tq != NULL) 11927c478bd9Sstevel@tonic-gate (void) taskq_dispatch(tq, (task_func_t *)func, cp, 11937c478bd9Sstevel@tonic-gate tqflag); 11947c478bd9Sstevel@tonic-gate else 11957c478bd9Sstevel@tonic-gate func(cp); 11967c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock); 11977c478bd9Sstevel@tonic-gate } 11987c478bd9Sstevel@tonic-gate 11997c478bd9Sstevel@tonic-gate static void 12007c478bd9Sstevel@tonic-gate kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag) 12017c478bd9Sstevel@tonic-gate { 12027c478bd9Sstevel@tonic-gate kmem_cache_t *cp; 12037c478bd9Sstevel@tonic-gate 12047c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock); 1205*b5fca8f8Stomee for (cp = list_head(&kmem_caches); cp != NULL; 1206*b5fca8f8Stomee cp = list_next(&kmem_caches, cp)) { 12077c478bd9Sstevel@tonic-gate if (!(cp->cache_cflags & KMC_IDENTIFIER)) 12087c478bd9Sstevel@tonic-gate continue; 12097c478bd9Sstevel@tonic-gate if (tq != NULL) 12107c478bd9Sstevel@tonic-gate (void) taskq_dispatch(tq, (task_func_t *)func, cp, 12117c478bd9Sstevel@tonic-gate tqflag); 12127c478bd9Sstevel@tonic-gate else 12137c478bd9Sstevel@tonic-gate func(cp); 12147c478bd9Sstevel@tonic-gate } 12157c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock); 12167c478bd9Sstevel@tonic-gate } 12177c478bd9Sstevel@tonic-gate 
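/*
 * For illustration, later parts of this file apply a function to every cache
 * through these helpers; a typical dispatch looks like the following sketch
 * (see the actual callers for the exact function and flags used):
 *
 *	kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
 *
 * When tq is non-NULL the per-cache work runs asynchronously on the task
 * queue; when tq is NULL the function is invoked directly while
 * kmem_cache_lock is held, so it must not reenter the cache list.
 */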
12187c478bd9Sstevel@tonic-gate /* 12197c478bd9Sstevel@tonic-gate * Debugging support. Given a buffer address, find its slab. 12207c478bd9Sstevel@tonic-gate */ 12217c478bd9Sstevel@tonic-gate static kmem_slab_t * 12227c478bd9Sstevel@tonic-gate kmem_findslab(kmem_cache_t *cp, void *buf) 12237c478bd9Sstevel@tonic-gate { 12247c478bd9Sstevel@tonic-gate kmem_slab_t *sp; 12257c478bd9Sstevel@tonic-gate 12267c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock); 1227*b5fca8f8Stomee for (sp = list_head(&cp->cache_complete_slabs); sp != NULL; 1228*b5fca8f8Stomee sp = list_next(&cp->cache_complete_slabs, sp)) { 1229*b5fca8f8Stomee if (KMEM_SLAB_MEMBER(sp, buf)) { 1230*b5fca8f8Stomee mutex_exit(&cp->cache_lock); 1231*b5fca8f8Stomee return (sp); 1232*b5fca8f8Stomee } 1233*b5fca8f8Stomee } 1234*b5fca8f8Stomee for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL; 1235*b5fca8f8Stomee sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) { 12367c478bd9Sstevel@tonic-gate if (KMEM_SLAB_MEMBER(sp, buf)) { 12377c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 12387c478bd9Sstevel@tonic-gate return (sp); 12397c478bd9Sstevel@tonic-gate } 12407c478bd9Sstevel@tonic-gate } 12417c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 12427c478bd9Sstevel@tonic-gate 12437c478bd9Sstevel@tonic-gate return (NULL); 12447c478bd9Sstevel@tonic-gate } 12457c478bd9Sstevel@tonic-gate 12467c478bd9Sstevel@tonic-gate static void 12477c478bd9Sstevel@tonic-gate kmem_error(int error, kmem_cache_t *cparg, void *bufarg) 12487c478bd9Sstevel@tonic-gate { 12497c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = NULL; 12507c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp = NULL; 12517c478bd9Sstevel@tonic-gate kmem_cache_t *cp = cparg; 12527c478bd9Sstevel@tonic-gate kmem_slab_t *sp; 12537c478bd9Sstevel@tonic-gate uint64_t *off; 12547c478bd9Sstevel@tonic-gate void *buf = bufarg; 12557c478bd9Sstevel@tonic-gate 12567c478bd9Sstevel@tonic-gate kmem_logging = 0; /* stop logging when a bad thing happens */ 12577c478bd9Sstevel@tonic-gate 12587c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_timestamp = gethrtime(); 12597c478bd9Sstevel@tonic-gate 12607c478bd9Sstevel@tonic-gate sp = kmem_findslab(cp, buf); 12617c478bd9Sstevel@tonic-gate if (sp == NULL) { 1262*b5fca8f8Stomee for (cp = list_tail(&kmem_caches); cp != NULL; 1263*b5fca8f8Stomee cp = list_prev(&kmem_caches, cp)) { 12647c478bd9Sstevel@tonic-gate if ((sp = kmem_findslab(cp, buf)) != NULL) 12657c478bd9Sstevel@tonic-gate break; 12667c478bd9Sstevel@tonic-gate } 12677c478bd9Sstevel@tonic-gate } 12687c478bd9Sstevel@tonic-gate 12697c478bd9Sstevel@tonic-gate if (sp == NULL) { 12707c478bd9Sstevel@tonic-gate cp = NULL; 12717c478bd9Sstevel@tonic-gate error = KMERR_BADADDR; 12727c478bd9Sstevel@tonic-gate } else { 12737c478bd9Sstevel@tonic-gate if (cp != cparg) 12747c478bd9Sstevel@tonic-gate error = KMERR_BADCACHE; 12757c478bd9Sstevel@tonic-gate else 12767c478bd9Sstevel@tonic-gate buf = (char *)bufarg - ((uintptr_t)bufarg - 12777c478bd9Sstevel@tonic-gate (uintptr_t)sp->slab_base) % cp->cache_chunksize; 12787c478bd9Sstevel@tonic-gate if (buf != bufarg) 12797c478bd9Sstevel@tonic-gate error = KMERR_BADBASE; 12807c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) 12817c478bd9Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf); 12827c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 12837c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock); 12847c478bd9Sstevel@tonic-gate for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next) 12857c478bd9Sstevel@tonic-gate if (bcp->bc_addr == buf) 
12867c478bd9Sstevel@tonic-gate break; 12877c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 12887c478bd9Sstevel@tonic-gate if (bcp == NULL && btp != NULL) 12897c478bd9Sstevel@tonic-gate bcp = btp->bt_bufctl; 12907c478bd9Sstevel@tonic-gate if (kmem_findslab(cp->cache_bufctl_cache, bcp) == 12917c478bd9Sstevel@tonic-gate NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) || 12927c478bd9Sstevel@tonic-gate bcp->bc_addr != buf) { 12937c478bd9Sstevel@tonic-gate error = KMERR_BADBUFCTL; 12947c478bd9Sstevel@tonic-gate bcp = NULL; 12957c478bd9Sstevel@tonic-gate } 12967c478bd9Sstevel@tonic-gate } 12977c478bd9Sstevel@tonic-gate } 12987c478bd9Sstevel@tonic-gate 12997c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_error = error; 13007c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_buffer = bufarg; 13017c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_realbuf = buf; 13027c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_cache = cparg; 13037c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_realcache = cp; 13047c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_slab = sp; 13057c478bd9Sstevel@tonic-gate kmem_panic_info.kmp_bufctl = bcp; 13067c478bd9Sstevel@tonic-gate 13077c478bd9Sstevel@tonic-gate printf("kernel memory allocator: "); 13087c478bd9Sstevel@tonic-gate 13097c478bd9Sstevel@tonic-gate switch (error) { 13107c478bd9Sstevel@tonic-gate 13117c478bd9Sstevel@tonic-gate case KMERR_MODIFIED: 13127c478bd9Sstevel@tonic-gate printf("buffer modified after being freed\n"); 13137c478bd9Sstevel@tonic-gate off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); 13147c478bd9Sstevel@tonic-gate if (off == NULL) /* shouldn't happen */ 13157c478bd9Sstevel@tonic-gate off = buf; 13167c478bd9Sstevel@tonic-gate printf("modification occurred at offset 0x%lx " 13177c478bd9Sstevel@tonic-gate "(0x%llx replaced by 0x%llx)\n", 13187c478bd9Sstevel@tonic-gate (uintptr_t)off - (uintptr_t)buf, 13197c478bd9Sstevel@tonic-gate (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off); 13207c478bd9Sstevel@tonic-gate break; 13217c478bd9Sstevel@tonic-gate 13227c478bd9Sstevel@tonic-gate case KMERR_REDZONE: 13237c478bd9Sstevel@tonic-gate printf("redzone violation: write past end of buffer\n"); 13247c478bd9Sstevel@tonic-gate break; 13257c478bd9Sstevel@tonic-gate 13267c478bd9Sstevel@tonic-gate case KMERR_BADADDR: 13277c478bd9Sstevel@tonic-gate printf("invalid free: buffer not in cache\n"); 13287c478bd9Sstevel@tonic-gate break; 13297c478bd9Sstevel@tonic-gate 13307c478bd9Sstevel@tonic-gate case KMERR_DUPFREE: 13317c478bd9Sstevel@tonic-gate printf("duplicate free: buffer freed twice\n"); 13327c478bd9Sstevel@tonic-gate break; 13337c478bd9Sstevel@tonic-gate 13347c478bd9Sstevel@tonic-gate case KMERR_BADBUFTAG: 13357c478bd9Sstevel@tonic-gate printf("boundary tag corrupted\n"); 13367c478bd9Sstevel@tonic-gate printf("bcp ^ bxstat = %lx, should be %lx\n", 13377c478bd9Sstevel@tonic-gate (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat, 13387c478bd9Sstevel@tonic-gate KMEM_BUFTAG_FREE); 13397c478bd9Sstevel@tonic-gate break; 13407c478bd9Sstevel@tonic-gate 13417c478bd9Sstevel@tonic-gate case KMERR_BADBUFCTL: 13427c478bd9Sstevel@tonic-gate printf("bufctl corrupted\n"); 13437c478bd9Sstevel@tonic-gate break; 13447c478bd9Sstevel@tonic-gate 13457c478bd9Sstevel@tonic-gate case KMERR_BADCACHE: 13467c478bd9Sstevel@tonic-gate printf("buffer freed to wrong cache\n"); 13477c478bd9Sstevel@tonic-gate printf("buffer was allocated from %s,\n", cp->cache_name); 13487c478bd9Sstevel@tonic-gate printf("caller attempting free to %s.\n", cparg->cache_name); 13497c478bd9Sstevel@tonic-gate break; 
13507c478bd9Sstevel@tonic-gate 13517c478bd9Sstevel@tonic-gate case KMERR_BADSIZE: 13527c478bd9Sstevel@tonic-gate printf("bad free: free size (%u) != alloc size (%u)\n", 13537c478bd9Sstevel@tonic-gate KMEM_SIZE_DECODE(((uint32_t *)btp)[0]), 13547c478bd9Sstevel@tonic-gate KMEM_SIZE_DECODE(((uint32_t *)btp)[1])); 13557c478bd9Sstevel@tonic-gate break; 13567c478bd9Sstevel@tonic-gate 13577c478bd9Sstevel@tonic-gate case KMERR_BADBASE: 13587c478bd9Sstevel@tonic-gate printf("bad free: free address (%p) != alloc address (%p)\n", 13597c478bd9Sstevel@tonic-gate bufarg, buf); 13607c478bd9Sstevel@tonic-gate break; 13617c478bd9Sstevel@tonic-gate } 13627c478bd9Sstevel@tonic-gate 13637c478bd9Sstevel@tonic-gate printf("buffer=%p bufctl=%p cache: %s\n", 13647c478bd9Sstevel@tonic-gate bufarg, (void *)bcp, cparg->cache_name); 13657c478bd9Sstevel@tonic-gate 13667c478bd9Sstevel@tonic-gate if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) && 13677c478bd9Sstevel@tonic-gate error != KMERR_BADBUFCTL) { 13687c478bd9Sstevel@tonic-gate int d; 13697c478bd9Sstevel@tonic-gate timestruc_t ts; 13707c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp; 13717c478bd9Sstevel@tonic-gate 13727c478bd9Sstevel@tonic-gate hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts); 13737c478bd9Sstevel@tonic-gate printf("previous transaction on buffer %p:\n", buf); 13747c478bd9Sstevel@tonic-gate printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n", 13757c478bd9Sstevel@tonic-gate (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec, 13767c478bd9Sstevel@tonic-gate (void *)sp, cp->cache_name); 13777c478bd9Sstevel@tonic-gate for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) { 13787c478bd9Sstevel@tonic-gate ulong_t off; 13797c478bd9Sstevel@tonic-gate char *sym = kobj_getsymname(bcap->bc_stack[d], &off); 13807c478bd9Sstevel@tonic-gate printf("%s+%lx\n", sym ? sym : "?", off); 13817c478bd9Sstevel@tonic-gate } 13827c478bd9Sstevel@tonic-gate } 13837c478bd9Sstevel@tonic-gate if (kmem_panic > 0) 13847c478bd9Sstevel@tonic-gate panic("kernel heap corruption detected"); 13857c478bd9Sstevel@tonic-gate if (kmem_panic == 0) 13867c478bd9Sstevel@tonic-gate debug_enter(NULL); 13877c478bd9Sstevel@tonic-gate kmem_logging = 1; /* resume logging */ 13887c478bd9Sstevel@tonic-gate } 13897c478bd9Sstevel@tonic-gate 13907c478bd9Sstevel@tonic-gate static kmem_log_header_t * 13917c478bd9Sstevel@tonic-gate kmem_log_init(size_t logsize) 13927c478bd9Sstevel@tonic-gate { 13937c478bd9Sstevel@tonic-gate kmem_log_header_t *lhp; 13947c478bd9Sstevel@tonic-gate int nchunks = 4 * max_ncpus; 13957c478bd9Sstevel@tonic-gate size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus]; 13967c478bd9Sstevel@tonic-gate int i; 13977c478bd9Sstevel@tonic-gate 13987c478bd9Sstevel@tonic-gate /* 13997c478bd9Sstevel@tonic-gate * Make sure that lhp->lh_cpu[] is nicely aligned 14007c478bd9Sstevel@tonic-gate * to prevent false sharing of cache lines. 
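 *
 * Specifically, since lh_cpu[] is the tail of the header, the vmem_xalloc()
 * below requests a 64-byte alignment with a phase of P2NPHASE(lhsize, 64),
 * which makes the allocation end on a 64-byte boundary; assuming each
 * per-CPU header is padded out to a cache line, every lh_cpu[] entry then
 * starts on a line of its own.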
14017c478bd9Sstevel@tonic-gate */ 14027c478bd9Sstevel@tonic-gate lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN); 14037c478bd9Sstevel@tonic-gate lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0, 14047c478bd9Sstevel@tonic-gate NULL, NULL, VM_SLEEP); 14057c478bd9Sstevel@tonic-gate bzero(lhp, lhsize); 14067c478bd9Sstevel@tonic-gate 14077c478bd9Sstevel@tonic-gate mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL); 14087c478bd9Sstevel@tonic-gate lhp->lh_nchunks = nchunks; 14097c478bd9Sstevel@tonic-gate lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE); 14107c478bd9Sstevel@tonic-gate lhp->lh_base = vmem_alloc(kmem_log_arena, 14117c478bd9Sstevel@tonic-gate lhp->lh_chunksize * nchunks, VM_SLEEP); 14127c478bd9Sstevel@tonic-gate lhp->lh_free = vmem_alloc(kmem_log_arena, 14137c478bd9Sstevel@tonic-gate nchunks * sizeof (int), VM_SLEEP); 14147c478bd9Sstevel@tonic-gate bzero(lhp->lh_base, lhp->lh_chunksize * nchunks); 14157c478bd9Sstevel@tonic-gate 14167c478bd9Sstevel@tonic-gate for (i = 0; i < max_ncpus; i++) { 14177c478bd9Sstevel@tonic-gate kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i]; 14187c478bd9Sstevel@tonic-gate mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL); 14197c478bd9Sstevel@tonic-gate clhp->clh_chunk = i; 14207c478bd9Sstevel@tonic-gate } 14217c478bd9Sstevel@tonic-gate 14227c478bd9Sstevel@tonic-gate for (i = max_ncpus; i < nchunks; i++) 14237c478bd9Sstevel@tonic-gate lhp->lh_free[i] = i; 14247c478bd9Sstevel@tonic-gate 14257c478bd9Sstevel@tonic-gate lhp->lh_head = max_ncpus; 14267c478bd9Sstevel@tonic-gate lhp->lh_tail = 0; 14277c478bd9Sstevel@tonic-gate 14287c478bd9Sstevel@tonic-gate return (lhp); 14297c478bd9Sstevel@tonic-gate } 14307c478bd9Sstevel@tonic-gate 14317c478bd9Sstevel@tonic-gate static void * 14327c478bd9Sstevel@tonic-gate kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size) 14337c478bd9Sstevel@tonic-gate { 14347c478bd9Sstevel@tonic-gate void *logspace; 14357c478bd9Sstevel@tonic-gate kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid]; 14367c478bd9Sstevel@tonic-gate 14377c478bd9Sstevel@tonic-gate if (lhp == NULL || kmem_logging == 0 || panicstr) 14387c478bd9Sstevel@tonic-gate return (NULL); 14397c478bd9Sstevel@tonic-gate 14407c478bd9Sstevel@tonic-gate mutex_enter(&clhp->clh_lock); 14417c478bd9Sstevel@tonic-gate clhp->clh_hits++; 14427c478bd9Sstevel@tonic-gate if (size > clhp->clh_avail) { 14437c478bd9Sstevel@tonic-gate mutex_enter(&lhp->lh_lock); 14447c478bd9Sstevel@tonic-gate lhp->lh_hits++; 14457c478bd9Sstevel@tonic-gate lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk; 14467c478bd9Sstevel@tonic-gate lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks; 14477c478bd9Sstevel@tonic-gate clhp->clh_chunk = lhp->lh_free[lhp->lh_head]; 14487c478bd9Sstevel@tonic-gate lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks; 14497c478bd9Sstevel@tonic-gate clhp->clh_current = lhp->lh_base + 14509f1b636aStomee clhp->clh_chunk * lhp->lh_chunksize; 14517c478bd9Sstevel@tonic-gate clhp->clh_avail = lhp->lh_chunksize; 14527c478bd9Sstevel@tonic-gate if (size > lhp->lh_chunksize) 14537c478bd9Sstevel@tonic-gate size = lhp->lh_chunksize; 14547c478bd9Sstevel@tonic-gate mutex_exit(&lhp->lh_lock); 14557c478bd9Sstevel@tonic-gate } 14567c478bd9Sstevel@tonic-gate logspace = clhp->clh_current; 14577c478bd9Sstevel@tonic-gate clhp->clh_current += size; 14587c478bd9Sstevel@tonic-gate clhp->clh_avail -= size; 14597c478bd9Sstevel@tonic-gate bcopy(data, logspace, size); 14607c478bd9Sstevel@tonic-gate mutex_exit(&clhp->clh_lock); 14617c478bd9Sstevel@tonic-gate return 
(logspace); 14627c478bd9Sstevel@tonic-gate } 14637c478bd9Sstevel@tonic-gate 14647c478bd9Sstevel@tonic-gate #define KMEM_AUDIT(lp, cp, bcp) \ 14657c478bd9Sstevel@tonic-gate { \ 14667c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \ 14677c478bd9Sstevel@tonic-gate _bcp->bc_timestamp = gethrtime(); \ 14687c478bd9Sstevel@tonic-gate _bcp->bc_thread = curthread; \ 14697c478bd9Sstevel@tonic-gate _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \ 14707c478bd9Sstevel@tonic-gate _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \ 14717c478bd9Sstevel@tonic-gate } 14727c478bd9Sstevel@tonic-gate 14737c478bd9Sstevel@tonic-gate static void 14747c478bd9Sstevel@tonic-gate kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp, 14757c478bd9Sstevel@tonic-gate kmem_slab_t *sp, void *addr) 14767c478bd9Sstevel@tonic-gate { 14777c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bca; 14787c478bd9Sstevel@tonic-gate 14797c478bd9Sstevel@tonic-gate bzero(&bca, sizeof (kmem_bufctl_audit_t)); 14807c478bd9Sstevel@tonic-gate bca.bc_addr = addr; 14817c478bd9Sstevel@tonic-gate bca.bc_slab = sp; 14827c478bd9Sstevel@tonic-gate bca.bc_cache = cp; 14837c478bd9Sstevel@tonic-gate KMEM_AUDIT(lp, cp, &bca); 14847c478bd9Sstevel@tonic-gate } 14857c478bd9Sstevel@tonic-gate 14867c478bd9Sstevel@tonic-gate /* 14877c478bd9Sstevel@tonic-gate * Create a new slab for cache cp. 14887c478bd9Sstevel@tonic-gate */ 14897c478bd9Sstevel@tonic-gate static kmem_slab_t * 14907c478bd9Sstevel@tonic-gate kmem_slab_create(kmem_cache_t *cp, int kmflag) 14917c478bd9Sstevel@tonic-gate { 14927c478bd9Sstevel@tonic-gate size_t slabsize = cp->cache_slabsize; 14937c478bd9Sstevel@tonic-gate size_t chunksize = cp->cache_chunksize; 14947c478bd9Sstevel@tonic-gate int cache_flags = cp->cache_flags; 14957c478bd9Sstevel@tonic-gate size_t color, chunks; 14967c478bd9Sstevel@tonic-gate char *buf, *slab; 14977c478bd9Sstevel@tonic-gate kmem_slab_t *sp; 14987c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp; 14997c478bd9Sstevel@tonic-gate vmem_t *vmp = cp->cache_arena; 15007c478bd9Sstevel@tonic-gate 1501*b5fca8f8Stomee ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 1502*b5fca8f8Stomee 15037c478bd9Sstevel@tonic-gate color = cp->cache_color + cp->cache_align; 15047c478bd9Sstevel@tonic-gate if (color > cp->cache_maxcolor) 15057c478bd9Sstevel@tonic-gate color = cp->cache_mincolor; 15067c478bd9Sstevel@tonic-gate cp->cache_color = color; 15077c478bd9Sstevel@tonic-gate 15087c478bd9Sstevel@tonic-gate slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS); 15097c478bd9Sstevel@tonic-gate 15107c478bd9Sstevel@tonic-gate if (slab == NULL) 15117c478bd9Sstevel@tonic-gate goto vmem_alloc_failure; 15127c478bd9Sstevel@tonic-gate 15137c478bd9Sstevel@tonic-gate ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0); 15147c478bd9Sstevel@tonic-gate 1515*b5fca8f8Stomee /* 1516*b5fca8f8Stomee * Reverify what was already checked in kmem_cache_set_move(), since the 1517*b5fca8f8Stomee * consolidator depends (for correctness) on slabs being initialized 1518*b5fca8f8Stomee * with the 0xbaddcafe memory pattern (setting a low order bit usable by 1519*b5fca8f8Stomee * clients to distinguish uninitialized memory from known objects). 
1520*b5fca8f8Stomee */ 1521*b5fca8f8Stomee ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH)); 15227c478bd9Sstevel@tonic-gate if (!(cp->cache_cflags & KMC_NOTOUCH)) 15237c478bd9Sstevel@tonic-gate copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize); 15247c478bd9Sstevel@tonic-gate 15257c478bd9Sstevel@tonic-gate if (cache_flags & KMF_HASH) { 15267c478bd9Sstevel@tonic-gate if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL) 15277c478bd9Sstevel@tonic-gate goto slab_alloc_failure; 15287c478bd9Sstevel@tonic-gate chunks = (slabsize - color) / chunksize; 15297c478bd9Sstevel@tonic-gate } else { 15307c478bd9Sstevel@tonic-gate sp = KMEM_SLAB(cp, slab); 15317c478bd9Sstevel@tonic-gate chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize; 15327c478bd9Sstevel@tonic-gate } 15337c478bd9Sstevel@tonic-gate 15347c478bd9Sstevel@tonic-gate sp->slab_cache = cp; 15357c478bd9Sstevel@tonic-gate sp->slab_head = NULL; 15367c478bd9Sstevel@tonic-gate sp->slab_refcnt = 0; 15377c478bd9Sstevel@tonic-gate sp->slab_base = buf = slab + color; 15387c478bd9Sstevel@tonic-gate sp->slab_chunks = chunks; 1539*b5fca8f8Stomee sp->slab_stuck_offset = (uint32_t)-1; 1540*b5fca8f8Stomee sp->slab_later_count = 0; 1541*b5fca8f8Stomee sp->slab_flags = 0; 15427c478bd9Sstevel@tonic-gate 15437c478bd9Sstevel@tonic-gate ASSERT(chunks > 0); 15447c478bd9Sstevel@tonic-gate while (chunks-- != 0) { 15457c478bd9Sstevel@tonic-gate if (cache_flags & KMF_HASH) { 15467c478bd9Sstevel@tonic-gate bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag); 15477c478bd9Sstevel@tonic-gate if (bcp == NULL) 15487c478bd9Sstevel@tonic-gate goto bufctl_alloc_failure; 15497c478bd9Sstevel@tonic-gate if (cache_flags & KMF_AUDIT) { 15507c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcap = 15517c478bd9Sstevel@tonic-gate (kmem_bufctl_audit_t *)bcp; 15527c478bd9Sstevel@tonic-gate bzero(bcap, sizeof (kmem_bufctl_audit_t)); 15537c478bd9Sstevel@tonic-gate bcap->bc_cache = cp; 15547c478bd9Sstevel@tonic-gate } 15557c478bd9Sstevel@tonic-gate bcp->bc_addr = buf; 15567c478bd9Sstevel@tonic-gate bcp->bc_slab = sp; 15577c478bd9Sstevel@tonic-gate } else { 15587c478bd9Sstevel@tonic-gate bcp = KMEM_BUFCTL(cp, buf); 15597c478bd9Sstevel@tonic-gate } 15607c478bd9Sstevel@tonic-gate if (cache_flags & KMF_BUFTAG) { 15617c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 15627c478bd9Sstevel@tonic-gate btp->bt_redzone = KMEM_REDZONE_PATTERN; 15637c478bd9Sstevel@tonic-gate btp->bt_bufctl = bcp; 15647c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE; 15657c478bd9Sstevel@tonic-gate if (cache_flags & KMF_DEADBEEF) { 15667c478bd9Sstevel@tonic-gate copy_pattern(KMEM_FREE_PATTERN, buf, 15677c478bd9Sstevel@tonic-gate cp->cache_verify); 15687c478bd9Sstevel@tonic-gate } 15697c478bd9Sstevel@tonic-gate } 15707c478bd9Sstevel@tonic-gate bcp->bc_next = sp->slab_head; 15717c478bd9Sstevel@tonic-gate sp->slab_head = bcp; 15727c478bd9Sstevel@tonic-gate buf += chunksize; 15737c478bd9Sstevel@tonic-gate } 15747c478bd9Sstevel@tonic-gate 15757c478bd9Sstevel@tonic-gate kmem_log_event(kmem_slab_log, cp, sp, slab); 15767c478bd9Sstevel@tonic-gate 15777c478bd9Sstevel@tonic-gate return (sp); 15787c478bd9Sstevel@tonic-gate 15797c478bd9Sstevel@tonic-gate bufctl_alloc_failure: 15807c478bd9Sstevel@tonic-gate 15817c478bd9Sstevel@tonic-gate while ((bcp = sp->slab_head) != NULL) { 15827c478bd9Sstevel@tonic-gate sp->slab_head = bcp->bc_next; 15837c478bd9Sstevel@tonic-gate kmem_cache_free(cp->cache_bufctl_cache, bcp); 15847c478bd9Sstevel@tonic-gate 
} 15857c478bd9Sstevel@tonic-gate kmem_cache_free(kmem_slab_cache, sp); 15867c478bd9Sstevel@tonic-gate 15877c478bd9Sstevel@tonic-gate slab_alloc_failure: 15887c478bd9Sstevel@tonic-gate 15897c478bd9Sstevel@tonic-gate vmem_free(vmp, slab, slabsize); 15907c478bd9Sstevel@tonic-gate 15917c478bd9Sstevel@tonic-gate vmem_alloc_failure: 15927c478bd9Sstevel@tonic-gate 15937c478bd9Sstevel@tonic-gate kmem_log_event(kmem_failure_log, cp, NULL, NULL); 15947c478bd9Sstevel@tonic-gate atomic_add_64(&cp->cache_alloc_fail, 1); 15957c478bd9Sstevel@tonic-gate 15967c478bd9Sstevel@tonic-gate return (NULL); 15977c478bd9Sstevel@tonic-gate } 15987c478bd9Sstevel@tonic-gate 15997c478bd9Sstevel@tonic-gate /* 16007c478bd9Sstevel@tonic-gate * Destroy a slab. 16017c478bd9Sstevel@tonic-gate */ 16027c478bd9Sstevel@tonic-gate static void 16037c478bd9Sstevel@tonic-gate kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp) 16047c478bd9Sstevel@tonic-gate { 16057c478bd9Sstevel@tonic-gate vmem_t *vmp = cp->cache_arena; 16067c478bd9Sstevel@tonic-gate void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum); 16077c478bd9Sstevel@tonic-gate 1608*b5fca8f8Stomee ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 1609*b5fca8f8Stomee ASSERT(sp->slab_refcnt == 0); 1610*b5fca8f8Stomee 16117c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 16127c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp; 16137c478bd9Sstevel@tonic-gate while ((bcp = sp->slab_head) != NULL) { 16147c478bd9Sstevel@tonic-gate sp->slab_head = bcp->bc_next; 16157c478bd9Sstevel@tonic-gate kmem_cache_free(cp->cache_bufctl_cache, bcp); 16167c478bd9Sstevel@tonic-gate } 16177c478bd9Sstevel@tonic-gate kmem_cache_free(kmem_slab_cache, sp); 16187c478bd9Sstevel@tonic-gate } 16197c478bd9Sstevel@tonic-gate vmem_free(vmp, slab, cp->cache_slabsize); 16207c478bd9Sstevel@tonic-gate } 16217c478bd9Sstevel@tonic-gate 16227c478bd9Sstevel@tonic-gate static void * 1623*b5fca8f8Stomee kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp) 16247c478bd9Sstevel@tonic-gate { 16257c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp, **hash_bucket; 16267c478bd9Sstevel@tonic-gate void *buf; 16277c478bd9Sstevel@tonic-gate 1628*b5fca8f8Stomee ASSERT(MUTEX_HELD(&cp->cache_lock)); 1629*b5fca8f8Stomee /* 1630*b5fca8f8Stomee * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we 1631*b5fca8f8Stomee * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the 1632*b5fca8f8Stomee * slab is newly created (sp->slab_refcnt == 0). 
1633*b5fca8f8Stomee */ 1634*b5fca8f8Stomee ASSERT((sp->slab_refcnt == 0) || (KMEM_SLAB_IS_PARTIAL(sp) && 1635*b5fca8f8Stomee (sp == avl_first(&cp->cache_partial_slabs)))); 16367c478bd9Sstevel@tonic-gate ASSERT(sp->slab_cache == cp); 16377c478bd9Sstevel@tonic-gate 1638*b5fca8f8Stomee cp->cache_slab_alloc++; 16399f1b636aStomee cp->cache_bufslab--; 16407c478bd9Sstevel@tonic-gate sp->slab_refcnt++; 16417c478bd9Sstevel@tonic-gate 16427c478bd9Sstevel@tonic-gate bcp = sp->slab_head; 16437c478bd9Sstevel@tonic-gate if ((sp->slab_head = bcp->bc_next) == NULL) { 1644*b5fca8f8Stomee ASSERT(KMEM_SLAB_IS_ALL_USED(sp)); 1645*b5fca8f8Stomee if (sp->slab_refcnt == 1) { 1646*b5fca8f8Stomee ASSERT(sp->slab_chunks == 1); 1647*b5fca8f8Stomee } else { 1648*b5fca8f8Stomee ASSERT(sp->slab_chunks > 1); /* the slab was partial */ 1649*b5fca8f8Stomee avl_remove(&cp->cache_partial_slabs, sp); 1650*b5fca8f8Stomee sp->slab_later_count = 0; /* clear history */ 1651*b5fca8f8Stomee sp->slab_flags &= ~KMEM_SLAB_NOMOVE; 1652*b5fca8f8Stomee sp->slab_stuck_offset = (uint32_t)-1; 1653*b5fca8f8Stomee } 1654*b5fca8f8Stomee list_insert_head(&cp->cache_complete_slabs, sp); 1655*b5fca8f8Stomee cp->cache_complete_slab_count++; 1656*b5fca8f8Stomee } else { 1657*b5fca8f8Stomee ASSERT(KMEM_SLAB_IS_PARTIAL(sp)); 1658*b5fca8f8Stomee if (sp->slab_refcnt == 1) { 1659*b5fca8f8Stomee avl_add(&cp->cache_partial_slabs, sp); 1660*b5fca8f8Stomee } else { 1661*b5fca8f8Stomee /* 1662*b5fca8f8Stomee * The slab is now more allocated than it was, so the 1663*b5fca8f8Stomee * order remains unchanged. 1664*b5fca8f8Stomee */ 1665*b5fca8f8Stomee ASSERT(!avl_update(&cp->cache_partial_slabs, sp)); 1666*b5fca8f8Stomee } 16677c478bd9Sstevel@tonic-gate } 16687c478bd9Sstevel@tonic-gate 16697c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 16707c478bd9Sstevel@tonic-gate /* 16717c478bd9Sstevel@tonic-gate * Add buffer to allocated-address hash table. 16727c478bd9Sstevel@tonic-gate */ 16737c478bd9Sstevel@tonic-gate buf = bcp->bc_addr; 16747c478bd9Sstevel@tonic-gate hash_bucket = KMEM_HASH(cp, buf); 16757c478bd9Sstevel@tonic-gate bcp->bc_next = *hash_bucket; 16767c478bd9Sstevel@tonic-gate *hash_bucket = bcp; 16777c478bd9Sstevel@tonic-gate if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { 16787c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp); 16797c478bd9Sstevel@tonic-gate } 16807c478bd9Sstevel@tonic-gate } else { 16817c478bd9Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp); 16827c478bd9Sstevel@tonic-gate } 16837c478bd9Sstevel@tonic-gate 16847c478bd9Sstevel@tonic-gate ASSERT(KMEM_SLAB_MEMBER(sp, buf)); 1685*b5fca8f8Stomee return (buf); 1686*b5fca8f8Stomee } 1687*b5fca8f8Stomee 1688*b5fca8f8Stomee /* 1689*b5fca8f8Stomee * Allocate a raw (unconstructed) buffer from cp's slab layer. 1690*b5fca8f8Stomee */ 1691*b5fca8f8Stomee static void * 1692*b5fca8f8Stomee kmem_slab_alloc(kmem_cache_t *cp, int kmflag) 1693*b5fca8f8Stomee { 1694*b5fca8f8Stomee kmem_slab_t *sp; 1695*b5fca8f8Stomee void *buf; 1696*b5fca8f8Stomee 1697*b5fca8f8Stomee mutex_enter(&cp->cache_lock); 1698*b5fca8f8Stomee sp = avl_first(&cp->cache_partial_slabs); 1699*b5fca8f8Stomee if (sp == NULL) { 1700*b5fca8f8Stomee ASSERT(cp->cache_bufslab == 0); 1701*b5fca8f8Stomee 1702*b5fca8f8Stomee /* 1703*b5fca8f8Stomee * The freelist is empty. Create a new slab. 
1704*b5fca8f8Stomee */ 1705*b5fca8f8Stomee mutex_exit(&cp->cache_lock); 1706*b5fca8f8Stomee if ((sp = kmem_slab_create(cp, kmflag)) == NULL) { 1707*b5fca8f8Stomee return (NULL); 1708*b5fca8f8Stomee } 1709*b5fca8f8Stomee mutex_enter(&cp->cache_lock); 1710*b5fca8f8Stomee cp->cache_slab_create++; 1711*b5fca8f8Stomee if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax) 1712*b5fca8f8Stomee cp->cache_bufmax = cp->cache_buftotal; 1713*b5fca8f8Stomee cp->cache_bufslab += sp->slab_chunks; 1714*b5fca8f8Stomee } 17157c478bd9Sstevel@tonic-gate 1716*b5fca8f8Stomee buf = kmem_slab_alloc_impl(cp, sp); 1717*b5fca8f8Stomee ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == 1718*b5fca8f8Stomee (cp->cache_complete_slab_count + 1719*b5fca8f8Stomee avl_numnodes(&cp->cache_partial_slabs) + 1720*b5fca8f8Stomee (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); 17217c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 17227c478bd9Sstevel@tonic-gate 17237c478bd9Sstevel@tonic-gate return (buf); 17247c478bd9Sstevel@tonic-gate } 17257c478bd9Sstevel@tonic-gate 1726*b5fca8f8Stomee static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *); 1727*b5fca8f8Stomee 17287c478bd9Sstevel@tonic-gate /* 17297c478bd9Sstevel@tonic-gate * Free a raw (unconstructed) buffer to cp's slab layer. 17307c478bd9Sstevel@tonic-gate */ 17317c478bd9Sstevel@tonic-gate static void 17327c478bd9Sstevel@tonic-gate kmem_slab_free(kmem_cache_t *cp, void *buf) 17337c478bd9Sstevel@tonic-gate { 17347c478bd9Sstevel@tonic-gate kmem_slab_t *sp; 17357c478bd9Sstevel@tonic-gate kmem_bufctl_t *bcp, **prev_bcpp; 17367c478bd9Sstevel@tonic-gate 17377c478bd9Sstevel@tonic-gate ASSERT(buf != NULL); 17387c478bd9Sstevel@tonic-gate 17397c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock); 17407c478bd9Sstevel@tonic-gate cp->cache_slab_free++; 17417c478bd9Sstevel@tonic-gate 17427c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 17437c478bd9Sstevel@tonic-gate /* 17447c478bd9Sstevel@tonic-gate * Look up buffer in allocated-address hash table. 17457c478bd9Sstevel@tonic-gate */ 17467c478bd9Sstevel@tonic-gate prev_bcpp = KMEM_HASH(cp, buf); 17477c478bd9Sstevel@tonic-gate while ((bcp = *prev_bcpp) != NULL) { 17487c478bd9Sstevel@tonic-gate if (bcp->bc_addr == buf) { 17497c478bd9Sstevel@tonic-gate *prev_bcpp = bcp->bc_next; 17507c478bd9Sstevel@tonic-gate sp = bcp->bc_slab; 17517c478bd9Sstevel@tonic-gate break; 17527c478bd9Sstevel@tonic-gate } 17537c478bd9Sstevel@tonic-gate cp->cache_lookup_depth++; 17547c478bd9Sstevel@tonic-gate prev_bcpp = &bcp->bc_next; 17557c478bd9Sstevel@tonic-gate } 17567c478bd9Sstevel@tonic-gate } else { 17577c478bd9Sstevel@tonic-gate bcp = KMEM_BUFCTL(cp, buf); 17587c478bd9Sstevel@tonic-gate sp = KMEM_SLAB(cp, buf); 17597c478bd9Sstevel@tonic-gate } 17607c478bd9Sstevel@tonic-gate 17617c478bd9Sstevel@tonic-gate if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) { 17627c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 17637c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADADDR, cp, buf); 17647c478bd9Sstevel@tonic-gate return; 17657c478bd9Sstevel@tonic-gate } 17667c478bd9Sstevel@tonic-gate 1767*b5fca8f8Stomee if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) { 1768*b5fca8f8Stomee /* 1769*b5fca8f8Stomee * If this is the buffer that prevented the consolidator from 1770*b5fca8f8Stomee * clearing the slab, we can reset the slab flags now that the 1771*b5fca8f8Stomee * buffer is freed. 
(It makes sense to do this in 1772*b5fca8f8Stomee * kmem_cache_free(), where the client gives up ownership of the 1773*b5fca8f8Stomee * buffer, but on the hot path the test is too expensive.) 1774*b5fca8f8Stomee */ 1775*b5fca8f8Stomee kmem_slab_move_yes(cp, sp, buf); 1776*b5fca8f8Stomee } 1777*b5fca8f8Stomee 17787c478bd9Sstevel@tonic-gate if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { 17797c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_CONTENTS) 17807c478bd9Sstevel@tonic-gate ((kmem_bufctl_audit_t *)bcp)->bc_contents = 17817c478bd9Sstevel@tonic-gate kmem_log_enter(kmem_content_log, buf, 17829f1b636aStomee cp->cache_contents); 17837c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp); 17847c478bd9Sstevel@tonic-gate } 17857c478bd9Sstevel@tonic-gate 17867c478bd9Sstevel@tonic-gate bcp->bc_next = sp->slab_head; 17877c478bd9Sstevel@tonic-gate sp->slab_head = bcp; 17887c478bd9Sstevel@tonic-gate 17899f1b636aStomee cp->cache_bufslab++; 17907c478bd9Sstevel@tonic-gate ASSERT(sp->slab_refcnt >= 1); 1791*b5fca8f8Stomee 17927c478bd9Sstevel@tonic-gate if (--sp->slab_refcnt == 0) { 17937c478bd9Sstevel@tonic-gate /* 17947c478bd9Sstevel@tonic-gate * There are no outstanding allocations from this slab, 17957c478bd9Sstevel@tonic-gate * so we can reclaim the memory. 17967c478bd9Sstevel@tonic-gate */ 1797*b5fca8f8Stomee if (sp->slab_chunks == 1) { 1798*b5fca8f8Stomee list_remove(&cp->cache_complete_slabs, sp); 1799*b5fca8f8Stomee cp->cache_complete_slab_count--; 1800*b5fca8f8Stomee } else { 1801*b5fca8f8Stomee avl_remove(&cp->cache_partial_slabs, sp); 1802*b5fca8f8Stomee } 1803*b5fca8f8Stomee 18047c478bd9Sstevel@tonic-gate cp->cache_buftotal -= sp->slab_chunks; 18059f1b636aStomee cp->cache_bufslab -= sp->slab_chunks; 1806*b5fca8f8Stomee /* 1807*b5fca8f8Stomee * Defer releasing the slab to the virtual memory subsystem 1808*b5fca8f8Stomee * while there is a pending move callback, since we guarantee 1809*b5fca8f8Stomee * that buffers passed to the move callback have only been 1810*b5fca8f8Stomee * touched by kmem or by the client itself. Since the memory 1811*b5fca8f8Stomee * patterns baddcafe (uninitialized) and deadbeef (freed) both 1812*b5fca8f8Stomee * set at least one of the two lowest order bits, the client can 1813*b5fca8f8Stomee * test those bits in the move callback to determine whether or 1814*b5fca8f8Stomee * not it knows about the buffer (assuming that the client also 1815*b5fca8f8Stomee * sets one of those low order bits whenever it frees a buffer). 1816*b5fca8f8Stomee */ 1817*b5fca8f8Stomee if (cp->cache_defrag == NULL || 1818*b5fca8f8Stomee (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) && 1819*b5fca8f8Stomee !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) { 1820*b5fca8f8Stomee cp->cache_slab_destroy++; 1821*b5fca8f8Stomee mutex_exit(&cp->cache_lock); 1822*b5fca8f8Stomee kmem_slab_destroy(cp, sp); 1823*b5fca8f8Stomee } else { 1824*b5fca8f8Stomee list_t *deadlist = &cp->cache_defrag->kmd_deadlist; 1825*b5fca8f8Stomee /* 1826*b5fca8f8Stomee * Slabs are inserted at both ends of the deadlist to 1827*b5fca8f8Stomee * distinguish between slabs freed while move callbacks 1828*b5fca8f8Stomee * are pending (list head) and a slab freed while the 1829*b5fca8f8Stomee * lock is dropped in kmem_move_buffers() (list tail) so 1830*b5fca8f8Stomee * that in both cases slab_destroy() is called from the 1831*b5fca8f8Stomee * right context. 
1832*b5fca8f8Stomee */ 1833*b5fca8f8Stomee if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) { 1834*b5fca8f8Stomee list_insert_tail(deadlist, sp); 1835*b5fca8f8Stomee } else { 1836*b5fca8f8Stomee list_insert_head(deadlist, sp); 1837*b5fca8f8Stomee } 1838*b5fca8f8Stomee cp->cache_defrag->kmd_deadcount++; 1839*b5fca8f8Stomee mutex_exit(&cp->cache_lock); 1840*b5fca8f8Stomee } 18417c478bd9Sstevel@tonic-gate return; 18427c478bd9Sstevel@tonic-gate } 1843*b5fca8f8Stomee 1844*b5fca8f8Stomee if (bcp->bc_next == NULL) { 1845*b5fca8f8Stomee /* Transition the slab from completely allocated to partial. */ 1846*b5fca8f8Stomee ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1)); 1847*b5fca8f8Stomee ASSERT(sp->slab_chunks > 1); 1848*b5fca8f8Stomee list_remove(&cp->cache_complete_slabs, sp); 1849*b5fca8f8Stomee cp->cache_complete_slab_count--; 1850*b5fca8f8Stomee avl_add(&cp->cache_partial_slabs, sp); 1851*b5fca8f8Stomee } else { 1852*b5fca8f8Stomee #ifdef DEBUG 1853*b5fca8f8Stomee if (avl_update_gt(&cp->cache_partial_slabs, sp)) { 1854*b5fca8f8Stomee KMEM_STAT_ADD(kmem_move_stats.kms_avl_update); 1855*b5fca8f8Stomee } else { 1856*b5fca8f8Stomee KMEM_STAT_ADD(kmem_move_stats.kms_avl_noupdate); 1857*b5fca8f8Stomee } 1858*b5fca8f8Stomee #else 1859*b5fca8f8Stomee (void) avl_update_gt(&cp->cache_partial_slabs, sp); 1860*b5fca8f8Stomee #endif 1861*b5fca8f8Stomee } 1862*b5fca8f8Stomee 1863*b5fca8f8Stomee ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == 1864*b5fca8f8Stomee (cp->cache_complete_slab_count + 1865*b5fca8f8Stomee avl_numnodes(&cp->cache_partial_slabs) + 1866*b5fca8f8Stomee (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); 18677c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 18687c478bd9Sstevel@tonic-gate } 18697c478bd9Sstevel@tonic-gate 1870*b5fca8f8Stomee /* 1871*b5fca8f8Stomee * Return -1 if kmem_error, 1 if constructor fails, 0 if successful. 
1872*b5fca8f8Stomee */ 18737c478bd9Sstevel@tonic-gate static int 18747c478bd9Sstevel@tonic-gate kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct, 18757c478bd9Sstevel@tonic-gate caddr_t caller) 18767c478bd9Sstevel@tonic-gate { 18777c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 18787c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl; 18797c478bd9Sstevel@tonic-gate uint32_t mtbf; 18807c478bd9Sstevel@tonic-gate 18817c478bd9Sstevel@tonic-gate if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) { 18827c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADBUFTAG, cp, buf); 18837c478bd9Sstevel@tonic-gate return (-1); 18847c478bd9Sstevel@tonic-gate } 18857c478bd9Sstevel@tonic-gate 18867c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC; 18877c478bd9Sstevel@tonic-gate 18887c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { 18897c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADBUFCTL, cp, buf); 18907c478bd9Sstevel@tonic-gate return (-1); 18917c478bd9Sstevel@tonic-gate } 18927c478bd9Sstevel@tonic-gate 18937c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) { 18947c478bd9Sstevel@tonic-gate if (!construct && (cp->cache_flags & KMF_LITE)) { 18957c478bd9Sstevel@tonic-gate if (*(uint64_t *)buf != KMEM_FREE_PATTERN) { 18967c478bd9Sstevel@tonic-gate kmem_error(KMERR_MODIFIED, cp, buf); 18977c478bd9Sstevel@tonic-gate return (-1); 18987c478bd9Sstevel@tonic-gate } 18997c478bd9Sstevel@tonic-gate if (cp->cache_constructor != NULL) 19007c478bd9Sstevel@tonic-gate *(uint64_t *)buf = btp->bt_redzone; 19017c478bd9Sstevel@tonic-gate else 19027c478bd9Sstevel@tonic-gate *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN; 19037c478bd9Sstevel@tonic-gate } else { 19047c478bd9Sstevel@tonic-gate construct = 1; 19057c478bd9Sstevel@tonic-gate if (verify_and_copy_pattern(KMEM_FREE_PATTERN, 19067c478bd9Sstevel@tonic-gate KMEM_UNINITIALIZED_PATTERN, buf, 19077c478bd9Sstevel@tonic-gate cp->cache_verify)) { 19087c478bd9Sstevel@tonic-gate kmem_error(KMERR_MODIFIED, cp, buf); 19097c478bd9Sstevel@tonic-gate return (-1); 19107c478bd9Sstevel@tonic-gate } 19117c478bd9Sstevel@tonic-gate } 19127c478bd9Sstevel@tonic-gate } 19137c478bd9Sstevel@tonic-gate btp->bt_redzone = KMEM_REDZONE_PATTERN; 19147c478bd9Sstevel@tonic-gate 19157c478bd9Sstevel@tonic-gate if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 && 19167c478bd9Sstevel@tonic-gate gethrtime() % mtbf == 0 && 19177c478bd9Sstevel@tonic-gate (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) { 19187c478bd9Sstevel@tonic-gate kmem_log_event(kmem_failure_log, cp, NULL, NULL); 19197c478bd9Sstevel@tonic-gate if (!construct && cp->cache_destructor != NULL) 19207c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private); 19217c478bd9Sstevel@tonic-gate } else { 19227c478bd9Sstevel@tonic-gate mtbf = 0; 19237c478bd9Sstevel@tonic-gate } 19247c478bd9Sstevel@tonic-gate 19257c478bd9Sstevel@tonic-gate if (mtbf || (construct && cp->cache_constructor != NULL && 19267c478bd9Sstevel@tonic-gate cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) { 19277c478bd9Sstevel@tonic-gate atomic_add_64(&cp->cache_alloc_fail, 1); 19287c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE; 19297c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) 19307c478bd9Sstevel@tonic-gate copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); 19317c478bd9Sstevel@tonic-gate kmem_slab_free(cp, buf); 1932*b5fca8f8Stomee return (1); 
19337c478bd9Sstevel@tonic-gate } 19347c478bd9Sstevel@tonic-gate 19357c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_AUDIT) { 19367c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp); 19377c478bd9Sstevel@tonic-gate } 19387c478bd9Sstevel@tonic-gate 19397c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_LITE) && 19407c478bd9Sstevel@tonic-gate !(cp->cache_cflags & KMC_KMEM_ALLOC)) { 19417c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller); 19427c478bd9Sstevel@tonic-gate } 19437c478bd9Sstevel@tonic-gate 19447c478bd9Sstevel@tonic-gate return (0); 19457c478bd9Sstevel@tonic-gate } 19467c478bd9Sstevel@tonic-gate 19477c478bd9Sstevel@tonic-gate static int 19487c478bd9Sstevel@tonic-gate kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller) 19497c478bd9Sstevel@tonic-gate { 19507c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 19517c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl; 19527c478bd9Sstevel@tonic-gate kmem_slab_t *sp; 19537c478bd9Sstevel@tonic-gate 19547c478bd9Sstevel@tonic-gate if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) { 19557c478bd9Sstevel@tonic-gate if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) { 19567c478bd9Sstevel@tonic-gate kmem_error(KMERR_DUPFREE, cp, buf); 19577c478bd9Sstevel@tonic-gate return (-1); 19587c478bd9Sstevel@tonic-gate } 19597c478bd9Sstevel@tonic-gate sp = kmem_findslab(cp, buf); 19607c478bd9Sstevel@tonic-gate if (sp == NULL || sp->slab_cache != cp) 19617c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADADDR, cp, buf); 19627c478bd9Sstevel@tonic-gate else 19637c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf); 19647c478bd9Sstevel@tonic-gate return (-1); 19657c478bd9Sstevel@tonic-gate } 19667c478bd9Sstevel@tonic-gate 19677c478bd9Sstevel@tonic-gate btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE; 19687c478bd9Sstevel@tonic-gate 19697c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { 19707c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADBUFCTL, cp, buf); 19717c478bd9Sstevel@tonic-gate return (-1); 19727c478bd9Sstevel@tonic-gate } 19737c478bd9Sstevel@tonic-gate 19747c478bd9Sstevel@tonic-gate if (btp->bt_redzone != KMEM_REDZONE_PATTERN) { 19757c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf); 19767c478bd9Sstevel@tonic-gate return (-1); 19777c478bd9Sstevel@tonic-gate } 19787c478bd9Sstevel@tonic-gate 19797c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_AUDIT) { 19807c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_CONTENTS) 19817c478bd9Sstevel@tonic-gate bcp->bc_contents = kmem_log_enter(kmem_content_log, 19827c478bd9Sstevel@tonic-gate buf, cp->cache_contents); 19837c478bd9Sstevel@tonic-gate KMEM_AUDIT(kmem_transaction_log, cp, bcp); 19847c478bd9Sstevel@tonic-gate } 19857c478bd9Sstevel@tonic-gate 19867c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_LITE) && 19877c478bd9Sstevel@tonic-gate !(cp->cache_cflags & KMC_KMEM_ALLOC)) { 19887c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller); 19897c478bd9Sstevel@tonic-gate } 19907c478bd9Sstevel@tonic-gate 19917c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) { 19927c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) 19937c478bd9Sstevel@tonic-gate btp->bt_redzone = *(uint64_t *)buf; 19947c478bd9Sstevel@tonic-gate else if (cp->cache_destructor != NULL) 19957c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private); 19967c478bd9Sstevel@tonic-gate 
19977c478bd9Sstevel@tonic-gate copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); 19987c478bd9Sstevel@tonic-gate } 19997c478bd9Sstevel@tonic-gate 20007c478bd9Sstevel@tonic-gate return (0); 20017c478bd9Sstevel@tonic-gate } 20027c478bd9Sstevel@tonic-gate 20037c478bd9Sstevel@tonic-gate /* 20047c478bd9Sstevel@tonic-gate * Free each object in magazine mp to cp's slab layer, and free mp itself. 20057c478bd9Sstevel@tonic-gate */ 20067c478bd9Sstevel@tonic-gate static void 20077c478bd9Sstevel@tonic-gate kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds) 20087c478bd9Sstevel@tonic-gate { 20097c478bd9Sstevel@tonic-gate int round; 20107c478bd9Sstevel@tonic-gate 2011*b5fca8f8Stomee ASSERT(!list_link_active(&cp->cache_link) || 2012*b5fca8f8Stomee taskq_member(kmem_taskq, curthread)); 20137c478bd9Sstevel@tonic-gate 20147c478bd9Sstevel@tonic-gate for (round = 0; round < nrounds; round++) { 20157c478bd9Sstevel@tonic-gate void *buf = mp->mag_round[round]; 20167c478bd9Sstevel@tonic-gate 20177c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) { 20187c478bd9Sstevel@tonic-gate if (verify_pattern(KMEM_FREE_PATTERN, buf, 20197c478bd9Sstevel@tonic-gate cp->cache_verify) != NULL) { 20207c478bd9Sstevel@tonic-gate kmem_error(KMERR_MODIFIED, cp, buf); 20217c478bd9Sstevel@tonic-gate continue; 20227c478bd9Sstevel@tonic-gate } 20237c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_LITE) && 20247c478bd9Sstevel@tonic-gate cp->cache_destructor != NULL) { 20257c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 20267c478bd9Sstevel@tonic-gate *(uint64_t *)buf = btp->bt_redzone; 20277c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private); 20287c478bd9Sstevel@tonic-gate *(uint64_t *)buf = KMEM_FREE_PATTERN; 20297c478bd9Sstevel@tonic-gate } 20307c478bd9Sstevel@tonic-gate } else if (cp->cache_destructor != NULL) { 20317c478bd9Sstevel@tonic-gate cp->cache_destructor(buf, cp->cache_private); 20327c478bd9Sstevel@tonic-gate } 20337c478bd9Sstevel@tonic-gate 20347c478bd9Sstevel@tonic-gate kmem_slab_free(cp, buf); 20357c478bd9Sstevel@tonic-gate } 20367c478bd9Sstevel@tonic-gate ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); 20377c478bd9Sstevel@tonic-gate kmem_cache_free(cp->cache_magtype->mt_cache, mp); 20387c478bd9Sstevel@tonic-gate } 20397c478bd9Sstevel@tonic-gate 20407c478bd9Sstevel@tonic-gate /* 20417c478bd9Sstevel@tonic-gate * Allocate a magazine from the depot. 20427c478bd9Sstevel@tonic-gate */ 20437c478bd9Sstevel@tonic-gate static kmem_magazine_t * 20447c478bd9Sstevel@tonic-gate kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp) 20457c478bd9Sstevel@tonic-gate { 20467c478bd9Sstevel@tonic-gate kmem_magazine_t *mp; 20477c478bd9Sstevel@tonic-gate 20487c478bd9Sstevel@tonic-gate /* 20497c478bd9Sstevel@tonic-gate * If we can't get the depot lock without contention, 20507c478bd9Sstevel@tonic-gate * update our contention count. We use the depot 20517c478bd9Sstevel@tonic-gate * contention rate to determine whether we need to 20527c478bd9Sstevel@tonic-gate * increase the magazine size for better scalability. 
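 *
 * As a sketch of how that contention rate is consumed (these same fields
 * are compared in kmem_cache_update() against the kmem_depot_contention
 * tunable):
 *
 *	if ((int)(cp->cache_depot_contention -
 *	    cp->cache_depot_contention_prev) > kmem_depot_contention)
 *		(dispatch kmem_cache_magazine_resize())
 *
 * so repeated mutex_tryenter() misses here eventually grow the magazines.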
20537c478bd9Sstevel@tonic-gate */ 20547c478bd9Sstevel@tonic-gate if (!mutex_tryenter(&cp->cache_depot_lock)) { 20557c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock); 20567c478bd9Sstevel@tonic-gate cp->cache_depot_contention++; 20577c478bd9Sstevel@tonic-gate } 20587c478bd9Sstevel@tonic-gate 20597c478bd9Sstevel@tonic-gate if ((mp = mlp->ml_list) != NULL) { 20607c478bd9Sstevel@tonic-gate ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); 20617c478bd9Sstevel@tonic-gate mlp->ml_list = mp->mag_next; 20627c478bd9Sstevel@tonic-gate if (--mlp->ml_total < mlp->ml_min) 20637c478bd9Sstevel@tonic-gate mlp->ml_min = mlp->ml_total; 20647c478bd9Sstevel@tonic-gate mlp->ml_alloc++; 20657c478bd9Sstevel@tonic-gate } 20667c478bd9Sstevel@tonic-gate 20677c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock); 20687c478bd9Sstevel@tonic-gate 20697c478bd9Sstevel@tonic-gate return (mp); 20707c478bd9Sstevel@tonic-gate } 20717c478bd9Sstevel@tonic-gate 20727c478bd9Sstevel@tonic-gate /* 20737c478bd9Sstevel@tonic-gate * Free a magazine to the depot. 20747c478bd9Sstevel@tonic-gate */ 20757c478bd9Sstevel@tonic-gate static void 20767c478bd9Sstevel@tonic-gate kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp) 20777c478bd9Sstevel@tonic-gate { 20787c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock); 20797c478bd9Sstevel@tonic-gate ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); 20807c478bd9Sstevel@tonic-gate mp->mag_next = mlp->ml_list; 20817c478bd9Sstevel@tonic-gate mlp->ml_list = mp; 20827c478bd9Sstevel@tonic-gate mlp->ml_total++; 20837c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock); 20847c478bd9Sstevel@tonic-gate } 20857c478bd9Sstevel@tonic-gate 20867c478bd9Sstevel@tonic-gate /* 20877c478bd9Sstevel@tonic-gate * Update the working set statistics for cp's depot. 20887c478bd9Sstevel@tonic-gate */ 20897c478bd9Sstevel@tonic-gate static void 20907c478bd9Sstevel@tonic-gate kmem_depot_ws_update(kmem_cache_t *cp) 20917c478bd9Sstevel@tonic-gate { 20927c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock); 20937c478bd9Sstevel@tonic-gate cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; 20947c478bd9Sstevel@tonic-gate cp->cache_full.ml_min = cp->cache_full.ml_total; 20957c478bd9Sstevel@tonic-gate cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; 20967c478bd9Sstevel@tonic-gate cp->cache_empty.ml_min = cp->cache_empty.ml_total; 20977c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock); 20987c478bd9Sstevel@tonic-gate } 20997c478bd9Sstevel@tonic-gate 21007c478bd9Sstevel@tonic-gate /* 21017c478bd9Sstevel@tonic-gate * Reap all magazines that have fallen out of the depot's working set. 
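 *
 * ml_min is the low-water mark of the list since the last
 * kmem_depot_ws_update(), so it counts magazines that sat idle for an
 * entire interval. For example, if the full-magazine list never dropped
 * below 3 magazines last interval, then reap = MIN(ml_reaplimit, ml_min)
 * lets up to 3 full magazines be destroyed below without touching any
 * magazine the cache recently needed.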
21027c478bd9Sstevel@tonic-gate */ 21037c478bd9Sstevel@tonic-gate static void 21047c478bd9Sstevel@tonic-gate kmem_depot_ws_reap(kmem_cache_t *cp) 21057c478bd9Sstevel@tonic-gate { 21067c478bd9Sstevel@tonic-gate long reap; 21077c478bd9Sstevel@tonic-gate kmem_magazine_t *mp; 21087c478bd9Sstevel@tonic-gate 2109*b5fca8f8Stomee ASSERT(!list_link_active(&cp->cache_link) || 2110*b5fca8f8Stomee taskq_member(kmem_taskq, curthread)); 21117c478bd9Sstevel@tonic-gate 21127c478bd9Sstevel@tonic-gate reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); 21137c478bd9Sstevel@tonic-gate while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) 21147c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); 21157c478bd9Sstevel@tonic-gate 21167c478bd9Sstevel@tonic-gate reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); 21177c478bd9Sstevel@tonic-gate while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) 21187c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, mp, 0); 21197c478bd9Sstevel@tonic-gate } 21207c478bd9Sstevel@tonic-gate 21217c478bd9Sstevel@tonic-gate static void 21227c478bd9Sstevel@tonic-gate kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds) 21237c478bd9Sstevel@tonic-gate { 21247c478bd9Sstevel@tonic-gate ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) || 21257c478bd9Sstevel@tonic-gate (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize)); 21267c478bd9Sstevel@tonic-gate ASSERT(ccp->cc_magsize > 0); 21277c478bd9Sstevel@tonic-gate 21287c478bd9Sstevel@tonic-gate ccp->cc_ploaded = ccp->cc_loaded; 21297c478bd9Sstevel@tonic-gate ccp->cc_prounds = ccp->cc_rounds; 21307c478bd9Sstevel@tonic-gate ccp->cc_loaded = mp; 21317c478bd9Sstevel@tonic-gate ccp->cc_rounds = rounds; 21327c478bd9Sstevel@tonic-gate } 21337c478bd9Sstevel@tonic-gate 21347c478bd9Sstevel@tonic-gate /* 21357c478bd9Sstevel@tonic-gate * Allocate a constructed object from cache cp. 21367c478bd9Sstevel@tonic-gate */ 21377c478bd9Sstevel@tonic-gate void * 21387c478bd9Sstevel@tonic-gate kmem_cache_alloc(kmem_cache_t *cp, int kmflag) 21397c478bd9Sstevel@tonic-gate { 21407c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); 21417c478bd9Sstevel@tonic-gate kmem_magazine_t *fmp; 21427c478bd9Sstevel@tonic-gate void *buf; 21437c478bd9Sstevel@tonic-gate 21447c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock); 21457c478bd9Sstevel@tonic-gate for (;;) { 21467c478bd9Sstevel@tonic-gate /* 21477c478bd9Sstevel@tonic-gate * If there's an object available in the current CPU's 21487c478bd9Sstevel@tonic-gate * loaded magazine, just take it and return. 21497c478bd9Sstevel@tonic-gate */ 21507c478bd9Sstevel@tonic-gate if (ccp->cc_rounds > 0) { 21517c478bd9Sstevel@tonic-gate buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds]; 21527c478bd9Sstevel@tonic-gate ccp->cc_alloc++; 21537c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 21547c478bd9Sstevel@tonic-gate if ((ccp->cc_flags & KMF_BUFTAG) && 21557c478bd9Sstevel@tonic-gate kmem_cache_alloc_debug(cp, buf, kmflag, 0, 2156*b5fca8f8Stomee caller()) != 0) { 21577c478bd9Sstevel@tonic-gate if (kmflag & KM_NOSLEEP) 21587c478bd9Sstevel@tonic-gate return (NULL); 21597c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock); 21607c478bd9Sstevel@tonic-gate continue; 21617c478bd9Sstevel@tonic-gate } 21627c478bd9Sstevel@tonic-gate return (buf); 21637c478bd9Sstevel@tonic-gate } 21647c478bd9Sstevel@tonic-gate 21657c478bd9Sstevel@tonic-gate /* 21667c478bd9Sstevel@tonic-gate * The loaded magazine is empty. 
If the previously loaded 21677c478bd9Sstevel@tonic-gate * magazine was full, exchange them and try again. 21687c478bd9Sstevel@tonic-gate */ 21697c478bd9Sstevel@tonic-gate if (ccp->cc_prounds > 0) { 21707c478bd9Sstevel@tonic-gate kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 21717c478bd9Sstevel@tonic-gate continue; 21727c478bd9Sstevel@tonic-gate } 21737c478bd9Sstevel@tonic-gate 21747c478bd9Sstevel@tonic-gate /* 21757c478bd9Sstevel@tonic-gate * If the magazine layer is disabled, break out now. 21767c478bd9Sstevel@tonic-gate */ 21777c478bd9Sstevel@tonic-gate if (ccp->cc_magsize == 0) 21787c478bd9Sstevel@tonic-gate break; 21797c478bd9Sstevel@tonic-gate 21807c478bd9Sstevel@tonic-gate /* 21817c478bd9Sstevel@tonic-gate * Try to get a full magazine from the depot. 21827c478bd9Sstevel@tonic-gate */ 21837c478bd9Sstevel@tonic-gate fmp = kmem_depot_alloc(cp, &cp->cache_full); 21847c478bd9Sstevel@tonic-gate if (fmp != NULL) { 21857c478bd9Sstevel@tonic-gate if (ccp->cc_ploaded != NULL) 21867c478bd9Sstevel@tonic-gate kmem_depot_free(cp, &cp->cache_empty, 21877c478bd9Sstevel@tonic-gate ccp->cc_ploaded); 21887c478bd9Sstevel@tonic-gate kmem_cpu_reload(ccp, fmp, ccp->cc_magsize); 21897c478bd9Sstevel@tonic-gate continue; 21907c478bd9Sstevel@tonic-gate } 21917c478bd9Sstevel@tonic-gate 21927c478bd9Sstevel@tonic-gate /* 21937c478bd9Sstevel@tonic-gate * There are no full magazines in the depot, 21947c478bd9Sstevel@tonic-gate * so fall through to the slab layer. 21957c478bd9Sstevel@tonic-gate */ 21967c478bd9Sstevel@tonic-gate break; 21977c478bd9Sstevel@tonic-gate } 21987c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 21997c478bd9Sstevel@tonic-gate 22007c478bd9Sstevel@tonic-gate /* 22017c478bd9Sstevel@tonic-gate * We couldn't allocate a constructed object from the magazine layer, 22027c478bd9Sstevel@tonic-gate * so get a raw buffer from the slab layer and apply its constructor. 22037c478bd9Sstevel@tonic-gate */ 22047c478bd9Sstevel@tonic-gate buf = kmem_slab_alloc(cp, kmflag); 22057c478bd9Sstevel@tonic-gate 22067c478bd9Sstevel@tonic-gate if (buf == NULL) 22077c478bd9Sstevel@tonic-gate return (NULL); 22087c478bd9Sstevel@tonic-gate 22097c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 22107c478bd9Sstevel@tonic-gate /* 22117c478bd9Sstevel@tonic-gate * Make kmem_cache_alloc_debug() apply the constructor for us. 22127c478bd9Sstevel@tonic-gate */ 2213*b5fca8f8Stomee int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller()); 2214*b5fca8f8Stomee if (rc != 0) { 22157c478bd9Sstevel@tonic-gate if (kmflag & KM_NOSLEEP) 22167c478bd9Sstevel@tonic-gate return (NULL); 22177c478bd9Sstevel@tonic-gate /* 22187c478bd9Sstevel@tonic-gate * kmem_cache_alloc_debug() detected corruption 2219*b5fca8f8Stomee * but didn't panic (kmem_panic <= 0). We should not be 2220*b5fca8f8Stomee * here because the constructor failed (indicated by a 2221*b5fca8f8Stomee * return code of 1). Try again. 
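 * (Constructors are expected not to fail for sleeping allocations, and the
 * KM_NOSLEEP case already returned NULL above, so corruption is the only
 * way to reach this point.)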
22227c478bd9Sstevel@tonic-gate */ 2223*b5fca8f8Stomee ASSERT(rc == -1); 22247c478bd9Sstevel@tonic-gate return (kmem_cache_alloc(cp, kmflag)); 22257c478bd9Sstevel@tonic-gate } 22267c478bd9Sstevel@tonic-gate return (buf); 22277c478bd9Sstevel@tonic-gate } 22287c478bd9Sstevel@tonic-gate 22297c478bd9Sstevel@tonic-gate if (cp->cache_constructor != NULL && 22307c478bd9Sstevel@tonic-gate cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) { 22317c478bd9Sstevel@tonic-gate atomic_add_64(&cp->cache_alloc_fail, 1); 22327c478bd9Sstevel@tonic-gate kmem_slab_free(cp, buf); 22337c478bd9Sstevel@tonic-gate return (NULL); 22347c478bd9Sstevel@tonic-gate } 22357c478bd9Sstevel@tonic-gate 22367c478bd9Sstevel@tonic-gate return (buf); 22377c478bd9Sstevel@tonic-gate } 22387c478bd9Sstevel@tonic-gate 22397c478bd9Sstevel@tonic-gate /* 2240*b5fca8f8Stomee * The freed argument tells whether or not kmem_cache_free_debug() has already 2241*b5fca8f8Stomee * been called so that we can avoid the duplicate free error. For example, a 2242*b5fca8f8Stomee * buffer on a magazine has already been freed by the client but is still 2243*b5fca8f8Stomee * constructed. 22447c478bd9Sstevel@tonic-gate */ 2245*b5fca8f8Stomee static void 2246*b5fca8f8Stomee kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed) 22477c478bd9Sstevel@tonic-gate { 2248*b5fca8f8Stomee if (!freed && (cp->cache_flags & KMF_BUFTAG)) 22497c478bd9Sstevel@tonic-gate if (kmem_cache_free_debug(cp, buf, caller()) == -1) 22507c478bd9Sstevel@tonic-gate return; 22517c478bd9Sstevel@tonic-gate 2252*b5fca8f8Stomee /* 2253*b5fca8f8Stomee * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not, 2254*b5fca8f8Stomee * kmem_cache_free_debug() will have already applied the destructor. 2255*b5fca8f8Stomee */ 2256*b5fca8f8Stomee if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF && 2257*b5fca8f8Stomee cp->cache_destructor != NULL) { 2258*b5fca8f8Stomee if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */ 2259*b5fca8f8Stomee kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 2260*b5fca8f8Stomee *(uint64_t *)buf = btp->bt_redzone; 2261*b5fca8f8Stomee cp->cache_destructor(buf, cp->cache_private); 2262*b5fca8f8Stomee *(uint64_t *)buf = KMEM_FREE_PATTERN; 2263*b5fca8f8Stomee } else { 2264*b5fca8f8Stomee cp->cache_destructor(buf, cp->cache_private); 2265*b5fca8f8Stomee } 2266*b5fca8f8Stomee } 2267*b5fca8f8Stomee 2268*b5fca8f8Stomee kmem_slab_free(cp, buf); 2269*b5fca8f8Stomee } 2270*b5fca8f8Stomee 2271*b5fca8f8Stomee /* 2272*b5fca8f8Stomee * Free a constructed object to cache cp. 2273*b5fca8f8Stomee */ 2274*b5fca8f8Stomee void 2275*b5fca8f8Stomee kmem_cache_free(kmem_cache_t *cp, void *buf) 2276*b5fca8f8Stomee { 2277*b5fca8f8Stomee kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); 2278*b5fca8f8Stomee kmem_magazine_t *emp; 2279*b5fca8f8Stomee kmem_magtype_t *mtp; 2280*b5fca8f8Stomee 2281*b5fca8f8Stomee /* 2282*b5fca8f8Stomee * The client must not free either of the buffers passed to the move 2283*b5fca8f8Stomee * callback function. 
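 *
 * (Examining the buffers from the callback is fine; only freeing them is
 * not.) For illustration only, a hypothetical client move callback (the
 * name and logic here are assumptions, not part of kmem) might begin by
 * testing the low order pattern bits described above kmem_slab_free() to
 * decide whether it still knows the old buffer:
 *
 *	kmem_cbrc_t
 *	client_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		if (*(uint64_t *)old & 0x3)
 *			return (KMEM_CBRC_DONT_KNOW);	(not one of ours)
 *		(copy *old to *new, fix up references, then)
 *		return (KMEM_CBRC_YES);
 *	}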
2284*b5fca8f8Stomee */ 2285*b5fca8f8Stomee ASSERT(cp->cache_defrag == NULL || 2286*b5fca8f8Stomee cp->cache_defrag->kmd_thread != curthread || 2287*b5fca8f8Stomee (buf != cp->cache_defrag->kmd_from_buf && 2288*b5fca8f8Stomee buf != cp->cache_defrag->kmd_to_buf)); 2289*b5fca8f8Stomee 2290*b5fca8f8Stomee if (ccp->cc_flags & KMF_BUFTAG) 2291*b5fca8f8Stomee if (kmem_cache_free_debug(cp, buf, caller()) == -1) 2292*b5fca8f8Stomee return; 2293*b5fca8f8Stomee 2294*b5fca8f8Stomee mutex_enter(&ccp->cc_lock); 2295*b5fca8f8Stomee for (;;) { 2296*b5fca8f8Stomee /* 2297*b5fca8f8Stomee * If there's a slot available in the current CPU's 2298*b5fca8f8Stomee * loaded magazine, just put the object there and return. 2299*b5fca8f8Stomee */ 2300*b5fca8f8Stomee if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) { 2301*b5fca8f8Stomee ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf; 2302*b5fca8f8Stomee ccp->cc_free++; 2303*b5fca8f8Stomee mutex_exit(&ccp->cc_lock); 2304*b5fca8f8Stomee return; 2305*b5fca8f8Stomee } 2306*b5fca8f8Stomee 23077c478bd9Sstevel@tonic-gate /* 23087c478bd9Sstevel@tonic-gate * The loaded magazine is full. If the previously loaded 23097c478bd9Sstevel@tonic-gate * magazine was empty, exchange them and try again. 23107c478bd9Sstevel@tonic-gate */ 23117c478bd9Sstevel@tonic-gate if (ccp->cc_prounds == 0) { 23127c478bd9Sstevel@tonic-gate kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 23137c478bd9Sstevel@tonic-gate continue; 23147c478bd9Sstevel@tonic-gate } 23157c478bd9Sstevel@tonic-gate 23167c478bd9Sstevel@tonic-gate /* 23177c478bd9Sstevel@tonic-gate * If the magazine layer is disabled, break out now. 23187c478bd9Sstevel@tonic-gate */ 23197c478bd9Sstevel@tonic-gate if (ccp->cc_magsize == 0) 23207c478bd9Sstevel@tonic-gate break; 23217c478bd9Sstevel@tonic-gate 23227c478bd9Sstevel@tonic-gate /* 23237c478bd9Sstevel@tonic-gate * Try to get an empty magazine from the depot. 23247c478bd9Sstevel@tonic-gate */ 23257c478bd9Sstevel@tonic-gate emp = kmem_depot_alloc(cp, &cp->cache_empty); 23267c478bd9Sstevel@tonic-gate if (emp != NULL) { 23277c478bd9Sstevel@tonic-gate if (ccp->cc_ploaded != NULL) 23287c478bd9Sstevel@tonic-gate kmem_depot_free(cp, &cp->cache_full, 23297c478bd9Sstevel@tonic-gate ccp->cc_ploaded); 23307c478bd9Sstevel@tonic-gate kmem_cpu_reload(ccp, emp, 0); 23317c478bd9Sstevel@tonic-gate continue; 23327c478bd9Sstevel@tonic-gate } 23337c478bd9Sstevel@tonic-gate 23347c478bd9Sstevel@tonic-gate /* 23357c478bd9Sstevel@tonic-gate * There are no empty magazines in the depot, 23367c478bd9Sstevel@tonic-gate * so try to allocate a new one. We must drop all locks 23377c478bd9Sstevel@tonic-gate * across kmem_cache_alloc() because lower layers may 23387c478bd9Sstevel@tonic-gate * attempt to allocate from this cache. 23397c478bd9Sstevel@tonic-gate */ 23407c478bd9Sstevel@tonic-gate mtp = cp->cache_magtype; 23417c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 23427c478bd9Sstevel@tonic-gate emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP); 23437c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock); 23447c478bd9Sstevel@tonic-gate 23457c478bd9Sstevel@tonic-gate if (emp != NULL) { 23467c478bd9Sstevel@tonic-gate /* 23477c478bd9Sstevel@tonic-gate * We successfully allocated an empty magazine. 23487c478bd9Sstevel@tonic-gate * However, we had to drop ccp->cc_lock to do it, 23497c478bd9Sstevel@tonic-gate * so the cache's magazine size may have changed. 23507c478bd9Sstevel@tonic-gate * If so, free the magazine and try again. 
23517c478bd9Sstevel@tonic-gate */ 23527c478bd9Sstevel@tonic-gate if (ccp->cc_magsize != mtp->mt_magsize) { 23537c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 23547c478bd9Sstevel@tonic-gate kmem_cache_free(mtp->mt_cache, emp); 23557c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock); 23567c478bd9Sstevel@tonic-gate continue; 23577c478bd9Sstevel@tonic-gate } 23587c478bd9Sstevel@tonic-gate 23597c478bd9Sstevel@tonic-gate /* 23607c478bd9Sstevel@tonic-gate * We got a magazine of the right size. Add it to 23617c478bd9Sstevel@tonic-gate * the depot and try the whole dance again. 23627c478bd9Sstevel@tonic-gate */ 23637c478bd9Sstevel@tonic-gate kmem_depot_free(cp, &cp->cache_empty, emp); 23647c478bd9Sstevel@tonic-gate continue; 23657c478bd9Sstevel@tonic-gate } 23667c478bd9Sstevel@tonic-gate 23677c478bd9Sstevel@tonic-gate /* 23687c478bd9Sstevel@tonic-gate * We couldn't allocate an empty magazine, 23697c478bd9Sstevel@tonic-gate * so fall through to the slab layer. 23707c478bd9Sstevel@tonic-gate */ 23717c478bd9Sstevel@tonic-gate break; 23727c478bd9Sstevel@tonic-gate } 23737c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 23747c478bd9Sstevel@tonic-gate 23757c478bd9Sstevel@tonic-gate /* 23767c478bd9Sstevel@tonic-gate * We couldn't free our constructed object to the magazine layer, 23777c478bd9Sstevel@tonic-gate * so apply its destructor and free it to the slab layer. 23787c478bd9Sstevel@tonic-gate */ 2379*b5fca8f8Stomee kmem_slab_free_constructed(cp, buf, B_TRUE); 23807c478bd9Sstevel@tonic-gate } 23817c478bd9Sstevel@tonic-gate 23827c478bd9Sstevel@tonic-gate void * 23837c478bd9Sstevel@tonic-gate kmem_zalloc(size_t size, int kmflag) 23847c478bd9Sstevel@tonic-gate { 23857c478bd9Sstevel@tonic-gate size_t index = (size - 1) >> KMEM_ALIGN_SHIFT; 23867c478bd9Sstevel@tonic-gate void *buf; 23877c478bd9Sstevel@tonic-gate 23887c478bd9Sstevel@tonic-gate if (index < KMEM_MAXBUF >> KMEM_ALIGN_SHIFT) { 23897c478bd9Sstevel@tonic-gate kmem_cache_t *cp = kmem_alloc_table[index]; 23907c478bd9Sstevel@tonic-gate buf = kmem_cache_alloc(cp, kmflag); 23917c478bd9Sstevel@tonic-gate if (buf != NULL) { 23927c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 23937c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 23947c478bd9Sstevel@tonic-gate ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE; 23957c478bd9Sstevel@tonic-gate ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size); 23967c478bd9Sstevel@tonic-gate 23977c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) { 23987c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, 23997c478bd9Sstevel@tonic-gate kmem_lite_count, caller()); 24007c478bd9Sstevel@tonic-gate } 24017c478bd9Sstevel@tonic-gate } 24027c478bd9Sstevel@tonic-gate bzero(buf, size); 24037c478bd9Sstevel@tonic-gate } 24047c478bd9Sstevel@tonic-gate } else { 24057c478bd9Sstevel@tonic-gate buf = kmem_alloc(size, kmflag); 24067c478bd9Sstevel@tonic-gate if (buf != NULL) 24077c478bd9Sstevel@tonic-gate bzero(buf, size); 24087c478bd9Sstevel@tonic-gate } 24097c478bd9Sstevel@tonic-gate return (buf); 24107c478bd9Sstevel@tonic-gate } 24117c478bd9Sstevel@tonic-gate 24127c478bd9Sstevel@tonic-gate void * 24137c478bd9Sstevel@tonic-gate kmem_alloc(size_t size, int kmflag) 24147c478bd9Sstevel@tonic-gate { 24157c478bd9Sstevel@tonic-gate size_t index = (size - 1) >> KMEM_ALIGN_SHIFT; 24167c478bd9Sstevel@tonic-gate void *buf; 24177c478bd9Sstevel@tonic-gate 24187c478bd9Sstevel@tonic-gate if (index < KMEM_MAXBUF >> KMEM_ALIGN_SHIFT) { 24197c478bd9Sstevel@tonic-gate kmem_cache_t *cp = kmem_alloc_table[index]; 
24207c478bd9Sstevel@tonic-gate buf = kmem_cache_alloc(cp, kmflag); 24217c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_BUFTAG) && buf != NULL) { 24227c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 24237c478bd9Sstevel@tonic-gate ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE; 24247c478bd9Sstevel@tonic-gate ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size); 24257c478bd9Sstevel@tonic-gate 24267c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) { 24277c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, 24287c478bd9Sstevel@tonic-gate caller()); 24297c478bd9Sstevel@tonic-gate } 24307c478bd9Sstevel@tonic-gate } 24317c478bd9Sstevel@tonic-gate return (buf); 24327c478bd9Sstevel@tonic-gate } 24337c478bd9Sstevel@tonic-gate if (size == 0) 24347c478bd9Sstevel@tonic-gate return (NULL); 24357c478bd9Sstevel@tonic-gate buf = vmem_alloc(kmem_oversize_arena, size, kmflag & KM_VMFLAGS); 24367c478bd9Sstevel@tonic-gate if (buf == NULL) 24377c478bd9Sstevel@tonic-gate kmem_log_event(kmem_failure_log, NULL, NULL, (void *)size); 24387c478bd9Sstevel@tonic-gate return (buf); 24397c478bd9Sstevel@tonic-gate } 24407c478bd9Sstevel@tonic-gate 24417c478bd9Sstevel@tonic-gate void 24427c478bd9Sstevel@tonic-gate kmem_free(void *buf, size_t size) 24437c478bd9Sstevel@tonic-gate { 24447c478bd9Sstevel@tonic-gate size_t index = (size - 1) >> KMEM_ALIGN_SHIFT; 24457c478bd9Sstevel@tonic-gate 24467c478bd9Sstevel@tonic-gate if (index < KMEM_MAXBUF >> KMEM_ALIGN_SHIFT) { 24477c478bd9Sstevel@tonic-gate kmem_cache_t *cp = kmem_alloc_table[index]; 24487c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 24497c478bd9Sstevel@tonic-gate kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); 24507c478bd9Sstevel@tonic-gate uint32_t *ip = (uint32_t *)btp; 24517c478bd9Sstevel@tonic-gate if (ip[1] != KMEM_SIZE_ENCODE(size)) { 24527c478bd9Sstevel@tonic-gate if (*(uint64_t *)buf == KMEM_FREE_PATTERN) { 24537c478bd9Sstevel@tonic-gate kmem_error(KMERR_DUPFREE, cp, buf); 24547c478bd9Sstevel@tonic-gate return; 24557c478bd9Sstevel@tonic-gate } 24567c478bd9Sstevel@tonic-gate if (KMEM_SIZE_VALID(ip[1])) { 24577c478bd9Sstevel@tonic-gate ip[0] = KMEM_SIZE_ENCODE(size); 24587c478bd9Sstevel@tonic-gate kmem_error(KMERR_BADSIZE, cp, buf); 24597c478bd9Sstevel@tonic-gate } else { 24607c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf); 24617c478bd9Sstevel@tonic-gate } 24627c478bd9Sstevel@tonic-gate return; 24637c478bd9Sstevel@tonic-gate } 24647c478bd9Sstevel@tonic-gate if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) { 24657c478bd9Sstevel@tonic-gate kmem_error(KMERR_REDZONE, cp, buf); 24667c478bd9Sstevel@tonic-gate return; 24677c478bd9Sstevel@tonic-gate } 24687c478bd9Sstevel@tonic-gate btp->bt_redzone = KMEM_REDZONE_PATTERN; 24697c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) { 24707c478bd9Sstevel@tonic-gate KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, 24717c478bd9Sstevel@tonic-gate caller()); 24727c478bd9Sstevel@tonic-gate } 24737c478bd9Sstevel@tonic-gate } 24747c478bd9Sstevel@tonic-gate kmem_cache_free(cp, buf); 24757c478bd9Sstevel@tonic-gate } else { 24767c478bd9Sstevel@tonic-gate if (buf == NULL && size == 0) 24777c478bd9Sstevel@tonic-gate return; 24787c478bd9Sstevel@tonic-gate vmem_free(kmem_oversize_arena, buf, size); 24797c478bd9Sstevel@tonic-gate } 24807c478bd9Sstevel@tonic-gate } 24817c478bd9Sstevel@tonic-gate 24827c478bd9Sstevel@tonic-gate void * 24837c478bd9Sstevel@tonic-gate kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag) 24847c478bd9Sstevel@tonic-gate { 
24857c478bd9Sstevel@tonic-gate size_t realsize = size + vmp->vm_quantum; 24867c478bd9Sstevel@tonic-gate void *addr; 24877c478bd9Sstevel@tonic-gate 24887c478bd9Sstevel@tonic-gate /* 24897c478bd9Sstevel@tonic-gate * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding 24907c478bd9Sstevel@tonic-gate * vm_quantum will cause integer wraparound. Check for this, and 24917c478bd9Sstevel@tonic-gate * blow off the firewall page in this case. Note that such a 24927c478bd9Sstevel@tonic-gate * giant allocation (the entire kernel address space) can never 24937c478bd9Sstevel@tonic-gate * be satisfied, so it will either fail immediately (VM_NOSLEEP) 24947c478bd9Sstevel@tonic-gate * or sleep forever (VM_SLEEP). Thus, there is no need for a 24957c478bd9Sstevel@tonic-gate * corresponding check in kmem_firewall_va_free(). 24967c478bd9Sstevel@tonic-gate */ 24977c478bd9Sstevel@tonic-gate if (realsize < size) 24987c478bd9Sstevel@tonic-gate realsize = size; 24997c478bd9Sstevel@tonic-gate 25007c478bd9Sstevel@tonic-gate /* 25017c478bd9Sstevel@tonic-gate * While boot still owns resource management, make sure that this 25027c478bd9Sstevel@tonic-gate * redzone virtual address allocation is properly accounted for in 25037c478bd9Sstevel@tonic-gate * OBPs "virtual-memory" "available" lists because we're 25047c478bd9Sstevel@tonic-gate * effectively claiming them for a red zone. If we don't do this, 25057c478bd9Sstevel@tonic-gate * the available lists become too fragmented and too large for the 25067c478bd9Sstevel@tonic-gate * current boot/kernel memory list interface. 25077c478bd9Sstevel@tonic-gate */ 25087c478bd9Sstevel@tonic-gate addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT); 25097c478bd9Sstevel@tonic-gate 25107c478bd9Sstevel@tonic-gate if (addr != NULL && kvseg.s_base == NULL && realsize != size) 25117c478bd9Sstevel@tonic-gate (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum); 25127c478bd9Sstevel@tonic-gate 25137c478bd9Sstevel@tonic-gate return (addr); 25147c478bd9Sstevel@tonic-gate } 25157c478bd9Sstevel@tonic-gate 25167c478bd9Sstevel@tonic-gate void 25177c478bd9Sstevel@tonic-gate kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size) 25187c478bd9Sstevel@tonic-gate { 25197c478bd9Sstevel@tonic-gate ASSERT((kvseg.s_base == NULL ? 25207c478bd9Sstevel@tonic-gate va_to_pfn((char *)addr + size) : 25217c478bd9Sstevel@tonic-gate hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID); 25227c478bd9Sstevel@tonic-gate 25237c478bd9Sstevel@tonic-gate vmem_free(vmp, addr, size + vmp->vm_quantum); 25247c478bd9Sstevel@tonic-gate } 25257c478bd9Sstevel@tonic-gate 25267c478bd9Sstevel@tonic-gate /* 25277c478bd9Sstevel@tonic-gate * Try to allocate at least `size' bytes of memory without sleeping or 25287c478bd9Sstevel@tonic-gate * panicking. Return actual allocated size in `asize'. If allocation failed, 25297c478bd9Sstevel@tonic-gate * try final allocation with sleep or panic allowed. 
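 *
 * A sketch of a typical caller (hypothetical code, not from this file):
 *
 *	size_t asize;
 *	void *p = kmem_alloc_tryhard(len, &asize, KM_SLEEP);
 *	(use up to asize bytes at p)
 *	kmem_free(p, asize);
 *
 * The caller must free with the returned asize, not the requested len,
 * since kmem_free() uses the size to find the cache the buffer came from.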
25307c478bd9Sstevel@tonic-gate */ 25317c478bd9Sstevel@tonic-gate void * 25327c478bd9Sstevel@tonic-gate kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag) 25337c478bd9Sstevel@tonic-gate { 25347c478bd9Sstevel@tonic-gate void *p; 25357c478bd9Sstevel@tonic-gate 25367c478bd9Sstevel@tonic-gate *asize = P2ROUNDUP(size, KMEM_ALIGN); 25377c478bd9Sstevel@tonic-gate do { 25387c478bd9Sstevel@tonic-gate p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC); 25397c478bd9Sstevel@tonic-gate if (p != NULL) 25407c478bd9Sstevel@tonic-gate return (p); 25417c478bd9Sstevel@tonic-gate *asize += KMEM_ALIGN; 25427c478bd9Sstevel@tonic-gate } while (*asize <= PAGESIZE); 25437c478bd9Sstevel@tonic-gate 25447c478bd9Sstevel@tonic-gate *asize = P2ROUNDUP(size, KMEM_ALIGN); 25457c478bd9Sstevel@tonic-gate return (kmem_alloc(*asize, kmflag)); 25467c478bd9Sstevel@tonic-gate } 25477c478bd9Sstevel@tonic-gate 25487c478bd9Sstevel@tonic-gate /* 25497c478bd9Sstevel@tonic-gate * Reclaim all unused memory from a cache. 25507c478bd9Sstevel@tonic-gate */ 25517c478bd9Sstevel@tonic-gate static void 25527c478bd9Sstevel@tonic-gate kmem_cache_reap(kmem_cache_t *cp) 25537c478bd9Sstevel@tonic-gate { 2554*b5fca8f8Stomee ASSERT(taskq_member(kmem_taskq, curthread)); 2555*b5fca8f8Stomee 25567c478bd9Sstevel@tonic-gate /* 25577c478bd9Sstevel@tonic-gate * Ask the cache's owner to free some memory if possible. 25587c478bd9Sstevel@tonic-gate * The idea is to handle things like the inode cache, which 25597c478bd9Sstevel@tonic-gate * typically sits on a bunch of memory that it doesn't truly 25607c478bd9Sstevel@tonic-gate * *need*. Reclaim policy is entirely up to the owner; this 25617c478bd9Sstevel@tonic-gate * callback is just an advisory plea for help. 25627c478bd9Sstevel@tonic-gate */ 2563*b5fca8f8Stomee if (cp->cache_reclaim != NULL) { 2564*b5fca8f8Stomee long delta; 2565*b5fca8f8Stomee 2566*b5fca8f8Stomee /* 2567*b5fca8f8Stomee * Reclaimed memory should be reapable (not included in the 2568*b5fca8f8Stomee * depot's working set). 
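 *
 * For example, if the reclaim callback below returns enough objects to add
 * five full magazines to the depot (delta == 5), then bumping ml_reaplimit
 * and ml_min by 5 marks exactly those magazines as outside the working set,
 * and the kmem_depot_ws_reap() that follows can destroy them right away
 * rather than a full update interval later.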
2569*b5fca8f8Stomee */ 2570*b5fca8f8Stomee delta = cp->cache_full.ml_total; 25717c478bd9Sstevel@tonic-gate cp->cache_reclaim(cp->cache_private); 2572*b5fca8f8Stomee delta = cp->cache_full.ml_total - delta; 2573*b5fca8f8Stomee if (delta > 0) { 2574*b5fca8f8Stomee mutex_enter(&cp->cache_depot_lock); 2575*b5fca8f8Stomee cp->cache_full.ml_reaplimit += delta; 2576*b5fca8f8Stomee cp->cache_full.ml_min += delta; 2577*b5fca8f8Stomee mutex_exit(&cp->cache_depot_lock); 2578*b5fca8f8Stomee } 2579*b5fca8f8Stomee } 25807c478bd9Sstevel@tonic-gate 25817c478bd9Sstevel@tonic-gate kmem_depot_ws_reap(cp); 2582*b5fca8f8Stomee 2583*b5fca8f8Stomee if (cp->cache_defrag != NULL && !kmem_move_noreap) { 2584*b5fca8f8Stomee kmem_cache_defrag(cp); 2585*b5fca8f8Stomee } 25867c478bd9Sstevel@tonic-gate } 25877c478bd9Sstevel@tonic-gate 25887c478bd9Sstevel@tonic-gate static void 25897c478bd9Sstevel@tonic-gate kmem_reap_timeout(void *flag_arg) 25907c478bd9Sstevel@tonic-gate { 25917c478bd9Sstevel@tonic-gate uint32_t *flag = (uint32_t *)flag_arg; 25927c478bd9Sstevel@tonic-gate 25937c478bd9Sstevel@tonic-gate ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace); 25947c478bd9Sstevel@tonic-gate *flag = 0; 25957c478bd9Sstevel@tonic-gate } 25967c478bd9Sstevel@tonic-gate 25977c478bd9Sstevel@tonic-gate static void 25987c478bd9Sstevel@tonic-gate kmem_reap_done(void *flag) 25997c478bd9Sstevel@tonic-gate { 26007c478bd9Sstevel@tonic-gate (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval); 26017c478bd9Sstevel@tonic-gate } 26027c478bd9Sstevel@tonic-gate 26037c478bd9Sstevel@tonic-gate static void 26047c478bd9Sstevel@tonic-gate kmem_reap_start(void *flag) 26057c478bd9Sstevel@tonic-gate { 26067c478bd9Sstevel@tonic-gate ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace); 26077c478bd9Sstevel@tonic-gate 26087c478bd9Sstevel@tonic-gate if (flag == &kmem_reaping) { 26097c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP); 26107c478bd9Sstevel@tonic-gate /* 26117c478bd9Sstevel@tonic-gate * if we have segkp under heap, reap segkp cache. 26127c478bd9Sstevel@tonic-gate */ 26137c478bd9Sstevel@tonic-gate if (segkp_fromheap) 26147c478bd9Sstevel@tonic-gate segkp_cache_free(); 26157c478bd9Sstevel@tonic-gate } 26167c478bd9Sstevel@tonic-gate else 26177c478bd9Sstevel@tonic-gate kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP); 26187c478bd9Sstevel@tonic-gate 26197c478bd9Sstevel@tonic-gate /* 26207c478bd9Sstevel@tonic-gate * We use taskq_dispatch() to schedule a timeout to clear 26217c478bd9Sstevel@tonic-gate * the flag so that kmem_reap() becomes self-throttling: 26227c478bd9Sstevel@tonic-gate * we won't reap again until the current reap completes *and* 26237c478bd9Sstevel@tonic-gate * at least kmem_reap_interval ticks have elapsed. 
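 *
 * The resulting cycle: kmem_reap() sets the flag via cas32() and dispatches
 * kmem_reap_start(); when the reap work completes, kmem_reap_done() arms a
 * timeout that runs kmem_reap_timeout() to clear the flag after
 * kmem_reap_interval ticks; any kmem_reap() call before then sees the flag
 * already set and returns without doing anything.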
26247c478bd9Sstevel@tonic-gate */
26257c478bd9Sstevel@tonic-gate if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
26267c478bd9Sstevel@tonic-gate kmem_reap_done(flag);
26277c478bd9Sstevel@tonic-gate }
26287c478bd9Sstevel@tonic-gate
26297c478bd9Sstevel@tonic-gate static void
26307c478bd9Sstevel@tonic-gate kmem_reap_common(void *flag_arg)
26317c478bd9Sstevel@tonic-gate {
26327c478bd9Sstevel@tonic-gate uint32_t *flag = (uint32_t *)flag_arg;
26337c478bd9Sstevel@tonic-gate
26347c478bd9Sstevel@tonic-gate if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
26357c478bd9Sstevel@tonic-gate cas32(flag, 0, 1) != 0)
26367c478bd9Sstevel@tonic-gate return;
26377c478bd9Sstevel@tonic-gate
26387c478bd9Sstevel@tonic-gate /*
26397c478bd9Sstevel@tonic-gate * It may not be kosher to do memory allocation when a reap is called
26407c478bd9Sstevel@tonic-gate * (for example, if vmem_populate() is in the call chain). So we start
26417c478bd9Sstevel@tonic-gate * the reap going with a TQ_NOALLOC dispatch. If the dispatch fails,
26427c478bd9Sstevel@tonic-gate * we reset the flag, and the next reap will try again.
26437c478bd9Sstevel@tonic-gate */
26447c478bd9Sstevel@tonic-gate if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
26457c478bd9Sstevel@tonic-gate *flag = 0;
26467c478bd9Sstevel@tonic-gate }
26477c478bd9Sstevel@tonic-gate
26487c478bd9Sstevel@tonic-gate /*
26497c478bd9Sstevel@tonic-gate * Reclaim all unused memory from all caches. Called from the VM system
26507c478bd9Sstevel@tonic-gate * when memory gets tight.
26517c478bd9Sstevel@tonic-gate */
26527c478bd9Sstevel@tonic-gate void
26537c478bd9Sstevel@tonic-gate kmem_reap(void)
26547c478bd9Sstevel@tonic-gate {
26557c478bd9Sstevel@tonic-gate kmem_reap_common(&kmem_reaping);
26567c478bd9Sstevel@tonic-gate }
26577c478bd9Sstevel@tonic-gate
26587c478bd9Sstevel@tonic-gate /*
26597c478bd9Sstevel@tonic-gate * Reclaim all unused memory from identifier arenas, called when a vmem
26607c478bd9Sstevel@tonic-gate * arena not backed by memory is exhausted. Since reaping memory-backed
26617c478bd9Sstevel@tonic-gate * caches cannot help with identifier exhaustion, we avoid both a large
26627c478bd9Sstevel@tonic-gate * amount of work and unwanted side-effects from reclaim callbacks.
26637c478bd9Sstevel@tonic-gate */
26647c478bd9Sstevel@tonic-gate void
26657c478bd9Sstevel@tonic-gate kmem_reap_idspace(void)
26667c478bd9Sstevel@tonic-gate {
26677c478bd9Sstevel@tonic-gate kmem_reap_common(&kmem_reaping_idspace);
26687c478bd9Sstevel@tonic-gate }
26697c478bd9Sstevel@tonic-gate
26707c478bd9Sstevel@tonic-gate /*
26717c478bd9Sstevel@tonic-gate * Purge all magazines from a cache and set its magazine limit to zero.
26727c478bd9Sstevel@tonic-gate * All calls are serialized by the kmem_taskq lock, except for the final
26737c478bd9Sstevel@tonic-gate * call from kmem_cache_destroy().
26747c478bd9Sstevel@tonic-gate */ 26757c478bd9Sstevel@tonic-gate static void 26767c478bd9Sstevel@tonic-gate kmem_cache_magazine_purge(kmem_cache_t *cp) 26777c478bd9Sstevel@tonic-gate { 26787c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp; 26797c478bd9Sstevel@tonic-gate kmem_magazine_t *mp, *pmp; 26807c478bd9Sstevel@tonic-gate int rounds, prounds, cpu_seqid; 26817c478bd9Sstevel@tonic-gate 2682*b5fca8f8Stomee ASSERT(!list_link_active(&cp->cache_link) || 2683*b5fca8f8Stomee taskq_member(kmem_taskq, curthread)); 26847c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); 26857c478bd9Sstevel@tonic-gate 26867c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 26877c478bd9Sstevel@tonic-gate ccp = &cp->cache_cpu[cpu_seqid]; 26887c478bd9Sstevel@tonic-gate 26897c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock); 26907c478bd9Sstevel@tonic-gate mp = ccp->cc_loaded; 26917c478bd9Sstevel@tonic-gate pmp = ccp->cc_ploaded; 26927c478bd9Sstevel@tonic-gate rounds = ccp->cc_rounds; 26937c478bd9Sstevel@tonic-gate prounds = ccp->cc_prounds; 26947c478bd9Sstevel@tonic-gate ccp->cc_loaded = NULL; 26957c478bd9Sstevel@tonic-gate ccp->cc_ploaded = NULL; 26967c478bd9Sstevel@tonic-gate ccp->cc_rounds = -1; 26977c478bd9Sstevel@tonic-gate ccp->cc_prounds = -1; 26987c478bd9Sstevel@tonic-gate ccp->cc_magsize = 0; 26997c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 27007c478bd9Sstevel@tonic-gate 27017c478bd9Sstevel@tonic-gate if (mp) 27027c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, mp, rounds); 27037c478bd9Sstevel@tonic-gate if (pmp) 27047c478bd9Sstevel@tonic-gate kmem_magazine_destroy(cp, pmp, prounds); 27057c478bd9Sstevel@tonic-gate } 27067c478bd9Sstevel@tonic-gate 27077c478bd9Sstevel@tonic-gate /* 27087c478bd9Sstevel@tonic-gate * Updating the working set statistics twice in a row has the 27097c478bd9Sstevel@tonic-gate * effect of setting the working set size to zero, so everything 27107c478bd9Sstevel@tonic-gate * is eligible for reaping. 27117c478bd9Sstevel@tonic-gate */ 27127c478bd9Sstevel@tonic-gate kmem_depot_ws_update(cp); 27137c478bd9Sstevel@tonic-gate kmem_depot_ws_update(cp); 27147c478bd9Sstevel@tonic-gate 27157c478bd9Sstevel@tonic-gate kmem_depot_ws_reap(cp); 27167c478bd9Sstevel@tonic-gate } 27177c478bd9Sstevel@tonic-gate 27187c478bd9Sstevel@tonic-gate /* 27197c478bd9Sstevel@tonic-gate * Enable per-cpu magazines on a cache. 27207c478bd9Sstevel@tonic-gate */ 27217c478bd9Sstevel@tonic-gate static void 27227c478bd9Sstevel@tonic-gate kmem_cache_magazine_enable(kmem_cache_t *cp) 27237c478bd9Sstevel@tonic-gate { 27247c478bd9Sstevel@tonic-gate int cpu_seqid; 27257c478bd9Sstevel@tonic-gate 27267c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_NOMAGAZINE) 27277c478bd9Sstevel@tonic-gate return; 27287c478bd9Sstevel@tonic-gate 27297c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 27307c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 27317c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock); 27327c478bd9Sstevel@tonic-gate ccp->cc_magsize = cp->cache_magtype->mt_magsize; 27337c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 27347c478bd9Sstevel@tonic-gate } 27357c478bd9Sstevel@tonic-gate 27367c478bd9Sstevel@tonic-gate } 27377c478bd9Sstevel@tonic-gate 2738fa9e4066Sahrens /* 2739fa9e4066Sahrens * Reap (almost) everything right now. See kmem_cache_magazine_purge() 2740fa9e4066Sahrens * for explanation of the back-to-back kmem_depot_ws_update() calls. 
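 *
 * Briefly: the first call makes ml_min = ml_total, and the second then makes
 * ml_reaplimit = ml_min = ml_total, so the reap computes
 * MIN(ml_reaplimit, ml_min) == ml_total and every magazine in the depot is
 * eligible for destruction.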
2741fa9e4066Sahrens */ 2742fa9e4066Sahrens void 2743fa9e4066Sahrens kmem_cache_reap_now(kmem_cache_t *cp) 2744fa9e4066Sahrens { 2745*b5fca8f8Stomee ASSERT(list_link_active(&cp->cache_link)); 2746*b5fca8f8Stomee 2747fa9e4066Sahrens kmem_depot_ws_update(cp); 2748fa9e4066Sahrens kmem_depot_ws_update(cp); 2749fa9e4066Sahrens 2750fa9e4066Sahrens (void) taskq_dispatch(kmem_taskq, 2751fa9e4066Sahrens (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP); 2752fa9e4066Sahrens taskq_wait(kmem_taskq); 2753fa9e4066Sahrens } 2754fa9e4066Sahrens 27557c478bd9Sstevel@tonic-gate /* 27567c478bd9Sstevel@tonic-gate * Recompute a cache's magazine size. The trade-off is that larger magazines 27577c478bd9Sstevel@tonic-gate * provide a higher transfer rate with the depot, while smaller magazines 27587c478bd9Sstevel@tonic-gate * reduce memory consumption. Magazine resizing is an expensive operation; 27597c478bd9Sstevel@tonic-gate * it should not be done frequently. 27607c478bd9Sstevel@tonic-gate * 27617c478bd9Sstevel@tonic-gate * Changes to the magazine size are serialized by the kmem_taskq lock. 27627c478bd9Sstevel@tonic-gate * 27637c478bd9Sstevel@tonic-gate * Note: at present this only grows the magazine size. It might be useful 27647c478bd9Sstevel@tonic-gate * to allow shrinkage too. 27657c478bd9Sstevel@tonic-gate */ 27667c478bd9Sstevel@tonic-gate static void 27677c478bd9Sstevel@tonic-gate kmem_cache_magazine_resize(kmem_cache_t *cp) 27687c478bd9Sstevel@tonic-gate { 27697c478bd9Sstevel@tonic-gate kmem_magtype_t *mtp = cp->cache_magtype; 27707c478bd9Sstevel@tonic-gate 27717c478bd9Sstevel@tonic-gate ASSERT(taskq_member(kmem_taskq, curthread)); 27727c478bd9Sstevel@tonic-gate 27737c478bd9Sstevel@tonic-gate if (cp->cache_chunksize < mtp->mt_maxbuf) { 27747c478bd9Sstevel@tonic-gate kmem_cache_magazine_purge(cp); 27757c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock); 27767c478bd9Sstevel@tonic-gate cp->cache_magtype = ++mtp; 27777c478bd9Sstevel@tonic-gate cp->cache_depot_contention_prev = 27787c478bd9Sstevel@tonic-gate cp->cache_depot_contention + INT_MAX; 27797c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock); 27807c478bd9Sstevel@tonic-gate kmem_cache_magazine_enable(cp); 27817c478bd9Sstevel@tonic-gate } 27827c478bd9Sstevel@tonic-gate } 27837c478bd9Sstevel@tonic-gate 27847c478bd9Sstevel@tonic-gate /* 27857c478bd9Sstevel@tonic-gate * Rescale a cache's hash table, so that the table size is roughly the 27867c478bd9Sstevel@tonic-gate * cache size. We want the average lookup time to be extremely small. 
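 *
 * The size chosen below is the power of two nearest the buffer count (it
 * always lies between roughly 3/4 and 3/2 of cache_buftotal). For example,
 * with cache_buftotal == 1000: highbit(3 * 1000 + 4) == 12, so
 * new_size == 1 << 10 == 1024, about one hash bucket per buffer. The early
 * return below also skips any rescale within a factor of two of the current
 * size, to avoid thrashing.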
27877c478bd9Sstevel@tonic-gate */ 27887c478bd9Sstevel@tonic-gate static void 27897c478bd9Sstevel@tonic-gate kmem_hash_rescale(kmem_cache_t *cp) 27907c478bd9Sstevel@tonic-gate { 27917c478bd9Sstevel@tonic-gate kmem_bufctl_t **old_table, **new_table, *bcp; 27927c478bd9Sstevel@tonic-gate size_t old_size, new_size, h; 27937c478bd9Sstevel@tonic-gate 27947c478bd9Sstevel@tonic-gate ASSERT(taskq_member(kmem_taskq, curthread)); 27957c478bd9Sstevel@tonic-gate 27967c478bd9Sstevel@tonic-gate new_size = MAX(KMEM_HASH_INITIAL, 27977c478bd9Sstevel@tonic-gate 1 << (highbit(3 * cp->cache_buftotal + 4) - 2)); 27987c478bd9Sstevel@tonic-gate old_size = cp->cache_hash_mask + 1; 27997c478bd9Sstevel@tonic-gate 28007c478bd9Sstevel@tonic-gate if ((old_size >> 1) <= new_size && new_size <= (old_size << 1)) 28017c478bd9Sstevel@tonic-gate return; 28027c478bd9Sstevel@tonic-gate 28037c478bd9Sstevel@tonic-gate new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *), 28047c478bd9Sstevel@tonic-gate VM_NOSLEEP); 28057c478bd9Sstevel@tonic-gate if (new_table == NULL) 28067c478bd9Sstevel@tonic-gate return; 28077c478bd9Sstevel@tonic-gate bzero(new_table, new_size * sizeof (void *)); 28087c478bd9Sstevel@tonic-gate 28097c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock); 28107c478bd9Sstevel@tonic-gate 28117c478bd9Sstevel@tonic-gate old_size = cp->cache_hash_mask + 1; 28127c478bd9Sstevel@tonic-gate old_table = cp->cache_hash_table; 28137c478bd9Sstevel@tonic-gate 28147c478bd9Sstevel@tonic-gate cp->cache_hash_mask = new_size - 1; 28157c478bd9Sstevel@tonic-gate cp->cache_hash_table = new_table; 28167c478bd9Sstevel@tonic-gate cp->cache_rescale++; 28177c478bd9Sstevel@tonic-gate 28187c478bd9Sstevel@tonic-gate for (h = 0; h < old_size; h++) { 28197c478bd9Sstevel@tonic-gate bcp = old_table[h]; 28207c478bd9Sstevel@tonic-gate while (bcp != NULL) { 28217c478bd9Sstevel@tonic-gate void *addr = bcp->bc_addr; 28227c478bd9Sstevel@tonic-gate kmem_bufctl_t *next_bcp = bcp->bc_next; 28237c478bd9Sstevel@tonic-gate kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr); 28247c478bd9Sstevel@tonic-gate bcp->bc_next = *hash_bucket; 28257c478bd9Sstevel@tonic-gate *hash_bucket = bcp; 28267c478bd9Sstevel@tonic-gate bcp = next_bcp; 28277c478bd9Sstevel@tonic-gate } 28287c478bd9Sstevel@tonic-gate } 28297c478bd9Sstevel@tonic-gate 28307c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 28317c478bd9Sstevel@tonic-gate 28327c478bd9Sstevel@tonic-gate vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *)); 28337c478bd9Sstevel@tonic-gate } 28347c478bd9Sstevel@tonic-gate 28357c478bd9Sstevel@tonic-gate /* 2836*b5fca8f8Stomee * Perform periodic maintenance on a cache: hash rescaling, depot working-set 2837*b5fca8f8Stomee * update, magazine resizing, and slab consolidation. 28387c478bd9Sstevel@tonic-gate */ 28397c478bd9Sstevel@tonic-gate static void 28407c478bd9Sstevel@tonic-gate kmem_cache_update(kmem_cache_t *cp) 28417c478bd9Sstevel@tonic-gate { 28427c478bd9Sstevel@tonic-gate int need_hash_rescale = 0; 28437c478bd9Sstevel@tonic-gate int need_magazine_resize = 0; 28447c478bd9Sstevel@tonic-gate 28457c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&kmem_cache_lock)); 28467c478bd9Sstevel@tonic-gate 28477c478bd9Sstevel@tonic-gate /* 28487c478bd9Sstevel@tonic-gate * If the cache has become much larger or smaller than its hash table, 28497c478bd9Sstevel@tonic-gate * fire off a request to rescale the hash table. 
28507c478bd9Sstevel@tonic-gate */ 28517c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock); 28527c478bd9Sstevel@tonic-gate 28537c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && 28547c478bd9Sstevel@tonic-gate (cp->cache_buftotal > (cp->cache_hash_mask << 1) || 28557c478bd9Sstevel@tonic-gate (cp->cache_buftotal < (cp->cache_hash_mask >> 1) && 28567c478bd9Sstevel@tonic-gate cp->cache_hash_mask > KMEM_HASH_INITIAL))) 28577c478bd9Sstevel@tonic-gate need_hash_rescale = 1; 28587c478bd9Sstevel@tonic-gate 28597c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 28607c478bd9Sstevel@tonic-gate 28617c478bd9Sstevel@tonic-gate /* 28627c478bd9Sstevel@tonic-gate * Update the depot working set statistics. 28637c478bd9Sstevel@tonic-gate */ 28647c478bd9Sstevel@tonic-gate kmem_depot_ws_update(cp); 28657c478bd9Sstevel@tonic-gate 28667c478bd9Sstevel@tonic-gate /* 28677c478bd9Sstevel@tonic-gate * If there's a lot of contention in the depot, 28687c478bd9Sstevel@tonic-gate * increase the magazine size. 28697c478bd9Sstevel@tonic-gate */ 28707c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock); 28717c478bd9Sstevel@tonic-gate 28727c478bd9Sstevel@tonic-gate if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf && 28737c478bd9Sstevel@tonic-gate (int)(cp->cache_depot_contention - 28747c478bd9Sstevel@tonic-gate cp->cache_depot_contention_prev) > kmem_depot_contention) 28757c478bd9Sstevel@tonic-gate need_magazine_resize = 1; 28767c478bd9Sstevel@tonic-gate 28777c478bd9Sstevel@tonic-gate cp->cache_depot_contention_prev = cp->cache_depot_contention; 28787c478bd9Sstevel@tonic-gate 28797c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock); 28807c478bd9Sstevel@tonic-gate 28817c478bd9Sstevel@tonic-gate if (need_hash_rescale) 28827c478bd9Sstevel@tonic-gate (void) taskq_dispatch(kmem_taskq, 28837c478bd9Sstevel@tonic-gate (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP); 28847c478bd9Sstevel@tonic-gate 28857c478bd9Sstevel@tonic-gate if (need_magazine_resize) 28867c478bd9Sstevel@tonic-gate (void) taskq_dispatch(kmem_taskq, 28877c478bd9Sstevel@tonic-gate (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP); 2888*b5fca8f8Stomee 2889*b5fca8f8Stomee if (cp->cache_defrag != NULL) 2890*b5fca8f8Stomee (void) taskq_dispatch(kmem_taskq, 2891*b5fca8f8Stomee (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP); 28927c478bd9Sstevel@tonic-gate } 28937c478bd9Sstevel@tonic-gate 28947c478bd9Sstevel@tonic-gate static void 28957c478bd9Sstevel@tonic-gate kmem_update_timeout(void *dummy) 28967c478bd9Sstevel@tonic-gate { 28977c478bd9Sstevel@tonic-gate static void kmem_update(void *); 28987c478bd9Sstevel@tonic-gate 28997c478bd9Sstevel@tonic-gate (void) timeout(kmem_update, dummy, kmem_reap_interval); 29007c478bd9Sstevel@tonic-gate } 29017c478bd9Sstevel@tonic-gate 29027c478bd9Sstevel@tonic-gate static void 29037c478bd9Sstevel@tonic-gate kmem_update(void *dummy) 29047c478bd9Sstevel@tonic-gate { 29057c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP); 29067c478bd9Sstevel@tonic-gate 29077c478bd9Sstevel@tonic-gate /* 29087c478bd9Sstevel@tonic-gate * We use taskq_dispatch() to reschedule the timeout so that 29097c478bd9Sstevel@tonic-gate * kmem_update() becomes self-throttling: it won't schedule 29107c478bd9Sstevel@tonic-gate * new tasks until all previous tasks have completed. 
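 *
 * The rescale, resize, and scan requests that kmem_cache_update() dispatched
 * above all went to the same kmem_taskq, so the kmem_update_timeout() entry
 * queued behind them rearms the timeout only after that work has been
 * consumed (assuming the taskq services entries in dispatch order).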
29117c478bd9Sstevel@tonic-gate */ 29127c478bd9Sstevel@tonic-gate if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP)) 29137c478bd9Sstevel@tonic-gate kmem_update_timeout(NULL); 29147c478bd9Sstevel@tonic-gate } 29157c478bd9Sstevel@tonic-gate 29167c478bd9Sstevel@tonic-gate static int 29177c478bd9Sstevel@tonic-gate kmem_cache_kstat_update(kstat_t *ksp, int rw) 29187c478bd9Sstevel@tonic-gate { 29197c478bd9Sstevel@tonic-gate struct kmem_cache_kstat *kmcp = &kmem_cache_kstat; 29207c478bd9Sstevel@tonic-gate kmem_cache_t *cp = ksp->ks_private; 29217c478bd9Sstevel@tonic-gate uint64_t cpu_buf_avail; 29227c478bd9Sstevel@tonic-gate uint64_t buf_avail = 0; 29237c478bd9Sstevel@tonic-gate int cpu_seqid; 29247c478bd9Sstevel@tonic-gate 29257c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock)); 29267c478bd9Sstevel@tonic-gate 29277c478bd9Sstevel@tonic-gate if (rw == KSTAT_WRITE) 29287c478bd9Sstevel@tonic-gate return (EACCES); 29297c478bd9Sstevel@tonic-gate 29307c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock); 29317c478bd9Sstevel@tonic-gate 29327c478bd9Sstevel@tonic-gate kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail; 29337c478bd9Sstevel@tonic-gate kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc; 29347c478bd9Sstevel@tonic-gate kmcp->kmc_free.value.ui64 = cp->cache_slab_free; 29357c478bd9Sstevel@tonic-gate kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc; 29367c478bd9Sstevel@tonic-gate kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free; 29377c478bd9Sstevel@tonic-gate 29387c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 29397c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 29407c478bd9Sstevel@tonic-gate 29417c478bd9Sstevel@tonic-gate mutex_enter(&ccp->cc_lock); 29427c478bd9Sstevel@tonic-gate 29437c478bd9Sstevel@tonic-gate cpu_buf_avail = 0; 29447c478bd9Sstevel@tonic-gate if (ccp->cc_rounds > 0) 29457c478bd9Sstevel@tonic-gate cpu_buf_avail += ccp->cc_rounds; 29467c478bd9Sstevel@tonic-gate if (ccp->cc_prounds > 0) 29477c478bd9Sstevel@tonic-gate cpu_buf_avail += ccp->cc_prounds; 29487c478bd9Sstevel@tonic-gate 29497c478bd9Sstevel@tonic-gate kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc; 29507c478bd9Sstevel@tonic-gate kmcp->kmc_free.value.ui64 += ccp->cc_free; 29517c478bd9Sstevel@tonic-gate buf_avail += cpu_buf_avail; 29527c478bd9Sstevel@tonic-gate 29537c478bd9Sstevel@tonic-gate mutex_exit(&ccp->cc_lock); 29547c478bd9Sstevel@tonic-gate } 29557c478bd9Sstevel@tonic-gate 29567c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_depot_lock); 29577c478bd9Sstevel@tonic-gate 29587c478bd9Sstevel@tonic-gate kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc; 29597c478bd9Sstevel@tonic-gate kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc; 29607c478bd9Sstevel@tonic-gate kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention; 29617c478bd9Sstevel@tonic-gate kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total; 29627c478bd9Sstevel@tonic-gate kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total; 29637c478bd9Sstevel@tonic-gate kmcp->kmc_magazine_size.value.ui64 = 29647c478bd9Sstevel@tonic-gate (cp->cache_flags & KMF_NOMAGAZINE) ? 
29657c478bd9Sstevel@tonic-gate 0 : cp->cache_magtype->mt_magsize; 29667c478bd9Sstevel@tonic-gate 29677c478bd9Sstevel@tonic-gate kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc; 29687c478bd9Sstevel@tonic-gate kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc; 29697c478bd9Sstevel@tonic-gate buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize; 29707c478bd9Sstevel@tonic-gate 29717c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_depot_lock); 29727c478bd9Sstevel@tonic-gate 29737c478bd9Sstevel@tonic-gate kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize; 29747c478bd9Sstevel@tonic-gate kmcp->kmc_align.value.ui64 = cp->cache_align; 29757c478bd9Sstevel@tonic-gate kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize; 29767c478bd9Sstevel@tonic-gate kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize; 29777c478bd9Sstevel@tonic-gate kmcp->kmc_buf_constructed.value.ui64 = buf_avail; 29789f1b636aStomee buf_avail += cp->cache_bufslab; 29797c478bd9Sstevel@tonic-gate kmcp->kmc_buf_avail.value.ui64 = buf_avail; 29807c478bd9Sstevel@tonic-gate kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail; 29817c478bd9Sstevel@tonic-gate kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal; 29827c478bd9Sstevel@tonic-gate kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax; 29837c478bd9Sstevel@tonic-gate kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create; 29847c478bd9Sstevel@tonic-gate kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy; 29857c478bd9Sstevel@tonic-gate kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ? 29867c478bd9Sstevel@tonic-gate cp->cache_hash_mask + 1 : 0; 29877c478bd9Sstevel@tonic-gate kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth; 29887c478bd9Sstevel@tonic-gate kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale; 29897c478bd9Sstevel@tonic-gate kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id; 29907c478bd9Sstevel@tonic-gate 2991*b5fca8f8Stomee if (cp->cache_defrag == NULL) { 2992*b5fca8f8Stomee kmcp->kmc_move_callbacks.value.ui64 = 0; 2993*b5fca8f8Stomee kmcp->kmc_move_yes.value.ui64 = 0; 2994*b5fca8f8Stomee kmcp->kmc_move_no.value.ui64 = 0; 2995*b5fca8f8Stomee kmcp->kmc_move_later.value.ui64 = 0; 2996*b5fca8f8Stomee kmcp->kmc_move_dont_need.value.ui64 = 0; 2997*b5fca8f8Stomee kmcp->kmc_move_dont_know.value.ui64 = 0; 2998*b5fca8f8Stomee kmcp->kmc_move_hunt_found.value.ui64 = 0; 2999*b5fca8f8Stomee } else { 3000*b5fca8f8Stomee kmem_defrag_t *kd = cp->cache_defrag; 3001*b5fca8f8Stomee kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks; 3002*b5fca8f8Stomee kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes; 3003*b5fca8f8Stomee kmcp->kmc_move_no.value.ui64 = kd->kmd_no; 3004*b5fca8f8Stomee kmcp->kmc_move_later.value.ui64 = kd->kmd_later; 3005*b5fca8f8Stomee kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need; 3006*b5fca8f8Stomee kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know; 3007*b5fca8f8Stomee kmcp->kmc_move_hunt_found.value.ui64 = kd->kmd_hunt_found; 3008*b5fca8f8Stomee } 3009*b5fca8f8Stomee 30107c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 30117c478bd9Sstevel@tonic-gate return (0); 30127c478bd9Sstevel@tonic-gate } 30137c478bd9Sstevel@tonic-gate 30147c478bd9Sstevel@tonic-gate /* 30157c478bd9Sstevel@tonic-gate * Return a named statistic about a particular cache. 30167c478bd9Sstevel@tonic-gate * This shouldn't be called very often, so it's currently designed for 30177c478bd9Sstevel@tonic-gate * simplicity (leverages existing kstat support) rather than efficiency. 
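 *
 * For example (an illustrative call; the statistic name is assumed to match
 * the kstat_named_t name exported by the cache's "kmem_cache" kstat):
 *
 *	uint64_t inuse = kmem_cache_stat(cp, "buf_inuse");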
30187c478bd9Sstevel@tonic-gate */
30197c478bd9Sstevel@tonic-gate uint64_t
30207c478bd9Sstevel@tonic-gate kmem_cache_stat(kmem_cache_t *cp, char *name)
30217c478bd9Sstevel@tonic-gate {
30227c478bd9Sstevel@tonic-gate 	int i;
30237c478bd9Sstevel@tonic-gate 	kstat_t *ksp = cp->cache_kstat;
30247c478bd9Sstevel@tonic-gate 	kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
30257c478bd9Sstevel@tonic-gate 	uint64_t value = 0;
30267c478bd9Sstevel@tonic-gate 
30277c478bd9Sstevel@tonic-gate 	if (ksp != NULL) {
30287c478bd9Sstevel@tonic-gate 		mutex_enter(&kmem_cache_kstat_lock);
30297c478bd9Sstevel@tonic-gate 		(void) kmem_cache_kstat_update(ksp, KSTAT_READ);
30307c478bd9Sstevel@tonic-gate 		for (i = 0; i < ksp->ks_ndata; i++) {
30317c478bd9Sstevel@tonic-gate 			if (strcmp(knp[i].name, name) == 0) {
30327c478bd9Sstevel@tonic-gate 				value = knp[i].value.ui64;
30337c478bd9Sstevel@tonic-gate 				break;
30347c478bd9Sstevel@tonic-gate 			}
30357c478bd9Sstevel@tonic-gate 		}
30367c478bd9Sstevel@tonic-gate 		mutex_exit(&kmem_cache_kstat_lock);
30377c478bd9Sstevel@tonic-gate 	}
30387c478bd9Sstevel@tonic-gate 	return (value);
30397c478bd9Sstevel@tonic-gate }
30407c478bd9Sstevel@tonic-gate 
30417c478bd9Sstevel@tonic-gate /*
30427c478bd9Sstevel@tonic-gate  * Return an estimate of currently available kernel heap memory.
30437c478bd9Sstevel@tonic-gate  * On 32-bit systems, physical memory may exceed virtual memory,
30447c478bd9Sstevel@tonic-gate  * so we just truncate the result at 1GB.
30457c478bd9Sstevel@tonic-gate  */
30467c478bd9Sstevel@tonic-gate size_t
30477c478bd9Sstevel@tonic-gate kmem_avail(void)
30487c478bd9Sstevel@tonic-gate {
30497c478bd9Sstevel@tonic-gate 	spgcnt_t rmem = availrmem - tune.t_minarmem;
30507c478bd9Sstevel@tonic-gate 	spgcnt_t fmem = freemem - minfree;
30517c478bd9Sstevel@tonic-gate 
30527c478bd9Sstevel@tonic-gate 	return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
30537c478bd9Sstevel@tonic-gate 	    1 << (30 - PAGESHIFT))));
30547c478bd9Sstevel@tonic-gate }
30557c478bd9Sstevel@tonic-gate 
30567c478bd9Sstevel@tonic-gate /*
30577c478bd9Sstevel@tonic-gate  * Return the maximum amount of memory that is (in theory) allocatable
30587c478bd9Sstevel@tonic-gate  * from the heap. This may be used as an estimate only since there
30597c478bd9Sstevel@tonic-gate  * is no guarantee this space will still be available when an allocation
30607c478bd9Sstevel@tonic-gate  * request is made, nor that the space can be allocated in one big request
30617c478bd9Sstevel@tonic-gate  * due to kernel heap fragmentation.
30627c478bd9Sstevel@tonic-gate  */
30637c478bd9Sstevel@tonic-gate size_t
30647c478bd9Sstevel@tonic-gate kmem_maxavail(void)
30657c478bd9Sstevel@tonic-gate {
30667c478bd9Sstevel@tonic-gate 	spgcnt_t pmem = availrmem - tune.t_minarmem;
30677c478bd9Sstevel@tonic-gate 	spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
30687c478bd9Sstevel@tonic-gate 
30697c478bd9Sstevel@tonic-gate 	return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
30707c478bd9Sstevel@tonic-gate }
30717c478bd9Sstevel@tonic-gate 
3072fa9e4066Sahrens /*
3073fa9e4066Sahrens  * Indicate whether memory-intensive kmem debugging is enabled.
3074fa9e4066Sahrens  */
3075fa9e4066Sahrens int
3076fa9e4066Sahrens kmem_debugging(void)
3077fa9e4066Sahrens {
3078fa9e4066Sahrens 	return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3079fa9e4066Sahrens }
3080fa9e4066Sahrens 
3081*b5fca8f8Stomee /* binning function, sorts finely at the two extremes */
3082*b5fca8f8Stomee #define	KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift)		\
3083*b5fca8f8Stomee 	((((sp)->slab_refcnt <= (binshift)) ||		\
3084*b5fca8f8Stomee 	(((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift)))	\
3085*b5fca8f8Stomee 	? -(sp)->slab_refcnt				\
3086*b5fca8f8Stomee 	: -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3087*b5fca8f8Stomee 
3088*b5fca8f8Stomee /*
3089*b5fca8f8Stomee  * Minimizing the number of partial slabs on the freelist minimizes
3090*b5fca8f8Stomee  * fragmentation (the ratio of unused buffers held by the slab layer). There are
3091*b5fca8f8Stomee  * two ways to get a slab off of the freelist: 1) free all the buffers on the
3092*b5fca8f8Stomee  * slab, and 2) allocate all the buffers on the slab. It follows that we want
3093*b5fca8f8Stomee  * the most-used slabs at the front of the list where they have the best chance
3094*b5fca8f8Stomee  * of being completely allocated, and the least-used slabs at a safe distance
3095*b5fca8f8Stomee  * from the front to improve the odds that the few remaining buffers will all be
3096*b5fca8f8Stomee  * freed before another allocation can tie up the slab. For that reason a slab
3097*b5fca8f8Stomee  * with a higher slab_refcnt sorts less than a slab with a lower
3098*b5fca8f8Stomee  * slab_refcnt.
3099*b5fca8f8Stomee  *
3100*b5fca8f8Stomee  * However, if a slab has at least one buffer that is deemed unfreeable, we
3101*b5fca8f8Stomee  * would rather have that slab at the front of the list regardless of
3102*b5fca8f8Stomee  * slab_refcnt, since even one unfreeable buffer makes the entire slab
3103*b5fca8f8Stomee  * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3104*b5fca8f8Stomee  * callback, the slab is marked unfreeable for as long as it remains on the
3105*b5fca8f8Stomee  * freelist.
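 *
 * As a worked illustration of the weight macro above (numbers assumed, not
 * taken from any particular cache): with binshift = 3 and slab_chunks = 32,
 * a nearly full slab with slab_refcnt 30 weighs -30 (since 32 - 30 <= 3), a
 * nearly empty slab with slab_refcnt 2 weighs -2 (since 2 <= 3), and any
 * middling slab with slab_refcnt 16..23 weighs -(3 + (refcnt >> 3)) = -5.
 * Sorting by ascending weight therefore places the most-used slabs first,
 * distinguishing individual refcnt values only at the two extremes and
 * lumping the middle of the range into coarse bins.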
3106*b5fca8f8Stomee */ 3107*b5fca8f8Stomee static int 3108*b5fca8f8Stomee kmem_partial_slab_cmp(const void *p0, const void *p1) 3109*b5fca8f8Stomee { 3110*b5fca8f8Stomee const kmem_cache_t *cp; 3111*b5fca8f8Stomee const kmem_slab_t *s0 = p0; 3112*b5fca8f8Stomee const kmem_slab_t *s1 = p1; 3113*b5fca8f8Stomee int w0, w1; 3114*b5fca8f8Stomee size_t binshift; 3115*b5fca8f8Stomee 3116*b5fca8f8Stomee ASSERT(KMEM_SLAB_IS_PARTIAL(s0)); 3117*b5fca8f8Stomee ASSERT(KMEM_SLAB_IS_PARTIAL(s1)); 3118*b5fca8f8Stomee ASSERT(s0->slab_cache == s1->slab_cache); 3119*b5fca8f8Stomee cp = s1->slab_cache; 3120*b5fca8f8Stomee ASSERT(MUTEX_HELD(&cp->cache_lock)); 3121*b5fca8f8Stomee binshift = cp->cache_partial_binshift; 3122*b5fca8f8Stomee 3123*b5fca8f8Stomee /* weight of first slab */ 3124*b5fca8f8Stomee w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift); 3125*b5fca8f8Stomee if (s0->slab_flags & KMEM_SLAB_NOMOVE) { 3126*b5fca8f8Stomee w0 -= cp->cache_maxchunks; 3127*b5fca8f8Stomee } 3128*b5fca8f8Stomee 3129*b5fca8f8Stomee /* weight of second slab */ 3130*b5fca8f8Stomee w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift); 3131*b5fca8f8Stomee if (s1->slab_flags & KMEM_SLAB_NOMOVE) { 3132*b5fca8f8Stomee w1 -= cp->cache_maxchunks; 3133*b5fca8f8Stomee } 3134*b5fca8f8Stomee 3135*b5fca8f8Stomee if (w0 < w1) 3136*b5fca8f8Stomee return (-1); 3137*b5fca8f8Stomee if (w0 > w1) 3138*b5fca8f8Stomee return (1); 3139*b5fca8f8Stomee 3140*b5fca8f8Stomee /* compare pointer values */ 3141*b5fca8f8Stomee if ((uintptr_t)s0 < (uintptr_t)s1) 3142*b5fca8f8Stomee return (-1); 3143*b5fca8f8Stomee if ((uintptr_t)s0 > (uintptr_t)s1) 3144*b5fca8f8Stomee return (1); 3145*b5fca8f8Stomee 3146*b5fca8f8Stomee return (0); 3147*b5fca8f8Stomee } 3148*b5fca8f8Stomee 3149*b5fca8f8Stomee static void 3150*b5fca8f8Stomee kmem_check_destructor(kmem_cache_t *cp) 3151*b5fca8f8Stomee { 3152*b5fca8f8Stomee if (cp->cache_destructor == NULL) 3153*b5fca8f8Stomee return; 3154*b5fca8f8Stomee 3155*b5fca8f8Stomee /* 3156*b5fca8f8Stomee * Assert that it is valid to call the destructor on a newly constructed 3157*b5fca8f8Stomee * object without any intervening client code using the object. 3158*b5fca8f8Stomee * Allocate from the slab layer to ensure that the client has not 3159*b5fca8f8Stomee * touched the buffer. 3160*b5fca8f8Stomee */ 3161*b5fca8f8Stomee void *buf = kmem_slab_alloc(cp, KM_NOSLEEP); 3162*b5fca8f8Stomee if (buf == NULL) 3163*b5fca8f8Stomee return; 3164*b5fca8f8Stomee 3165*b5fca8f8Stomee if (cp->cache_flags & KMF_BUFTAG) { 3166*b5fca8f8Stomee if (kmem_cache_alloc_debug(cp, buf, KM_NOSLEEP, 1, 3167*b5fca8f8Stomee caller()) != 0) 3168*b5fca8f8Stomee return; 3169*b5fca8f8Stomee } else if (cp->cache_constructor != NULL && 3170*b5fca8f8Stomee cp->cache_constructor(buf, cp->cache_private, KM_NOSLEEP) != 0) { 3171*b5fca8f8Stomee atomic_add_64(&cp->cache_alloc_fail, 1); 3172*b5fca8f8Stomee kmem_slab_free(cp, buf); 3173*b5fca8f8Stomee return; 3174*b5fca8f8Stomee } 3175*b5fca8f8Stomee 3176*b5fca8f8Stomee kmem_slab_free_constructed(cp, buf, B_FALSE); 3177*b5fca8f8Stomee } 3178*b5fca8f8Stomee 3179*b5fca8f8Stomee /* 3180*b5fca8f8Stomee * It must be valid to call the destructor (if any) on a newly created object. 3181*b5fca8f8Stomee * That is, the constructor (if any) must leave the object in a valid state for 3182*b5fca8f8Stomee * the destructor. 
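 *
 * For example (an illustrative sketch, not a reference to any particular
 * client): if a constructor calls mutex_init() on a lock embedded in the
 * object, the destructor's matching mutex_destroy() must be legal
 * immediately after construction, with no intervening use of the object.
 * kmem_check_destructor() above checks exactly that guarantee: it
 * constructs a fresh buffer from the slab layer and immediately frees it
 * without ever exposing it to the client.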
3183*b5fca8f8Stomee */ 31847c478bd9Sstevel@tonic-gate kmem_cache_t * 31857c478bd9Sstevel@tonic-gate kmem_cache_create( 31867c478bd9Sstevel@tonic-gate char *name, /* descriptive name for this cache */ 31877c478bd9Sstevel@tonic-gate size_t bufsize, /* size of the objects it manages */ 31887c478bd9Sstevel@tonic-gate size_t align, /* required object alignment */ 31897c478bd9Sstevel@tonic-gate int (*constructor)(void *, void *, int), /* object constructor */ 31907c478bd9Sstevel@tonic-gate void (*destructor)(void *, void *), /* object destructor */ 31917c478bd9Sstevel@tonic-gate void (*reclaim)(void *), /* memory reclaim callback */ 31927c478bd9Sstevel@tonic-gate void *private, /* pass-thru arg for constr/destr/reclaim */ 31937c478bd9Sstevel@tonic-gate vmem_t *vmp, /* vmem source for slab allocation */ 31947c478bd9Sstevel@tonic-gate int cflags) /* cache creation flags */ 31957c478bd9Sstevel@tonic-gate { 31967c478bd9Sstevel@tonic-gate int cpu_seqid; 31977c478bd9Sstevel@tonic-gate size_t chunksize; 3198*b5fca8f8Stomee kmem_cache_t *cp; 31997c478bd9Sstevel@tonic-gate kmem_magtype_t *mtp; 32007c478bd9Sstevel@tonic-gate size_t csize = KMEM_CACHE_SIZE(max_ncpus); 32017c478bd9Sstevel@tonic-gate 32027c478bd9Sstevel@tonic-gate #ifdef DEBUG 32037c478bd9Sstevel@tonic-gate /* 32047c478bd9Sstevel@tonic-gate * Cache names should conform to the rules for valid C identifiers 32057c478bd9Sstevel@tonic-gate */ 32067c478bd9Sstevel@tonic-gate if (!strident_valid(name)) { 32077c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, 32087c478bd9Sstevel@tonic-gate "kmem_cache_create: '%s' is an invalid cache name\n" 32097c478bd9Sstevel@tonic-gate "cache names must conform to the rules for " 32107c478bd9Sstevel@tonic-gate "C identifiers\n", name); 32117c478bd9Sstevel@tonic-gate } 32127c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 32137c478bd9Sstevel@tonic-gate 32147c478bd9Sstevel@tonic-gate if (vmp == NULL) 32157c478bd9Sstevel@tonic-gate vmp = kmem_default_arena; 32167c478bd9Sstevel@tonic-gate 32177c478bd9Sstevel@tonic-gate /* 32187c478bd9Sstevel@tonic-gate * If this kmem cache has an identifier vmem arena as its source, mark 32197c478bd9Sstevel@tonic-gate * it such to allow kmem_reap_idspace(). 32207c478bd9Sstevel@tonic-gate */ 32217c478bd9Sstevel@tonic-gate ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */ 32227c478bd9Sstevel@tonic-gate if (vmp->vm_cflags & VMC_IDENTIFIER) 32237c478bd9Sstevel@tonic-gate cflags |= KMC_IDENTIFIER; 32247c478bd9Sstevel@tonic-gate 32257c478bd9Sstevel@tonic-gate /* 32267c478bd9Sstevel@tonic-gate * Get a kmem_cache structure. We arrange that cp->cache_cpu[] 32277c478bd9Sstevel@tonic-gate * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent 32287c478bd9Sstevel@tonic-gate * false sharing of per-CPU data. 32297c478bd9Sstevel@tonic-gate */ 32307c478bd9Sstevel@tonic-gate cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE, 32317c478bd9Sstevel@tonic-gate P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP); 32327c478bd9Sstevel@tonic-gate bzero(cp, csize); 3233*b5fca8f8Stomee list_link_init(&cp->cache_link); 32347c478bd9Sstevel@tonic-gate 32357c478bd9Sstevel@tonic-gate if (align == 0) 32367c478bd9Sstevel@tonic-gate align = KMEM_ALIGN; 32377c478bd9Sstevel@tonic-gate 32387c478bd9Sstevel@tonic-gate /* 32397c478bd9Sstevel@tonic-gate * If we're not at least KMEM_ALIGN aligned, we can't use free 32407c478bd9Sstevel@tonic-gate * memory to hold bufctl information (because we can't safely 32417c478bd9Sstevel@tonic-gate * perform word loads and stores on it). 
32427c478bd9Sstevel@tonic-gate */ 32437c478bd9Sstevel@tonic-gate if (align < KMEM_ALIGN) 32447c478bd9Sstevel@tonic-gate cflags |= KMC_NOTOUCH; 32457c478bd9Sstevel@tonic-gate 32467c478bd9Sstevel@tonic-gate if ((align & (align - 1)) != 0 || align > vmp->vm_quantum) 32477c478bd9Sstevel@tonic-gate panic("kmem_cache_create: bad alignment %lu", align); 32487c478bd9Sstevel@tonic-gate 32497c478bd9Sstevel@tonic-gate mutex_enter(&kmem_flags_lock); 32507c478bd9Sstevel@tonic-gate if (kmem_flags & KMF_RANDOMIZE) 32517c478bd9Sstevel@tonic-gate kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) | 32527c478bd9Sstevel@tonic-gate KMF_RANDOMIZE; 32537c478bd9Sstevel@tonic-gate cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG; 32547c478bd9Sstevel@tonic-gate mutex_exit(&kmem_flags_lock); 32557c478bd9Sstevel@tonic-gate 32567c478bd9Sstevel@tonic-gate /* 32577c478bd9Sstevel@tonic-gate * Make sure all the various flags are reasonable. 32587c478bd9Sstevel@tonic-gate */ 32597c478bd9Sstevel@tonic-gate ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH)); 32607c478bd9Sstevel@tonic-gate 32617c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) { 32627c478bd9Sstevel@tonic-gate if (bufsize >= kmem_lite_minsize && 32637c478bd9Sstevel@tonic-gate align <= kmem_lite_maxalign && 32647c478bd9Sstevel@tonic-gate P2PHASE(bufsize, kmem_lite_maxalign) != 0) { 32657c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_BUFTAG; 32667c478bd9Sstevel@tonic-gate cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); 32677c478bd9Sstevel@tonic-gate } else { 32687c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_DEBUG; 32697c478bd9Sstevel@tonic-gate } 32707c478bd9Sstevel@tonic-gate } 32717c478bd9Sstevel@tonic-gate 32727c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) 32737c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_REDZONE; 32747c478bd9Sstevel@tonic-gate 32757c478bd9Sstevel@tonic-gate if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT)) 32767c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_NOMAGAZINE; 32777c478bd9Sstevel@tonic-gate 32787c478bd9Sstevel@tonic-gate if (cflags & KMC_NODEBUG) 32797c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_DEBUG; 32807c478bd9Sstevel@tonic-gate 32817c478bd9Sstevel@tonic-gate if (cflags & KMC_NOTOUCH) 32827c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_TOUCH; 32837c478bd9Sstevel@tonic-gate 32847c478bd9Sstevel@tonic-gate if (cflags & KMC_NOHASH) 32857c478bd9Sstevel@tonic-gate cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); 32867c478bd9Sstevel@tonic-gate 32877c478bd9Sstevel@tonic-gate if (cflags & KMC_NOMAGAZINE) 32887c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_NOMAGAZINE; 32897c478bd9Sstevel@tonic-gate 32907c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH)) 32917c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_REDZONE; 32927c478bd9Sstevel@tonic-gate 32937c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT)) 32947c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_CONTENTS; 32957c478bd9Sstevel@tonic-gate 32967c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall && 32977c478bd9Sstevel@tonic-gate !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH)) 32987c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_FIREWALL; 32997c478bd9Sstevel@tonic-gate 33007c478bd9Sstevel@tonic-gate if (vmp != kmem_default_arena || kmem_firewall_arena == NULL) 33017c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_FIREWALL; 33027c478bd9Sstevel@tonic-gate 33037c478bd9Sstevel@tonic-gate if (cp->cache_flags & 
KMF_FIREWALL) { 33047c478bd9Sstevel@tonic-gate cp->cache_flags &= ~KMF_BUFTAG; 33057c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_NOMAGAZINE; 33067c478bd9Sstevel@tonic-gate ASSERT(vmp == kmem_default_arena); 33077c478bd9Sstevel@tonic-gate vmp = kmem_firewall_arena; 33087c478bd9Sstevel@tonic-gate } 33097c478bd9Sstevel@tonic-gate 33107c478bd9Sstevel@tonic-gate /* 33117c478bd9Sstevel@tonic-gate * Set cache properties. 33127c478bd9Sstevel@tonic-gate */ 33137c478bd9Sstevel@tonic-gate (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN); 3314*b5fca8f8Stomee strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1); 33157c478bd9Sstevel@tonic-gate cp->cache_bufsize = bufsize; 33167c478bd9Sstevel@tonic-gate cp->cache_align = align; 33177c478bd9Sstevel@tonic-gate cp->cache_constructor = constructor; 33187c478bd9Sstevel@tonic-gate cp->cache_destructor = destructor; 33197c478bd9Sstevel@tonic-gate cp->cache_reclaim = reclaim; 33207c478bd9Sstevel@tonic-gate cp->cache_private = private; 33217c478bd9Sstevel@tonic-gate cp->cache_arena = vmp; 33227c478bd9Sstevel@tonic-gate cp->cache_cflags = cflags; 33237c478bd9Sstevel@tonic-gate 33247c478bd9Sstevel@tonic-gate /* 33257c478bd9Sstevel@tonic-gate * Determine the chunk size. 33267c478bd9Sstevel@tonic-gate */ 33277c478bd9Sstevel@tonic-gate chunksize = bufsize; 33287c478bd9Sstevel@tonic-gate 33297c478bd9Sstevel@tonic-gate if (align >= KMEM_ALIGN) { 33307c478bd9Sstevel@tonic-gate chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN); 33317c478bd9Sstevel@tonic-gate cp->cache_bufctl = chunksize - KMEM_ALIGN; 33327c478bd9Sstevel@tonic-gate } 33337c478bd9Sstevel@tonic-gate 33347c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) { 33357c478bd9Sstevel@tonic-gate cp->cache_bufctl = chunksize; 33367c478bd9Sstevel@tonic-gate cp->cache_buftag = chunksize; 33377c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) 33387c478bd9Sstevel@tonic-gate chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count); 33397c478bd9Sstevel@tonic-gate else 33407c478bd9Sstevel@tonic-gate chunksize += sizeof (kmem_buftag_t); 33417c478bd9Sstevel@tonic-gate } 33427c478bd9Sstevel@tonic-gate 33437c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_DEADBEEF) { 33447c478bd9Sstevel@tonic-gate cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify); 33457c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_LITE) 33467c478bd9Sstevel@tonic-gate cp->cache_verify = sizeof (uint64_t); 33477c478bd9Sstevel@tonic-gate } 33487c478bd9Sstevel@tonic-gate 33497c478bd9Sstevel@tonic-gate cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave); 33507c478bd9Sstevel@tonic-gate 33517c478bd9Sstevel@tonic-gate cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align); 33527c478bd9Sstevel@tonic-gate 33537c478bd9Sstevel@tonic-gate /* 33547c478bd9Sstevel@tonic-gate * Now that we know the chunk size, determine the optimal slab size. 
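 *
 * To illustrate the best-fit search below with assumed numbers (and with
 * KMEM_VOID_FRACTION taken to be 8): for chunksize = 1536 on an arena with
 * a 4K quantum, the loop evaluates a 4K slab (2 chunks, 512 bytes wasted
 * per chunk), an 8K slab (5 chunks, ~102 bytes wasted per chunk), and a
 * 12K slab (8 chunks, 0 bytes wasted), so the 12K slab wins.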
33557c478bd9Sstevel@tonic-gate */ 33567c478bd9Sstevel@tonic-gate if (vmp == kmem_firewall_arena) { 33577c478bd9Sstevel@tonic-gate cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum); 33587c478bd9Sstevel@tonic-gate cp->cache_mincolor = cp->cache_slabsize - chunksize; 33597c478bd9Sstevel@tonic-gate cp->cache_maxcolor = cp->cache_mincolor; 33607c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_HASH; 33617c478bd9Sstevel@tonic-gate ASSERT(!(cp->cache_flags & KMF_BUFTAG)); 33627c478bd9Sstevel@tonic-gate } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) && 33637c478bd9Sstevel@tonic-gate !(cp->cache_flags & KMF_AUDIT) && 33647c478bd9Sstevel@tonic-gate chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) { 33657c478bd9Sstevel@tonic-gate cp->cache_slabsize = vmp->vm_quantum; 33667c478bd9Sstevel@tonic-gate cp->cache_mincolor = 0; 33677c478bd9Sstevel@tonic-gate cp->cache_maxcolor = 33687c478bd9Sstevel@tonic-gate (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize; 33697c478bd9Sstevel@tonic-gate ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize); 33707c478bd9Sstevel@tonic-gate ASSERT(!(cp->cache_flags & KMF_AUDIT)); 33717c478bd9Sstevel@tonic-gate } else { 33727c478bd9Sstevel@tonic-gate size_t chunks, bestfit, waste, slabsize; 33737c478bd9Sstevel@tonic-gate size_t minwaste = LONG_MAX; 33747c478bd9Sstevel@tonic-gate 33757c478bd9Sstevel@tonic-gate for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) { 33767c478bd9Sstevel@tonic-gate slabsize = P2ROUNDUP(chunksize * chunks, 33777c478bd9Sstevel@tonic-gate vmp->vm_quantum); 33787c478bd9Sstevel@tonic-gate chunks = slabsize / chunksize; 33797c478bd9Sstevel@tonic-gate waste = (slabsize % chunksize) / chunks; 33807c478bd9Sstevel@tonic-gate if (waste < minwaste) { 33817c478bd9Sstevel@tonic-gate minwaste = waste; 33827c478bd9Sstevel@tonic-gate bestfit = slabsize; 33837c478bd9Sstevel@tonic-gate } 33847c478bd9Sstevel@tonic-gate } 33857c478bd9Sstevel@tonic-gate if (cflags & KMC_QCACHE) 33867c478bd9Sstevel@tonic-gate bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max); 33877c478bd9Sstevel@tonic-gate cp->cache_slabsize = bestfit; 33887c478bd9Sstevel@tonic-gate cp->cache_mincolor = 0; 33897c478bd9Sstevel@tonic-gate cp->cache_maxcolor = bestfit % chunksize; 33907c478bd9Sstevel@tonic-gate cp->cache_flags |= KMF_HASH; 33917c478bd9Sstevel@tonic-gate } 33927c478bd9Sstevel@tonic-gate 3393*b5fca8f8Stomee cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize); 3394*b5fca8f8Stomee cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1; 3395*b5fca8f8Stomee 33967c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 33977c478bd9Sstevel@tonic-gate ASSERT(!(cflags & KMC_NOHASH)); 33987c478bd9Sstevel@tonic-gate cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ? 33997c478bd9Sstevel@tonic-gate kmem_bufctl_audit_cache : kmem_bufctl_cache; 34007c478bd9Sstevel@tonic-gate } 34017c478bd9Sstevel@tonic-gate 34027c478bd9Sstevel@tonic-gate if (cp->cache_maxcolor >= vmp->vm_quantum) 34037c478bd9Sstevel@tonic-gate cp->cache_maxcolor = vmp->vm_quantum - 1; 34047c478bd9Sstevel@tonic-gate 34057c478bd9Sstevel@tonic-gate cp->cache_color = cp->cache_mincolor; 34067c478bd9Sstevel@tonic-gate 34077c478bd9Sstevel@tonic-gate /* 34087c478bd9Sstevel@tonic-gate * Initialize the rest of the slab layer. 
34097c478bd9Sstevel@tonic-gate */ 34107c478bd9Sstevel@tonic-gate mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL); 34117c478bd9Sstevel@tonic-gate 3412*b5fca8f8Stomee avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp, 3413*b5fca8f8Stomee sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link)); 3414*b5fca8f8Stomee /* LINTED: E_TRUE_LOGICAL_EXPR */ 3415*b5fca8f8Stomee ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t)); 3416*b5fca8f8Stomee /* reuse partial slab AVL linkage for complete slab list linkage */ 3417*b5fca8f8Stomee list_create(&cp->cache_complete_slabs, 3418*b5fca8f8Stomee sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link)); 34197c478bd9Sstevel@tonic-gate 34207c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) { 34217c478bd9Sstevel@tonic-gate cp->cache_hash_table = vmem_alloc(kmem_hash_arena, 34227c478bd9Sstevel@tonic-gate KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP); 34237c478bd9Sstevel@tonic-gate bzero(cp->cache_hash_table, 34247c478bd9Sstevel@tonic-gate KMEM_HASH_INITIAL * sizeof (void *)); 34257c478bd9Sstevel@tonic-gate cp->cache_hash_mask = KMEM_HASH_INITIAL - 1; 34267c478bd9Sstevel@tonic-gate cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1; 34277c478bd9Sstevel@tonic-gate } 34287c478bd9Sstevel@tonic-gate 34297c478bd9Sstevel@tonic-gate /* 34307c478bd9Sstevel@tonic-gate * Initialize the depot. 34317c478bd9Sstevel@tonic-gate */ 34327c478bd9Sstevel@tonic-gate mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL); 34337c478bd9Sstevel@tonic-gate 34347c478bd9Sstevel@tonic-gate for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++) 34357c478bd9Sstevel@tonic-gate continue; 34367c478bd9Sstevel@tonic-gate 34377c478bd9Sstevel@tonic-gate cp->cache_magtype = mtp; 34387c478bd9Sstevel@tonic-gate 34397c478bd9Sstevel@tonic-gate /* 34407c478bd9Sstevel@tonic-gate * Initialize the CPU layer. 34417c478bd9Sstevel@tonic-gate */ 34427c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) { 34437c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 34447c478bd9Sstevel@tonic-gate mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL); 34457c478bd9Sstevel@tonic-gate ccp->cc_flags = cp->cache_flags; 34467c478bd9Sstevel@tonic-gate ccp->cc_rounds = -1; 34477c478bd9Sstevel@tonic-gate ccp->cc_prounds = -1; 34487c478bd9Sstevel@tonic-gate } 34497c478bd9Sstevel@tonic-gate 34507c478bd9Sstevel@tonic-gate /* 34517c478bd9Sstevel@tonic-gate * Create the cache's kstats. 34527c478bd9Sstevel@tonic-gate */ 34537c478bd9Sstevel@tonic-gate if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name, 34547c478bd9Sstevel@tonic-gate "kmem_cache", KSTAT_TYPE_NAMED, 34557c478bd9Sstevel@tonic-gate sizeof (kmem_cache_kstat) / sizeof (kstat_named_t), 34567c478bd9Sstevel@tonic-gate KSTAT_FLAG_VIRTUAL)) != NULL) { 34577c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_data = &kmem_cache_kstat; 34587c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_update = kmem_cache_kstat_update; 34597c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_private = cp; 34607c478bd9Sstevel@tonic-gate cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock; 34617c478bd9Sstevel@tonic-gate kstat_install(cp->cache_kstat); 34627c478bd9Sstevel@tonic-gate } 34637c478bd9Sstevel@tonic-gate 34647c478bd9Sstevel@tonic-gate /* 34657c478bd9Sstevel@tonic-gate * Add the cache to the global list. This makes it visible 34667c478bd9Sstevel@tonic-gate * to kmem_update(), so the cache must be ready for business. 
34677c478bd9Sstevel@tonic-gate */ 34687c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock); 3469*b5fca8f8Stomee list_insert_tail(&kmem_caches, cp); 34707c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock); 34717c478bd9Sstevel@tonic-gate 34727c478bd9Sstevel@tonic-gate if (kmem_ready) 34737c478bd9Sstevel@tonic-gate kmem_cache_magazine_enable(cp); 34747c478bd9Sstevel@tonic-gate 3475*b5fca8f8Stomee if (kmem_move_taskq != NULL && cp->cache_destructor != NULL) { 3476*b5fca8f8Stomee (void) taskq_dispatch(kmem_move_taskq, 3477*b5fca8f8Stomee (task_func_t *)kmem_check_destructor, cp, 3478*b5fca8f8Stomee TQ_NOSLEEP); 3479*b5fca8f8Stomee } 3480*b5fca8f8Stomee 34817c478bd9Sstevel@tonic-gate return (cp); 34827c478bd9Sstevel@tonic-gate } 34837c478bd9Sstevel@tonic-gate 3484*b5fca8f8Stomee static int 3485*b5fca8f8Stomee kmem_move_cmp(const void *buf, const void *p) 3486*b5fca8f8Stomee { 3487*b5fca8f8Stomee const kmem_move_t *kmm = p; 3488*b5fca8f8Stomee uintptr_t v1 = (uintptr_t)buf; 3489*b5fca8f8Stomee uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf; 3490*b5fca8f8Stomee return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0)); 3491*b5fca8f8Stomee } 3492*b5fca8f8Stomee 3493*b5fca8f8Stomee static void 3494*b5fca8f8Stomee kmem_reset_reclaim_threshold(kmem_defrag_t *kmd) 3495*b5fca8f8Stomee { 3496*b5fca8f8Stomee kmd->kmd_reclaim_numer = 1; 3497*b5fca8f8Stomee } 3498*b5fca8f8Stomee 3499*b5fca8f8Stomee /* 3500*b5fca8f8Stomee * Initially, when choosing candidate slabs for buffers to move, we want to be 3501*b5fca8f8Stomee * very selective and take only slabs that are less than 3502*b5fca8f8Stomee * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate 3503*b5fca8f8Stomee * slabs, then we raise the allocation ceiling incrementally. The reclaim 3504*b5fca8f8Stomee * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no 3505*b5fca8f8Stomee * longer fragmented. 3506*b5fca8f8Stomee */ 3507*b5fca8f8Stomee static void 3508*b5fca8f8Stomee kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction) 3509*b5fca8f8Stomee { 3510*b5fca8f8Stomee if (direction > 0) { 3511*b5fca8f8Stomee /* make it easier to find a candidate slab */ 3512*b5fca8f8Stomee if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) { 3513*b5fca8f8Stomee kmd->kmd_reclaim_numer++; 3514*b5fca8f8Stomee } 3515*b5fca8f8Stomee } else { 3516*b5fca8f8Stomee /* be more selective */ 3517*b5fca8f8Stomee if (kmd->kmd_reclaim_numer > 1) { 3518*b5fca8f8Stomee kmd->kmd_reclaim_numer--; 3519*b5fca8f8Stomee } 3520*b5fca8f8Stomee } 3521*b5fca8f8Stomee } 3522*b5fca8f8Stomee 3523*b5fca8f8Stomee void 3524*b5fca8f8Stomee kmem_cache_set_move(kmem_cache_t *cp, 3525*b5fca8f8Stomee kmem_cbrc_t (*move)(void *, void *, size_t, void *)) 3526*b5fca8f8Stomee { 3527*b5fca8f8Stomee kmem_defrag_t *defrag; 3528*b5fca8f8Stomee 3529*b5fca8f8Stomee ASSERT(move != NULL); 3530*b5fca8f8Stomee /* 3531*b5fca8f8Stomee * The consolidator does not support NOTOUCH caches because kmem cannot 3532*b5fca8f8Stomee * initialize their slabs with the 0xbaddcafe memory pattern, which sets 3533*b5fca8f8Stomee * a low order bit usable by clients to distinguish uninitialized memory 3534*b5fca8f8Stomee * from known objects (see kmem_slab_create). 
3535*b5fca8f8Stomee */ 3536*b5fca8f8Stomee ASSERT(!(cp->cache_cflags & KMC_NOTOUCH)); 3537*b5fca8f8Stomee ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER)); 3538*b5fca8f8Stomee 3539*b5fca8f8Stomee /* 3540*b5fca8f8Stomee * We should not be holding anyone's cache lock when calling 3541*b5fca8f8Stomee * kmem_cache_alloc(), so allocate in all cases before acquiring the 3542*b5fca8f8Stomee * lock. 3543*b5fca8f8Stomee */ 3544*b5fca8f8Stomee defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP); 3545*b5fca8f8Stomee 3546*b5fca8f8Stomee mutex_enter(&cp->cache_lock); 3547*b5fca8f8Stomee 3548*b5fca8f8Stomee if (KMEM_IS_MOVABLE(cp)) { 3549*b5fca8f8Stomee if (cp->cache_move == NULL) { 3550*b5fca8f8Stomee /* 3551*b5fca8f8Stomee * The client must not have allocated any objects from 3552*b5fca8f8Stomee * this cache before setting a move callback function. 3553*b5fca8f8Stomee */ 3554*b5fca8f8Stomee ASSERT(cp->cache_bufmax == 0); 3555*b5fca8f8Stomee 3556*b5fca8f8Stomee cp->cache_defrag = defrag; 3557*b5fca8f8Stomee defrag = NULL; /* nothing to free */ 3558*b5fca8f8Stomee bzero(cp->cache_defrag, sizeof (kmem_defrag_t)); 3559*b5fca8f8Stomee avl_create(&cp->cache_defrag->kmd_moves_pending, 3560*b5fca8f8Stomee kmem_move_cmp, sizeof (kmem_move_t), 3561*b5fca8f8Stomee offsetof(kmem_move_t, kmm_entry)); 3562*b5fca8f8Stomee /* LINTED: E_TRUE_LOGICAL_EXPR */ 3563*b5fca8f8Stomee ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t)); 3564*b5fca8f8Stomee /* reuse the slab's AVL linkage for deadlist linkage */ 3565*b5fca8f8Stomee list_create(&cp->cache_defrag->kmd_deadlist, 3566*b5fca8f8Stomee sizeof (kmem_slab_t), 3567*b5fca8f8Stomee offsetof(kmem_slab_t, slab_link)); 3568*b5fca8f8Stomee kmem_reset_reclaim_threshold(cp->cache_defrag); 3569*b5fca8f8Stomee } 3570*b5fca8f8Stomee cp->cache_move = move; 3571*b5fca8f8Stomee } 3572*b5fca8f8Stomee 3573*b5fca8f8Stomee mutex_exit(&cp->cache_lock); 3574*b5fca8f8Stomee 3575*b5fca8f8Stomee if (defrag != NULL) { 3576*b5fca8f8Stomee kmem_cache_free(kmem_defrag_cache, defrag); /* unused */ 3577*b5fca8f8Stomee } 3578*b5fca8f8Stomee } 3579*b5fca8f8Stomee 35807c478bd9Sstevel@tonic-gate void 35817c478bd9Sstevel@tonic-gate kmem_cache_destroy(kmem_cache_t *cp) 35827c478bd9Sstevel@tonic-gate { 35837c478bd9Sstevel@tonic-gate int cpu_seqid; 35847c478bd9Sstevel@tonic-gate 35857c478bd9Sstevel@tonic-gate /* 35867c478bd9Sstevel@tonic-gate * Remove the cache from the global cache list so that no one else 35877c478bd9Sstevel@tonic-gate * can schedule tasks on its behalf, wait for any pending tasks to 35887c478bd9Sstevel@tonic-gate * complete, purge the cache, and then destroy it. 
35897c478bd9Sstevel@tonic-gate */ 35907c478bd9Sstevel@tonic-gate mutex_enter(&kmem_cache_lock); 3591*b5fca8f8Stomee list_remove(&kmem_caches, cp); 35927c478bd9Sstevel@tonic-gate mutex_exit(&kmem_cache_lock); 35937c478bd9Sstevel@tonic-gate 35947c478bd9Sstevel@tonic-gate if (kmem_taskq != NULL) 35957c478bd9Sstevel@tonic-gate taskq_wait(kmem_taskq); 3596*b5fca8f8Stomee if (kmem_move_taskq != NULL) 3597*b5fca8f8Stomee taskq_wait(kmem_move_taskq); 35987c478bd9Sstevel@tonic-gate 35997c478bd9Sstevel@tonic-gate kmem_cache_magazine_purge(cp); 36007c478bd9Sstevel@tonic-gate 36017c478bd9Sstevel@tonic-gate mutex_enter(&cp->cache_lock); 36027c478bd9Sstevel@tonic-gate if (cp->cache_buftotal != 0) 36037c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty", 36047c478bd9Sstevel@tonic-gate cp->cache_name, (void *)cp); 3605*b5fca8f8Stomee if (cp->cache_defrag != NULL) { 3606*b5fca8f8Stomee avl_destroy(&cp->cache_defrag->kmd_moves_pending); 3607*b5fca8f8Stomee list_destroy(&cp->cache_defrag->kmd_deadlist); 3608*b5fca8f8Stomee kmem_cache_free(kmem_defrag_cache, cp->cache_defrag); 3609*b5fca8f8Stomee cp->cache_defrag = NULL; 3610*b5fca8f8Stomee } 36117c478bd9Sstevel@tonic-gate /* 3612*b5fca8f8Stomee * The cache is now dead. There should be no further activity. We 3613*b5fca8f8Stomee * enforce this by setting land mines in the constructor, destructor, 3614*b5fca8f8Stomee * reclaim, and move routines that induce a kernel text fault if 3615*b5fca8f8Stomee * invoked. 36167c478bd9Sstevel@tonic-gate */ 36177c478bd9Sstevel@tonic-gate cp->cache_constructor = (int (*)(void *, void *, int))1; 36187c478bd9Sstevel@tonic-gate cp->cache_destructor = (void (*)(void *, void *))2; 3619*b5fca8f8Stomee cp->cache_reclaim = (void (*)(void *))3; 3620*b5fca8f8Stomee cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4; 36217c478bd9Sstevel@tonic-gate mutex_exit(&cp->cache_lock); 36227c478bd9Sstevel@tonic-gate 36237c478bd9Sstevel@tonic-gate kstat_delete(cp->cache_kstat); 36247c478bd9Sstevel@tonic-gate 36257c478bd9Sstevel@tonic-gate if (cp->cache_hash_table != NULL) 36267c478bd9Sstevel@tonic-gate vmem_free(kmem_hash_arena, cp->cache_hash_table, 36277c478bd9Sstevel@tonic-gate (cp->cache_hash_mask + 1) * sizeof (void *)); 36287c478bd9Sstevel@tonic-gate 36297c478bd9Sstevel@tonic-gate for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) 36307c478bd9Sstevel@tonic-gate mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock); 36317c478bd9Sstevel@tonic-gate 36327c478bd9Sstevel@tonic-gate mutex_destroy(&cp->cache_depot_lock); 36337c478bd9Sstevel@tonic-gate mutex_destroy(&cp->cache_lock); 36347c478bd9Sstevel@tonic-gate 36357c478bd9Sstevel@tonic-gate vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus)); 36367c478bd9Sstevel@tonic-gate } 36377c478bd9Sstevel@tonic-gate 36387c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 36397c478bd9Sstevel@tonic-gate static int 36407c478bd9Sstevel@tonic-gate kmem_cpu_setup(cpu_setup_t what, int id, void *arg) 36417c478bd9Sstevel@tonic-gate { 36427c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 36437c478bd9Sstevel@tonic-gate if (what == CPU_UNCONFIG) { 36447c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_magazine_purge, 36457c478bd9Sstevel@tonic-gate kmem_taskq, TQ_SLEEP); 36467c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_magazine_enable, 36477c478bd9Sstevel@tonic-gate kmem_taskq, TQ_SLEEP); 36487c478bd9Sstevel@tonic-gate } 36497c478bd9Sstevel@tonic-gate return (0); 36507c478bd9Sstevel@tonic-gate } 36517c478bd9Sstevel@tonic-gate 
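/*
 * Illustrative client usage of kmem_cache_create() and kmem_cache_set_move()
 * (a hedged sketch with hypothetical names, not code from any actual
 * consumer). A client that wants the consolidator's help creates its cache
 * as usual, then registers a move callback before allocating any objects:
 *
 *	static kmem_cbrc_t
 *	my_obj_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		my_obj_t *op = old;
 *
 *		if (!my_obj_is_known(op))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (my_obj_is_held(op))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		my_obj_reparent(new);	/. fix up pointers into the object ./
 *		return (KMEM_CBRC_YES);
 *	}
 *
 *	my_obj_cache = kmem_cache_create("my_obj_cache", sizeof (my_obj_t),
 *	    0, my_obj_constructor, my_obj_destructor, NULL, NULL, NULL, 0);
 *	kmem_cache_set_move(my_obj_cache, my_obj_move);
 *
 * kmem_cache_set_move() asserts that no objects have been handed out yet
 * (cache_bufmax == 0), so the callback must be registered immediately after
 * cache creation.
 */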
36527c478bd9Sstevel@tonic-gate static void 36537c478bd9Sstevel@tonic-gate kmem_cache_init(int pass, int use_large_pages) 36547c478bd9Sstevel@tonic-gate { 36557c478bd9Sstevel@tonic-gate int i; 36567c478bd9Sstevel@tonic-gate size_t size; 36577c478bd9Sstevel@tonic-gate kmem_cache_t *cp; 36587c478bd9Sstevel@tonic-gate kmem_magtype_t *mtp; 36597c478bd9Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1]; 36607c478bd9Sstevel@tonic-gate 36617c478bd9Sstevel@tonic-gate for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) { 36627c478bd9Sstevel@tonic-gate mtp = &kmem_magtype[i]; 36637c478bd9Sstevel@tonic-gate (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize); 36647c478bd9Sstevel@tonic-gate mtp->mt_cache = kmem_cache_create(name, 36657c478bd9Sstevel@tonic-gate (mtp->mt_magsize + 1) * sizeof (void *), 36667c478bd9Sstevel@tonic-gate mtp->mt_align, NULL, NULL, NULL, NULL, 36677c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH); 36687c478bd9Sstevel@tonic-gate } 36697c478bd9Sstevel@tonic-gate 36707c478bd9Sstevel@tonic-gate kmem_slab_cache = kmem_cache_create("kmem_slab_cache", 36717c478bd9Sstevel@tonic-gate sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL, 36727c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH); 36737c478bd9Sstevel@tonic-gate 36747c478bd9Sstevel@tonic-gate kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache", 36757c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL, 36767c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH); 36777c478bd9Sstevel@tonic-gate 36787c478bd9Sstevel@tonic-gate kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache", 36797c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL, 36807c478bd9Sstevel@tonic-gate kmem_msb_arena, KMC_NOHASH); 36817c478bd9Sstevel@tonic-gate 36827c478bd9Sstevel@tonic-gate if (pass == 2) { 36837c478bd9Sstevel@tonic-gate kmem_va_arena = vmem_create("kmem_va", 36847c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE, 36857c478bd9Sstevel@tonic-gate vmem_alloc, vmem_free, heap_arena, 36867c478bd9Sstevel@tonic-gate 8 * PAGESIZE, VM_SLEEP); 36877c478bd9Sstevel@tonic-gate 36887c478bd9Sstevel@tonic-gate if (use_large_pages) { 36897c478bd9Sstevel@tonic-gate kmem_default_arena = vmem_xcreate("kmem_default", 36907c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE, 36917c478bd9Sstevel@tonic-gate segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena, 36927c478bd9Sstevel@tonic-gate 0, VM_SLEEP); 36937c478bd9Sstevel@tonic-gate } else { 36947c478bd9Sstevel@tonic-gate kmem_default_arena = vmem_create("kmem_default", 36957c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE, 36967c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_va_arena, 36977c478bd9Sstevel@tonic-gate 0, VM_SLEEP); 36987c478bd9Sstevel@tonic-gate } 36997c478bd9Sstevel@tonic-gate } else { 37007c478bd9Sstevel@tonic-gate /* 37017c478bd9Sstevel@tonic-gate * During the first pass, the kmem_alloc_* caches 37027c478bd9Sstevel@tonic-gate * are treated as metadata. 
37037c478bd9Sstevel@tonic-gate */ 37047c478bd9Sstevel@tonic-gate kmem_default_arena = kmem_msb_arena; 37057c478bd9Sstevel@tonic-gate } 37067c478bd9Sstevel@tonic-gate 37077c478bd9Sstevel@tonic-gate /* 37087c478bd9Sstevel@tonic-gate * Set up the default caches to back kmem_alloc() 37097c478bd9Sstevel@tonic-gate */ 37107c478bd9Sstevel@tonic-gate size = KMEM_ALIGN; 37117c478bd9Sstevel@tonic-gate for (i = 0; i < sizeof (kmem_alloc_sizes) / sizeof (int); i++) { 37127c478bd9Sstevel@tonic-gate size_t align = KMEM_ALIGN; 37137c478bd9Sstevel@tonic-gate size_t cache_size = kmem_alloc_sizes[i]; 37147c478bd9Sstevel@tonic-gate /* 37157c478bd9Sstevel@tonic-gate * If they allocate a multiple of the coherency granularity, 37167c478bd9Sstevel@tonic-gate * they get a coherency-granularity-aligned address. 37177c478bd9Sstevel@tonic-gate */ 37187c478bd9Sstevel@tonic-gate if (IS_P2ALIGNED(cache_size, 64)) 37197c478bd9Sstevel@tonic-gate align = 64; 37207c478bd9Sstevel@tonic-gate if (IS_P2ALIGNED(cache_size, PAGESIZE)) 37217c478bd9Sstevel@tonic-gate align = PAGESIZE; 37227c478bd9Sstevel@tonic-gate (void) sprintf(name, "kmem_alloc_%lu", cache_size); 37237c478bd9Sstevel@tonic-gate cp = kmem_cache_create(name, cache_size, align, 37247c478bd9Sstevel@tonic-gate NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC); 37257c478bd9Sstevel@tonic-gate while (size <= cache_size) { 37267c478bd9Sstevel@tonic-gate kmem_alloc_table[(size - 1) >> KMEM_ALIGN_SHIFT] = cp; 37277c478bd9Sstevel@tonic-gate size += KMEM_ALIGN; 37287c478bd9Sstevel@tonic-gate } 37297c478bd9Sstevel@tonic-gate } 37307c478bd9Sstevel@tonic-gate } 37317c478bd9Sstevel@tonic-gate 37327c478bd9Sstevel@tonic-gate void 37337c478bd9Sstevel@tonic-gate kmem_init(void) 37347c478bd9Sstevel@tonic-gate { 37357c478bd9Sstevel@tonic-gate kmem_cache_t *cp; 37367c478bd9Sstevel@tonic-gate int old_kmem_flags = kmem_flags; 37377c478bd9Sstevel@tonic-gate int use_large_pages = 0; 37387c478bd9Sstevel@tonic-gate size_t maxverify, minfirewall; 37397c478bd9Sstevel@tonic-gate 37407c478bd9Sstevel@tonic-gate kstat_init(); 37417c478bd9Sstevel@tonic-gate 37427c478bd9Sstevel@tonic-gate /* 37437c478bd9Sstevel@tonic-gate * Small-memory systems (< 24 MB) can't handle kmem_flags overhead. 37447c478bd9Sstevel@tonic-gate */ 37457c478bd9Sstevel@tonic-gate if (physmem < btop(24 << 20) && !(old_kmem_flags & KMF_STICKY)) 37467c478bd9Sstevel@tonic-gate kmem_flags = 0; 37477c478bd9Sstevel@tonic-gate 37487c478bd9Sstevel@tonic-gate /* 37497c478bd9Sstevel@tonic-gate * Don't do firewalled allocations if the heap is less than 1TB 37507c478bd9Sstevel@tonic-gate * (i.e. on a 32-bit kernel) 37517c478bd9Sstevel@tonic-gate * The resulting VM_NEXTFIT allocations would create too much 37527c478bd9Sstevel@tonic-gate * fragmentation in a small heap. 
37537c478bd9Sstevel@tonic-gate */ 37547c478bd9Sstevel@tonic-gate #if defined(_LP64) 37557c478bd9Sstevel@tonic-gate maxverify = minfirewall = PAGESIZE / 2; 37567c478bd9Sstevel@tonic-gate #else 37577c478bd9Sstevel@tonic-gate maxverify = minfirewall = ULONG_MAX; 37587c478bd9Sstevel@tonic-gate #endif 37597c478bd9Sstevel@tonic-gate 37607c478bd9Sstevel@tonic-gate /* LINTED */ 37617c478bd9Sstevel@tonic-gate ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE); 37627c478bd9Sstevel@tonic-gate 3763*b5fca8f8Stomee list_create(&kmem_caches, sizeof (kmem_cache_t), 3764*b5fca8f8Stomee offsetof(kmem_cache_t, cache_link)); 37657c478bd9Sstevel@tonic-gate 37667c478bd9Sstevel@tonic-gate kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE, 37677c478bd9Sstevel@tonic-gate vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, 37687c478bd9Sstevel@tonic-gate VM_SLEEP | VMC_NO_QCACHE); 37697c478bd9Sstevel@tonic-gate 37707c478bd9Sstevel@tonic-gate kmem_msb_arena = vmem_create("kmem_msb", NULL, 0, 37717c478bd9Sstevel@tonic-gate PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, 37727c478bd9Sstevel@tonic-gate VM_SLEEP); 37737c478bd9Sstevel@tonic-gate 37747c478bd9Sstevel@tonic-gate kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN, 37757c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP); 37767c478bd9Sstevel@tonic-gate 37777c478bd9Sstevel@tonic-gate kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN, 37787c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP); 37797c478bd9Sstevel@tonic-gate 37807c478bd9Sstevel@tonic-gate kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN, 37817c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP); 37827c478bd9Sstevel@tonic-gate 37837c478bd9Sstevel@tonic-gate kmem_firewall_va_arena = vmem_create("kmem_firewall_va", 37847c478bd9Sstevel@tonic-gate NULL, 0, PAGESIZE, 37857c478bd9Sstevel@tonic-gate kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena, 37867c478bd9Sstevel@tonic-gate 0, VM_SLEEP); 37877c478bd9Sstevel@tonic-gate 37887c478bd9Sstevel@tonic-gate kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE, 37897c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0, VM_SLEEP); 37907c478bd9Sstevel@tonic-gate 37917c478bd9Sstevel@tonic-gate /* temporary oversize arena for mod_read_system_file */ 37927c478bd9Sstevel@tonic-gate kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE, 37937c478bd9Sstevel@tonic-gate segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP); 37947c478bd9Sstevel@tonic-gate 37957c478bd9Sstevel@tonic-gate kmem_reap_interval = 15 * hz; 37967c478bd9Sstevel@tonic-gate 37977c478bd9Sstevel@tonic-gate /* 37987c478bd9Sstevel@tonic-gate * Read /etc/system. This is a chicken-and-egg problem because 37997c478bd9Sstevel@tonic-gate * kmem_flags may be set in /etc/system, but mod_read_system_file() 38007c478bd9Sstevel@tonic-gate * needs to use the allocator. The simplest solution is to create 38017c478bd9Sstevel@tonic-gate * all the standard kmem caches, read /etc/system, destroy all the 38027c478bd9Sstevel@tonic-gate * caches we just created, and then create them all again in light 38037c478bd9Sstevel@tonic-gate * of the (possibly) new kmem_flags and other kmem tunables. 
38047c478bd9Sstevel@tonic-gate 	 */
38057c478bd9Sstevel@tonic-gate 	kmem_cache_init(1, 0);
38067c478bd9Sstevel@tonic-gate 
38077c478bd9Sstevel@tonic-gate 	mod_read_system_file(boothowto & RB_ASKNAME);
38087c478bd9Sstevel@tonic-gate 
3809*b5fca8f8Stomee 	while ((cp = list_tail(&kmem_caches)) != NULL)
38107c478bd9Sstevel@tonic-gate 		kmem_cache_destroy(cp);
38117c478bd9Sstevel@tonic-gate 
38127c478bd9Sstevel@tonic-gate 	vmem_destroy(kmem_oversize_arena);
38137c478bd9Sstevel@tonic-gate 
38147c478bd9Sstevel@tonic-gate 	if (old_kmem_flags & KMF_STICKY)
38157c478bd9Sstevel@tonic-gate 		kmem_flags = old_kmem_flags;
38167c478bd9Sstevel@tonic-gate 
38177c478bd9Sstevel@tonic-gate 	if (!(kmem_flags & KMF_AUDIT))
38187c478bd9Sstevel@tonic-gate 		vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
38197c478bd9Sstevel@tonic-gate 
38207c478bd9Sstevel@tonic-gate 	if (kmem_maxverify == 0)
38217c478bd9Sstevel@tonic-gate 		kmem_maxverify = maxverify;
38227c478bd9Sstevel@tonic-gate 
38237c478bd9Sstevel@tonic-gate 	if (kmem_minfirewall == 0)
38247c478bd9Sstevel@tonic-gate 		kmem_minfirewall = minfirewall;
38257c478bd9Sstevel@tonic-gate 
38267c478bd9Sstevel@tonic-gate 	/*
38277c478bd9Sstevel@tonic-gate 	 * give segkmem a chance to figure out if we are using large pages
38287c478bd9Sstevel@tonic-gate 	 * for the kernel heap
38297c478bd9Sstevel@tonic-gate 	 */
38307c478bd9Sstevel@tonic-gate 	use_large_pages = segkmem_lpsetup();
38317c478bd9Sstevel@tonic-gate 
38327c478bd9Sstevel@tonic-gate 	/*
38337c478bd9Sstevel@tonic-gate 	 * To protect against corruption, we keep the actual number of callers
38347c478bd9Sstevel@tonic-gate 	 * KMF_LITE records separate from the tunable. We arbitrarily clamp
38357c478bd9Sstevel@tonic-gate 	 * to 16, since the overhead for small buffers quickly gets out of
38367c478bd9Sstevel@tonic-gate 	 * hand.
38377c478bd9Sstevel@tonic-gate 	 *
38387c478bd9Sstevel@tonic-gate 	 * The real limit would depend on the needs of the largest KMC_NOHASH
38397c478bd9Sstevel@tonic-gate 	 * cache.
38407c478bd9Sstevel@tonic-gate 	 */
38417c478bd9Sstevel@tonic-gate 	kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
38427c478bd9Sstevel@tonic-gate 	kmem_lite_pcs = kmem_lite_count;
38437c478bd9Sstevel@tonic-gate 
38447c478bd9Sstevel@tonic-gate 	/*
38457c478bd9Sstevel@tonic-gate 	 * Normally, we firewall oversized allocations when possible, but
38467c478bd9Sstevel@tonic-gate 	 * if we are using large pages for kernel memory, and we don't have
38477c478bd9Sstevel@tonic-gate 	 * any non-LITE debugging flags set, we want to allocate oversized
38487c478bd9Sstevel@tonic-gate 	 * buffers from large pages, and so skip the firewalling.
38497c478bd9Sstevel@tonic-gate 	 */
38507c478bd9Sstevel@tonic-gate 	if (use_large_pages &&
38517c478bd9Sstevel@tonic-gate 	    ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
38527c478bd9Sstevel@tonic-gate 		kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
38537c478bd9Sstevel@tonic-gate 		    PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
38547c478bd9Sstevel@tonic-gate 		    0, VM_SLEEP);
38557c478bd9Sstevel@tonic-gate 	} else {
38567c478bd9Sstevel@tonic-gate 		kmem_oversize_arena = vmem_create("kmem_oversize",
38577c478bd9Sstevel@tonic-gate 		    NULL, 0, PAGESIZE,
38587c478bd9Sstevel@tonic-gate 		    segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
38597c478bd9Sstevel@tonic-gate kmem_firewall_va_arena : heap_arena, 0, VM_SLEEP); 38607c478bd9Sstevel@tonic-gate } 38617c478bd9Sstevel@tonic-gate 38627c478bd9Sstevel@tonic-gate kmem_cache_init(2, use_large_pages); 38637c478bd9Sstevel@tonic-gate 38647c478bd9Sstevel@tonic-gate if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) { 38657c478bd9Sstevel@tonic-gate if (kmem_transaction_log_size == 0) 38667c478bd9Sstevel@tonic-gate kmem_transaction_log_size = kmem_maxavail() / 50; 38677c478bd9Sstevel@tonic-gate kmem_transaction_log = kmem_log_init(kmem_transaction_log_size); 38687c478bd9Sstevel@tonic-gate } 38697c478bd9Sstevel@tonic-gate 38707c478bd9Sstevel@tonic-gate if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) { 38717c478bd9Sstevel@tonic-gate if (kmem_content_log_size == 0) 38727c478bd9Sstevel@tonic-gate kmem_content_log_size = kmem_maxavail() / 50; 38737c478bd9Sstevel@tonic-gate kmem_content_log = kmem_log_init(kmem_content_log_size); 38747c478bd9Sstevel@tonic-gate } 38757c478bd9Sstevel@tonic-gate 38767c478bd9Sstevel@tonic-gate kmem_failure_log = kmem_log_init(kmem_failure_log_size); 38777c478bd9Sstevel@tonic-gate 38787c478bd9Sstevel@tonic-gate kmem_slab_log = kmem_log_init(kmem_slab_log_size); 38797c478bd9Sstevel@tonic-gate 38807c478bd9Sstevel@tonic-gate /* 38817c478bd9Sstevel@tonic-gate * Initialize STREAMS message caches so allocb() is available. 38827c478bd9Sstevel@tonic-gate * This allows us to initialize the logging framework (cmn_err(9F), 38837c478bd9Sstevel@tonic-gate * strlog(9F), etc) so we can start recording messages. 38847c478bd9Sstevel@tonic-gate */ 38857c478bd9Sstevel@tonic-gate streams_msg_init(); 38867d692464Sdp 38877c478bd9Sstevel@tonic-gate /* 38887c478bd9Sstevel@tonic-gate * Initialize the ZSD framework in Zones so modules loaded henceforth 38897c478bd9Sstevel@tonic-gate * can register their callbacks. 38907c478bd9Sstevel@tonic-gate */ 38917c478bd9Sstevel@tonic-gate zone_zsd_init(); 3892f4b3ec61Sdh 38937c478bd9Sstevel@tonic-gate log_init(); 38947c478bd9Sstevel@tonic-gate taskq_init(); 38957c478bd9Sstevel@tonic-gate 38967d692464Sdp /* 38977d692464Sdp * Warn about invalid or dangerous values of kmem_flags. 38987d692464Sdp * Always warn about unsupported values. 38997d692464Sdp */ 39007d692464Sdp if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | 39017d692464Sdp KMF_CONTENTS | KMF_LITE)) != 0) || 39027d692464Sdp ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE)) 39037d692464Sdp cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. " 39047d692464Sdp "See the Solaris Tunable Parameters Reference Manual.", 39057d692464Sdp kmem_flags); 39067d692464Sdp 39077d692464Sdp #ifdef DEBUG 39087d692464Sdp if ((kmem_flags & KMF_DEBUG) == 0) 39097d692464Sdp cmn_err(CE_NOTE, "kmem debugging disabled."); 39107d692464Sdp #else 39117d692464Sdp /* 39127d692464Sdp * For non-debug kernels, the only "normal" flags are 0, KMF_LITE, 39137d692464Sdp * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled 39147d692464Sdp * if KMF_AUDIT is set). We should warn the user about the performance 39157d692464Sdp * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE 39167d692464Sdp * isn't set (since that disables AUDIT). 39177d692464Sdp */ 39187d692464Sdp if (!(kmem_flags & KMF_LITE) && 39197d692464Sdp (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0) 39207d692464Sdp cmn_err(CE_WARN, "High-overhead kmem debugging features " 39217d692464Sdp "enabled (kmem_flags = 0x%x). Performance degradation " 39227d692464Sdp "and large memory overhead possible. 
See the Solaris " 39237d692464Sdp "Tunable Parameters Reference Manual.", kmem_flags); 39247d692464Sdp #endif /* not DEBUG */ 39257d692464Sdp 39267c478bd9Sstevel@tonic-gate kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP); 39277c478bd9Sstevel@tonic-gate 39287c478bd9Sstevel@tonic-gate kmem_ready = 1; 39297c478bd9Sstevel@tonic-gate 39307c478bd9Sstevel@tonic-gate /* 39317c478bd9Sstevel@tonic-gate * Initialize the platform-specific aligned/DMA memory allocator. 39327c478bd9Sstevel@tonic-gate */ 39337c478bd9Sstevel@tonic-gate ka_init(); 39347c478bd9Sstevel@tonic-gate 39357c478bd9Sstevel@tonic-gate /* 39367c478bd9Sstevel@tonic-gate * Initialize 32-bit ID cache. 39377c478bd9Sstevel@tonic-gate */ 39387c478bd9Sstevel@tonic-gate id32_init(); 3939f4b3ec61Sdh 3940f4b3ec61Sdh /* 3941f4b3ec61Sdh * Initialize the networking stack so modules loaded can 3942f4b3ec61Sdh * register their callbacks. 3943f4b3ec61Sdh */ 3944f4b3ec61Sdh netstack_init(); 39457c478bd9Sstevel@tonic-gate } 39467c478bd9Sstevel@tonic-gate 3947*b5fca8f8Stomee static void 3948*b5fca8f8Stomee kmem_move_init(void) 3949*b5fca8f8Stomee { 3950*b5fca8f8Stomee kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache", 3951*b5fca8f8Stomee sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL, 3952*b5fca8f8Stomee kmem_msb_arena, KMC_NOHASH); 3953*b5fca8f8Stomee kmem_move_cache = kmem_cache_create("kmem_move_cache", 3954*b5fca8f8Stomee sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL, 3955*b5fca8f8Stomee kmem_msb_arena, KMC_NOHASH); 3956*b5fca8f8Stomee 3957*b5fca8f8Stomee /* 3958*b5fca8f8Stomee * kmem guarantees that move callbacks are sequential and that even 3959*b5fca8f8Stomee * across multiple caches no two moves ever execute simultaneously. 3960*b5fca8f8Stomee * Move callbacks are processed on a separate taskq so that client code 3961*b5fca8f8Stomee * does not interfere with internal maintenance tasks. 3962*b5fca8f8Stomee */ 3963*b5fca8f8Stomee kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1, 3964*b5fca8f8Stomee minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE); 3965*b5fca8f8Stomee } 3966*b5fca8f8Stomee 39677c478bd9Sstevel@tonic-gate void 39687c478bd9Sstevel@tonic-gate kmem_thread_init(void) 39697c478bd9Sstevel@tonic-gate { 3970*b5fca8f8Stomee kmem_move_init(); 39717c478bd9Sstevel@tonic-gate kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri, 39727c478bd9Sstevel@tonic-gate 300, INT_MAX, TASKQ_PREPOPULATE); 3973*b5fca8f8Stomee kmem_cache_applyall(kmem_check_destructor, kmem_move_taskq, 3974*b5fca8f8Stomee TQ_NOSLEEP); 39757c478bd9Sstevel@tonic-gate } 39767c478bd9Sstevel@tonic-gate 39777c478bd9Sstevel@tonic-gate void 39787c478bd9Sstevel@tonic-gate kmem_mp_init(void) 39797c478bd9Sstevel@tonic-gate { 39807c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock); 39817c478bd9Sstevel@tonic-gate register_cpu_setup_func(kmem_cpu_setup, NULL); 39827c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock); 39837c478bd9Sstevel@tonic-gate 39847c478bd9Sstevel@tonic-gate kmem_update_timeout(NULL); 39857c478bd9Sstevel@tonic-gate } 3986*b5fca8f8Stomee 3987*b5fca8f8Stomee /* 3988*b5fca8f8Stomee * Return the slab of the allocated buffer, or NULL if the buffer is not 3989*b5fca8f8Stomee * allocated. This function may be called with a known slab address to determine 3990*b5fca8f8Stomee * whether or not the buffer is allocated, or with a NULL slab address to obtain 3991*b5fca8f8Stomee * an allocated buffer's slab. 
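 *
 * Note the two lookup strategies below: for hashed caches (KMF_HASH) the
 * buffer is allocated iff a bufctl for it appears on its hash chain,
 * whereas for unhashed caches the buffer is allocated iff its bufctl does
 * NOT appear on the slab's freelist (slab_head); hence the inverted NULL
 * tests in the two return statements.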
/*
 * Return the slab of the allocated buffer, or NULL if the buffer is not
 * allocated. This function may be called with a known slab address to
 * determine whether or not the buffer is allocated, or with a NULL slab
 * address to obtain an allocated buffer's slab.
 */
static kmem_slab_t *
kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
{
	kmem_bufctl_t *bcp, *bufbcp;

	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));

	if (cp->cache_flags & KMF_HASH) {
		for (bcp = *KMEM_HASH(cp, buf);
		    (bcp != NULL) && (bcp->bc_addr != buf);
		    bcp = bcp->bc_next) {
			continue;
		}
		ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
		return (bcp == NULL ? NULL : bcp->bc_slab);
	}

	if (sp == NULL) {
		sp = KMEM_SLAB(cp, buf);
	}
	bufbcp = KMEM_BUFCTL(cp, buf);
	for (bcp = sp->slab_head;
	    (bcp != NULL) && (bcp != bufbcp);
	    bcp = bcp->bc_next) {
		continue;
	}
	return (bcp == NULL ? sp : NULL);
}

static boolean_t
kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
{
	long refcnt;

	ASSERT(cp->cache_defrag != NULL);

	/* If we're desperate, we don't care if the client said NO. */
	refcnt = sp->slab_refcnt;
	if (flags & KMM_DESPERATE) {
		return (refcnt < sp->slab_chunks);	/* any partial */
	}

	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
		return (B_FALSE);
	}

	if (kmem_move_any_partial) {
		return (refcnt < sp->slab_chunks);
	}

	if ((refcnt == 1) && (refcnt < sp->slab_chunks)) {
		return (B_TRUE);
	}

	/*
	 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
	 * slabs with a progressively higher percentage of used buffers can be
	 * reclaimed until the cache as a whole is no longer fragmented.
	 *
	 *	sp->slab_refcnt	  kmd_reclaim_numer
	 *	--------------- < ------------------
	 *	sp->slab_chunks	  KMEM_VOID_FRACTION
	 */
	return ((refcnt * KMEM_VOID_FRACTION) <
	    (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
}
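/*
 * A worked example of the threshold above, assuming KMEM_VOID_FRACTION is 8
 * and kmd_reclaim_numer starts at 1 (see kmem_reset_reclaim_threshold()):
 * a 16-chunk slab is then reclaimable only while refcnt * 8 < 16 * 1, i.e.
 * refcnt < 2, so only nearly empty slabs qualify. Each time the scan has
 * trouble finding candidates, kmem_adjust_reclaim_threshold() raises
 * kmd_reclaim_numer; at 4, the same slab qualifies while refcnt < 8, i.e.
 * while it is less than half full.
 */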
static void *
kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
    void *tbuf)
{
	int i;		/* magazine round index */

	for (i = 0; i < n; i++) {
		if (buf == m->mag_round[i]) {
			if (cp->cache_flags & KMF_BUFTAG) {
				(void) kmem_cache_free_debug(cp, tbuf,
				    caller());
			}
			m->mag_round[i] = tbuf;
			return (buf);
		}
	}

	return (NULL);
}

/*
 * Hunt the magazine layer for the given buffer. If found, the buffer is
 * removed from the magazine layer and returned, otherwise NULL is returned.
 * The returned buffer is free (no longer allocated) and in the constructed
 * state.
 */
static void *
kmem_hunt_mags(kmem_cache_t *cp, void *buf)
{
	kmem_cpu_cache_t *ccp;
	kmem_magazine_t *m;
	int cpu_seqid;
	int n;		/* magazine rounds */
	void *tbuf;	/* temporary swap buffer */

	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));

	/*
	 * Allocate a buffer to swap with the one we hope to pull out of a
	 * magazine when found.
	 */
	tbuf = kmem_cache_alloc(cp, KM_NOSLEEP);
	if (tbuf == NULL) {
		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_alloc_fail);
		return (NULL);
	}
	if (tbuf == buf) {
		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_lucky);
		if (cp->cache_flags & KMF_BUFTAG) {
			(void) kmem_cache_free_debug(cp, buf, caller());
		}
		return (buf);
	}

	/* Hunt the depot. */
	mutex_enter(&cp->cache_depot_lock);
	n = cp->cache_magtype->mt_magsize;
	for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) {
		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
			mutex_exit(&cp->cache_depot_lock);
			return (buf);
		}
	}
	mutex_exit(&cp->cache_depot_lock);

	/* Hunt the per-CPU magazines. */
	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
		ccp = &cp->cache_cpu[cpu_seqid];

		mutex_enter(&ccp->cc_lock);
		m = ccp->cc_loaded;
		n = ccp->cc_rounds;
		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
			mutex_exit(&ccp->cc_lock);
			return (buf);
		}
		m = ccp->cc_ploaded;
		n = ccp->cc_prounds;
		if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
			mutex_exit(&ccp->cc_lock);
			return (buf);
		}
		mutex_exit(&ccp->cc_lock);
	}

	kmem_cache_free(cp, tbuf);
	return (NULL);
}
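/*
 * Illustrative trace of the hunt above (hypothetical placement): suppose buf
 * sits in round 2 of some CPU's loaded magazine. kmem_hunt_mags() first
 * allocates tbuf, a freshly constructed object; kmem_hunt_mag() then finds
 * buf and overwrites mag_round[2] with tbuf, so the magazine keeps its full
 * complement of constructed rounds and no round counts change. buf is handed
 * back to kmem_move_buffer(), which frees it to the slab layer. If buf turns
 * up in no magazine, tbuf is simply freed again.
 */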
/*
 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
 * or when the buffer is freed.
 */
static void
kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
{
	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));

	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
		return;
	}

	if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
		if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
			avl_remove(&cp->cache_partial_slabs, sp);
			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
			sp->slab_stuck_offset = (uint32_t)-1;
			avl_add(&cp->cache_partial_slabs, sp);
		}
	} else {
		sp->slab_later_count = 0;
		sp->slab_stuck_offset = (uint32_t)-1;
	}
}

static void
kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
{
	ASSERT(taskq_member(kmem_move_taskq, curthread));
	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));

	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
		return;
	}

	avl_remove(&cp->cache_partial_slabs, sp);
	sp->slab_later_count = 0;
	sp->slab_flags |= KMEM_SLAB_NOMOVE;
	sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
	avl_add(&cp->cache_partial_slabs, sp);
}
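/*
 * Worked example of the bookkeeping above (offset hypothetical): if the
 * client answers NO for the buffer at offset 0x40 of a slab,
 * kmem_slab_move_no() marks the slab KMEM_SLAB_NOMOVE and records
 * slab_stuck_offset = 0x40, so kmem_slab_is_reclaimable() skips the slab
 * from then on. kmem_slab_move_yes() lifts the mark only when that same
 * stuck buffer finally moves or is freed; a YES for any other buffer on the
 * slab leaves the mark in place.
 */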
static void kmem_move_end(kmem_cache_t *, kmem_move_t *);

/*
 * The move callback takes two buffer addresses, the buffer to be moved, and a
 * newly allocated and constructed buffer selected by kmem as the destination.
 * It also takes the size of the buffer and an optional user argument
 * specified at cache creation time. kmem guarantees that the buffer to be
 * moved has not been unmapped by the virtual memory subsystem. Beyond that,
 * it cannot guarantee the present whereabouts of the buffer to be moved, so
 * it is up to the client to safely determine whether or not it is still using
 * the buffer. The client must not free either of the buffers passed to the
 * move callback, since kmem wants to free them directly to the slab layer.
 * The client response tells kmem which of the two buffers to free:
 *
 * YES		kmem frees the old buffer (the move was successful)
 * NO		kmem frees the new buffer, marks the slab of the old buffer
 *		non-reclaimable to avoid bothering the client again
 * LATER	kmem frees the new buffer, increments slab_later_count
 * DONT_KNOW	kmem frees the new buffer, searches mags for the old buffer
 * DONT_NEED	kmem frees both the old buffer and the new buffer
 *
 * The pending callback argument now being processed contains both of the
 * buffers (old and new) passed to the move callback function, the slab of the
 * old buffer, and flags related to the move request, such as whether or not
 * the system was desperate for memory.
 */
static void
kmem_move_buffer(kmem_move_t *callback)
{
	kmem_cbrc_t response;
	kmem_slab_t *sp = callback->kmm_from_slab;
	kmem_cache_t *cp = sp->slab_cache;
	boolean_t free_on_slab;

	ASSERT(taskq_member(kmem_move_taskq, curthread));
	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
	ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));

	/*
	 * The number of allocated buffers on the slab may have changed since
	 * we last checked the slab's reclaimability (when the pending move
	 * was enqueued), or the client may have responded NO when asked to
	 * move another buffer on the same slab.
	 */
	if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
		KMEM_STAT_ADD(kmem_move_stats.kms_no_longer_reclaimable);
		KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
		    kmem_move_stats.kms_notify_no_longer_reclaimable);
		kmem_slab_free(cp, callback->kmm_to_buf);
		kmem_move_end(cp, callback);
		return;
	}

	/*
	 * Hunting magazines is expensive, so we'll wait to do that until the
	 * client responds KMEM_CBRC_DONT_KNOW. However, checking the slab
	 * layer is cheap, so we might as well do that here in case we can
	 * avoid bothering the client.
	 */
	mutex_enter(&cp->cache_lock);
	free_on_slab = (kmem_slab_allocated(cp, sp,
	    callback->kmm_from_buf) == NULL);
	mutex_exit(&cp->cache_lock);

	if (free_on_slab) {
		KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
		kmem_slab_free(cp, callback->kmm_to_buf);
		kmem_move_end(cp, callback);
		return;
	}

	if (cp->cache_flags & KMF_BUFTAG) {
		/*
		 * Make kmem_cache_alloc_debug() apply the constructor for us.
		 */
		if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
		    KM_NOSLEEP, 1, caller()) != 0) {
			KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
			kmem_move_end(cp, callback);
			return;
		}
	} else if (cp->cache_constructor != NULL &&
	    cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
	    KM_NOSLEEP) != 0) {
		atomic_add_64(&cp->cache_alloc_fail, 1);
		KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
		kmem_slab_free(cp, callback->kmm_to_buf);
		kmem_move_end(cp, callback);
		return;
	}

	KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
	KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
	    kmem_move_stats.kms_notify_callbacks);
	cp->cache_defrag->kmd_callbacks++;
	cp->cache_defrag->kmd_thread = curthread;
	cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
	cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
	DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
	    callback);

	response = cp->cache_move(callback->kmm_from_buf,
	    callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);

	DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
	    callback, kmem_cbrc_t, response);
	cp->cache_defrag->kmd_thread = NULL;
	cp->cache_defrag->kmd_from_buf = NULL;
	cp->cache_defrag->kmd_to_buf = NULL;

	if (response == KMEM_CBRC_YES) {
		KMEM_STAT_ADD(kmem_move_stats.kms_yes);
		cp->cache_defrag->kmd_yes++;
		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
		mutex_enter(&cp->cache_lock);
		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
		mutex_exit(&cp->cache_lock);
		kmem_move_end(cp, callback);
		return;
	}

	switch (response) {
	case KMEM_CBRC_NO:
		KMEM_STAT_ADD(kmem_move_stats.kms_no);
		cp->cache_defrag->kmd_no++;
		mutex_enter(&cp->cache_lock);
		kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
		mutex_exit(&cp->cache_lock);
		break;
	case KMEM_CBRC_LATER:
		KMEM_STAT_ADD(kmem_move_stats.kms_later);
		cp->cache_defrag->kmd_later++;
		mutex_enter(&cp->cache_lock);
		if (!KMEM_SLAB_IS_PARTIAL(sp)) {
			mutex_exit(&cp->cache_lock);
			break;
		}

		if (++sp->slab_later_count >= KMEM_DISBELIEF) {
			KMEM_STAT_ADD(kmem_move_stats.kms_disbelief);
			kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
		} else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
			sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
			    callback->kmm_from_buf);
		}
		mutex_exit(&cp->cache_lock);
		break;
	case KMEM_CBRC_DONT_NEED:
		KMEM_STAT_ADD(kmem_move_stats.kms_dont_need);
		cp->cache_defrag->kmd_dont_need++;
		kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
		mutex_enter(&cp->cache_lock);
		kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
		mutex_exit(&cp->cache_lock);
		break;
	case KMEM_CBRC_DONT_KNOW:
		KMEM_STAT_ADD(kmem_move_stats.kms_dont_know);
		cp->cache_defrag->kmd_dont_know++;
		if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) {
			KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_mag);
			cp->cache_defrag->kmd_hunt_found++;
			kmem_slab_free_constructed(cp, callback->kmm_from_buf,
			    B_TRUE);
			mutex_enter(&cp->cache_lock);
			kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
			mutex_exit(&cp->cache_lock);
		} else {
			KMEM_STAT_ADD(kmem_move_stats.kms_hunt_notfound);
		}
		break;
	default:
		panic("'%s' (%p) unexpected move callback response %d\n",
		    cp->cache_name, (void *)cp, response);
	}

	kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
	kmem_move_end(cp, callback);
}
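/*
 * Worked example of the LATER accounting above, assuming KMEM_DISBELIEF is 3
 * (see its definition earlier in this file for the actual value): the first
 * two LATER responses for a slab merely bump slab_later_count and record the
 * stuck buffer's offset, giving the client the benefit of the doubt. The
 * third is treated as a NO: kmem_slab_move_no() marks the slab
 * KMEM_SLAB_NOMOVE, and the consolidator stops asking about it until the
 * stuck buffer moves or is freed.
 */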
/* Return B_FALSE if there is insufficient memory for the move request. */
static boolean_t
kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
{
	void *to_buf;
	avl_index_t index;
	kmem_move_t *callback, *pending;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
	ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);

	callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
	if (callback == NULL) {
		KMEM_STAT_ADD(kmem_move_stats.kms_callback_alloc_fail);
		return (B_FALSE);
	}

	callback->kmm_from_slab = sp;
	callback->kmm_from_buf = buf;
	callback->kmm_flags = flags;

	mutex_enter(&cp->cache_lock);

	if (avl_numnodes(&cp->cache_partial_slabs) <= 1) {
		mutex_exit(&cp->cache_lock);
		kmem_cache_free(kmem_move_cache, callback);
		return (B_TRUE); /* there is no need for the move request */
	}

	pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
	if (pending != NULL) {
		/*
		 * If the move is already pending and we're desperate now,
		 * update the move flags.
		 */
		if (flags & KMM_DESPERATE) {
			pending->kmm_flags |= KMM_DESPERATE;
		}
		mutex_exit(&cp->cache_lock);
		KMEM_STAT_ADD(kmem_move_stats.kms_already_pending);
		kmem_cache_free(kmem_move_cache, callback);
		return (B_TRUE);
	}

	to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs));
	callback->kmm_to_buf = to_buf;
	avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);

	mutex_exit(&cp->cache_lock);

	if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
	    callback, TQ_NOSLEEP)) {
		mutex_enter(&cp->cache_lock);
		avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
		mutex_exit(&cp->cache_lock);
		kmem_slab_free_constructed(cp, to_buf, B_FALSE);
		kmem_cache_free(kmem_move_cache, callback);
		return (B_FALSE);
	}

	return (B_TRUE);
}
static void
kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
{
	avl_index_t index;

	ASSERT(cp->cache_defrag != NULL);
	ASSERT(taskq_member(kmem_move_taskq, curthread));
	ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));

	mutex_enter(&cp->cache_lock);
	VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
	    callback->kmm_from_buf, &index) != NULL);
	avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
	if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
		list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
		kmem_slab_t *sp;

		/*
		 * The last pending move completed. Release all slabs from the
		 * front of the dead list except for any slab at the tail that
		 * needs to be released from the context of
		 * kmem_move_buffers(). kmem deferred unmapping the buffers on
		 * these slabs in order to guarantee that buffers passed to
		 * the move callback have been touched only by kmem or by the
		 * client itself.
		 */
		while ((sp = list_remove_head(deadlist)) != NULL) {
			if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
				list_insert_tail(deadlist, sp);
				break;
			}
			cp->cache_defrag->kmd_deadcount--;
			cp->cache_slab_destroy++;
			mutex_exit(&cp->cache_lock);
			kmem_slab_destroy(cp, sp);
			KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
			mutex_enter(&cp->cache_lock);
		}
	}
	mutex_exit(&cp->cache_lock);
	kmem_cache_free(kmem_move_cache, callback);
}

/*
 * Move buffers from least used slabs first by scanning backwards from the end
 * of the partial slab list. Scan at most max_scan candidate slabs and move
 * buffers from at most max_slabs slabs (0 for all partial slabs in both
 * cases). If desperate to reclaim memory, move buffers from any partial slab,
 * otherwise skip slabs with a ratio of allocated buffers at or above the
 * current threshold. Return the number of unskipped slabs (at most max_slabs,
 * -1 if the scan is aborted) so that the caller can adjust the reclaimability
 * threshold depending on how many reclaimable slabs it finds.
 *
 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
 * move request, since it is not valid for kmem_move_begin() to call
 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
 */
static int
kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
    int flags)
{
	kmem_slab_t *sp;
	void *buf;
	int i, j;	/* slab index, buffer index */
	int s;		/* reclaimable slabs */
	int b;		/* allocated (movable) buffers on reclaimable slab */
	boolean_t success;
	int refcnt;
	int nomove;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(MUTEX_HELD(&cp->cache_lock));
	ASSERT(kmem_move_cache != NULL);
	ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
	ASSERT(avl_numnodes(&cp->cache_partial_slabs) > 1);

	if (kmem_move_blocked) {
		return (0);
	}

	if (kmem_move_fulltilt) {
		max_slabs = 0;
		flags |= KMM_DESPERATE;
	}

	if (max_scan == 0 || (flags & KMM_DESPERATE)) {
		/*
		 * Scan as many slabs as needed to find the desired number of
		 * candidate slabs.
		 */
		max_scan = (size_t)-1;
	}

	if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
		/* Find as many candidate slabs as possible. */
		max_slabs = (size_t)-1;
	}

	sp = avl_last(&cp->cache_partial_slabs);
	ASSERT(sp != NULL && KMEM_SLAB_IS_PARTIAL(sp));
	for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) &&
	    (sp != avl_first(&cp->cache_partial_slabs));
	    sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {

		if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
			continue;
		}
		s++;

		/* Look for allocated buffers to move. */
		for (j = 0, b = 0, buf = sp->slab_base;
		    (j < sp->slab_chunks) && (b < sp->slab_refcnt);
		    buf = (((char *)buf) + cp->cache_chunksize), j++) {

			if (kmem_slab_allocated(cp, sp, buf) == NULL) {
				continue;
			}

			b++;

			/*
			 * Prevent the slab from being destroyed while we drop
			 * cache_lock and while the pending move is not yet
			 * registered. Flag the pending move while
			 * kmd_moves_pending may still be empty, since we can't
			 * yet rely on a non-zero pending move count to prevent
			 * the slab from being destroyed.
			 */
			ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
			sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
			/*
			 * Recheck refcnt and nomove after reacquiring the
			 * lock, since these control the order of partial
			 * slabs, and we want to know if we can pick up the
			 * scan where we left off.
			 */
			refcnt = sp->slab_refcnt;
			nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
			mutex_exit(&cp->cache_lock);

			success = kmem_move_begin(cp, sp, buf, flags);

			/*
			 * Now, before the lock is reacquired, kmem could
			 * process all pending move requests and purge the
			 * deadlist, so that upon reacquiring the lock, sp has
			 * been remapped. Therefore, the KMEM_SLAB_MOVE_PENDING
			 * flag causes the slab to be put at the end of the
			 * deadlist and prevents it from being purged, since we
			 * plan to destroy it here after reacquiring the lock.
			 */
			mutex_enter(&cp->cache_lock);
			ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
			sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;

			/*
			 * Destroy the slab now if it was completely freed
			 * while we dropped cache_lock.
			 */
			if (sp->slab_refcnt == 0) {
				list_t *deadlist =
				    &cp->cache_defrag->kmd_deadlist;

				ASSERT(!list_is_empty(deadlist));
				ASSERT(list_link_active((list_node_t *)
				    &sp->slab_link));

				list_remove(deadlist, sp);
				cp->cache_defrag->kmd_deadcount--;
				cp->cache_slab_destroy++;
				mutex_exit(&cp->cache_lock);
				kmem_slab_destroy(cp, sp);
				KMEM_STAT_ADD(kmem_move_stats.
				    kms_dead_slabs_freed);
				KMEM_STAT_ADD(kmem_move_stats.
				    kms_endscan_slab_destroyed);
				mutex_enter(&cp->cache_lock);
				/*
				 * Since we can't pick up the scan where we
				 * left off, abort the scan and say nothing
				 * about the number of reclaimable slabs.
				 */
				return (-1);
			}

			if (!success) {
				/*
				 * Abort the scan if there is not enough memory
				 * for the request and say nothing about the
				 * number of reclaimable slabs.
				 */
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_nomem);
				return (-1);
			}
			/*
			 * The slab may have been completely allocated while
			 * the lock was dropped.
			 */
			if (KMEM_SLAB_IS_ALL_USED(sp)) {
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_slab_all_used);
				return (-1);
			}

			/*
			 * The slab's position changed while the lock was
			 * dropped, so we don't know where we are in the
			 * sequence any more.
			 */
			if (sp->slab_refcnt != refcnt) {
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_refcnt_changed);
				return (-1);
			}
			if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove) {
				KMEM_STAT_ADD(
				    kmem_move_stats.kms_endscan_nomove_changed);
				return (-1);
			}

			/*
			 * Generating a move request allocates a destination
			 * buffer from the slab layer, bumping the first slab
			 * if it is completely allocated.
			 */
			ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
			if (sp == avl_first(&cp->cache_partial_slabs)) {
				goto end_scan;
			}
		}
	}
end_scan:

	KMEM_STAT_COND_ADD(sp == avl_first(&cp->cache_partial_slabs),
	    kmem_move_stats.kms_endscan_freelist);

	return (s);
}
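/*
 * Usage note, grounded in the call sites below: kmem_cache_defrag() invokes
 * kmem_move_buffers(cp, n, 0, KMM_DESPERATE) to sweep every partial slab,
 * while kmem_cache_scan() passes bounded max_scan and max_slabs with no
 * flags. A return of -1 means the scan lost its place, so the caller draws
 * no conclusion about how scarce reclaimable slabs are; a return in
 * [0, max_slabs] feeds the found-versus-sought accounting that tunes
 * kmd_reclaim_numer.
 */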
typedef struct kmem_move_notify_args {
	kmem_cache_t *kmna_cache;
	void *kmna_buf;
} kmem_move_notify_args_t;

static void
kmem_cache_move_notify_task(void *arg)
{
	kmem_move_notify_args_t *args = arg;
	kmem_cache_t *cp = args->kmna_cache;
	void *buf = args->kmna_buf;
	kmem_slab_t *sp;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(list_link_active(&cp->cache_link));

	kmem_free(args, sizeof (kmem_move_notify_args_t));
	mutex_enter(&cp->cache_lock);
	sp = kmem_slab_allocated(cp, NULL, buf);

	/* Ignore the notification if the buffer is no longer allocated. */
	if (sp == NULL) {
		mutex_exit(&cp->cache_lock);
		return;
	}

	/* Ignore the notification if there's no reason to move the buffer. */
	if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
		/*
		 * The notification is only interesting if an earlier NO or
		 * LATER response marked the slab; otherwise ignore it.
		 */
		if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
		    (sp->slab_later_count == 0)) {
			mutex_exit(&cp->cache_lock);
			return;
		}

		kmem_slab_move_yes(cp, sp, buf);
		ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
		sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
		mutex_exit(&cp->cache_lock);
		/* see kmem_move_buffers() about dropping the lock */
		(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
		mutex_enter(&cp->cache_lock);
		ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
		sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
		if (sp->slab_refcnt == 0) {
			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;

			ASSERT(!list_is_empty(deadlist));
			ASSERT(list_link_active((list_node_t *)
			    &sp->slab_link));

			list_remove(deadlist, sp);
			cp->cache_defrag->kmd_deadcount--;
			cp->cache_slab_destroy++;
			mutex_exit(&cp->cache_lock);
			kmem_slab_destroy(cp, sp);
			KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
			return;
		}
	} else {
		kmem_slab_move_yes(cp, sp, buf);
	}
	mutex_exit(&cp->cache_lock);
}

void
kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
{
	kmem_move_notify_args_t *args;

	KMEM_STAT_ADD(kmem_move_stats.kms_notify);
	args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
	if (args != NULL) {
		args->kmna_cache = cp;
		args->kmna_buf = buf;
		(void) taskq_dispatch(kmem_taskq,
		    (task_func_t *)kmem_cache_move_notify_task, args,
		    TQ_NOSLEEP);
	}
}
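/*
 * Illustrative sketch only: a client that answered KMEM_CBRC_NO or
 * KMEM_CBRC_LATER while an object was briefly pinned can volunteer the
 * object once it becomes movable again. object_t and object_unpin() are
 * hypothetical client code:
 *
 *	static void
 *	object_unpin(object_t *op)
 *	{
 *		op->o_pinned = 0;
 *		kmem_cache_move_notify(object_cache, op);
 *	}
 *
 * The notification is cheap: it dispatches a task and returns, and the task
 * quietly ignores buffers that have since been freed or that kmem no longer
 * cares about.
 */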
static void
kmem_cache_defrag(kmem_cache_t *cp)
{
	size_t n;

	ASSERT(cp->cache_defrag != NULL);

	mutex_enter(&cp->cache_lock);
	n = avl_numnodes(&cp->cache_partial_slabs);
	if (n > 1) {
		/* kmem_move_buffers() drops and reacquires cache_lock */
		(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
		KMEM_STAT_ADD(kmem_move_stats.kms_defrags);
	}
	mutex_exit(&cp->cache_lock);
}

/* Is this cache above the fragmentation threshold? */
static boolean_t
kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
{
	if (avl_numnodes(&cp->cache_partial_slabs) <= 1)
		return (B_FALSE);

	/*
	 *	nfree			kmem_frag_numer
	 *	------------------  >	---------------
	 *	cp->cache_buftotal	kmem_frag_denom
	 */
	return ((nfree * kmem_frag_denom) >
	    (cp->cache_buftotal * kmem_frag_numer));
}
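/*
 * Worked example, assuming kmem_frag_numer = 1 and kmem_frag_denom = 8 (the
 * 1/8 limit cited in the Big Theory Statement; see the tunables earlier in
 * this file for the actual values): a cache with cache_buftotal = 1024 is
 * over the threshold once more than 128 buffers are free in the slab layer,
 * e.g. nfree = 200 gives 200 * 8 = 1600 > 1024 * 1.
 * kmem_cache_is_fragmented() below may also fold in free buffers held by
 * reapable full magazines before applying the same test.
 */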
4855*b5fca8f8Stomee * 4856*b5fca8f8Stomee * kmem_move_buffers() drops and reacquires cache_lock. 4857*b5fca8f8Stomee */ 4858*b5fca8f8Stomee slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range, 4859*b5fca8f8Stomee kmem_reclaim_max_slabs, 0); 4860*b5fca8f8Stomee if (slabs_found >= 0) { 4861*b5fca8f8Stomee kmd->kmd_slabs_sought += kmem_reclaim_max_slabs; 4862*b5fca8f8Stomee kmd->kmd_slabs_found += slabs_found; 4863*b5fca8f8Stomee } 4864*b5fca8f8Stomee 4865*b5fca8f8Stomee if (++kmd->kmd_scans >= kmem_reclaim_scan_range) { 4866*b5fca8f8Stomee kmd->kmd_scans = 0; 4867*b5fca8f8Stomee 4868*b5fca8f8Stomee /* 4869*b5fca8f8Stomee * If we had difficulty finding candidate slabs in 4870*b5fca8f8Stomee * previous scans, adjust the threshold so that 4871*b5fca8f8Stomee * candidates are easier to find. 4872*b5fca8f8Stomee */ 4873*b5fca8f8Stomee if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) { 4874*b5fca8f8Stomee kmem_adjust_reclaim_threshold(kmd, -1); 4875*b5fca8f8Stomee } else if ((kmd->kmd_slabs_found * 2) < 4876*b5fca8f8Stomee kmd->kmd_slabs_sought) { 4877*b5fca8f8Stomee kmem_adjust_reclaim_threshold(kmd, 1); 4878*b5fca8f8Stomee } 4879*b5fca8f8Stomee kmd->kmd_slabs_sought = 0; 4880*b5fca8f8Stomee kmd->kmd_slabs_found = 0; 4881*b5fca8f8Stomee } 4882*b5fca8f8Stomee } else { 4883*b5fca8f8Stomee kmem_reset_reclaim_threshold(cp->cache_defrag); 4884*b5fca8f8Stomee #ifdef DEBUG 4885*b5fca8f8Stomee if (avl_numnodes(&cp->cache_partial_slabs) > 1) { 4886*b5fca8f8Stomee /* 4887*b5fca8f8Stomee * In a debug kernel we want the consolidator to 4888*b5fca8f8Stomee * run occasionally even when there is plenty of 4889*b5fca8f8Stomee * memory. 4890*b5fca8f8Stomee */ 4891*b5fca8f8Stomee uint32_t debug_rand; 4892*b5fca8f8Stomee 4893*b5fca8f8Stomee (void) random_get_bytes((uint8_t *)&debug_rand, 4); 4894*b5fca8f8Stomee if (!kmem_move_noreap && 4895*b5fca8f8Stomee ((debug_rand % kmem_mtb_reap) == 0)) { 4896*b5fca8f8Stomee mutex_exit(&cp->cache_lock); 4897*b5fca8f8Stomee kmem_cache_reap(cp); 4898*b5fca8f8Stomee KMEM_STAT_ADD(kmem_move_stats.kms_debug_reaps); 4899*b5fca8f8Stomee return; 4900*b5fca8f8Stomee } else if ((debug_rand % kmem_mtb_move) == 0) { 4901*b5fca8f8Stomee (void) kmem_move_buffers(cp, 4902*b5fca8f8Stomee kmem_reclaim_scan_range, 1, 0); 4903*b5fca8f8Stomee KMEM_STAT_ADD(kmem_move_stats. 4904*b5fca8f8Stomee kms_debug_move_scans); 4905*b5fca8f8Stomee } 4906*b5fca8f8Stomee } 4907*b5fca8f8Stomee #endif /* DEBUG */ 4908*b5fca8f8Stomee } 4909*b5fca8f8Stomee 4910*b5fca8f8Stomee mutex_exit(&cp->cache_lock); 4911*b5fca8f8Stomee 4912*b5fca8f8Stomee if (reap) { 4913*b5fca8f8Stomee KMEM_STAT_ADD(kmem_move_stats.kms_scan_depot_ws_reaps); 4914*b5fca8f8Stomee kmem_depot_ws_reap(cp); 4915*b5fca8f8Stomee } 4916*b5fca8f8Stomee } 4917