/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Copyright 2012 Joyent, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
 *
 * The slab allocator, as described in the following two papers:
 *
 *	Jeff Bonwick,
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 *	Proceedings of the Summer 1994 Usenix Conference.
 *	Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 *	Jeff Bonwick and Jonathan Adams,
 *	Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 *	Arbitrary Resources.
 *	Proceedings of the 2001 Usenix Conference.
 *	Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * 1. Overview
 * -----------
 * umem is very close to kmem in implementation.  There are five major
 * areas of divergence:
 *
 *	* Initialization
 *
 *	* CPU handling
 *
 *	* umem_update()
 *
 *	* KM_SLEEP vs. UMEM_NOFAIL
 *
 *	* lock ordering
 *
 * 2. Initialization
 * -----------------
 * kmem is initialized early on in boot, and knows that no one will call
 * into it before it is ready.  umem does not have these luxuries.  Instead,
 * initialization is divided into two phases:
 *
 *	* library initialization, and
 *
 *	* first use
 *
 * umem's full initialization happens at the time of the first allocation
 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
 * or the first call to umem_cache_create().
 *
 * umem_free() and umem_cache_alloc() do not require special handling,
 * since the only way to get valid arguments for them is to successfully
 * call a function from the first group.
 *
 * 2.1. Library Initialization: umem_startup()
 * -------------------------------------------
 * umem_startup() is libumem.so's .init section.  It calls pthread_atfork()
 * to install the handlers necessary for umem's Fork1-Safety.  Because of
 * race condition issues, all other pre-umem_init() initialization is done
 * statically (i.e. by the dynamic linker).
 *
 * For standalone use, umem_startup() returns everything to its initial
 * state.
 *
 * 2.2. First use: umem_init()
 * ---------------------------
 * The first time any memory allocation function is used, we have to
 * create the backing caches and vmem arenas which are needed for it.
 * umem_init() is the central point for that task.  When it completes,
 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED
 * (unable to initialize, probably due to lack of memory).
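 *
 * A condensed, illustrative view of the resulting contract (the precise
 * mechanism is described in 2.2.1 below; the authoritative code is
 * umem_alloc_retry(), later in this file):
 *
 *	allocation request arrives before initialization
 *		-> umem_init() is invoked on the caller's behalf
 *		-> on success, the allocation is retried and proceeds normally
 *		-> on failure, the allocation fails (returns NULL)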
 *
 * There are four different paths from which umem_init() is called:
 *
 *	* from umem_alloc() or umem_zalloc(), with 0 < size <= UMEM_MAXBUF,
 *
 *	* from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
 *
 *	* from umem_cache_create(), and
 *
 *	* from memalign(), with align > UMEM_ALIGN.
 *
 * The last three just check if umem is initialized, and call umem_init()
 * if it is not.  For performance reasons, the first case is more complicated.
 *
 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size <= UMEM_MAXBUF
 * ----------------------------------------------------------------
 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
 * There is special case code which causes any allocation on
 * &umem_null_cache to fail by returning (NULL), regardless of the
 * flags argument.
 *
 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
 * umem_alloc_retry().  umem_alloc_retry() sees that the allocation
 * was against &umem_null_cache, and calls umem_init().
 *
 * If initialization is successful, umem_alloc_retry() returns 1, which
 * causes umem_alloc()/umem_zalloc() to start over and load the (now
 * valid) cache pointer from umem_alloc_table.
 *
 * 2.2.2. Dealing with race conditions
 * -----------------------------------
 * There are a couple of race conditions resulting from the initialization
 * code that we have to guard against:
 *
 *	* In umem_cache_create(), there is a special UMC_INTERNAL cflag
 *	  that is passed for caches created during initialization.  It
 *	  is illegal for a user to try to create a UMC_INTERNAL cache.
 *	  This allows initialization to proceed, but any other
 *	  umem_cache_create()s will block by calling umem_init().
 *
 *	* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
 *	  is always zero.  umem_cache_alloc() uses cp->cache_cpu_mask to
 *	  mask the cpu number.  This prevents a race between grabbing a
 *	  cache pointer out of umem_alloc_table and growing the cpu array.
 *
 *
 * 3. CPU handling
 * ---------------
 * kmem uses the CPU's sequence number to determine which "cpu cache" to
 * use for an allocation.  Currently, there is no way to get the sequence
 * number in userspace.
 *
 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
 * umem_cpu_t structures.  CURCPU() is a "hint" function, which we then mask
 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
 * The mechanics of this are all in the CPU(mask) macro.
 *
 * Currently, umem uses _lwp_self() as its hint.
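 *
 * As a concrete illustration (the macro below is quoted from the
 * definitions later in this file):
 *
 *	#define	CPU(mask)	(umem_cpus + (CPUHINT() & (mask)))
 *
 * so with umem_cpu_mask == 3 (four cpu slots), a thread whose hint is 11
 * always maps to umem_cpus[3]; threads whose hints differ in those low
 * bits use different "cpu" caches, which is all the notion of "current
 * cpu" that umem needs.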
 *
 *
 * 4. The update thread
 * --------------------
 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
 * every kmem cache.  vmem has a periodic timeout for hash table resizing.
 * The kmem_taskq also provides a separate context for kmem_cache_reap()'s
 * to be done in, avoiding issues of the context of kmem_reap() callers.
 *
 * Instead, umem has the concept of "updates", which are asynchronous requests
 * for work attached to single caches.  All caches with pending work are
 * on a doubly linked list rooted at the umem_null_cache.  All update state
 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
 * for notification between threads.
 *
 * 4.1. Cache states with regard to updates
 * ----------------------------------------
 * A given cache is in one of three states:
 *
 * Inactive		cache_uflags is zero, cache_u{next,prev} are NULL
 *
 * Work Requested	cache_uflags is non-zero (but UMU_ACTIVE is not set),
 *			cache_u{next,prev} link the cache onto the global
 *			update list
 *
 * Active		cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
 *			are NULL, and either umem_update_thr or
 *			umem_st_update_thr is actively doing work on the
 *			cache.
 *
 * An update can be added to any cache in any state -- if the cache is
 * Inactive, it transitions to being Work Requested.  If the cache is
 * Active, the worker will notice the new update and act on it before
 * transitioning the cache to the Inactive state.
 *
 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
 * the worker to broadcast the umem_update_cv when it has finished.
 *
 * 4.2. Update interface
 * ---------------------
 * umem_add_update() adds an update to a particular cache.
 * umem_updateall() adds an update to all caches.
 * umem_remove_updates() returns a cache to the Inactive state.
 *
 * umem_process_updates() processes all caches in the Work Requested state.
 *
 * 4.3. Reaping
 * ------------
 * When umem_reap() is called (at the time of heap growth), it schedules
 * UMU_REAP updates on every cache.  It then checks to see if the update
 * thread exists (umem_update_thr != 0).  If it does, it broadcasts
 * the umem_update_cv to wake the update thread up, and returns.
 *
 * If the update thread does not exist (umem_update_thr == 0), and the
 * program currently has multiple threads, umem_reap() attempts to create
 * a new update thread.
 *
 * If the process is not multithreaded, or the creation fails, umem_reap()
 * calls umem_st_update() to do an inline update.
 *
 * 4.4. The update thread
 * ----------------------
 * The update thread spends most of its time in cond_timedwait() on the
 * umem_update_cv.  It wakes up under two conditions:
 *
 *	* The timedwait times out, in which case it needs to run a global
 *	  update, or
 *
 *	* someone cond_broadcast(3THR)s the umem_update_cv, in which case
 *	  it needs to check if there are any caches in the Work Requested
 *	  state.
 *
 * When it is time for another global update, umem calls umem_cache_update()
 * on every cache, then calls vmem_update(), which tunes the vmem structures.
 * umem_cache_update() can request further work using umem_add_update().
 *
 * After any work from the global update completes, the update timer is
 * reset to umem_reap_interval seconds in the future.  This makes the
 * updates self-throttling.
 *
 * Reaps are similarly self-throttling.  After a UMU_REAP update has
 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
 * update thread.  The update thread notices the flag, and resets the
 * reap state.
 *
 * 4.5. Inline updates
 * -------------------
 * If the update thread is not running, umem_st_update() is used instead.  It
 * immediately does a global update (as above), then calls
 * umem_process_updates() to process both the reaps that umem_reap() added and
 * any work generated by the global update.  Afterwards, it resets the reap
 * state.
 *
 * While umem_st_update() is running, umem_st_update_thr holds the thread
 * id of the thread performing the update.
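 *
 * Taken together, sections 4.3 through 4.5 amount to roughly the following
 * flow when the heap grows (an illustrative sketch, not the literal code):
 *
 *	umem_reap():
 *		schedule UMU_REAP on every cache (umem_updateall(UMU_REAP));
 *		if the update thread exists (umem_update_thr != 0),
 *			cond_broadcast(&umem_update_cv) to wake it;
 *		else if the process is multithreaded,
 *			try to create the update thread;
 *		else
 *			umem_st_update();	(inline update)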
 *
 * 4.6. Updates and fork1()
 * ------------------------
 * umem has fork1() pre- and post-handlers which lock up (and release) every
 * mutex in every cache.  They also lock up the umem_update_lock.  Since
 * fork1() only copies over a single lwp, other threads (including the update
 * thread) could have been actively using a cache in the parent.  This
 * can lead to inconsistencies in the child process.
 *
 * Because we locked all of the mutexes, the only possible inconsistencies are:
 *
 *	* a umem_cache_alloc() could leak its buffer.
 *
 *	* a caller of umem_depot_alloc() could leak a magazine, and all the
 *	  buffers contained in it.
 *
 *	* a cache could be in the Active update state.  In the child, there
 *	  would be no thread actually working on it.
 *
 *	* a umem_hash_rescale() could leak the new hash table.
 *
 *	* a umem_magazine_resize() could be in progress.
 *
 *	* a umem_reap() could be in progress.
 *
 * The memory leaks we can't do anything about.  umem_release_child() resets
 * the update state and moves any caches in the Active state to the Work
 * Requested state.  This might cause some updates to be re-run, but UMU_REAP
 * and UMU_HASH_RESCALE are effectively idempotent, and the worst that can
 * happen from umem_magazine_resize() is resizing the magazine twice in close
 * succession.
 *
 * Much of the cleanup in umem_release_child() is skipped if
 * umem_st_update_thr == thr_self().  This is so that applications which call
 * fork1() from a cache callback do not break.  Needless to say, any such
 * application is tremendously broken.
 *
 *
 * 5. KM_SLEEP vs. UMEM_NOFAIL
 * ---------------------------
 * Allocations against kmem and vmem have two basic modes:  SLEEP and
 * NOSLEEP.  A sleeping allocation will go to sleep (waiting for
 * more memory) instead of failing (returning NULL).
 *
 * SLEEP allocations presume an extremely multithreaded model, with
 * a lot of allocation and deallocation activity.  umem cannot presume
 * that its clients have any particular type of behavior.  Instead,
 * it provides two types of allocations:
 *
 *	* UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
 *	  failure)
 *
 *	* UMEM_NOFAIL, which, on failure, calls an optional callback
 *	  (registered with umem_nofail_callback()).
 *
 * The callback is invoked with no locks held, and can do an arbitrary
 * amount of work.  It then has a choice between:
 *
 *	* Returning UMEM_CALLBACK_RETRY, which will cause the allocation
 *	  to be restarted.
 *
 *	* Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
 *	  to be invoked with status.  If multiple threads attempt to do
 *	  this simultaneously, only one will call exit(2).
 *
 *	* Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
 *	  etc.)
 *
 * The default callback returns UMEM_CALLBACK_EXIT(255).
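 *
 * For example, a client that wants to try releasing memory of its own
 * before giving up might register something like the following (an
 * illustrative sketch; reclaim_app_caches() stands in for whatever the
 * application can do to free memory):
 *
 *	static int
 *	nofail_cb(void)
 *	{
 *		if (reclaim_app_caches() > 0)
 *			return (UMEM_CALLBACK_RETRY);
 *		return (UMEM_CALLBACK_EXIT(1));
 *	}
 *
 *	...
 *	umem_nofail_callback(nofail_cb);
 *	buf = umem_alloc(len, UMEM_NOFAIL);	(never observes NULL)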
 *
 * To have these callbacks without risk of state corruption (in the case of
 * a non-local exit), we have to ensure that the callbacks get invoked
 * close to the original allocation, with no inconsistent state or held
 * locks.  The following steps are taken:
 *
 *	* All invocations of vmem are VM_NOSLEEP.
 *
 *	* All constructor callbacks (which can themselves do allocations)
 *	  are passed UMEM_DEFAULT as their required allocation argument.  This
 *	  way, the constructor will fail, allowing the highest-level allocation
 *	  to invoke the nofail callback.
 *
 *	  If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
 *	  the nofail callback does a non-local exit, we will leak the
 *	  partially-constructed buffer.
 *
 *
 * 6. Lock Ordering
 * ----------------
 * umem has a few more locks than kmem does, mostly in the update path.  The
 * overall lock ordering (earlier locks must be acquired first) is:
 *
 *	umem_init_lock
 *
 *	vmem_list_lock
 *	vmem_nosleep_lock.vmpl_mutex
 *	vmem_t's:
 *		vm_lock
 *	sbrk_lock
 *
 *	umem_cache_lock
 *	umem_update_lock
 *	umem_flags_lock
 *	umem_cache_t's:
 *		cache_cpu[*].cc_lock
 *		cache_depot_lock
 *		cache_lock
 *	umem_log_header_t's:
 *		lh_cpu[*].clh_lock
 *		lh_lock
 *
 * 7. Changing UMEM_MAXBUF
 * -----------------------
 *
 * When changing UMEM_MAXBUF, extra care has to be taken.  It is not
 * sufficient simply to increase this number.  First, one must update
 * umem_alloc_table to have the appropriate number of entries based upon
 * the new size.  If this is not done, libumem will blow an assertion.
 *
 * The second place to update, which is optional, is umem_alloc_sizes.
 * These entries determine the default cache sizes that we're going to
 * support.
 */

#include <umem_impl.h>
#include <sys/vmem_impl_user.h>
#include "umem_base.h"
#include "vmem_base.h"

#include <sys/processor.h>
#include <sys/sysmacros.h>

#include <alloca.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <atomic.h>

#include "misc.h"

#define	UMEM_VMFLAGS(umflag)	(VM_NOSLEEP)

size_t pagesize;

/*
 * The default set of caches to back umem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next cache size greater than or equal to it must be a
 * multiple of 64.
 *
 * This table must be in sorted order, from smallest to highest.  The
 * highest slot must be UMEM_MAXBUF, and every slot afterwards must be
 * zero.
 */
static int umem_alloc_sizes[] = {
#ifdef _LP64
	1 * 8,
	1 * 16,
	2 * 16,
	3 * 16,
#else
	1 * 8,
	2 * 8,
	3 * 8,
	4 * 8,		5 * 8,		6 * 8,		7 * 8,
#endif
	4 * 16,		5 * 16,		6 * 16,		7 * 16,
	4 * 32,		5 * 32,		6 * 32,		7 * 32,
	4 * 64,		5 * 64,		6 * 64,		7 * 64,
	4 * 128,	5 * 128,	6 * 128,	7 * 128,
	P2ALIGN(8192 / 7, 64),
	P2ALIGN(8192 / 6, 64),
	P2ALIGN(8192 / 5, 64),
	P2ALIGN(8192 / 4, 64), 2304,
	P2ALIGN(8192 / 3, 64),
	P2ALIGN(8192 / 2, 64), 4544,
	P2ALIGN(8192 / 1, 64), 9216,
	4096 * 3,
	8192 * 2,			/* = 8192 * 2 */
	24576, 32768, 40960, 49152, 57344, 65536, 73728, 81920,
	90112, 98304, 106496, 114688, 122880, UMEM_MAXBUF,	/* 128k */
	/* 24 slots for user expansion */
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
};
#define	NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))

static umem_magtype_t umem_magtype[] = {
	{ 1,	8,	3200,	65536 },
	{ 3,	16,	256,	32768 },
	{ 7,	32,	64,	16384 },
	{ 15,	64,	0,	8192 },
	{ 31,	64,	0,	4096 },
	{ 47,	64,	0,	2048 },
	{ 63,	64,	0,	1024 },
	{ 95,	64,	0,	512 },
	{ 143,	64,	0,	0 },
};

/*
 * umem tunables
 */
uint32_t umem_max_ncpus;	/* # of CPU caches. */

uint32_t umem_stack_depth = 15;	/* # stack frames in a bufctl_audit */
uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
uint_t umem_abort = 1;		/* whether to abort on error */
uint_t umem_output = 0;		/* whether to write to standard error */
uint_t umem_logging = 0;	/* umem_log_enter() override */
uint32_t umem_mtbf = 0;		/* mean time between failures [default: off] */
size_t umem_transaction_log_size; /* size of transaction log */
size_t umem_content_log_size;	/* size of content log */
size_t umem_failure_log_size;	/* failure log [4 pages per CPU] */
size_t umem_slab_log_size;	/* slab create log [4 pages per CPU] */
size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
size_t umem_lite_minsize = 0;	/* minimum buffer size for UMF_LITE */
size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
size_t umem_maxverify;		/* maximum bytes to inspect in debug routines */
size_t umem_minfirewall;	/* hardware-enforced redzone threshold */

uint_t umem_flags = 0;

mutex_t		umem_init_lock;		/* locks initialization */
cond_t		umem_init_cv;		/* initialization CV */
thread_t	umem_init_thr;		/* thread initializing */
int		umem_init_env_ready;	/* environ pre-initted */
int		umem_ready = UMEM_READY_STARTUP;

static umem_nofail_callback_t *nofail_callback;
static mutex_t	umem_nofail_exit_lock;
static thread_t	umem_nofail_exit_thr;

static umem_cache_t	*umem_slab_cache;
static umem_cache_t	*umem_bufctl_cache;
static umem_cache_t	*umem_bufctl_audit_cache;

mutex_t			umem_flags_lock;

static vmem_t		*heap_arena;
static vmem_alloc_t	*heap_alloc;
static vmem_free_t	*heap_free;

static vmem_t		*umem_internal_arena;
static vmem_t		*umem_cache_arena;
static vmem_t		*umem_hash_arena;
static vmem_t		*umem_log_arena;
static vmem_t		*umem_oversize_arena;
static vmem_t		*umem_va_arena;
static vmem_t		*umem_default_arena;
static vmem_t		*umem_firewall_va_arena;
static vmem_t		*umem_firewall_arena;

vmem_t			*umem_memalign_arena;

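/*
 * Illustrative note (not part of the allocator itself): umem_alloc_sizes
 * above determines which caches back umem_alloc_table (declared below),
 * which _umem_alloc() indexes by (size - 1) >> UMEM_ALIGN_SHIFT.  With the
 * default tables and the usual 8-byte UMEM_ALIGN, for example, a
 * umem_alloc(100, UMEM_DEFAULT) request computes index 12 and is satisfied
 * by the 112-byte cache, the smallest cache whose buffers can hold 100
 * bytes.
 */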
umem_log_header_t *umem_transaction_log; 512 umem_log_header_t *umem_content_log; 513 umem_log_header_t *umem_failure_log; 514 umem_log_header_t *umem_slab_log; 515 516 #define CPUHINT() (thr_self()) 517 #define CPUHINT_MAX() INT_MAX 518 519 #define CPU(mask) (umem_cpus + (CPUHINT() & (mask))) 520 static umem_cpu_t umem_startup_cpu = { /* initial, single, cpu */ 521 UMEM_CACHE_SIZE(0), 522 0 523 }; 524 525 static uint32_t umem_cpu_mask = 0; /* global cpu mask */ 526 static umem_cpu_t *umem_cpus = &umem_startup_cpu; /* cpu list */ 527 528 volatile uint32_t umem_reaping; 529 530 thread_t umem_update_thr; 531 struct timeval umem_update_next; /* timeofday of next update */ 532 volatile thread_t umem_st_update_thr; /* only used when single-thd */ 533 534 #define IN_UPDATE() (thr_self() == umem_update_thr || \ 535 thr_self() == umem_st_update_thr) 536 #define IN_REAP() IN_UPDATE() 537 538 mutex_t umem_update_lock; /* cache_u{next,prev,flags} */ 539 cond_t umem_update_cv; 540 541 volatile hrtime_t umem_reap_next; /* min hrtime of next reap */ 542 543 mutex_t umem_cache_lock; /* inter-cache linkage only */ 544 545 #ifdef UMEM_STANDALONE 546 umem_cache_t umem_null_cache; 547 static const umem_cache_t umem_null_cache_template = { 548 #else 549 umem_cache_t umem_null_cache = { 550 #endif 551 0, 0, 0, 0, 0, 552 0, 0, 553 0, 0, 554 0, 0, 555 "invalid_cache", 556 0, 0, 557 NULL, NULL, NULL, NULL, 558 NULL, 559 0, 0, 0, 0, 560 &umem_null_cache, &umem_null_cache, 561 &umem_null_cache, &umem_null_cache, 562 0, 563 DEFAULTMUTEX, /* start of slab layer */ 564 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 565 &umem_null_cache.cache_nullslab, 566 { 567 &umem_null_cache, 568 NULL, 569 &umem_null_cache.cache_nullslab, 570 &umem_null_cache.cache_nullslab, 571 NULL, 572 -1, 573 0 574 }, 575 NULL, 576 NULL, 577 DEFAULTMUTEX, /* start of depot layer */ 578 NULL, { 579 NULL, 0, 0, 0, 0 580 }, { 581 NULL, 0, 0, 0, 0 582 }, { 583 { 584 DEFAULTMUTEX, /* start of CPU cache */ 585 0, 0, NULL, NULL, -1, -1, 0 586 } 587 } 588 }; 589 590 #define ALLOC_TABLE_4 \ 591 &umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache 592 593 #define ALLOC_TABLE_64 \ 594 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \ 595 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \ 596 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \ 597 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4 598 599 #define ALLOC_TABLE_1024 \ 600 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \ 601 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \ 602 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \ 603 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64 604 605 static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = { 606 ALLOC_TABLE_1024, 607 ALLOC_TABLE_1024, 608 ALLOC_TABLE_1024, 609 ALLOC_TABLE_1024, 610 ALLOC_TABLE_1024, 611 ALLOC_TABLE_1024, 612 ALLOC_TABLE_1024, 613 ALLOC_TABLE_1024, 614 ALLOC_TABLE_1024, 615 ALLOC_TABLE_1024, 616 ALLOC_TABLE_1024, 617 ALLOC_TABLE_1024, 618 ALLOC_TABLE_1024, 619 ALLOC_TABLE_1024, 620 ALLOC_TABLE_1024, 621 ALLOC_TABLE_1024 622 }; 623 624 625 /* Used to constrain audit-log stack traces */ 626 caddr_t umem_min_stack; 627 caddr_t umem_max_stack; 628 629 630 #define UMERR_MODIFIED 0 /* buffer modified while on freelist */ 631 #define UMERR_REDZONE 1 /* redzone violation (write past end of buf) */ 632 #define UMERR_DUPFREE 2 /* freed a buffer twice */ 633 #define UMERR_BADADDR 3 /* freed a bad (unallocated) 
address */ 634 #define UMERR_BADBUFTAG 4 /* buftag corrupted */ 635 #define UMERR_BADBUFCTL 5 /* bufctl corrupted */ 636 #define UMERR_BADCACHE 6 /* freed a buffer to the wrong cache */ 637 #define UMERR_BADSIZE 7 /* alloc size != free size */ 638 #define UMERR_BADBASE 8 /* buffer base address wrong */ 639 640 struct { 641 hrtime_t ump_timestamp; /* timestamp of error */ 642 int ump_error; /* type of umem error (UMERR_*) */ 643 void *ump_buffer; /* buffer that induced abort */ 644 void *ump_realbuf; /* real start address for buffer */ 645 umem_cache_t *ump_cache; /* buffer's cache according to client */ 646 umem_cache_t *ump_realcache; /* actual cache containing buffer */ 647 umem_slab_t *ump_slab; /* slab accoring to umem_findslab() */ 648 umem_bufctl_t *ump_bufctl; /* bufctl */ 649 } umem_abort_info; 650 651 static void 652 copy_pattern(uint64_t pattern, void *buf_arg, size_t size) 653 { 654 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 655 uint64_t *buf = buf_arg; 656 657 while (buf < bufend) 658 *buf++ = pattern; 659 } 660 661 static void * 662 verify_pattern(uint64_t pattern, void *buf_arg, size_t size) 663 { 664 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 665 uint64_t *buf; 666 667 for (buf = buf_arg; buf < bufend; buf++) 668 if (*buf != pattern) 669 return (buf); 670 return (NULL); 671 } 672 673 static void * 674 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size) 675 { 676 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size); 677 uint64_t *buf; 678 679 for (buf = buf_arg; buf < bufend; buf++) { 680 if (*buf != old) { 681 copy_pattern(old, buf_arg, 682 (char *)buf - (char *)buf_arg); 683 return (buf); 684 } 685 *buf = new; 686 } 687 688 return (NULL); 689 } 690 691 void 692 umem_cache_applyall(void (*func)(umem_cache_t *)) 693 { 694 umem_cache_t *cp; 695 696 (void) mutex_lock(&umem_cache_lock); 697 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache; 698 cp = cp->cache_next) 699 func(cp); 700 (void) mutex_unlock(&umem_cache_lock); 701 } 702 703 static void 704 umem_add_update_unlocked(umem_cache_t *cp, int flags) 705 { 706 umem_cache_t *cnext, *cprev; 707 708 flags &= ~UMU_ACTIVE; 709 710 if (!flags) 711 return; 712 713 if (cp->cache_uflags & UMU_ACTIVE) { 714 cp->cache_uflags |= flags; 715 } else { 716 if (cp->cache_unext != NULL) { 717 ASSERT(cp->cache_uflags != 0); 718 cp->cache_uflags |= flags; 719 } else { 720 ASSERT(cp->cache_uflags == 0); 721 cp->cache_uflags = flags; 722 cp->cache_unext = cnext = &umem_null_cache; 723 cp->cache_uprev = cprev = umem_null_cache.cache_uprev; 724 cnext->cache_uprev = cp; 725 cprev->cache_unext = cp; 726 } 727 } 728 } 729 730 static void 731 umem_add_update(umem_cache_t *cp, int flags) 732 { 733 (void) mutex_lock(&umem_update_lock); 734 735 umem_add_update_unlocked(cp, flags); 736 737 if (!IN_UPDATE()) 738 (void) cond_broadcast(&umem_update_cv); 739 740 (void) mutex_unlock(&umem_update_lock); 741 } 742 743 /* 744 * Remove a cache from the update list, waiting for any in-progress work to 745 * complete first. 
746 */ 747 static void 748 umem_remove_updates(umem_cache_t *cp) 749 { 750 (void) mutex_lock(&umem_update_lock); 751 752 /* 753 * Get it out of the active state 754 */ 755 while (cp->cache_uflags & UMU_ACTIVE) { 756 int cancel_state; 757 758 ASSERT(cp->cache_unext == NULL); 759 760 cp->cache_uflags |= UMU_NOTIFY; 761 762 /* 763 * Make sure the update state is sane, before we wait 764 */ 765 ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0); 766 ASSERT(umem_update_thr != thr_self() && 767 umem_st_update_thr != thr_self()); 768 769 (void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 770 &cancel_state); 771 (void) cond_wait(&umem_update_cv, &umem_update_lock); 772 (void) pthread_setcancelstate(cancel_state, NULL); 773 } 774 /* 775 * Get it out of the Work Requested state 776 */ 777 if (cp->cache_unext != NULL) { 778 cp->cache_uprev->cache_unext = cp->cache_unext; 779 cp->cache_unext->cache_uprev = cp->cache_uprev; 780 cp->cache_uprev = cp->cache_unext = NULL; 781 cp->cache_uflags = 0; 782 } 783 /* 784 * Make sure it is in the Inactive state 785 */ 786 ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0); 787 (void) mutex_unlock(&umem_update_lock); 788 } 789 790 static void 791 umem_updateall(int flags) 792 { 793 umem_cache_t *cp; 794 795 /* 796 * NOTE: To prevent deadlock, umem_cache_lock is always acquired first. 797 * 798 * (umem_add_update is called from things run via umem_cache_applyall) 799 */ 800 (void) mutex_lock(&umem_cache_lock); 801 (void) mutex_lock(&umem_update_lock); 802 803 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache; 804 cp = cp->cache_next) 805 umem_add_update_unlocked(cp, flags); 806 807 if (!IN_UPDATE()) 808 (void) cond_broadcast(&umem_update_cv); 809 810 (void) mutex_unlock(&umem_update_lock); 811 (void) mutex_unlock(&umem_cache_lock); 812 } 813 814 /* 815 * Debugging support. Given a buffer address, find its slab. 
816 */ 817 static umem_slab_t * 818 umem_findslab(umem_cache_t *cp, void *buf) 819 { 820 umem_slab_t *sp; 821 822 (void) mutex_lock(&cp->cache_lock); 823 for (sp = cp->cache_nullslab.slab_next; 824 sp != &cp->cache_nullslab; sp = sp->slab_next) { 825 if (UMEM_SLAB_MEMBER(sp, buf)) { 826 (void) mutex_unlock(&cp->cache_lock); 827 return (sp); 828 } 829 } 830 (void) mutex_unlock(&cp->cache_lock); 831 832 return (NULL); 833 } 834 835 static void 836 umem_error(int error, umem_cache_t *cparg, void *bufarg) 837 { 838 umem_buftag_t *btp = NULL; 839 umem_bufctl_t *bcp = NULL; 840 umem_cache_t *cp = cparg; 841 umem_slab_t *sp; 842 uint64_t *off; 843 void *buf = bufarg; 844 845 int old_logging = umem_logging; 846 847 umem_logging = 0; /* stop logging when a bad thing happens */ 848 849 umem_abort_info.ump_timestamp = gethrtime(); 850 851 sp = umem_findslab(cp, buf); 852 if (sp == NULL) { 853 for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache; 854 cp = cp->cache_prev) { 855 if ((sp = umem_findslab(cp, buf)) != NULL) 856 break; 857 } 858 } 859 860 if (sp == NULL) { 861 cp = NULL; 862 error = UMERR_BADADDR; 863 } else { 864 if (cp != cparg) 865 error = UMERR_BADCACHE; 866 else 867 buf = (char *)bufarg - ((uintptr_t)bufarg - 868 (uintptr_t)sp->slab_base) % cp->cache_chunksize; 869 if (buf != bufarg) 870 error = UMERR_BADBASE; 871 if (cp->cache_flags & UMF_BUFTAG) 872 btp = UMEM_BUFTAG(cp, buf); 873 if (cp->cache_flags & UMF_HASH) { 874 (void) mutex_lock(&cp->cache_lock); 875 for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next) 876 if (bcp->bc_addr == buf) 877 break; 878 (void) mutex_unlock(&cp->cache_lock); 879 if (bcp == NULL && btp != NULL) 880 bcp = btp->bt_bufctl; 881 if (umem_findslab(cp->cache_bufctl_cache, bcp) == 882 NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) || 883 bcp->bc_addr != buf) { 884 error = UMERR_BADBUFCTL; 885 bcp = NULL; 886 } 887 } 888 } 889 890 umem_abort_info.ump_error = error; 891 umem_abort_info.ump_buffer = bufarg; 892 umem_abort_info.ump_realbuf = buf; 893 umem_abort_info.ump_cache = cparg; 894 umem_abort_info.ump_realcache = cp; 895 umem_abort_info.ump_slab = sp; 896 umem_abort_info.ump_bufctl = bcp; 897 898 umem_printf("umem allocator: "); 899 900 switch (error) { 901 902 case UMERR_MODIFIED: 903 umem_printf("buffer modified after being freed\n"); 904 off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 905 if (off == NULL) /* shouldn't happen */ 906 off = buf; 907 umem_printf("modification occurred at offset 0x%lx " 908 "(0x%llx replaced by 0x%llx)\n", 909 (uintptr_t)off - (uintptr_t)buf, 910 (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off); 911 break; 912 913 case UMERR_REDZONE: 914 umem_printf("redzone violation: write past end of buffer\n"); 915 break; 916 917 case UMERR_BADADDR: 918 umem_printf("invalid free: buffer not in cache\n"); 919 break; 920 921 case UMERR_DUPFREE: 922 umem_printf("duplicate free: buffer freed twice\n"); 923 break; 924 925 case UMERR_BADBUFTAG: 926 umem_printf("boundary tag corrupted\n"); 927 umem_printf("bcp ^ bxstat = %lx, should be %lx\n", 928 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat, 929 UMEM_BUFTAG_FREE); 930 break; 931 932 case UMERR_BADBUFCTL: 933 umem_printf("bufctl corrupted\n"); 934 break; 935 936 case UMERR_BADCACHE: 937 umem_printf("buffer freed to wrong cache\n"); 938 umem_printf("buffer was allocated from %s,\n", cp->cache_name); 939 umem_printf("caller attempting free to %s.\n", 940 cparg->cache_name); 941 break; 942 943 case UMERR_BADSIZE: 944 umem_printf("bad free: free size (%u) != alloc size (%u)\n", 
945 UMEM_SIZE_DECODE(((uint32_t *)btp)[0]), 946 UMEM_SIZE_DECODE(((uint32_t *)btp)[1])); 947 break; 948 949 case UMERR_BADBASE: 950 umem_printf("bad free: free address (%p) != alloc address " 951 "(%p)\n", bufarg, buf); 952 break; 953 } 954 955 umem_printf("buffer=%p bufctl=%p cache: %s\n", 956 bufarg, (void *)bcp, cparg->cache_name); 957 958 if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) && 959 error != UMERR_BADBUFCTL) { 960 int d; 961 timespec_t ts; 962 hrtime_t diff; 963 umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp; 964 965 diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp; 966 ts.tv_sec = diff / NANOSEC; 967 ts.tv_nsec = diff % NANOSEC; 968 969 umem_printf("previous transaction on buffer %p:\n", buf); 970 umem_printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n", 971 (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec, 972 (void *)sp, cp->cache_name); 973 for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) { 974 (void) print_sym((void *)bcap->bc_stack[d]); 975 umem_printf("\n"); 976 } 977 } 978 979 umem_err_recoverable("umem: heap corruption detected"); 980 981 umem_logging = old_logging; /* resume logging */ 982 } 983 984 void 985 umem_nofail_callback(umem_nofail_callback_t *cb) 986 { 987 nofail_callback = cb; 988 } 989 990 static int 991 umem_alloc_retry(umem_cache_t *cp, int umflag) 992 { 993 if (cp == &umem_null_cache) { 994 if (umem_init()) 995 return (1); /* retry */ 996 /* 997 * Initialization failed. Do normal failure processing. 998 */ 999 } 1000 if (umflag & UMEM_NOFAIL) { 1001 int def_result = UMEM_CALLBACK_EXIT(255); 1002 int result = def_result; 1003 umem_nofail_callback_t *callback = nofail_callback; 1004 1005 if (callback != NULL) 1006 result = callback(); 1007 1008 if (result == UMEM_CALLBACK_RETRY) 1009 return (1); 1010 1011 if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) { 1012 log_message("nofail callback returned %x\n", result); 1013 result = def_result; 1014 } 1015 1016 /* 1017 * only one thread will call exit 1018 */ 1019 if (umem_nofail_exit_thr == thr_self()) 1020 umem_panic("recursive UMEM_CALLBACK_EXIT()\n"); 1021 1022 (void) mutex_lock(&umem_nofail_exit_lock); 1023 umem_nofail_exit_thr = thr_self(); 1024 exit(result & 0xFF); 1025 /*NOTREACHED*/ 1026 } 1027 return (0); 1028 } 1029 1030 static umem_log_header_t * 1031 umem_log_init(size_t logsize) 1032 { 1033 umem_log_header_t *lhp; 1034 int nchunks = 4 * umem_max_ncpus; 1035 size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]); 1036 int i; 1037 1038 if (logsize == 0) 1039 return (NULL); 1040 1041 /* 1042 * Make sure that lhp->lh_cpu[] is nicely aligned 1043 * to prevent false sharing of cache lines. 
1044 */ 1045 lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN); 1046 lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0, 1047 NULL, NULL, VM_NOSLEEP); 1048 if (lhp == NULL) 1049 goto fail; 1050 1051 bzero(lhp, lhsize); 1052 1053 (void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL); 1054 lhp->lh_nchunks = nchunks; 1055 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE); 1056 if (lhp->lh_chunksize == 0) 1057 lhp->lh_chunksize = PAGESIZE; 1058 1059 lhp->lh_base = vmem_alloc(umem_log_arena, 1060 lhp->lh_chunksize * nchunks, VM_NOSLEEP); 1061 if (lhp->lh_base == NULL) 1062 goto fail; 1063 1064 lhp->lh_free = vmem_alloc(umem_log_arena, 1065 nchunks * sizeof (int), VM_NOSLEEP); 1066 if (lhp->lh_free == NULL) 1067 goto fail; 1068 1069 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks); 1070 1071 for (i = 0; i < umem_max_ncpus; i++) { 1072 umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i]; 1073 (void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL); 1074 clhp->clh_chunk = i; 1075 } 1076 1077 for (i = umem_max_ncpus; i < nchunks; i++) 1078 lhp->lh_free[i] = i; 1079 1080 lhp->lh_head = umem_max_ncpus; 1081 lhp->lh_tail = 0; 1082 1083 return (lhp); 1084 1085 fail: 1086 if (lhp != NULL) { 1087 if (lhp->lh_base != NULL) 1088 vmem_free(umem_log_arena, lhp->lh_base, 1089 lhp->lh_chunksize * nchunks); 1090 1091 vmem_xfree(umem_log_arena, lhp, lhsize); 1092 } 1093 return (NULL); 1094 } 1095 1096 static void * 1097 umem_log_enter(umem_log_header_t *lhp, void *data, size_t size) 1098 { 1099 void *logspace; 1100 umem_cpu_log_header_t *clhp = 1101 &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number]; 1102 1103 if (lhp == NULL || umem_logging == 0) 1104 return (NULL); 1105 1106 (void) mutex_lock(&clhp->clh_lock); 1107 clhp->clh_hits++; 1108 if (size > clhp->clh_avail) { 1109 (void) mutex_lock(&lhp->lh_lock); 1110 lhp->lh_hits++; 1111 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk; 1112 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks; 1113 clhp->clh_chunk = lhp->lh_free[lhp->lh_head]; 1114 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks; 1115 clhp->clh_current = lhp->lh_base + 1116 clhp->clh_chunk * lhp->lh_chunksize; 1117 clhp->clh_avail = lhp->lh_chunksize; 1118 if (size > lhp->lh_chunksize) 1119 size = lhp->lh_chunksize; 1120 (void) mutex_unlock(&lhp->lh_lock); 1121 } 1122 logspace = clhp->clh_current; 1123 clhp->clh_current += size; 1124 clhp->clh_avail -= size; 1125 bcopy(data, logspace, size); 1126 (void) mutex_unlock(&clhp->clh_lock); 1127 return (logspace); 1128 } 1129 1130 #define UMEM_AUDIT(lp, cp, bcp) \ 1131 { \ 1132 umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp); \ 1133 _bcp->bc_timestamp = gethrtime(); \ 1134 _bcp->bc_thread = thr_self(); \ 1135 _bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth, \ 1136 (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL)); \ 1137 _bcp->bc_lastlog = umem_log_enter((lp), _bcp, \ 1138 UMEM_BUFCTL_AUDIT_SIZE); \ 1139 } 1140 1141 static void 1142 umem_log_event(umem_log_header_t *lp, umem_cache_t *cp, 1143 umem_slab_t *sp, void *addr) 1144 { 1145 umem_bufctl_audit_t *bcp; 1146 UMEM_LOCAL_BUFCTL_AUDIT(&bcp); 1147 1148 bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE); 1149 bcp->bc_addr = addr; 1150 bcp->bc_slab = sp; 1151 bcp->bc_cache = cp; 1152 UMEM_AUDIT(lp, cp, bcp); 1153 } 1154 1155 /* 1156 * Create a new slab for cache cp. 
1157 */ 1158 static umem_slab_t * 1159 umem_slab_create(umem_cache_t *cp, int umflag) 1160 { 1161 size_t slabsize = cp->cache_slabsize; 1162 size_t chunksize = cp->cache_chunksize; 1163 int cache_flags = cp->cache_flags; 1164 size_t color, chunks; 1165 char *buf, *slab; 1166 umem_slab_t *sp; 1167 umem_bufctl_t *bcp; 1168 vmem_t *vmp = cp->cache_arena; 1169 1170 color = cp->cache_color + cp->cache_align; 1171 if (color > cp->cache_maxcolor) 1172 color = cp->cache_mincolor; 1173 cp->cache_color = color; 1174 1175 slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag)); 1176 1177 if (slab == NULL) 1178 goto vmem_alloc_failure; 1179 1180 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0); 1181 1182 if (!(cp->cache_cflags & UMC_NOTOUCH) && 1183 (cp->cache_flags & UMF_DEADBEEF)) 1184 copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize); 1185 1186 if (cache_flags & UMF_HASH) { 1187 if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL) 1188 goto slab_alloc_failure; 1189 chunks = (slabsize - color) / chunksize; 1190 } else { 1191 sp = UMEM_SLAB(cp, slab); 1192 chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize; 1193 } 1194 1195 sp->slab_cache = cp; 1196 sp->slab_head = NULL; 1197 sp->slab_refcnt = 0; 1198 sp->slab_base = buf = slab + color; 1199 sp->slab_chunks = chunks; 1200 1201 ASSERT(chunks > 0); 1202 while (chunks-- != 0) { 1203 if (cache_flags & UMF_HASH) { 1204 bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag); 1205 if (bcp == NULL) 1206 goto bufctl_alloc_failure; 1207 if (cache_flags & UMF_AUDIT) { 1208 umem_bufctl_audit_t *bcap = 1209 (umem_bufctl_audit_t *)bcp; 1210 bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE); 1211 bcap->bc_cache = cp; 1212 } 1213 bcp->bc_addr = buf; 1214 bcp->bc_slab = sp; 1215 } else { 1216 bcp = UMEM_BUFCTL(cp, buf); 1217 } 1218 if (cache_flags & UMF_BUFTAG) { 1219 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1220 btp->bt_redzone = UMEM_REDZONE_PATTERN; 1221 btp->bt_bufctl = bcp; 1222 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1223 if (cache_flags & UMF_DEADBEEF) { 1224 copy_pattern(UMEM_FREE_PATTERN, buf, 1225 cp->cache_verify); 1226 } 1227 } 1228 bcp->bc_next = sp->slab_head; 1229 sp->slab_head = bcp; 1230 buf += chunksize; 1231 } 1232 1233 umem_log_event(umem_slab_log, cp, sp, slab); 1234 1235 return (sp); 1236 1237 bufctl_alloc_failure: 1238 1239 while ((bcp = sp->slab_head) != NULL) { 1240 sp->slab_head = bcp->bc_next; 1241 _umem_cache_free(cp->cache_bufctl_cache, bcp); 1242 } 1243 _umem_cache_free(umem_slab_cache, sp); 1244 1245 slab_alloc_failure: 1246 1247 vmem_free(vmp, slab, slabsize); 1248 1249 vmem_alloc_failure: 1250 1251 umem_log_event(umem_failure_log, cp, NULL, NULL); 1252 atomic_add_64(&cp->cache_alloc_fail, 1); 1253 1254 return (NULL); 1255 } 1256 1257 /* 1258 * Destroy a slab. 1259 */ 1260 static void 1261 umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp) 1262 { 1263 vmem_t *vmp = cp->cache_arena; 1264 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum); 1265 1266 if (cp->cache_flags & UMF_HASH) { 1267 umem_bufctl_t *bcp; 1268 while ((bcp = sp->slab_head) != NULL) { 1269 sp->slab_head = bcp->bc_next; 1270 _umem_cache_free(cp->cache_bufctl_cache, bcp); 1271 } 1272 _umem_cache_free(umem_slab_cache, sp); 1273 } 1274 vmem_free(vmp, slab, cp->cache_slabsize); 1275 } 1276 1277 /* 1278 * Allocate a raw (unconstructed) buffer from cp's slab layer. 
1279 */ 1280 static void * 1281 umem_slab_alloc(umem_cache_t *cp, int umflag) 1282 { 1283 umem_bufctl_t *bcp, **hash_bucket; 1284 umem_slab_t *sp; 1285 void *buf; 1286 1287 (void) mutex_lock(&cp->cache_lock); 1288 cp->cache_slab_alloc++; 1289 sp = cp->cache_freelist; 1290 ASSERT(sp->slab_cache == cp); 1291 if (sp->slab_head == NULL) { 1292 /* 1293 * The freelist is empty. Create a new slab. 1294 */ 1295 (void) mutex_unlock(&cp->cache_lock); 1296 if (cp == &umem_null_cache) 1297 return (NULL); 1298 if ((sp = umem_slab_create(cp, umflag)) == NULL) 1299 return (NULL); 1300 (void) mutex_lock(&cp->cache_lock); 1301 cp->cache_slab_create++; 1302 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax) 1303 cp->cache_bufmax = cp->cache_buftotal; 1304 sp->slab_next = cp->cache_freelist; 1305 sp->slab_prev = cp->cache_freelist->slab_prev; 1306 sp->slab_next->slab_prev = sp; 1307 sp->slab_prev->slab_next = sp; 1308 cp->cache_freelist = sp; 1309 } 1310 1311 sp->slab_refcnt++; 1312 ASSERT(sp->slab_refcnt <= sp->slab_chunks); 1313 1314 /* 1315 * If we're taking the last buffer in the slab, 1316 * remove the slab from the cache's freelist. 1317 */ 1318 bcp = sp->slab_head; 1319 if ((sp->slab_head = bcp->bc_next) == NULL) { 1320 cp->cache_freelist = sp->slab_next; 1321 ASSERT(sp->slab_refcnt == sp->slab_chunks); 1322 } 1323 1324 if (cp->cache_flags & UMF_HASH) { 1325 /* 1326 * Add buffer to allocated-address hash table. 1327 */ 1328 buf = bcp->bc_addr; 1329 hash_bucket = UMEM_HASH(cp, buf); 1330 bcp->bc_next = *hash_bucket; 1331 *hash_bucket = bcp; 1332 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) { 1333 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1334 } 1335 } else { 1336 buf = UMEM_BUF(cp, bcp); 1337 } 1338 1339 ASSERT(UMEM_SLAB_MEMBER(sp, buf)); 1340 1341 (void) mutex_unlock(&cp->cache_lock); 1342 1343 return (buf); 1344 } 1345 1346 /* 1347 * Free a raw (unconstructed) buffer to cp's slab layer. 1348 */ 1349 static void 1350 umem_slab_free(umem_cache_t *cp, void *buf) 1351 { 1352 umem_slab_t *sp; 1353 umem_bufctl_t *bcp, **prev_bcpp; 1354 1355 ASSERT(buf != NULL); 1356 1357 (void) mutex_lock(&cp->cache_lock); 1358 cp->cache_slab_free++; 1359 1360 if (cp->cache_flags & UMF_HASH) { 1361 /* 1362 * Look up buffer in allocated-address hash table. 1363 */ 1364 prev_bcpp = UMEM_HASH(cp, buf); 1365 while ((bcp = *prev_bcpp) != NULL) { 1366 if (bcp->bc_addr == buf) { 1367 *prev_bcpp = bcp->bc_next; 1368 sp = bcp->bc_slab; 1369 break; 1370 } 1371 cp->cache_lookup_depth++; 1372 prev_bcpp = &bcp->bc_next; 1373 } 1374 } else { 1375 bcp = UMEM_BUFCTL(cp, buf); 1376 sp = UMEM_SLAB(cp, buf); 1377 } 1378 1379 if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) { 1380 (void) mutex_unlock(&cp->cache_lock); 1381 umem_error(UMERR_BADADDR, cp, buf); 1382 return; 1383 } 1384 1385 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) { 1386 if (cp->cache_flags & UMF_CONTENTS) 1387 ((umem_bufctl_audit_t *)bcp)->bc_contents = 1388 umem_log_enter(umem_content_log, buf, 1389 cp->cache_contents); 1390 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1391 } 1392 1393 /* 1394 * If this slab isn't currently on the freelist, put it there. 
1395 */ 1396 if (sp->slab_head == NULL) { 1397 ASSERT(sp->slab_refcnt == sp->slab_chunks); 1398 ASSERT(cp->cache_freelist != sp); 1399 sp->slab_next->slab_prev = sp->slab_prev; 1400 sp->slab_prev->slab_next = sp->slab_next; 1401 sp->slab_next = cp->cache_freelist; 1402 sp->slab_prev = cp->cache_freelist->slab_prev; 1403 sp->slab_next->slab_prev = sp; 1404 sp->slab_prev->slab_next = sp; 1405 cp->cache_freelist = sp; 1406 } 1407 1408 bcp->bc_next = sp->slab_head; 1409 sp->slab_head = bcp; 1410 1411 ASSERT(sp->slab_refcnt >= 1); 1412 if (--sp->slab_refcnt == 0) { 1413 /* 1414 * There are no outstanding allocations from this slab, 1415 * so we can reclaim the memory. 1416 */ 1417 sp->slab_next->slab_prev = sp->slab_prev; 1418 sp->slab_prev->slab_next = sp->slab_next; 1419 if (sp == cp->cache_freelist) 1420 cp->cache_freelist = sp->slab_next; 1421 cp->cache_slab_destroy++; 1422 cp->cache_buftotal -= sp->slab_chunks; 1423 (void) mutex_unlock(&cp->cache_lock); 1424 umem_slab_destroy(cp, sp); 1425 return; 1426 } 1427 (void) mutex_unlock(&cp->cache_lock); 1428 } 1429 1430 static int 1431 umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag) 1432 { 1433 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1434 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl; 1435 uint32_t mtbf; 1436 int flags_nfatal; 1437 1438 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) { 1439 umem_error(UMERR_BADBUFTAG, cp, buf); 1440 return (-1); 1441 } 1442 1443 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC; 1444 1445 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) { 1446 umem_error(UMERR_BADBUFCTL, cp, buf); 1447 return (-1); 1448 } 1449 1450 btp->bt_redzone = UMEM_REDZONE_PATTERN; 1451 1452 if (cp->cache_flags & UMF_DEADBEEF) { 1453 if (verify_and_copy_pattern(UMEM_FREE_PATTERN, 1454 UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) { 1455 umem_error(UMERR_MODIFIED, cp, buf); 1456 return (-1); 1457 } 1458 } 1459 1460 if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 && 1461 gethrtime() % mtbf == 0 && 1462 (umflag & (UMEM_FATAL_FLAGS)) == 0) { 1463 umem_log_event(umem_failure_log, cp, NULL, NULL); 1464 } else { 1465 mtbf = 0; 1466 } 1467 1468 /* 1469 * We do not pass fatal flags on to the constructor. This prevents 1470 * leaking buffers in the event of a subordinate constructor failing. 
1471 */ 1472 flags_nfatal = UMEM_DEFAULT; 1473 if (mtbf || (cp->cache_constructor != NULL && 1474 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) { 1475 atomic_add_64(&cp->cache_alloc_fail, 1); 1476 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1477 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 1478 umem_slab_free(cp, buf); 1479 return (-1); 1480 } 1481 1482 if (cp->cache_flags & UMF_AUDIT) { 1483 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1484 } 1485 1486 return (0); 1487 } 1488 1489 static int 1490 umem_cache_free_debug(umem_cache_t *cp, void *buf) 1491 { 1492 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1493 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl; 1494 umem_slab_t *sp; 1495 1496 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) { 1497 if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) { 1498 umem_error(UMERR_DUPFREE, cp, buf); 1499 return (-1); 1500 } 1501 sp = umem_findslab(cp, buf); 1502 if (sp == NULL || sp->slab_cache != cp) 1503 umem_error(UMERR_BADADDR, cp, buf); 1504 else 1505 umem_error(UMERR_REDZONE, cp, buf); 1506 return (-1); 1507 } 1508 1509 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE; 1510 1511 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) { 1512 umem_error(UMERR_BADBUFCTL, cp, buf); 1513 return (-1); 1514 } 1515 1516 if (btp->bt_redzone != UMEM_REDZONE_PATTERN) { 1517 umem_error(UMERR_REDZONE, cp, buf); 1518 return (-1); 1519 } 1520 1521 if (cp->cache_flags & UMF_AUDIT) { 1522 if (cp->cache_flags & UMF_CONTENTS) 1523 bcp->bc_contents = umem_log_enter(umem_content_log, 1524 buf, cp->cache_contents); 1525 UMEM_AUDIT(umem_transaction_log, cp, bcp); 1526 } 1527 1528 if (cp->cache_destructor != NULL) 1529 cp->cache_destructor(buf, cp->cache_private); 1530 1531 if (cp->cache_flags & UMF_DEADBEEF) 1532 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify); 1533 1534 return (0); 1535 } 1536 1537 /* 1538 * Free each object in magazine mp to cp's slab layer, and free mp itself. 1539 */ 1540 static void 1541 umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds) 1542 { 1543 int round; 1544 1545 ASSERT(cp->cache_next == NULL || IN_UPDATE()); 1546 1547 for (round = 0; round < nrounds; round++) { 1548 void *buf = mp->mag_round[round]; 1549 1550 if ((cp->cache_flags & UMF_DEADBEEF) && 1551 verify_pattern(UMEM_FREE_PATTERN, buf, 1552 cp->cache_verify) != NULL) { 1553 umem_error(UMERR_MODIFIED, cp, buf); 1554 continue; 1555 } 1556 1557 if (!(cp->cache_flags & UMF_BUFTAG) && 1558 cp->cache_destructor != NULL) 1559 cp->cache_destructor(buf, cp->cache_private); 1560 1561 umem_slab_free(cp, buf); 1562 } 1563 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1564 _umem_cache_free(cp->cache_magtype->mt_cache, mp); 1565 } 1566 1567 /* 1568 * Allocate a magazine from the depot. 1569 */ 1570 static umem_magazine_t * 1571 umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp) 1572 { 1573 umem_magazine_t *mp; 1574 1575 /* 1576 * If we can't get the depot lock without contention, 1577 * update our contention count. We use the depot 1578 * contention rate to determine whether we need to 1579 * increase the magazine size for better scalability. 
1580 */ 1581 if (mutex_trylock(&cp->cache_depot_lock) != 0) { 1582 (void) mutex_lock(&cp->cache_depot_lock); 1583 cp->cache_depot_contention++; 1584 } 1585 1586 if ((mp = mlp->ml_list) != NULL) { 1587 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1588 mlp->ml_list = mp->mag_next; 1589 if (--mlp->ml_total < mlp->ml_min) 1590 mlp->ml_min = mlp->ml_total; 1591 mlp->ml_alloc++; 1592 } 1593 1594 (void) mutex_unlock(&cp->cache_depot_lock); 1595 1596 return (mp); 1597 } 1598 1599 /* 1600 * Free a magazine to the depot. 1601 */ 1602 static void 1603 umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp) 1604 { 1605 (void) mutex_lock(&cp->cache_depot_lock); 1606 ASSERT(UMEM_MAGAZINE_VALID(cp, mp)); 1607 mp->mag_next = mlp->ml_list; 1608 mlp->ml_list = mp; 1609 mlp->ml_total++; 1610 (void) mutex_unlock(&cp->cache_depot_lock); 1611 } 1612 1613 /* 1614 * Update the working set statistics for cp's depot. 1615 */ 1616 static void 1617 umem_depot_ws_update(umem_cache_t *cp) 1618 { 1619 (void) mutex_lock(&cp->cache_depot_lock); 1620 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; 1621 cp->cache_full.ml_min = cp->cache_full.ml_total; 1622 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; 1623 cp->cache_empty.ml_min = cp->cache_empty.ml_total; 1624 (void) mutex_unlock(&cp->cache_depot_lock); 1625 } 1626 1627 /* 1628 * Reap all magazines that have fallen out of the depot's working set. 1629 */ 1630 static void 1631 umem_depot_ws_reap(umem_cache_t *cp) 1632 { 1633 long reap; 1634 umem_magazine_t *mp; 1635 1636 ASSERT(cp->cache_next == NULL || IN_REAP()); 1637 1638 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); 1639 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL) 1640 umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); 1641 1642 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); 1643 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL) 1644 umem_magazine_destroy(cp, mp, 0); 1645 } 1646 1647 static void 1648 umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds) 1649 { 1650 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) || 1651 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize)); 1652 ASSERT(ccp->cc_magsize > 0); 1653 1654 ccp->cc_ploaded = ccp->cc_loaded; 1655 ccp->cc_prounds = ccp->cc_rounds; 1656 ccp->cc_loaded = mp; 1657 ccp->cc_rounds = rounds; 1658 } 1659 1660 /* 1661 * Allocate a constructed object from cache cp. 1662 */ 1663 #pragma weak umem_cache_alloc = _umem_cache_alloc 1664 void * 1665 _umem_cache_alloc(umem_cache_t *cp, int umflag) 1666 { 1667 umem_cpu_cache_t *ccp; 1668 umem_magazine_t *fmp; 1669 void *buf; 1670 int flags_nfatal; 1671 1672 retry: 1673 ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask)); 1674 (void) mutex_lock(&ccp->cc_lock); 1675 for (;;) { 1676 /* 1677 * If there's an object available in the current CPU's 1678 * loaded magazine, just take it and return. 1679 */ 1680 if (ccp->cc_rounds > 0) { 1681 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds]; 1682 ccp->cc_alloc++; 1683 (void) mutex_unlock(&ccp->cc_lock); 1684 if ((ccp->cc_flags & UMF_BUFTAG) && 1685 umem_cache_alloc_debug(cp, buf, umflag) == -1) { 1686 if (umem_alloc_retry(cp, umflag)) { 1687 goto retry; 1688 } 1689 1690 return (NULL); 1691 } 1692 return (buf); 1693 } 1694 1695 /* 1696 * The loaded magazine is empty. If the previously loaded 1697 * magazine was full, exchange them and try again. 
1698 */ 1699 if (ccp->cc_prounds > 0) { 1700 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 1701 continue; 1702 } 1703 1704 /* 1705 * If the magazine layer is disabled, break out now. 1706 */ 1707 if (ccp->cc_magsize == 0) 1708 break; 1709 1710 /* 1711 * Try to get a full magazine from the depot. 1712 */ 1713 fmp = umem_depot_alloc(cp, &cp->cache_full); 1714 if (fmp != NULL) { 1715 if (ccp->cc_ploaded != NULL) 1716 umem_depot_free(cp, &cp->cache_empty, 1717 ccp->cc_ploaded); 1718 umem_cpu_reload(ccp, fmp, ccp->cc_magsize); 1719 continue; 1720 } 1721 1722 /* 1723 * There are no full magazines in the depot, 1724 * so fall through to the slab layer. 1725 */ 1726 break; 1727 } 1728 (void) mutex_unlock(&ccp->cc_lock); 1729 1730 /* 1731 * We couldn't allocate a constructed object from the magazine layer, 1732 * so get a raw buffer from the slab layer and apply its constructor. 1733 */ 1734 buf = umem_slab_alloc(cp, umflag); 1735 1736 if (buf == NULL) { 1737 if (cp == &umem_null_cache) 1738 return (NULL); 1739 if (umem_alloc_retry(cp, umflag)) { 1740 goto retry; 1741 } 1742 1743 return (NULL); 1744 } 1745 1746 if (cp->cache_flags & UMF_BUFTAG) { 1747 /* 1748 * Let umem_cache_alloc_debug() apply the constructor for us. 1749 */ 1750 if (umem_cache_alloc_debug(cp, buf, umflag) == -1) { 1751 if (umem_alloc_retry(cp, umflag)) { 1752 goto retry; 1753 } 1754 return (NULL); 1755 } 1756 return (buf); 1757 } 1758 1759 /* 1760 * We do not pass fatal flags on to the constructor. This prevents 1761 * leaking buffers in the event of a subordinate constructor failing. 1762 */ 1763 flags_nfatal = UMEM_DEFAULT; 1764 if (cp->cache_constructor != NULL && 1765 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) { 1766 atomic_add_64(&cp->cache_alloc_fail, 1); 1767 umem_slab_free(cp, buf); 1768 1769 if (umem_alloc_retry(cp, umflag)) { 1770 goto retry; 1771 } 1772 return (NULL); 1773 } 1774 1775 return (buf); 1776 } 1777 1778 /* 1779 * Free a constructed object to cache cp. 1780 */ 1781 #pragma weak umem_cache_free = _umem_cache_free 1782 void 1783 _umem_cache_free(umem_cache_t *cp, void *buf) 1784 { 1785 umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask)); 1786 umem_magazine_t *emp; 1787 umem_magtype_t *mtp; 1788 1789 if (ccp->cc_flags & UMF_BUFTAG) 1790 if (umem_cache_free_debug(cp, buf) == -1) 1791 return; 1792 1793 (void) mutex_lock(&ccp->cc_lock); 1794 for (;;) { 1795 /* 1796 * If there's a slot available in the current CPU's 1797 * loaded magazine, just put the object there and return. 1798 */ 1799 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) { 1800 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf; 1801 ccp->cc_free++; 1802 (void) mutex_unlock(&ccp->cc_lock); 1803 return; 1804 } 1805 1806 /* 1807 * The loaded magazine is full. If the previously loaded 1808 * magazine was empty, exchange them and try again. 1809 */ 1810 if (ccp->cc_prounds == 0) { 1811 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds); 1812 continue; 1813 } 1814 1815 /* 1816 * If the magazine layer is disabled, break out now. 1817 */ 1818 if (ccp->cc_magsize == 0) 1819 break; 1820 1821 /* 1822 * Try to get an empty magazine from the depot. 1823 */ 1824 emp = umem_depot_alloc(cp, &cp->cache_empty); 1825 if (emp != NULL) { 1826 if (ccp->cc_ploaded != NULL) 1827 umem_depot_free(cp, &cp->cache_full, 1828 ccp->cc_ploaded); 1829 umem_cpu_reload(ccp, emp, 0); 1830 continue; 1831 } 1832 1833 /* 1834 * There are no empty magazines in the depot, 1835 * so try to allocate a new one. 
We must drop all locks 1836 * across umem_cache_alloc() because lower layers may 1837 * attempt to allocate from this cache. 1838 */ 1839 mtp = cp->cache_magtype; 1840 (void) mutex_unlock(&ccp->cc_lock); 1841 emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT); 1842 (void) mutex_lock(&ccp->cc_lock); 1843 1844 if (emp != NULL) { 1845 /* 1846 * We successfully allocated an empty magazine. 1847 * However, we had to drop ccp->cc_lock to do it, 1848 * so the cache's magazine size may have changed. 1849 * If so, free the magazine and try again. 1850 */ 1851 if (ccp->cc_magsize != mtp->mt_magsize) { 1852 (void) mutex_unlock(&ccp->cc_lock); 1853 _umem_cache_free(mtp->mt_cache, emp); 1854 (void) mutex_lock(&ccp->cc_lock); 1855 continue; 1856 } 1857 1858 /* 1859 * We got a magazine of the right size. Add it to 1860 * the depot and try the whole dance again. 1861 */ 1862 umem_depot_free(cp, &cp->cache_empty, emp); 1863 continue; 1864 } 1865 1866 /* 1867 * We couldn't allocate an empty magazine, 1868 * so fall through to the slab layer. 1869 */ 1870 break; 1871 } 1872 (void) mutex_unlock(&ccp->cc_lock); 1873 1874 /* 1875 * We couldn't free our constructed object to the magazine layer, 1876 * so apply its destructor and free it to the slab layer. 1877 * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug() 1878 * will have already applied the destructor. 1879 */ 1880 if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL) 1881 cp->cache_destructor(buf, cp->cache_private); 1882 1883 umem_slab_free(cp, buf); 1884 } 1885 1886 #pragma weak umem_zalloc = _umem_zalloc 1887 void * 1888 _umem_zalloc(size_t size, int umflag) 1889 { 1890 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 1891 void *buf; 1892 1893 retry: 1894 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 1895 umem_cache_t *cp = umem_alloc_table[index]; 1896 buf = _umem_cache_alloc(cp, umflag); 1897 if (buf != NULL) { 1898 if (cp->cache_flags & UMF_BUFTAG) { 1899 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1900 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE; 1901 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size); 1902 } 1903 bzero(buf, size); 1904 } else if (umem_alloc_retry(cp, umflag)) 1905 goto retry; 1906 } else { 1907 buf = _umem_alloc(size, umflag); /* handles failure */ 1908 if (buf != NULL) 1909 bzero(buf, size); 1910 } 1911 return (buf); 1912 } 1913 1914 #pragma weak umem_alloc = _umem_alloc 1915 void * 1916 _umem_alloc(size_t size, int umflag) 1917 { 1918 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 1919 void *buf; 1920 umem_alloc_retry: 1921 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 1922 umem_cache_t *cp = umem_alloc_table[index]; 1923 buf = _umem_cache_alloc(cp, umflag); 1924 if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) { 1925 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1926 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE; 1927 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size); 1928 } 1929 if (buf == NULL && umem_alloc_retry(cp, umflag)) 1930 goto umem_alloc_retry; 1931 return (buf); 1932 } 1933 if (size == 0) 1934 return (NULL); 1935 if (umem_oversize_arena == NULL) { 1936 if (umem_init()) 1937 ASSERT(umem_oversize_arena != NULL); 1938 else 1939 return (NULL); 1940 } 1941 buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag)); 1942 if (buf == NULL) { 1943 umem_log_event(umem_failure_log, NULL, NULL, (void *)size); 1944 if (umem_alloc_retry(NULL, umflag)) 1945 goto umem_alloc_retry; 1946 } 1947 return (buf); 1948 } 1949 1950 #pragma weak umem_alloc_align = _umem_alloc_align 1951 void * 1952 
_umem_alloc_align(size_t size, size_t align, int umflag) 1953 { 1954 void *buf; 1955 1956 if (size == 0) 1957 return (NULL); 1958 if ((align & (align - 1)) != 0) 1959 return (NULL); 1960 if (align < UMEM_ALIGN) 1961 align = UMEM_ALIGN; 1962 1963 umem_alloc_align_retry: 1964 if (umem_memalign_arena == NULL) { 1965 if (umem_init()) 1966 ASSERT(umem_oversize_arena != NULL); 1967 else 1968 return (NULL); 1969 } 1970 buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL, 1971 UMEM_VMFLAGS(umflag)); 1972 if (buf == NULL) { 1973 umem_log_event(umem_failure_log, NULL, NULL, (void *)size); 1974 if (umem_alloc_retry(NULL, umflag)) 1975 goto umem_alloc_align_retry; 1976 } 1977 return (buf); 1978 } 1979 1980 #pragma weak umem_free = _umem_free 1981 void 1982 _umem_free(void *buf, size_t size) 1983 { 1984 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT; 1985 1986 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) { 1987 umem_cache_t *cp = umem_alloc_table[index]; 1988 if (cp->cache_flags & UMF_BUFTAG) { 1989 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf); 1990 uint32_t *ip = (uint32_t *)btp; 1991 if (ip[1] != UMEM_SIZE_ENCODE(size)) { 1992 if (*(uint64_t *)buf == UMEM_FREE_PATTERN) { 1993 umem_error(UMERR_DUPFREE, cp, buf); 1994 return; 1995 } 1996 if (UMEM_SIZE_VALID(ip[1])) { 1997 ip[0] = UMEM_SIZE_ENCODE(size); 1998 umem_error(UMERR_BADSIZE, cp, buf); 1999 } else { 2000 umem_error(UMERR_REDZONE, cp, buf); 2001 } 2002 return; 2003 } 2004 if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) { 2005 umem_error(UMERR_REDZONE, cp, buf); 2006 return; 2007 } 2008 btp->bt_redzone = UMEM_REDZONE_PATTERN; 2009 } 2010 _umem_cache_free(cp, buf); 2011 } else { 2012 if (buf == NULL && size == 0) 2013 return; 2014 vmem_free(umem_oversize_arena, buf, size); 2015 } 2016 } 2017 2018 #pragma weak umem_free_align = _umem_free_align 2019 void 2020 _umem_free_align(void *buf, size_t size) 2021 { 2022 if (buf == NULL && size == 0) 2023 return; 2024 vmem_xfree(umem_memalign_arena, buf, size); 2025 } 2026 2027 static void * 2028 umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag) 2029 { 2030 size_t realsize = size + vmp->vm_quantum; 2031 2032 /* 2033 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding 2034 * vm_quantum will cause integer wraparound. Check for this, and 2035 * blow off the firewall page in this case. Note that such a 2036 * giant allocation (the entire address space) can never be 2037 * satisfied, so it will either fail immediately (VM_NOSLEEP) 2038 * or sleep forever (VM_SLEEP). Thus, there is no need for a 2039 * corresponding check in umem_firewall_va_free(). 2040 */ 2041 if (realsize < size) 2042 realsize = size; 2043 2044 return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT)); 2045 } 2046 2047 static void 2048 umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size) 2049 { 2050 vmem_free(vmp, addr, size + vmp->vm_quantum); 2051 } 2052 2053 /* 2054 * Reclaim all unused memory from a cache. 2055 */ 2056 static void 2057 umem_cache_reap(umem_cache_t *cp) 2058 { 2059 /* 2060 * Ask the cache's owner to free some memory if possible. 2061 * The idea is to handle things like the inode cache, which 2062 * typically sits on a bunch of memory that it doesn't truly 2063 * *need*. Reclaim policy is entirely up to the owner; this 2064 * callback is just an advisory plea for help. 2065 */ 2066 if (cp->cache_reclaim != NULL) 2067 cp->cache_reclaim(cp->cache_private); 2068 2069 umem_depot_ws_reap(cp); 2070 } 2071 2072 /* 2073 * Purge all magazines from a cache and set its magazine limit to zero. 
2074 * All calls are serialized by being done by the update thread, except for 2075 * the final call from umem_cache_destroy(). 2076 */ 2077 static void 2078 umem_cache_magazine_purge(umem_cache_t *cp) 2079 { 2080 umem_cpu_cache_t *ccp; 2081 umem_magazine_t *mp, *pmp; 2082 int rounds, prounds, cpu_seqid; 2083 2084 ASSERT(cp->cache_next == NULL || IN_UPDATE()); 2085 2086 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) { 2087 ccp = &cp->cache_cpu[cpu_seqid]; 2088 2089 (void) mutex_lock(&ccp->cc_lock); 2090 mp = ccp->cc_loaded; 2091 pmp = ccp->cc_ploaded; 2092 rounds = ccp->cc_rounds; 2093 prounds = ccp->cc_prounds; 2094 ccp->cc_loaded = NULL; 2095 ccp->cc_ploaded = NULL; 2096 ccp->cc_rounds = -1; 2097 ccp->cc_prounds = -1; 2098 ccp->cc_magsize = 0; 2099 (void) mutex_unlock(&ccp->cc_lock); 2100 2101 if (mp) 2102 umem_magazine_destroy(cp, mp, rounds); 2103 if (pmp) 2104 umem_magazine_destroy(cp, pmp, prounds); 2105 } 2106 2107 /* 2108 * Updating the working set statistics twice in a row has the 2109 * effect of setting the working set size to zero, so everything 2110 * is eligible for reaping. 2111 */ 2112 umem_depot_ws_update(cp); 2113 umem_depot_ws_update(cp); 2114 2115 umem_depot_ws_reap(cp); 2116 } 2117 2118 /* 2119 * Enable per-cpu magazines on a cache. 2120 */ 2121 static void 2122 umem_cache_magazine_enable(umem_cache_t *cp) 2123 { 2124 int cpu_seqid; 2125 2126 if (cp->cache_flags & UMF_NOMAGAZINE) 2127 return; 2128 2129 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) { 2130 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 2131 (void) mutex_lock(&ccp->cc_lock); 2132 ccp->cc_magsize = cp->cache_magtype->mt_magsize; 2133 (void) mutex_unlock(&ccp->cc_lock); 2134 } 2135 2136 } 2137 2138 /* 2139 * Recompute a cache's magazine size. The trade-off is that larger magazines 2140 * provide a higher transfer rate with the depot, while smaller magazines 2141 * reduce memory consumption. Magazine resizing is an expensive operation; 2142 * it should not be done frequently. 2143 * 2144 * Changes to the magazine size are serialized by only having one thread 2145 * doing updates. (the update thread) 2146 * 2147 * Note: at present this only grows the magazine size. It might be useful 2148 * to allow shrinkage too. 2149 */ 2150 static void 2151 umem_cache_magazine_resize(umem_cache_t *cp) 2152 { 2153 umem_magtype_t *mtp = cp->cache_magtype; 2154 2155 ASSERT(IN_UPDATE()); 2156 2157 if (cp->cache_chunksize < mtp->mt_maxbuf) { 2158 umem_cache_magazine_purge(cp); 2159 (void) mutex_lock(&cp->cache_depot_lock); 2160 cp->cache_magtype = ++mtp; 2161 cp->cache_depot_contention_prev = 2162 cp->cache_depot_contention + INT_MAX; 2163 (void) mutex_unlock(&cp->cache_depot_lock); 2164 umem_cache_magazine_enable(cp); 2165 } 2166 } 2167 2168 /* 2169 * Rescale a cache's hash table, so that the table size is roughly the 2170 * cache size. We want the average lookup time to be extremely small. 
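 *
 * (Worked example, a sketch assuming highbit() returns the 1-based
 * position of the highest set bit and that UMEM_HASH_INITIAL is no
 * larger than the result: with cache_buftotal == 1000, 3 * 1000 + 4 ==
 * 3004 and highbit(3004) == 12, so new_size == 1 << 10 == 1024 buckets,
 * roughly one bucket per buffer. The table is only reallocated when the
 * new size is more than double or less than half the current size.)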
2171 */ 2172 static void 2173 umem_hash_rescale(umem_cache_t *cp) 2174 { 2175 umem_bufctl_t **old_table, **new_table, *bcp; 2176 size_t old_size, new_size, h; 2177 2178 ASSERT(IN_UPDATE()); 2179 2180 new_size = MAX(UMEM_HASH_INITIAL, 2181 1 << (highbit(3 * cp->cache_buftotal + 4) - 2)); 2182 old_size = cp->cache_hash_mask + 1; 2183 2184 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1)) 2185 return; 2186 2187 new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *), 2188 VM_NOSLEEP); 2189 if (new_table == NULL) 2190 return; 2191 bzero(new_table, new_size * sizeof (void *)); 2192 2193 (void) mutex_lock(&cp->cache_lock); 2194 2195 old_size = cp->cache_hash_mask + 1; 2196 old_table = cp->cache_hash_table; 2197 2198 cp->cache_hash_mask = new_size - 1; 2199 cp->cache_hash_table = new_table; 2200 cp->cache_rescale++; 2201 2202 for (h = 0; h < old_size; h++) { 2203 bcp = old_table[h]; 2204 while (bcp != NULL) { 2205 void *addr = bcp->bc_addr; 2206 umem_bufctl_t *next_bcp = bcp->bc_next; 2207 umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr); 2208 bcp->bc_next = *hash_bucket; 2209 *hash_bucket = bcp; 2210 bcp = next_bcp; 2211 } 2212 } 2213 2214 (void) mutex_unlock(&cp->cache_lock); 2215 2216 vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *)); 2217 } 2218 2219 /* 2220 * Perform periodic maintenance on a cache: hash rescaling, 2221 * depot working-set update, and magazine resizing. 2222 */ 2223 void 2224 umem_cache_update(umem_cache_t *cp) 2225 { 2226 int update_flags = 0; 2227 2228 ASSERT(MUTEX_HELD(&umem_cache_lock)); 2229 2230 /* 2231 * If the cache has become much larger or smaller than its hash table, 2232 * fire off a request to rescale the hash table. 2233 */ 2234 (void) mutex_lock(&cp->cache_lock); 2235 2236 if ((cp->cache_flags & UMF_HASH) && 2237 (cp->cache_buftotal > (cp->cache_hash_mask << 1) || 2238 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) && 2239 cp->cache_hash_mask > UMEM_HASH_INITIAL))) 2240 update_flags |= UMU_HASH_RESCALE; 2241 2242 (void) mutex_unlock(&cp->cache_lock); 2243 2244 /* 2245 * Update the depot working set statistics. 2246 */ 2247 umem_depot_ws_update(cp); 2248 2249 /* 2250 * If there's a lot of contention in the depot, 2251 * increase the magazine size. 2252 */ 2253 (void) mutex_lock(&cp->cache_depot_lock); 2254 2255 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf && 2256 (int)(cp->cache_depot_contention - 2257 cp->cache_depot_contention_prev) > umem_depot_contention) 2258 update_flags |= UMU_MAGAZINE_RESIZE; 2259 2260 cp->cache_depot_contention_prev = cp->cache_depot_contention; 2261 2262 (void) mutex_unlock(&cp->cache_depot_lock); 2263 2264 if (update_flags) 2265 umem_add_update(cp, update_flags); 2266 } 2267 2268 /* 2269 * Runs all pending updates. 2270 * 2271 * The update lock must be held on entrance, and will be held on exit. 2272 */ 2273 void 2274 umem_process_updates(void) 2275 { 2276 ASSERT(MUTEX_HELD(&umem_update_lock)); 2277 2278 while (umem_null_cache.cache_unext != &umem_null_cache) { 2279 int notify = 0; 2280 umem_cache_t *cp = umem_null_cache.cache_unext; 2281 2282 cp->cache_uprev->cache_unext = cp->cache_unext; 2283 cp->cache_unext->cache_uprev = cp->cache_uprev; 2284 cp->cache_uprev = cp->cache_unext = NULL; 2285 2286 ASSERT(!(cp->cache_uflags & UMU_ACTIVE)); 2287 2288 while (cp->cache_uflags) { 2289 int uflags = (cp->cache_uflags |= UMU_ACTIVE); 2290 (void) mutex_unlock(&umem_update_lock); 2291 2292 /* 2293 * The order here is important. Each step can speed up 2294 * later steps. 
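 *
 * (One reading, offered as an assumption rather than the authors'
 * statement: a freshly rescaled hash table makes the per-buffer hash
 * lookups performed while magazines are destroyed in the resize and
 * reap steps cheaper, and a magazine resize already purges and reaps
 * the depot, leaving less work for a subsequent UMU_REAP.)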
2295 */ 2296 2297 if (uflags & UMU_HASH_RESCALE) 2298 umem_hash_rescale(cp); 2299 2300 if (uflags & UMU_MAGAZINE_RESIZE) 2301 umem_cache_magazine_resize(cp); 2302 2303 if (uflags & UMU_REAP) 2304 umem_cache_reap(cp); 2305 2306 (void) mutex_lock(&umem_update_lock); 2307 2308 /* 2309 * check if anyone has requested notification 2310 */ 2311 if (cp->cache_uflags & UMU_NOTIFY) { 2312 uflags |= UMU_NOTIFY; 2313 notify = 1; 2314 } 2315 cp->cache_uflags &= ~uflags; 2316 } 2317 if (notify) 2318 (void) cond_broadcast(&umem_update_cv); 2319 } 2320 } 2321 2322 #ifndef UMEM_STANDALONE 2323 static void 2324 umem_st_update(void) 2325 { 2326 ASSERT(MUTEX_HELD(&umem_update_lock)); 2327 ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0); 2328 2329 umem_st_update_thr = thr_self(); 2330 2331 (void) mutex_unlock(&umem_update_lock); 2332 2333 vmem_update(NULL); 2334 umem_cache_applyall(umem_cache_update); 2335 2336 (void) mutex_lock(&umem_update_lock); 2337 2338 umem_process_updates(); /* does all of the requested work */ 2339 2340 umem_reap_next = gethrtime() + 2341 (hrtime_t)umem_reap_interval * NANOSEC; 2342 2343 umem_reaping = UMEM_REAP_DONE; 2344 2345 umem_st_update_thr = 0; 2346 } 2347 #endif 2348 2349 /* 2350 * Reclaim all unused memory from all caches. Called from vmem when memory 2351 * gets tight. Must be called with no locks held. 2352 * 2353 * This just requests a reap on all caches, and notifies the update thread. 2354 */ 2355 void 2356 umem_reap(void) 2357 { 2358 #ifndef UMEM_STANDALONE 2359 extern int __nthreads(void); 2360 #endif 2361 2362 if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE || 2363 gethrtime() < umem_reap_next) 2364 return; 2365 2366 (void) mutex_lock(&umem_update_lock); 2367 2368 if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) { 2369 (void) mutex_unlock(&umem_update_lock); 2370 return; 2371 } 2372 umem_reaping = UMEM_REAP_ADDING; /* lock out other reaps */ 2373 2374 (void) mutex_unlock(&umem_update_lock); 2375 2376 umem_updateall(UMU_REAP); 2377 2378 (void) mutex_lock(&umem_update_lock); 2379 2380 umem_reaping = UMEM_REAP_ACTIVE; 2381 2382 /* Standalone is single-threaded */ 2383 #ifndef UMEM_STANDALONE 2384 if (umem_update_thr == 0) { 2385 /* 2386 * The update thread does not exist. If the process is 2387 * multi-threaded, create it. If not, or the creation fails, 2388 * do the update processing inline. 2389 */ 2390 ASSERT(umem_st_update_thr == 0); 2391 2392 if (__nthreads() <= 1 || umem_create_update_thread() == 0) 2393 umem_st_update(); 2394 } 2395 2396 (void) cond_broadcast(&umem_update_cv); /* wake up the update thread */ 2397 #endif 2398 2399 (void) mutex_unlock(&umem_update_lock); 2400 } 2401 2402 umem_cache_t * 2403 umem_cache_create( 2404 char *name, /* descriptive name for this cache */ 2405 size_t bufsize, /* size of the objects it manages */ 2406 size_t align, /* required object alignment */ 2407 umem_constructor_t *constructor, /* object constructor */ 2408 umem_destructor_t *destructor, /* object destructor */ 2409 umem_reclaim_t *reclaim, /* memory reclaim callback */ 2410 void *private, /* pass-thru arg for constr/destr/reclaim */ 2411 vmem_t *vmp, /* vmem source for slab allocation */ 2412 int cflags) /* cache creation flags */ 2413 { 2414 int cpu_seqid; 2415 size_t chunksize; 2416 umem_cache_t *cp, *cnext, *cprev; 2417 umem_magtype_t *mtp; 2418 size_t csize; 2419 size_t phase; 2420 2421 /* 2422 * The init thread is allowed to create internal and quantum caches. 
2423 * 2424 * Other threads must wait until initialization is complete. 2425 */ 2426 if (umem_init_thr == thr_self()) 2427 ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0); 2428 else { 2429 ASSERT(!(cflags & UMC_INTERNAL)); 2430 if (umem_ready != UMEM_READY && umem_init() == 0) { 2431 errno = EAGAIN; 2432 return (NULL); 2433 } 2434 } 2435 2436 csize = UMEM_CACHE_SIZE(umem_max_ncpus); 2437 phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE); 2438 2439 if (vmp == NULL) 2440 vmp = umem_default_arena; 2441 2442 ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0); 2443 2444 /* 2445 * Check that the arguments are reasonable 2446 */ 2447 if ((align & (align - 1)) != 0 || align > vmp->vm_quantum || 2448 ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) || 2449 name == NULL || bufsize == 0) { 2450 errno = EINVAL; 2451 return (NULL); 2452 } 2453 2454 /* 2455 * If align == 0, we set it to the minimum required alignment. 2456 * 2457 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless 2458 * UMC_NOTOUCH was passed. 2459 */ 2460 if (align == 0) { 2461 if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN) 2462 align = UMEM_SECOND_ALIGN; 2463 else 2464 align = UMEM_ALIGN; 2465 } else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0) 2466 align = UMEM_ALIGN; 2467 2468 2469 /* 2470 * Get a umem_cache structure. We arrange that cp->cache_cpu[] 2471 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent 2472 * false sharing of per-CPU data. 2473 */ 2474 cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase, 2475 0, NULL, NULL, VM_NOSLEEP); 2476 2477 if (cp == NULL) { 2478 errno = EAGAIN; 2479 return (NULL); 2480 } 2481 2482 bzero(cp, csize); 2483 2484 (void) mutex_lock(&umem_flags_lock); 2485 if (umem_flags & UMF_RANDOMIZE) 2486 umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) | 2487 UMF_RANDOMIZE; 2488 cp->cache_flags = umem_flags | (cflags & UMF_DEBUG); 2489 (void) mutex_unlock(&umem_flags_lock); 2490 2491 /* 2492 * Make sure all the various flags are reasonable. 2493 */ 2494 if (cp->cache_flags & UMF_LITE) { 2495 if (bufsize >= umem_lite_minsize && 2496 align <= umem_lite_maxalign && 2497 P2PHASE(bufsize, umem_lite_maxalign) != 0) { 2498 cp->cache_flags |= UMF_BUFTAG; 2499 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL); 2500 } else { 2501 cp->cache_flags &= ~UMF_DEBUG; 2502 } 2503 } 2504 2505 if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT)) 2506 cp->cache_flags |= UMF_NOMAGAZINE; 2507 2508 if (cflags & UMC_NODEBUG) 2509 cp->cache_flags &= ~UMF_DEBUG; 2510 2511 if (cflags & UMC_NOTOUCH) 2512 cp->cache_flags &= ~UMF_TOUCH; 2513 2514 if (cflags & UMC_NOHASH) 2515 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL); 2516 2517 if (cflags & UMC_NOMAGAZINE) 2518 cp->cache_flags |= UMF_NOMAGAZINE; 2519 2520 if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH)) 2521 cp->cache_flags |= UMF_REDZONE; 2522 2523 if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall && 2524 !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH)) 2525 cp->cache_flags |= UMF_FIREWALL; 2526 2527 if (vmp != umem_default_arena || umem_firewall_arena == NULL) 2528 cp->cache_flags &= ~UMF_FIREWALL; 2529 2530 if (cp->cache_flags & UMF_FIREWALL) { 2531 cp->cache_flags &= ~UMF_BUFTAG; 2532 cp->cache_flags |= UMF_NOMAGAZINE; 2533 ASSERT(vmp == umem_default_arena); 2534 vmp = umem_firewall_arena; 2535 } 2536 2537 /* 2538 * Set cache properties.
2539 */ 2540 (void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1); 2541 cp->cache_bufsize = bufsize; 2542 cp->cache_align = align; 2543 cp->cache_constructor = constructor; 2544 cp->cache_destructor = destructor; 2545 cp->cache_reclaim = reclaim; 2546 cp->cache_private = private; 2547 cp->cache_arena = vmp; 2548 cp->cache_cflags = cflags; 2549 cp->cache_cpu_mask = umem_cpu_mask; 2550 2551 /* 2552 * Determine the chunk size. 2553 */ 2554 chunksize = bufsize; 2555 2556 if (align >= UMEM_ALIGN) { 2557 chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN); 2558 cp->cache_bufctl = chunksize - UMEM_ALIGN; 2559 } 2560 2561 if (cp->cache_flags & UMF_BUFTAG) { 2562 cp->cache_bufctl = chunksize; 2563 cp->cache_buftag = chunksize; 2564 chunksize += sizeof (umem_buftag_t); 2565 } 2566 2567 if (cp->cache_flags & UMF_DEADBEEF) { 2568 cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify); 2569 if (cp->cache_flags & UMF_LITE) 2570 cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN); 2571 } 2572 2573 cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave); 2574 2575 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align); 2576 2577 if (chunksize < bufsize) { 2578 errno = ENOMEM; 2579 goto fail; 2580 } 2581 2582 /* 2583 * Now that we know the chunk size, determine the optimal slab size. 2584 */ 2585 if (vmp == umem_firewall_arena) { 2586 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum); 2587 cp->cache_mincolor = cp->cache_slabsize - chunksize; 2588 cp->cache_maxcolor = cp->cache_mincolor; 2589 cp->cache_flags |= UMF_HASH; 2590 ASSERT(!(cp->cache_flags & UMF_BUFTAG)); 2591 } else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) && 2592 !(cp->cache_flags & UMF_AUDIT) && 2593 chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) { 2594 cp->cache_slabsize = vmp->vm_quantum; 2595 cp->cache_mincolor = 0; 2596 cp->cache_maxcolor = 2597 (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize; 2598 2599 if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) { 2600 errno = EINVAL; 2601 goto fail; 2602 } 2603 ASSERT(!(cp->cache_flags & UMF_AUDIT)); 2604 } else { 2605 size_t chunks, bestfit, waste, slabsize; 2606 size_t minwaste = LONG_MAX; 2607 2608 for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) { 2609 slabsize = P2ROUNDUP(chunksize * chunks, 2610 vmp->vm_quantum); 2611 /* 2612 * check for overflow 2613 */ 2614 if ((slabsize / chunks) < chunksize) { 2615 errno = ENOMEM; 2616 goto fail; 2617 } 2618 chunks = slabsize / chunksize; 2619 waste = (slabsize % chunksize) / chunks; 2620 if (waste < minwaste) { 2621 minwaste = waste; 2622 bestfit = slabsize; 2623 } 2624 } 2625 if (cflags & UMC_QCACHE) 2626 bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64); 2627 cp->cache_slabsize = bestfit; 2628 cp->cache_mincolor = 0; 2629 cp->cache_maxcolor = bestfit % chunksize; 2630 cp->cache_flags |= UMF_HASH; 2631 } 2632 2633 if (cp->cache_flags & UMF_HASH) { 2634 ASSERT(!(cflags & UMC_NOHASH)); 2635 cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ? 2636 umem_bufctl_audit_cache : umem_bufctl_cache; 2637 } 2638 2639 if (cp->cache_maxcolor >= vmp->vm_quantum) 2640 cp->cache_maxcolor = vmp->vm_quantum - 1; 2641 2642 cp->cache_color = cp->cache_mincolor; 2643 2644 /* 2645 * Initialize the rest of the slab layer. 
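 *
 * (Descriptive note, not in the original: cache_nullslab below is a
 * sentinel for the circular, doubly linked slab freelist. It points at
 * itself, and its slab_refcnt of -1 distinguishes it from real slabs,
 * so list insertions and removals never need NULL-pointer special
 * cases.)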
2646 */ 2647 (void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL); 2648 2649 cp->cache_freelist = &cp->cache_nullslab; 2650 cp->cache_nullslab.slab_cache = cp; 2651 cp->cache_nullslab.slab_refcnt = -1; 2652 cp->cache_nullslab.slab_next = &cp->cache_nullslab; 2653 cp->cache_nullslab.slab_prev = &cp->cache_nullslab; 2654 2655 if (cp->cache_flags & UMF_HASH) { 2656 cp->cache_hash_table = vmem_alloc(umem_hash_arena, 2657 UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP); 2658 if (cp->cache_hash_table == NULL) { 2659 errno = EAGAIN; 2660 goto fail_lock; 2661 } 2662 bzero(cp->cache_hash_table, 2663 UMEM_HASH_INITIAL * sizeof (void *)); 2664 cp->cache_hash_mask = UMEM_HASH_INITIAL - 1; 2665 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1; 2666 } 2667 2668 /* 2669 * Initialize the depot. 2670 */ 2671 (void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL); 2672 2673 for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++) 2674 continue; 2675 2676 cp->cache_magtype = mtp; 2677 2678 /* 2679 * Initialize the CPU layer. 2680 */ 2681 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) { 2682 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; 2683 (void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL); 2684 ccp->cc_flags = cp->cache_flags; 2685 ccp->cc_rounds = -1; 2686 ccp->cc_prounds = -1; 2687 } 2688 2689 /* 2690 * Add the cache to the global list. This makes it visible 2691 * to umem_update(), so the cache must be ready for business. 2692 */ 2693 (void) mutex_lock(&umem_cache_lock); 2694 cp->cache_next = cnext = &umem_null_cache; 2695 cp->cache_prev = cprev = umem_null_cache.cache_prev; 2696 cnext->cache_prev = cp; 2697 cprev->cache_next = cp; 2698 (void) mutex_unlock(&umem_cache_lock); 2699 2700 if (umem_ready == UMEM_READY) 2701 umem_cache_magazine_enable(cp); 2702 2703 return (cp); 2704 2705 fail_lock: 2706 (void) mutex_destroy(&cp->cache_lock); 2707 fail: 2708 vmem_xfree(umem_cache_arena, cp, csize); 2709 return (NULL); 2710 } 2711 2712 void 2713 umem_cache_destroy(umem_cache_t *cp) 2714 { 2715 int cpu_seqid; 2716 2717 /* 2718 * Remove the cache from the global cache list so that no new updates 2719 * will be scheduled on its behalf, wait for any pending tasks to 2720 * complete, purge the cache, and then destroy it. 2721 */ 2722 (void) mutex_lock(&umem_cache_lock); 2723 cp->cache_prev->cache_next = cp->cache_next; 2724 cp->cache_next->cache_prev = cp->cache_prev; 2725 cp->cache_prev = cp->cache_next = NULL; 2726 (void) mutex_unlock(&umem_cache_lock); 2727 2728 umem_remove_updates(cp); 2729 2730 umem_cache_magazine_purge(cp); 2731 2732 (void) mutex_lock(&cp->cache_lock); 2733 if (cp->cache_buftotal != 0) 2734 log_message("umem_cache_destroy: '%s' (%p) not empty\n", 2735 cp->cache_name, (void *)cp); 2736 cp->cache_reclaim = NULL; 2737 /* 2738 * The cache is now dead. There should be no further activity. 2739 * We enforce this by setting land mines in the constructor and 2740 * destructor routines that induce a segmentation fault if invoked. 
2741 */ 2742 cp->cache_constructor = (umem_constructor_t *)1; 2743 cp->cache_destructor = (umem_destructor_t *)2; 2744 (void) mutex_unlock(&cp->cache_lock); 2745 2746 if (cp->cache_hash_table != NULL) 2747 vmem_free(umem_hash_arena, cp->cache_hash_table, 2748 (cp->cache_hash_mask + 1) * sizeof (void *)); 2749 2750 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) 2751 (void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock); 2752 2753 (void) mutex_destroy(&cp->cache_depot_lock); 2754 (void) mutex_destroy(&cp->cache_lock); 2755 2756 vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus)); 2757 } 2758 2759 void 2760 umem_alloc_sizes_clear(void) 2761 { 2762 int i; 2763 2764 umem_alloc_sizes[0] = UMEM_MAXBUF; 2765 for (i = 1; i < NUM_ALLOC_SIZES; i++) 2766 umem_alloc_sizes[i] = 0; 2767 } 2768 2769 void 2770 umem_alloc_sizes_add(size_t size_arg) 2771 { 2772 int i, j; 2773 size_t size = size_arg; 2774 2775 if (size == 0) { 2776 log_message("size_add: cannot add zero-sized cache\n", 2777 size, UMEM_MAXBUF); 2778 return; 2779 } 2780 2781 if (size > UMEM_MAXBUF) { 2782 log_message("size_add: %ld > %d, cannot add\n", size, 2783 UMEM_MAXBUF); 2784 return; 2785 } 2786 2787 if (umem_alloc_sizes[NUM_ALLOC_SIZES - 1] != 0) { 2788 log_message("size_add: no space in alloc_table for %d\n", 2789 size); 2790 return; 2791 } 2792 2793 if (P2PHASE(size, UMEM_ALIGN) != 0) { 2794 size = P2ROUNDUP(size, UMEM_ALIGN); 2795 log_message("size_add: rounding %d up to %d\n", size_arg, 2796 size); 2797 } 2798 2799 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 2800 int cur = umem_alloc_sizes[i]; 2801 if (cur == size) { 2802 log_message("size_add: %ld already in table\n", 2803 size); 2804 return; 2805 } 2806 if (cur > size) 2807 break; 2808 } 2809 2810 for (j = NUM_ALLOC_SIZES - 1; j > i; j--) 2811 umem_alloc_sizes[j] = umem_alloc_sizes[j-1]; 2812 umem_alloc_sizes[i] = size; 2813 } 2814 2815 void 2816 umem_alloc_sizes_remove(size_t size) 2817 { 2818 int i; 2819 2820 if (size == UMEM_MAXBUF) { 2821 log_message("size_remove: cannot remove %ld\n", size); 2822 return; 2823 } 2824 2825 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 2826 int cur = umem_alloc_sizes[i]; 2827 if (cur == size) 2828 break; 2829 else if (cur > size || cur == 0) { 2830 log_message("size_remove: %ld not found in table\n", 2831 size); 2832 return; 2833 } 2834 } 2835 2836 for (; i + 1 < NUM_ALLOC_SIZES; i++) 2837 umem_alloc_sizes[i] = umem_alloc_sizes[i+1]; 2838 umem_alloc_sizes[i] = 0; 2839 } 2840 2841 static int 2842 umem_cache_init(void) 2843 { 2844 int i; 2845 size_t size, max_size; 2846 umem_cache_t *cp; 2847 umem_magtype_t *mtp; 2848 char name[UMEM_CACHE_NAMELEN + 1]; 2849 umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES]; 2850 2851 for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) { 2852 mtp = &umem_magtype[i]; 2853 (void) snprintf(name, sizeof (name), "umem_magazine_%d", 2854 mtp->mt_magsize); 2855 mtp->mt_cache = umem_cache_create(name, 2856 (mtp->mt_magsize + 1) * sizeof (void *), 2857 mtp->mt_align, NULL, NULL, NULL, NULL, 2858 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL); 2859 if (mtp->mt_cache == NULL) 2860 return (0); 2861 } 2862 2863 umem_slab_cache = umem_cache_create("umem_slab_cache", 2864 sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL, 2865 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL); 2866 2867 if (umem_slab_cache == NULL) 2868 return (0); 2869 2870 umem_bufctl_cache = umem_cache_create("umem_bufctl_cache", 2871 sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL, 2872 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL); 
2873 2874 if (umem_bufctl_cache == NULL) 2875 return (0); 2876 2877 /* 2878 * The size of the umem_bufctl_audit structure depends upon 2879 * umem_stack_depth. See umem_impl.h for details on the size 2880 * restrictions. 2881 */ 2882 2883 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth); 2884 max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE; 2885 2886 if (size > max_size) { /* too large -- truncate */ 2887 int max_frames = UMEM_MAX_STACK_DEPTH; 2888 2889 ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size); 2890 2891 umem_stack_depth = max_frames; 2892 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth); 2893 } 2894 2895 umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache", 2896 size, 0, NULL, NULL, NULL, NULL, umem_internal_arena, 2897 UMC_NOHASH | UMC_INTERNAL); 2898 2899 if (umem_bufctl_audit_cache == NULL) 2900 return (0); 2901 2902 if (vmem_backend & VMEM_BACKEND_MMAP) 2903 umem_va_arena = vmem_create("umem_va", 2904 NULL, 0, pagesize, 2905 vmem_alloc, vmem_free, heap_arena, 2906 8 * pagesize, VM_NOSLEEP); 2907 else 2908 umem_va_arena = heap_arena; 2909 2910 if (umem_va_arena == NULL) 2911 return (0); 2912 2913 umem_default_arena = vmem_create("umem_default", 2914 NULL, 0, pagesize, 2915 heap_alloc, heap_free, umem_va_arena, 2916 0, VM_NOSLEEP); 2917 2918 if (umem_default_arena == NULL) 2919 return (0); 2920 2921 /* 2922 * make sure the umem_alloc table initializer is correct 2923 */ 2924 i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table); 2925 ASSERT(umem_alloc_table[i - 1] == &umem_null_cache); 2926 2927 /* 2928 * Create the default caches to back umem_alloc() 2929 */ 2930 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 2931 size_t cache_size = umem_alloc_sizes[i]; 2932 size_t align = 0; 2933 2934 if (cache_size == 0) 2935 break; /* 0 terminates the list */ 2936 2937 /* 2938 * If they allocate a multiple of the coherency granularity, 2939 * they get a coherency-granularity-aligned address. 2940 */ 2941 if (IS_P2ALIGNED(cache_size, 64)) 2942 align = 64; 2943 if (IS_P2ALIGNED(cache_size, pagesize)) 2944 align = pagesize; 2945 (void) snprintf(name, sizeof (name), "umem_alloc_%lu", 2946 (long)cache_size); 2947 2948 cp = umem_cache_create(name, cache_size, align, 2949 NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL); 2950 if (cp == NULL) 2951 return (0); 2952 2953 umem_alloc_caches[i] = cp; 2954 } 2955 2956 /* 2957 * Initialization cannot fail at this point. Make the caches 2958 * visible to umem_alloc() and friends. 2959 */ 2960 size = UMEM_ALIGN; 2961 for (i = 0; i < NUM_ALLOC_SIZES; i++) { 2962 size_t cache_size = umem_alloc_sizes[i]; 2963 2964 if (cache_size == 0) 2965 break; /* 0 terminates the list */ 2966 2967 cp = umem_alloc_caches[i]; 2968 2969 while (size <= cache_size) { 2970 umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp; 2971 size += UMEM_ALIGN; 2972 } 2973 } 2974 ASSERT(size - UMEM_ALIGN == UMEM_MAXBUF); 2975 return (1); 2976 } 2977 2978 /* 2979 * umem_startup() is called early on, and must be called explicitly if we're 2980 * the standalone version. 
2981 */ 2982 #ifdef UMEM_STANDALONE 2983 void 2984 #else 2985 #pragma init(umem_startup) 2986 static void 2987 #endif 2988 umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack, 2989 caddr_t maxstack) 2990 { 2991 #ifdef UMEM_STANDALONE 2992 int idx; 2993 /* Standalone doesn't fork */ 2994 #else 2995 umem_forkhandler_init(); /* register the fork handler */ 2996 #endif 2997 2998 #ifdef __lint 2999 /* make lint happy */ 3000 minstack = maxstack; 3001 #endif 3002 3003 #ifdef UMEM_STANDALONE 3004 umem_ready = UMEM_READY_STARTUP; 3005 umem_init_env_ready = 0; 3006 3007 umem_min_stack = minstack; 3008 umem_max_stack = maxstack; 3009 3010 nofail_callback = NULL; 3011 umem_slab_cache = NULL; 3012 umem_bufctl_cache = NULL; 3013 umem_bufctl_audit_cache = NULL; 3014 heap_arena = NULL; 3015 heap_alloc = NULL; 3016 heap_free = NULL; 3017 umem_internal_arena = NULL; 3018 umem_cache_arena = NULL; 3019 umem_hash_arena = NULL; 3020 umem_log_arena = NULL; 3021 umem_oversize_arena = NULL; 3022 umem_va_arena = NULL; 3023 umem_default_arena = NULL; 3024 umem_firewall_va_arena = NULL; 3025 umem_firewall_arena = NULL; 3026 umem_memalign_arena = NULL; 3027 umem_transaction_log = NULL; 3028 umem_content_log = NULL; 3029 umem_failure_log = NULL; 3030 umem_slab_log = NULL; 3031 umem_cpu_mask = 0; 3032 3033 umem_cpus = &umem_startup_cpu; 3034 umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0); 3035 umem_startup_cpu.cpu_number = 0; 3036 3037 bcopy(&umem_null_cache_template, &umem_null_cache, 3038 sizeof (umem_cache_t)); 3039 3040 for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++) 3041 umem_alloc_table[idx] = &umem_null_cache; 3042 #endif 3043 3044 /* 3045 * Perform initialization specific to the way we've been compiled 3046 * (library or standalone) 3047 */ 3048 umem_type_init(start, len, pagesize); 3049 3050 vmem_startup(); 3051 } 3052 3053 int 3054 umem_init(void) 3055 { 3056 size_t maxverify, minfirewall; 3057 size_t size; 3058 int idx; 3059 umem_cpu_t *new_cpus; 3060 3061 vmem_t *memalign_arena, *oversize_arena; 3062 3063 if (thr_self() != umem_init_thr) { 3064 /* 3065 * The usual case -- non-recursive invocation of umem_init(). 3066 */ 3067 (void) mutex_lock(&umem_init_lock); 3068 if (umem_ready != UMEM_READY_STARTUP) { 3069 /* 3070 * someone else beat us to initializing umem. Wait 3071 * for them to complete, then return. 3072 */ 3073 while (umem_ready == UMEM_READY_INITING) { 3074 int cancel_state; 3075 3076 (void) pthread_setcancelstate( 3077 PTHREAD_CANCEL_DISABLE, &cancel_state); 3078 (void) cond_wait(&umem_init_cv, 3079 &umem_init_lock); 3080 (void) pthread_setcancelstate( 3081 cancel_state, NULL); 3082 } 3083 ASSERT(umem_ready == UMEM_READY || 3084 umem_ready == UMEM_READY_INIT_FAILED); 3085 (void) mutex_unlock(&umem_init_lock); 3086 return (umem_ready == UMEM_READY); 3087 } 3088 3089 ASSERT(umem_ready == UMEM_READY_STARTUP); 3090 ASSERT(umem_init_env_ready == 0); 3091 3092 umem_ready = UMEM_READY_INITING; 3093 umem_init_thr = thr_self(); 3094 3095 (void) mutex_unlock(&umem_init_lock); 3096 umem_setup_envvars(0); /* can recurse -- see below */ 3097 if (umem_init_env_ready) { 3098 /* 3099 * initialization was completed already 3100 */ 3101 ASSERT(umem_ready == UMEM_READY || 3102 umem_ready == UMEM_READY_INIT_FAILED); 3103 ASSERT(umem_init_thr == 0); 3104 return (umem_ready == UMEM_READY); 3105 } 3106 } else if (!umem_init_env_ready) { 3107 /* 3108 * The umem_setup_envvars() call (above) makes calls into 3109 * the dynamic linker and directly into user-supplied code. 
3110 * Since we cannot know what that code will do, we could be 3111 * recursively invoked (by, say, a malloc() call in the code 3112 * itself, or in a (C++) _init section it causes to be fired). 3113 * 3114 * This code is where we end up if such recursion occurs. We 3115 * first clean up any partial results in the envvar code, then 3116 * proceed to finish initialization processing in the recursive 3117 * call. The original call will notice this, and return 3118 * immediately. 3119 */ 3120 umem_setup_envvars(1); /* clean up any partial state */ 3121 } else { 3122 umem_panic( 3123 "recursive allocation while initializing umem\n"); 3124 } 3125 umem_init_env_ready = 1; 3126 3127 /* 3128 * From this point until we finish, recursion into umem_init() will 3129 * cause a umem_panic(). 3130 */ 3131 maxverify = minfirewall = ULONG_MAX; 3132 3133 /* LINTED constant condition */ 3134 if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) { 3135 umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n", 3136 sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE); 3137 } 3138 3139 umem_max_ncpus = umem_get_max_ncpus(); 3140 3141 /* 3142 * load tunables from environment 3143 */ 3144 umem_process_envvars(); 3145 3146 if (issetugid()) 3147 umem_mtbf = 0; 3148 3149 /* 3150 * set up vmem 3151 */ 3152 if (!(umem_flags & UMF_AUDIT)) 3153 vmem_no_debug(); 3154 3155 heap_arena = vmem_heap_arena(&heap_alloc, &heap_free); 3156 3157 pagesize = heap_arena->vm_quantum; 3158 3159 umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize, 3160 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP); 3161 3162 umem_default_arena = umem_internal_arena; 3163 3164 if (umem_internal_arena == NULL) 3165 goto fail; 3166 3167 umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN, 3168 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP); 3169 3170 umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN, 3171 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP); 3172 3173 umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN, 3174 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP); 3175 3176 umem_firewall_va_arena = vmem_create("umem_firewall_va", 3177 NULL, 0, pagesize, 3178 umem_firewall_va_alloc, umem_firewall_va_free, heap_arena, 3179 0, VM_NOSLEEP); 3180 3181 if (umem_cache_arena == NULL || umem_hash_arena == NULL || 3182 umem_log_arena == NULL || umem_firewall_va_arena == NULL) 3183 goto fail; 3184 3185 umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize, 3186 heap_alloc, heap_free, umem_firewall_va_arena, 0, 3187 VM_NOSLEEP); 3188 3189 if (umem_firewall_arena == NULL) 3190 goto fail; 3191 3192 oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize, 3193 heap_alloc, heap_free, minfirewall < ULONG_MAX ? 3194 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP); 3195 3196 memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN, 3197 heap_alloc, heap_free, minfirewall < ULONG_MAX ? 
3198 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP); 3199 3200 if (oversize_arena == NULL || memalign_arena == NULL) 3201 goto fail; 3202 3203 if (umem_max_ncpus > CPUHINT_MAX()) 3204 umem_max_ncpus = CPUHINT_MAX(); 3205 3206 while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0) 3207 umem_max_ncpus++; 3208 3209 if (umem_max_ncpus == 0) 3210 umem_max_ncpus = 1; 3211 3212 size = umem_max_ncpus * sizeof (umem_cpu_t); 3213 new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP); 3214 if (new_cpus == NULL) 3215 goto fail; 3216 3217 bzero(new_cpus, size); 3218 for (idx = 0; idx < umem_max_ncpus; idx++) { 3219 new_cpus[idx].cpu_number = idx; 3220 new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx); 3221 } 3222 umem_cpus = new_cpus; 3223 umem_cpu_mask = (umem_max_ncpus - 1); 3224 3225 if (umem_maxverify == 0) 3226 umem_maxverify = maxverify; 3227 3228 if (umem_minfirewall == 0) 3229 umem_minfirewall = minfirewall; 3230 3231 /* 3232 * Set up updating and reaping 3233 */ 3234 umem_reap_next = gethrtime() + NANOSEC; 3235 3236 #ifndef UMEM_STANDALONE 3237 (void) gettimeofday(&umem_update_next, NULL); 3238 #endif 3239 3240 /* 3241 * Set up logging -- failure here is okay, since it will just disable 3242 * the logs 3243 */ 3244 if (umem_logging) { 3245 umem_transaction_log = umem_log_init(umem_transaction_log_size); 3246 umem_content_log = umem_log_init(umem_content_log_size); 3247 umem_failure_log = umem_log_init(umem_failure_log_size); 3248 umem_slab_log = umem_log_init(umem_slab_log_size); 3249 } 3250 3251 /* 3252 * Set up caches -- if successful, initialization cannot fail, since 3253 * allocations from other threads can now succeed. 3254 */ 3255 if (umem_cache_init() == 0) { 3256 log_message("unable to create initial caches\n"); 3257 goto fail; 3258 } 3259 umem_oversize_arena = oversize_arena; 3260 umem_memalign_arena = memalign_arena; 3261 3262 umem_cache_applyall(umem_cache_magazine_enable); 3263 3264 /* 3265 * initialization done, ready to go 3266 */ 3267 (void) mutex_lock(&umem_init_lock); 3268 umem_ready = UMEM_READY; 3269 umem_init_thr = 0; 3270 (void) cond_broadcast(&umem_init_cv); 3271 (void) mutex_unlock(&umem_init_lock); 3272 return (1); 3273 3274 fail: 3275 log_message("umem initialization failed\n"); 3276 3277 (void) mutex_lock(&umem_init_lock); 3278 umem_ready = UMEM_READY_INIT_FAILED; 3279 umem_init_thr = 0; 3280 (void) cond_broadcast(&umem_init_cv); 3281 (void) mutex_unlock(&umem_init_lock); 3282 return (0); 3283 } 3284
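
/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * library): how a client might drive the object-cache interfaces defined
 * above.  The type obj_t and the functions obj_ctor() and obj_dtor() are
 * hypothetical.
 *
 *	static int
 *	obj_ctor(void *buf, void *private, int flags)
 *	{
 *		bzero(buf, sizeof (obj_t));
 *		return (0);		(nonzero would fail the allocation)
 *	}
 *
 *	static void
 *	obj_dtor(void *buf, void *private)
 *	{
 *		(release anything the constructor attached to buf)
 *	}
 *
 *	umem_cache_t *cache = umem_cache_create("obj_cache", sizeof (obj_t),
 *	    0, obj_ctor, obj_dtor, NULL, NULL, NULL, 0);
 *	obj_t *op = umem_cache_alloc(cache, UMEM_DEFAULT);
 *	if (op != NULL) {
 *		(use op)
 *		umem_cache_free(cache, op);
 *	}
 *	umem_cache_destroy(cache);
 */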